From 5a125a8182d4c94a739be127f9d36241b554f7b1 Mon Sep 17 00:00:00 2001 From: shengaoya Date: Mon, 22 Jul 2024 01:49:07 -0400 Subject: [PATCH] introduce 5.3.16.1004 sfc driver Signed-off-by: shengaoya --- kmod-sfc.spec | 121 + sfc-disttag | 31 + src/Makefile | 42 + src/drivers/net/ethernet/sfc/Makefile | 306 + src/drivers/net/ethernet/sfc/bitfield.h | 621 + src/drivers/net/ethernet/sfc/debugfs.c | 1237 + src/drivers/net/ethernet/sfc/debugfs.h | 225 + src/drivers/net/ethernet/sfc/driverlink.c | 468 + src/drivers/net/ethernet/sfc/driverlink_api.h | 981 + src/drivers/net/ethernet/sfc/dump.c | 488 + src/drivers/net/ethernet/sfc/dump.h | 40 + src/drivers/net/ethernet/sfc/ef10.c | 6162 +++ src/drivers/net/ethernet/sfc/ef100.c | 640 + src/drivers/net/ethernet/sfc/ef100.h | 14 + src/drivers/net/ethernet/sfc/ef100_bsp.c | 241 + src/drivers/net/ethernet/sfc/ef100_bsp.h | 11 + src/drivers/net/ethernet/sfc/ef100_dump.c | 446 + src/drivers/net/ethernet/sfc/ef100_dump.h | 21 + src/drivers/net/ethernet/sfc/ef100_ethtool.c | 198 + src/drivers/net/ethernet/sfc/ef100_ethtool.h | 10 + src/drivers/net/ethernet/sfc/ef100_netdev.c | 1002 + src/drivers/net/ethernet/sfc/ef100_netdev.h | 29 + src/drivers/net/ethernet/sfc/ef100_nic.c | 2080 + src/drivers/net/ethernet/sfc/ef100_nic.h | 193 + src/drivers/net/ethernet/sfc/ef100_regs.h | 728 + src/drivers/net/ethernet/sfc/ef100_rep.c | 880 + src/drivers/net/ethernet/sfc/ef100_rep.h | 63 + src/drivers/net/ethernet/sfc/ef100_rx.c | 317 + src/drivers/net/ethernet/sfc/ef100_rx.h | 23 + src/drivers/net/ethernet/sfc/ef100_sriov.c | 93 + src/drivers/net/ethernet/sfc/ef100_sriov.h | 12 + src/drivers/net/ethernet/sfc/ef100_tx.c | 580 + src/drivers/net/ethernet/sfc/ef100_tx.h | 27 + src/drivers/net/ethernet/sfc/ef100_vdpa.c | 859 + src/drivers/net/ethernet/sfc/ef100_vdpa.h | 222 + src/drivers/net/ethernet/sfc/ef100_vdpa_dbg.c | 121 + src/drivers/net/ethernet/sfc/ef100_vdpa_dbg.h | 17 + src/drivers/net/ethernet/sfc/ef100_vdpa_ops.c | 1079 + src/drivers/net/ethernet/sfc/ef10_regs.h | 439 + src/drivers/net/ethernet/sfc/ef10_sriov.c | 1020 + src/drivers/net/ethernet/sfc/ef10_sriov.h | 99 + src/drivers/net/ethernet/sfc/efx.c | 1930 + src/drivers/net/ethernet/sfc/efx.h | 429 + src/drivers/net/ethernet/sfc/efx_auxbus.c | 203 + src/drivers/net/ethernet/sfc/efx_auxbus.h | 27 + src/drivers/net/ethernet/sfc/efx_channels.c | 2471 ++ src/drivers/net/ethernet/sfc/efx_channels.h | 76 + src/drivers/net/ethernet/sfc/efx_common.c | 2785 ++ src/drivers/net/ethernet/sfc/efx_common.h | 162 + src/drivers/net/ethernet/sfc/efx_devlink.c | 575 + src/drivers/net/ethernet/sfc/efx_devlink.h | 24 + src/drivers/net/ethernet/sfc/efx_ethtool.h | 17 + src/drivers/net/ethernet/sfc/efx_ioctl.h | 626 + .../net/ethernet/sfc/efx_linux_types.h | 48 + src/drivers/net/ethernet/sfc/efx_reflash.c | 594 + src/drivers/net/ethernet/sfc/efx_reflash.h | 18 + src/drivers/net/ethernet/sfc/enum.h | 179 + src/drivers/net/ethernet/sfc/ethtool.c | 594 + src/drivers/net/ethernet/sfc/ethtool_common.c | 2056 + src/drivers/net/ethernet/sfc/ethtool_common.h | 171 + src/drivers/net/ethernet/sfc/filter.h | 518 + src/drivers/net/ethernet/sfc/io.h | 270 + src/drivers/net/ethernet/sfc/ioctl.c | 346 + src/drivers/net/ethernet/sfc/ioctl_common.c | 425 + src/drivers/net/ethernet/sfc/ioctl_common.h | 18 + src/drivers/net/ethernet/sfc/kabi_compat.h | 15 + src/drivers/net/ethernet/sfc/kernel_compat.c | 758 + src/drivers/net/ethernet/sfc/kernel_compat.h | 2737 ++ src/drivers/net/ethernet/sfc/kernel_compat.sh | 583 + 
src/drivers/net/ethernet/sfc/linux_mtd_mtd.h | 23 + src/drivers/net/ethernet/sfc/mae.c | 2408 ++ src/drivers/net/ethernet/sfc/mae.h | 164 + .../net/ethernet/sfc/mae_counter_format.h | 72 + src/drivers/net/ethernet/sfc/mcdi.c | 2981 ++ src/drivers/net/ethernet/sfc/mcdi.h | 809 + src/drivers/net/ethernet/sfc/mcdi_filters.c | 3300 ++ src/drivers/net/ethernet/sfc/mcdi_filters.h | 156 + src/drivers/net/ethernet/sfc/mcdi_functions.c | 463 + src/drivers/net/ethernet/sfc/mcdi_functions.h | 34 + src/drivers/net/ethernet/sfc/mcdi_mon.c | 1660 + src/drivers/net/ethernet/sfc/mcdi_pcol.h | 33895 ++++++++++++++++ src/drivers/net/ethernet/sfc/mcdi_pcol_mae.h | 23 + src/drivers/net/ethernet/sfc/mcdi_port.c | 116 + src/drivers/net/ethernet/sfc/mcdi_port.h | 18 + .../net/ethernet/sfc/mcdi_port_common.c | 2128 + .../net/ethernet/sfc/mcdi_port_common.h | 121 + src/drivers/net/ethernet/sfc/mcdi_vdpa.c | 358 + src/drivers/net/ethernet/sfc/mcdi_vdpa.h | 93 + src/drivers/net/ethernet/sfc/mtd.c | 311 + src/drivers/net/ethernet/sfc/net_driver.h | 2855 ++ src/drivers/net/ethernet/sfc/nic.c | 593 + src/drivers/net/ethernet/sfc/nic.h | 269 + src/drivers/net/ethernet/sfc/nic_common.h | 289 + src/drivers/net/ethernet/sfc/ptp.c | 3928 ++ src/drivers/net/ethernet/sfc/ptp.h | 156 + src/drivers/net/ethernet/sfc/rx.c | 1274 + src/drivers/net/ethernet/sfc/rx_common.c | 1982 + src/drivers/net/ethernet/sfc/rx_common.h | 147 + src/drivers/net/ethernet/sfc/selftest.c | 930 + src/drivers/net/ethernet/sfc/selftest.h | 56 + src/drivers/net/ethernet/sfc/sfctool.c | 389 + src/drivers/net/ethernet/sfc/sfctool.h | 133 + src/drivers/net/ethernet/sfc/sriov.c | 138 + src/drivers/net/ethernet/sfc/sriov.h | 43 + src/drivers/net/ethernet/sfc/tc.c | 3661 ++ src/drivers/net/ethernet/sfc/tc.h | 442 + src/drivers/net/ethernet/sfc/tc_bindings.c | 376 + src/drivers/net/ethernet/sfc/tc_bindings.h | 55 + src/drivers/net/ethernet/sfc/tc_conntrack.c | 678 + src/drivers/net/ethernet/sfc/tc_conntrack.h | 62 + src/drivers/net/ethernet/sfc/tc_counters.c | 600 + src/drivers/net/ethernet/sfc/tc_counters.h | 74 + src/drivers/net/ethernet/sfc/tc_debugfs.c | 1126 + src/drivers/net/ethernet/sfc/tc_debugfs.h | 20 + .../net/ethernet/sfc/tc_encap_actions.c | 765 + .../net/ethernet/sfc/tc_encap_actions.h | 107 + src/drivers/net/ethernet/sfc/tx.c | 659 + src/drivers/net/ethernet/sfc/tx.h | 26 + src/drivers/net/ethernet/sfc/tx_common.c | 654 + src/drivers/net/ethernet/sfc/tx_common.h | 45 + src/drivers/net/ethernet/sfc/tx_tso.c | 610 + src/drivers/net/ethernet/sfc/workarounds.h | 44 + src/drivers/net/ethernet/sfc/xdp.c | 776 + src/drivers/net/ethernet/sfc/xdp.h | 51 + src/include/linux/net/sfc/sfc_rdma.h | 88 + src/include/trace/events/sfc.h | 193 + src/scripts/Makefile.common | 144 + src/scripts/kernel_compat_funcs.sh | 649 + udev/75-xilinx.rules | 22 + udev/produce-representor-name.sh | 407 + udev/trigger-representors.sh | 68 + 131 files changed, 115558 insertions(+) create mode 100644 kmod-sfc.spec create mode 100644 sfc-disttag create mode 100644 src/Makefile create mode 100644 src/drivers/net/ethernet/sfc/Makefile create mode 100644 src/drivers/net/ethernet/sfc/bitfield.h create mode 100644 src/drivers/net/ethernet/sfc/debugfs.c create mode 100644 src/drivers/net/ethernet/sfc/debugfs.h create mode 100644 src/drivers/net/ethernet/sfc/driverlink.c create mode 100644 src/drivers/net/ethernet/sfc/driverlink_api.h create mode 100644 src/drivers/net/ethernet/sfc/dump.c create mode 100644 src/drivers/net/ethernet/sfc/dump.h create mode 100644 
src/drivers/net/ethernet/sfc/ef10.c create mode 100644 src/drivers/net/ethernet/sfc/ef100.c create mode 100644 src/drivers/net/ethernet/sfc/ef100.h create mode 100644 src/drivers/net/ethernet/sfc/ef100_bsp.c create mode 100644 src/drivers/net/ethernet/sfc/ef100_bsp.h create mode 100644 src/drivers/net/ethernet/sfc/ef100_dump.c create mode 100644 src/drivers/net/ethernet/sfc/ef100_dump.h create mode 100644 src/drivers/net/ethernet/sfc/ef100_ethtool.c create mode 100644 src/drivers/net/ethernet/sfc/ef100_ethtool.h create mode 100644 src/drivers/net/ethernet/sfc/ef100_netdev.c create mode 100644 src/drivers/net/ethernet/sfc/ef100_netdev.h create mode 100644 src/drivers/net/ethernet/sfc/ef100_nic.c create mode 100644 src/drivers/net/ethernet/sfc/ef100_nic.h create mode 100644 src/drivers/net/ethernet/sfc/ef100_regs.h create mode 100644 src/drivers/net/ethernet/sfc/ef100_rep.c create mode 100644 src/drivers/net/ethernet/sfc/ef100_rep.h create mode 100644 src/drivers/net/ethernet/sfc/ef100_rx.c create mode 100644 src/drivers/net/ethernet/sfc/ef100_rx.h create mode 100644 src/drivers/net/ethernet/sfc/ef100_sriov.c create mode 100644 src/drivers/net/ethernet/sfc/ef100_sriov.h create mode 100644 src/drivers/net/ethernet/sfc/ef100_tx.c create mode 100644 src/drivers/net/ethernet/sfc/ef100_tx.h create mode 100644 src/drivers/net/ethernet/sfc/ef100_vdpa.c create mode 100644 src/drivers/net/ethernet/sfc/ef100_vdpa.h create mode 100644 src/drivers/net/ethernet/sfc/ef100_vdpa_dbg.c create mode 100644 src/drivers/net/ethernet/sfc/ef100_vdpa_dbg.h create mode 100644 src/drivers/net/ethernet/sfc/ef100_vdpa_ops.c create mode 100644 src/drivers/net/ethernet/sfc/ef10_regs.h create mode 100644 src/drivers/net/ethernet/sfc/ef10_sriov.c create mode 100644 src/drivers/net/ethernet/sfc/ef10_sriov.h create mode 100644 src/drivers/net/ethernet/sfc/efx.c create mode 100644 src/drivers/net/ethernet/sfc/efx.h create mode 100644 src/drivers/net/ethernet/sfc/efx_auxbus.c create mode 100644 src/drivers/net/ethernet/sfc/efx_auxbus.h create mode 100644 src/drivers/net/ethernet/sfc/efx_channels.c create mode 100644 src/drivers/net/ethernet/sfc/efx_channels.h create mode 100644 src/drivers/net/ethernet/sfc/efx_common.c create mode 100644 src/drivers/net/ethernet/sfc/efx_common.h create mode 100644 src/drivers/net/ethernet/sfc/efx_devlink.c create mode 100644 src/drivers/net/ethernet/sfc/efx_devlink.h create mode 100644 src/drivers/net/ethernet/sfc/efx_ethtool.h create mode 100644 src/drivers/net/ethernet/sfc/efx_ioctl.h create mode 100644 src/drivers/net/ethernet/sfc/efx_linux_types.h create mode 100644 src/drivers/net/ethernet/sfc/efx_reflash.c create mode 100644 src/drivers/net/ethernet/sfc/efx_reflash.h create mode 100644 src/drivers/net/ethernet/sfc/enum.h create mode 100644 src/drivers/net/ethernet/sfc/ethtool.c create mode 100644 src/drivers/net/ethernet/sfc/ethtool_common.c create mode 100644 src/drivers/net/ethernet/sfc/ethtool_common.h create mode 100644 src/drivers/net/ethernet/sfc/filter.h create mode 100644 src/drivers/net/ethernet/sfc/io.h create mode 100644 src/drivers/net/ethernet/sfc/ioctl.c create mode 100644 src/drivers/net/ethernet/sfc/ioctl_common.c create mode 100644 src/drivers/net/ethernet/sfc/ioctl_common.h create mode 100644 src/drivers/net/ethernet/sfc/kabi_compat.h create mode 100644 src/drivers/net/ethernet/sfc/kernel_compat.c create mode 100644 src/drivers/net/ethernet/sfc/kernel_compat.h create mode 100755 src/drivers/net/ethernet/sfc/kernel_compat.sh create mode 100644 
src/drivers/net/ethernet/sfc/linux_mtd_mtd.h create mode 100644 src/drivers/net/ethernet/sfc/mae.c create mode 100644 src/drivers/net/ethernet/sfc/mae.h create mode 100644 src/drivers/net/ethernet/sfc/mae_counter_format.h create mode 100644 src/drivers/net/ethernet/sfc/mcdi.c create mode 100644 src/drivers/net/ethernet/sfc/mcdi.h create mode 100644 src/drivers/net/ethernet/sfc/mcdi_filters.c create mode 100644 src/drivers/net/ethernet/sfc/mcdi_filters.h create mode 100644 src/drivers/net/ethernet/sfc/mcdi_functions.c create mode 100644 src/drivers/net/ethernet/sfc/mcdi_functions.h create mode 100644 src/drivers/net/ethernet/sfc/mcdi_mon.c create mode 100644 src/drivers/net/ethernet/sfc/mcdi_pcol.h create mode 100644 src/drivers/net/ethernet/sfc/mcdi_pcol_mae.h create mode 100644 src/drivers/net/ethernet/sfc/mcdi_port.c create mode 100644 src/drivers/net/ethernet/sfc/mcdi_port.h create mode 100644 src/drivers/net/ethernet/sfc/mcdi_port_common.c create mode 100644 src/drivers/net/ethernet/sfc/mcdi_port_common.h create mode 100644 src/drivers/net/ethernet/sfc/mcdi_vdpa.c create mode 100644 src/drivers/net/ethernet/sfc/mcdi_vdpa.h create mode 100644 src/drivers/net/ethernet/sfc/mtd.c create mode 100644 src/drivers/net/ethernet/sfc/net_driver.h create mode 100644 src/drivers/net/ethernet/sfc/nic.c create mode 100644 src/drivers/net/ethernet/sfc/nic.h create mode 100644 src/drivers/net/ethernet/sfc/nic_common.h create mode 100644 src/drivers/net/ethernet/sfc/ptp.c create mode 100644 src/drivers/net/ethernet/sfc/ptp.h create mode 100644 src/drivers/net/ethernet/sfc/rx.c create mode 100644 src/drivers/net/ethernet/sfc/rx_common.c create mode 100644 src/drivers/net/ethernet/sfc/rx_common.h create mode 100644 src/drivers/net/ethernet/sfc/selftest.c create mode 100644 src/drivers/net/ethernet/sfc/selftest.h create mode 100644 src/drivers/net/ethernet/sfc/sfctool.c create mode 100644 src/drivers/net/ethernet/sfc/sfctool.h create mode 100644 src/drivers/net/ethernet/sfc/sriov.c create mode 100644 src/drivers/net/ethernet/sfc/sriov.h create mode 100644 src/drivers/net/ethernet/sfc/tc.c create mode 100644 src/drivers/net/ethernet/sfc/tc.h create mode 100644 src/drivers/net/ethernet/sfc/tc_bindings.c create mode 100644 src/drivers/net/ethernet/sfc/tc_bindings.h create mode 100644 src/drivers/net/ethernet/sfc/tc_conntrack.c create mode 100644 src/drivers/net/ethernet/sfc/tc_conntrack.h create mode 100644 src/drivers/net/ethernet/sfc/tc_counters.c create mode 100644 src/drivers/net/ethernet/sfc/tc_counters.h create mode 100644 src/drivers/net/ethernet/sfc/tc_debugfs.c create mode 100644 src/drivers/net/ethernet/sfc/tc_debugfs.h create mode 100644 src/drivers/net/ethernet/sfc/tc_encap_actions.c create mode 100644 src/drivers/net/ethernet/sfc/tc_encap_actions.h create mode 100644 src/drivers/net/ethernet/sfc/tx.c create mode 100644 src/drivers/net/ethernet/sfc/tx.h create mode 100644 src/drivers/net/ethernet/sfc/tx_common.c create mode 100644 src/drivers/net/ethernet/sfc/tx_common.h create mode 100644 src/drivers/net/ethernet/sfc/tx_tso.c create mode 100644 src/drivers/net/ethernet/sfc/workarounds.h create mode 100644 src/drivers/net/ethernet/sfc/xdp.c create mode 100644 src/drivers/net/ethernet/sfc/xdp.h create mode 100644 src/include/linux/net/sfc/sfc_rdma.h create mode 100644 src/include/trace/events/sfc.h create mode 100644 src/scripts/Makefile.common create mode 100644 src/scripts/kernel_compat_funcs.sh create mode 100644 udev/75-xilinx.rules create mode 100755 udev/produce-representor-name.sh create 
mode 100755 udev/trigger-representors.sh diff --git a/kmod-sfc.spec b/kmod-sfc.spec new file mode 100644 index 0000000..c337f6e --- /dev/null +++ b/kmod-sfc.spec @@ -0,0 +1,121 @@ +%global pkg sfc +%global kernel kernel version +%define pkg_version 5.3.16.1004 +%define anolis_release 1 + +%global debug_package %{nil} + +Name: kmod-%{pkg} +Version: %(echo %{kernel} | sed -E 's/-/~/g; s/\.(an|al)[0-9]+$//g') +Release: %{pkg_version}~%{anolis_release}%{?dist} +Summary: Solarflare/Xilinx SFC Kernel Module Package +License: GPL-2.0 +URL: https://www.broadcom.cn/products/storage/host-bus-adapters/sas-nvme-9600-16i +Source0: kmod-%{pkg}-%{pkg_version}.tar.gz + +# Install-time dependencies, matching the kernel version +Requires: kernel >= %{kernel} +Requires(posttrans): %{_sbindir}/depmod +Requires(postun): %{_sbindir}/depmod +Requires(posttrans): %{_sbindir}/weak-modules +Requires(postun): %{_sbindir}/weak-modules +Requires(posttrans): %{_bindir}/sort +Requires(postun): %{_bindir}/sort + +# Build-time dependencies, matching the kernel version +BuildRequires: kernel-devel = %{kernel} +BuildRequires: kernel-headers = %{kernel} +BuildRequires: elfutils-libelf-devel +BuildRequires: gcc +BuildRequires: kmod +BuildRequires: make +BuildRequires: system-rpm-config + +Provides: kmod-%{pkg}-%{kernel}.%{_arch} = %{version}-%{release} +Obsoletes: kmod-%{pkg}-%{kernel}.%{_arch} < %{version}-%{release} + +%description +This is a Kernel Module Package for the Solarflare/Xilinx SFC network driver. + +# No changes needed in the prep section; keep it as-is +%prep +%setup -q -n kmod-%{pkg}-%{pkg_version} + +%build +%{__make} -C src modules +#pushd src +#%{__make} -C /usr/src/kernels/%{kernel}.%{_arch} %{?_smp_mflags} M=$PWD modules +#popd + +%install +mkdir -p %{buildroot}/lib/modules/%{kernel}.%{_arch}/extra/drivers/net/ethernet/sfc +%{__install} -D -t %{buildroot}/lib/modules/%{kernel}.%{_arch}/extra/drivers/net/ethernet/sfc src/drivers/net/ethernet/sfc/sfc.ko +%{__install} -D -t %{buildroot}/lib/modules/%{kernel}.%{_arch}/extra/drivers/net/ethernet/sfc src/drivers/net/ethernet/sfc/Module.symvers +%{__install} -D -t %{buildroot}/lib/modules/%{kernel}.%{_arch}/extra/drivers/net/ethernet/sfc src/drivers/net/ethernet/sfc/driverlink_api.h +%{__install} -D -t %{buildroot}/lib/modules/%{kernel}.%{_arch}/extra/drivers/net/ethernet/sfc src/drivers/net/ethernet/sfc/filter.h +mkdir -p "%{buildroot}/lib/udev/rules.d" +%{__install} -D -t %{buildroot}/lib/udev/rules.d udev/75-xilinx.rules +mkdir -p "%{buildroot}/lib/sfc" +%{__install} -D -t %{buildroot}/lib/sfc/ udev/produce-representor-name.sh +%{__install} -D -t %{buildroot}/lib/sfc/ udev/trigger-representors.sh + +# Keep this part of install; it is required by weak-modules +# Make .ko objects temporarily executable for automatic stripping +find %{buildroot}/lib/modules -type f -name \*.ko -exec chmod u+x \{\} \+ + +# Generate depmod.conf +%{__install} -d %{buildroot}/%{_sysconfdir}/depmod.d/ +for kmod in $(find %{buildroot}/lib/modules/%{kernel}.%{_arch}/extra -type f -name \*.ko -printf "%%P\n" | sort) +do +echo "override $(basename $kmod .ko) * weak-updates/$(dirname $kmod)" >> %{buildroot}/%{_sysconfdir}/depmod.d/%{pkg}.conf +echo "override $(basename $kmod .ko) * extra/$(dirname $kmod)" >> %{buildroot}/%{_sysconfdir}/depmod.d/%{pkg}.conf +done + +%clean +%{__rm} -rf %{buildroot} + +%post +if [ -x /etc/hotplug/pci.rc ]; then + /etc/hotplug/pci.rc start >/dev/null +fi +modprobe sfc + +udevadm control --reload +true + +depmod -a > /dev/null 2>&1 + +#if [ -x "/usr/sbin/weak-modules" ]; then +# printf '%s\n' "/lib/modules/%{kernel}.%{_arch}/extra/drivers/net/ethernet/sfc/sfc.ko" | /usr/sbin/weak-modules --no-initramfs --add-modules 
+#fi + +%preun +echo "/lib/modules/%{kernel}.%{_arch}/extra/drivers/net/ethernet/sfc/%{pkg}.ko" >> /var/run/rpm-%{pkg}-modules.list + +%postun +depmod -a > /dev/null 2>&1 + +if [ -x "/usr/sbin/weak-modules" ]; then + modules=( $(cat /var/run/rpm-%{pkg}-modules.list) ) + printf '%s\n' "${modules[@]}" | /usr/sbin/weak-modules --no-initramfs --remove-modules +fi +rm /var/run/rpm-%{pkg}-modules.list + + +%files +%defattr(644,root,root,755) +%license licenses +/lib/modules/%{kernel}.%{_arch} +%config(noreplace) %{_sysconfdir}/depmod.d/%{pkg}.conf + +%dir /lib/udev/rules.d/ +%dir /lib/sfc/ + +/lib/udev/rules.d/75-xilinx.rules +/lib/sfc/produce-representor-name.sh +/lib/sfc/trigger-representors.sh + +%changelog +* Mon Jul 22 2024 shengaoya - 5.3.16.1004 +- init sfc spec (shengaoya) + diff --git a/sfc-disttag b/sfc-disttag new file mode 100644 index 0000000..d0ff4df --- /dev/null +++ b/sfc-disttag @@ -0,0 +1,31 @@ +# -*- Mode: shell-script; -*- +################################################################################ +# +# Driver for Solarflare network controllers and boards +# Copyright 2019 Solarflare Communications Inc. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 as published +# by the Free Software Foundation, incorporated herein by reference. +# +################################################################################ + +sfc_disttag() { + if [ -f /etc/redhat-release ]; then + awk ' + /Red Hat Linux release/ { gsub(/\./,""); printf "RH%s\n", $5; exit } + /Red Hat Enterprise Linux release/ { printf "RHEL%s\n", substr($6, 1, 1); exit } + /Red Hat Enterprise Linux (WS|Server|Client|Workstation)/ { printf "RHEL%s\n", substr($7, 1, 1); exit } + ' /etc/redhat-release + elif [ -f /etc/debian_version ]; then + echo "Debian"`sed 's/\//-/g' /etc/debian_version` + elif [ -x "$(command -v hostnamectl)" ]; then + hostnamectl | awk ' + /SUSE Linux Enterprise Server/ { printf "SLES%s\n", $7; exit } + ' + else + echo "unsupportedOS" + return 1 + fi + return 0 +} diff --git a/src/Makefile b/src/Makefile new file mode 100644 index 0000000..72dd818 --- /dev/null +++ b/src/Makefile @@ -0,0 +1,42 @@ +# SPDX-License-Identifier: GPL-2.0 +################################################################################ +# +# Driver for Solarflare and Xilinx network controllers and boards +# Copyright 2019 Solarflare Communications Inc. +# Copyright 2019-2020 Xilinx Inc. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 as published +# by the Free Software Foundation, incorporated herein by reference. 
+# +################################################################################ + + +all modules: + $(MAKE) -C drivers/net/ethernet/sfc $@ + $(build_selftests) + +install modules_install: + @echo INSTALL_MOD_PATH=$(INSTALL_MOD_PATH) + @echo INSTALL_MOD_DIR=$(INSTALL_MOD_DIR) + $(MAKE) -C drivers/net/ethernet/sfc modules_install + $(build_selftests) + depmod + +export-srpm export: + $(MAKE) -C drivers/net/ethernet/sfc $@ + $(MAKE) -C include/linux $@ + $(build_selftests) + +clean: + $(MAKE) -C drivers/net/ethernet/sfc $@ + $(RM) -r deb rpm .version + $(build_selftests) + +version: + @if [ -d .git ] && [ "$(USE_BASE_DRIVER_VERSION)" == "" ]; then \ + git describe --tags --match v* HEAD | sed 's/^v//; s/_/./g'; \ + else \ + grep EFX_DRIVER_VERSION drivers/net/ethernet/sfc/net_driver.h | cut -d\" -f2; \ + fi + diff --git a/src/drivers/net/ethernet/sfc/Makefile b/src/drivers/net/ethernet/sfc/Makefile new file mode 100644 index 0000000..70eff32 --- /dev/null +++ b/src/drivers/net/ethernet/sfc/Makefile @@ -0,0 +1,306 @@ +################################################################################ +# +# Driver for Solarflare network controllers and boards +# Copyright 2019 Solarflare Communications Inc. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 as published +# by the Free Software Foundation, incorporated herein by reference. +# +################################################################################ + +ifndef EFX_UPSTREAM + +# The EFX_UPSTREAM variable is never defined. Sections of the Makefile +# that are only needed for an out-of-tree build are guarded by +# "ifndef EFX_UPSTREAM" and removed using sed when it is exported to a +# kernel tree. + +# The KERNELRELEASE variable is defined by kbuild. Some parts of the +# Makefile should only be included when this is the top-level Makefile +# and not when it is included by kbuild; these are guarded by +# "ifndef KERNELRELEASE". + +# Configuration +ifndef KERNELRELEASE +export CONFIG_NET_VENDOR_SOLARFLARE := y +export CONFIG_SFC := m +export CONFIG_SFC_DUMP := y +export CONFIG_SFC_MCDI_MON := y +export CONFIG_SFC_MTD := y +export CONFIG_SFC_SRIOV := y +export CONFIG_SFC_PTP := y +export CONFIG_SFC_TRACING := +export CONFIG_SFC_MCDI_LOGGING := y +export CONFIG_SFC_DRIVERLINK := m +export CONFIG_SFC_BUSYPOLL := n +export CONFIG_SFC_VDPA := y + +ifndef CONFIG_SFC_SIENA +export CONFIG_SFC_SIENA := +endif +else +export CONFIG_SFC_SIENA := +endif + +ifdef KSRCDIR +$(error KSRCDIR has been replaced with KDIR) +endif + +ifdef KERNELRELEASE + +# Check config dependencies under Kbuild. We should only do this if +# .config has actually been included (it isn't when cleaning), so test +# for a config symbol that should always be defined. 
+ifdef CONFIG_NET +ifndef CONFIG_X86 +ifndef CONFIG_ARCH_DMA_ADDR_T_64BIT +$(error Unsupported architecture) +endif +endif +ifndef CONFIG_MTD +ifdef CONFIG_SFC_MTD +override CONFIG_SFC_MTD := +endif +endif +ifdef CONFIG_SFC_FALCON +$(warning SFE4001/Falcon is no longer supported) +endif +ifdef CONFIG_SFC_MCDI_MON +ifndef CONFIG_HWMON +$(warning Kernel does not support HWMON) +override CONFIG_SFC_MCDI_MON := +endif +endif # CONFIG_SFC_MCDI_MON +ifndef CONFIG_SFC_MCDI_MON +$(warning => Temperature and voltage reporting for SFC9000-family boards will be disabled) +endif +ifdef CONFIG_SFC_SRIOV +ifndef CONFIG_PCI_IOV +$(warning Kernel does not support PCI_IOV) +override CONFIG_SFC_SRIOV := +$(warning => SR-IOV functionality will be disabled) +endif +endif +ifdef CONFIG_SFC_TRACING +ifndef CONFIG_EVENT_TRACING +$(error Kernel does not support EVENT_TRACING) +endif +endif +ifndef CONFIG_SFC_MCDI_LOGGING +$(warning => MCDI tracing will not be supported) +endif +ifdef CONFIG_SFC_VDPA +ifndef CONFIG_VDPA +$(warning Kernel does not support vDPA) +override CONFIG_SFC_VDPA := +$(warning => vDPA functionality will be disabled) +else +ifeq ($(CONFIG_VDPA),m) +VDPA := $(shell PATH=$$PATH:/sbin:/usr/sbin modinfo vdpa 2> /dev/null) +ifeq ($(VDPA),) +$(warning vdpa kernel module is not installed) +override CONFIG_SFC_VDPA := +$(warning => vDPA functionality will be disabled) +endif +endif +endif +endif + +endif # CONFIG_NET + +else # !KERNELRELEASE ###### The following is for the standalone Makefile case + +TOPDIR = ../../../.. +include $(TOPDIR)/scripts/Makefile.common + +ifneq ($(MAKECMDGOALS),export-srpm) + +KVERPARTS = $(subst -, ,$(subst ., ,$(KVER))) +ifeq ($(word 1,$(KVERPARTS)),2) +ifneq ($(word 2,$(KVERPARTS)),6) +$(error Kernel version $(KVER) is not supported; minimum version is 2.6.16) +endif +ifneq ($(filter 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15,$(word 3,$(KVERPARTS))),) +$(error Kernel version $(KVER) is not supported; minimum version is 2.6.16) +endif +endif + +endif # export-srpm + +endif # !KERNELRELEASE + +endif # !EFX_UPSTREAM +# +sfc-y := efx.o efx_common.o efx_channels.o nic.o ef10.o \ + tx.o tx_common.o rx.o rx_common.o \ + selftest.o ethtool_common.o ethtool.o \ + mcdi.o mcdi_port.o mcdi_port_common.o mcdi_functions.o \ + mcdi_filters.o ef10_sriov.o tx_tso.o \ + efx_devlink.o xdp.o \ + ef100.o ef100_nic.o ef100_netdev.o ef100_ethtool.o \ + ef100_rx.o ef100_tx.o ef100_sriov.o ef100_rep.o \ + tc.o tc_bindings.o tc_encap_actions.o tc_counters.o \ + tc_conntrack.o tc_debugfs.o mae.o \ + ef100_bsp.o efx_reflash.o + +sfc_driverlink-y := driverlink.o + +sfc-$(CONFIG_SFC_SRIOV) += sriov.o +sfc-$(CONFIG_SFC_MCDI_MON) += mcdi_mon.o +sfc-$(CONFIG_SFC_PTP) += ptp.o +sfc-$(CONFIG_SFC_MTD) += mtd.o +sfc-$(CONFIG_SFC_VDPA) += ef100_vdpa.o ef100_vdpa_ops.o mcdi_vdpa.o +sfc-$(CONFIG_AUXILIARY_BUS) += efx_auxbus.o +sfc-$(CONFIG_DEBUG_FS) += debugfs.o +ifndef EFX_NO_KCOMPAT +ifneq ($(MAKECMDGOALS),export) +ifndef EFX_FOR_UPSTREAM +sfc-y += ioctl.o ioctl_common.o +CFLAGS_efx.o = -DEFX_USE_PRIVATE_IOCTL +endif +sfc-y += kernel_compat.o +endif +endif # !EFX_NO_KCOMPAT +ifndef EFX_UPSTREAM +ifeq ($(filter export export-xen,$(MAKECMDGOALS)),) +ifndef EFX_FOR_UPSTREAM +sfc-y += sfctool.o ef100_dump.o +sfc-$(CONFIG_SFC_VDPA) += ef100_vdpa_dbg.o +endif # !EFX_FOR_UPSTREAM +sfc-$(CONFIG_SFC_DUMP) += dump.o +endif +endif # !EFX_UPSTREAM + +obj-$(CONFIG_SFC) += sfc.o +obj-$(CONFIG_SFC_DRIVERLINK) += sfc_driverlink.o +ifneq ($(CONFIG_SFC_SIENA),) +obj-$(CONFIG_SFC_SIENA) += siena/ +endif + +# +ifndef EFX_UPSTREAM + 
+# Compiler flags +EXTRA_CFLAGS += -I$(src) -DWITH_MCDI_V2 + +ifdef KMP_RELEASE +EXTRA_CFLAGS += -DKMP_RELEASE +endif + + +ifndef EFX_DISABLE_GRO +EXTRA_CFLAGS += -DEFX_USE_GRO +endif +ifndef EFX_DISABLE_SFC_LRO +EXTRA_CFLAGS += -DEFX_USE_SFC_LRO +endif +ifndef EFX_DISABLE_OVERLAY_TX_CSUM +EXTRA_CFLAGS += -DEFX_USE_OVERLAY_TX_CSUM +endif + +all_sfc_ko = $(subst .o,.ko,$(filter-out siena/,$(obj-m))) +ifdef CONFIG_SFC_SIENA +all_sfc_ko += siena/sfc-siena.ko +endif + +ifdef KERNELRELEASE +TOPDIR = $(src)/../../../.. +include $(TOPDIR)/scripts/Makefile.common + +define filechk_config.h + printf "$(foreach name,SFC SFC_DUMP SFC_MCDI_MON SFC_MTD SFC_SIENA SFC_SRIOV SFC_PTP SFC_PPS SFC_TRACING SFC_MCDI_LOGGING SFC_DRIVERLINK SFC_BUSYPOLL SFC_VDPA,$(if $(filter y,$(CONFIG_$(name))),#undef CONFIG_$(name)\n#define CONFIG_$(name) 1\n,$(if $(filter m,$(CONFIG_$(name))),#undef CONFIG_$(name)_MODULE\n#define CONFIG_$(name)_MODULE 1\n,#undef CONFIG_$(name)\n#undef CONFIG_$(name)_MODULE\n)))" +endef + +$(obj)/config.h: $(src)/Makefile FORCE + $(call filechk,config.h) + +$(addprefix $(obj)/,$(sfc-y) $(sfc_driverlink-y)): \ + $(obj)/.kpath $(obj)/autocompat.h $(obj)/config.h + +# Select the right warnings - complicated by working out which options work +ifndef try-run +try-run = $(shell set -e; \ + TMP="$(obj)/.$$$$.tmp"; \ + TMPO="$(obj)/.$$$$.o"; \ + if ($(1)) >/dev/null 2>&1; \ + then echo "$(2)"; \ + else echo "$(3)"; \ + fi; \ + rm -f "$$TMP" "$$TMPO") +endif +ifndef cc-disable-warning +cc-disable-warning = $(call try-run,\ + $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) -W$(strip $(1)) -c -xc /dev/null -o "$$TMP",-Wno-$(strip $(1))) +endif +override EXTRA_CFLAGS += $(call cc-disable-warning, unused-but-set-variable) \ + $(call cc-disable-warning, format-zero-length) + +else # !KERNELRELEASE + +# Some distribution packages of 2.6.16 fail to define KERNELRELEASE +# when compiling out-of-tree modules. +ifneq ($(filter 2.6.16-% 2.6.16.%,$(KVER)),) +EXTRA_MAKEFLAGS := KERNELRELEASE=$(KVER) +endif + +default : top + +top: + $(MAKE) -C ../../../.. all + +all : modules +ifdef BUILD_INKERNEL_TESTS + ! [ -d unittest_filters ] || $(MAKE) -C unittest_filters $(EXTRA_MAKEFLAGS) +endif + +modules : + $(MAKE) -C $(KDIR) $(EXTRA_MAKEFLAGS) M=$$(pwd) +# strip objects +ifdef NDEBUG + $(CROSS_COMPILE)strip --strip-debug $(all_sfc_ko) +endif + +modules_install : + mkdir -p $(INSTALL_MOD_PATH)/lib/modules/$(KVER)/$(INSTALL_MOD_DIR) + $(MAKE) -C $(KDIR) $(EXTRA_MAKEFLAGS) M=$$(pwd) INSTALL_MOD_DIR=$(INSTALL_MOD_DIR) modules_install +ifneq ($(CONFIG_SFC_SIENA),) + $(MAKE) -C siena $@ +endif + +clean : clean_modules clean_tests + +clean_tests: + ! [ -d unittest_filters ] || $(MAKE) -C unittest_filters clean + +clean_modules : + rm -f *.o *.s *.ko *.mod.c *.symvers config.h autocompat.h .kpath + $(MAKE) -C $(KDIR) $(EXTRA_MAKEFLAGS) M=$$(pwd) clean +ifneq ($(CONFIG_SFC_SIENA),) + ! 
[ -d siena ] || $(MAKE) -C siena clean +endif + +.PHONY : all modules modules_install clean clean_modules top FORCE + + +# Export into kernel tree +export: + $(EXPORT_CMD) $(KDIR) $(sfc_driverlink-y:%.o=%.c) $(sfc-y:%.o=%.c) efx_auxbus.c debugfs.c + $(MAKE) -C siena $@ + +export-resolve-kcompat: + $(EXPORT_CMD) -k -kk $(KDIR) $(sfc-y:%.o=%.c) efx_auxbus.c debugfs.c + +# Export into a Xen kernel tree +export-xen: + $(EXPORT_CMD) -k -kk --suffix=xen $(KDIR) $(sfc_driverlink-y:%.o=%.c) $(sfc-y:%.o=%.c) efx_auxbus.c debugfs.c + +# Export for SRPM +export-srpm: + mkdir -p $(KDIR)/mtd + $(EXPORT_CMD) -o $(KDIR) $(sfc_driverlink-y:%.o=%.c) $(sfc-y:%.o=%.c) mtd/mtd-abi.h trace/events/sfc.h kernel_compat.sh $(TOPDIR)/scripts/kernel_compat_funcs.sh efx_auxbus.c debugfs.c + +.PHONY : intranet doc clean_doc util clean_util export export-xen export-srpm + +endif # !KERNELRELEASE +endif # !EFX_UPSTREAM diff --git a/src/drivers/net/ethernet/sfc/bitfield.h b/src/drivers/net/ethernet/sfc/bitfield.h new file mode 100644 index 0000000..e925a41 --- /dev/null +++ b/src/drivers/net/ethernet/sfc/bitfield.h @@ -0,0 +1,621 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2006-2017 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#ifndef EFX_BITFIELD_H +#define EFX_BITFIELD_H + +/* + * Efx bitfield access + * + * Efx NICs make extensive use of bitfields up to 128 bits + * wide. Since there is no native 128-bit datatype on most systems, + * and since 64-bit datatypes are inefficient on 32-bit systems and + * vice versa, we wrap accesses in a way that uses the most efficient + * datatype. + * + * The NICs are PCI devices and therefore little-endian. Since most + * of the quantities that we deal with are DMAed to/from host memory, + * we define our datatypes (efx_oword_t, efx_qword_t and + * efx_dword_t) to be little-endian. + */ +#ifdef EFX_NOT_UPSTREAM +/* + * In the less common case of using PIO for individual register + * writes, we construct the little-endian datatype in host memory and + * then use non-swapping equivalents of writel/writeq, rather than + * constructing a native-endian datatype and relying on the implicit + * byte-swapping done by writel/writeq. (We use a similar strategy + * for register reads.) + */ +#endif + +/* Lowest bit numbers and widths */ +#define EFX_DUMMY_FIELD_LBN 0 +#define EFX_DUMMY_FIELD_WIDTH 0 +#define EFX_BYTE_0_LBN 0 +#define EFX_BYTE_0_WIDTH 8 +#define EFX_WORD_0_LBN 0 +#define EFX_WORD_0_WIDTH 16 +#define EFX_WORD_1_LBN 16 +#define EFX_WORD_1_WIDTH 16 +#define EFX_DWORD_0_LBN 0 +#define EFX_DWORD_0_WIDTH 32 +#define EFX_DWORD_1_LBN 32 +#define EFX_DWORD_1_WIDTH 32 +#define EFX_DWORD_2_LBN 64 +#define EFX_DWORD_2_WIDTH 32 +#define EFX_DWORD_3_LBN 96 +#define EFX_DWORD_3_WIDTH 32 +#define EFX_QWORD_0_LBN 0 +#define EFX_QWORD_0_WIDTH 64 + +/* Specified attribute (e.g. 
LBN) of the specified field */ +#define EFX_VAL(field, attribute) field ## _ ## attribute +/* Low bit number of the specified field */ +#define EFX_LOW_BIT(field) EFX_VAL(field, LBN) +/* Bit width of the specified field */ +#define EFX_WIDTH(field) EFX_VAL(field, WIDTH) +/* High bit number of the specified field */ +#define EFX_HIGH_BIT(field) (EFX_LOW_BIT(field) + EFX_WIDTH(field) - 1) +/* Mask equal in width to the specified field. + * + * For example, a field with width 5 would have a mask of 0x1f. + * + * The maximum width mask that can be generated is 64 bits. + */ +#define EFX_MASK64(width) \ + ((width) == 64 ? ~((u64) 0) : \ + (((((u64) 1) << (width))) - 1)) + +/* Mask equal in width to the specified field. + * + * For example, a field with width 5 would have a mask of 0x1f. + * + * The maximum width mask that can be generated is 32 bits. Use + * EFX_MASK64 for higher width fields. + */ +#define EFX_MASK32(width) \ + ((width) == 32 ? ~((u32) 0) : \ + (((((u32) 1) << (width))) - 1)) + +/* A doubleword (i.e. 4 byte) datatype - little-endian in HW */ +typedef union efx_dword { + __le32 u32[1]; +} efx_dword_t; + +/* A quadword (i.e. 8 byte) datatype - little-endian in HW */ +typedef union efx_qword { + __le64 u64[1]; + __le32 u32[2]; + efx_dword_t dword[2]; +} efx_qword_t; + +/* An octword (eight-word, i.e. 16 byte) datatype - little-endian in HW */ +typedef union efx_oword { + __le64 u64[2]; + efx_qword_t qword[2]; + __le32 u32[4]; + efx_dword_t dword[4]; +} efx_oword_t; + +/* Format string and value expanders for printk */ +#define EFX_DWORD_FMT "%08x" +#define EFX_QWORD_FMT "%08x:%08x" +#define EFX_OWORD_FMT "%08x:%08x:%08x:%08x" +#define EFX_DWORD_VAL(dword) \ + ((unsigned int) le32_to_cpu((dword).u32[0])) +#define EFX_QWORD_VAL(qword) \ + ((unsigned int) le32_to_cpu((qword).u32[1])), \ + ((unsigned int) le32_to_cpu((qword).u32[0])) +#define EFX_OWORD_VAL(oword) \ + ((unsigned int) le32_to_cpu((oword).u32[3])), \ + ((unsigned int) le32_to_cpu((oword).u32[2])), \ + ((unsigned int) le32_to_cpu((oword).u32[1])), \ + ((unsigned int) le32_to_cpu((oword).u32[0])) + +/* + * Extract bit field portion [low,high) from the native-endian element + * which contains bits [min,max). + * + * For example, suppose "element" represents the high 32 bits of a + * 64-bit value, and we wish to extract the bits belonging to the bit + * field occupying bits 28-45 of this 64-bit value. + * + * Then EFX_EXTRACT ( element, 32, 63, 28, 45 ) would give + * + * ( element ) << 4 + * + * The result will contain the relevant bits filled in in the range + * [0,high-low), with garbage in bits [high-low+1,...). + */ +#define EFX_EXTRACT_NATIVE(native_element, min, max, low, high) \ + ((low) > (max) || (high) < (min) ? 0 : \ + (low) > (min) ? 
\ + (native_element) >> ((low) - (min)) : \ + (native_element) << ((min) - (low))) + +/* + * Extract bit field portion [low,high) from the 64-bit little-endian + * element which contains bits [min,max) + */ +#define EFX_EXTRACT64(element, min, max, low, high) \ + EFX_EXTRACT_NATIVE(le64_to_cpu(element), min, max, low, high) + +/* + * Extract bit field portion [low,high) from the 32-bit little-endian + * element which contains bits [min,max) + */ +#define EFX_EXTRACT32(element, min, max, low, high) \ + EFX_EXTRACT_NATIVE(le32_to_cpu(element), min, max, low, high) + +#define EFX_EXTRACT_OWORD64(oword, low, high) \ + ((EFX_EXTRACT64((oword).u64[0], 0, 63, low, high) | \ + EFX_EXTRACT64((oword).u64[1], 64, 127, low, high)) & \ + EFX_MASK64((high) + 1 - (low))) + +#define EFX_EXTRACT_QWORD64(qword, low, high) \ + (EFX_EXTRACT64((qword).u64[0], 0, 63, low, high) & \ + EFX_MASK64((high) + 1 - (low))) + +#define EFX_EXTRACT_OWORD32(oword, low, high) \ + ((EFX_EXTRACT32((oword).u32[0], 0, 31, low, high) | \ + EFX_EXTRACT32((oword).u32[1], 32, 63, low, high) | \ + EFX_EXTRACT32((oword).u32[2], 64, 95, low, high) | \ + EFX_EXTRACT32((oword).u32[3], 96, 127, low, high)) & \ + EFX_MASK32((high) + 1 - (low))) + +#define EFX_EXTRACT_QWORD32(qword, low, high) \ + ((EFX_EXTRACT32((qword).u32[0], 0, 31, low, high) | \ + EFX_EXTRACT32((qword).u32[1], 32, 63, low, high)) & \ + EFX_MASK32((high) + 1 - (low))) + +#define EFX_EXTRACT_DWORD(dword, low, high) \ + (EFX_EXTRACT32((dword).u32[0], 0, 31, low, high) & \ + EFX_MASK32((high) + 1 - (low))) + +#define EFX_OWORD_FIELD64(oword, field) \ + EFX_EXTRACT_OWORD64(oword, EFX_LOW_BIT(field), \ + EFX_HIGH_BIT(field)) + +#define EFX_QWORD_FIELD64(qword, field) \ + EFX_EXTRACT_QWORD64(qword, EFX_LOW_BIT(field), \ + EFX_HIGH_BIT(field)) + +#define EFX_OWORD_FIELD32(oword, field) \ + EFX_EXTRACT_OWORD32(oword, EFX_LOW_BIT(field), \ + EFX_HIGH_BIT(field)) + +#define EFX_QWORD_FIELD32(qword, field) \ + EFX_EXTRACT_QWORD32(qword, EFX_LOW_BIT(field), \ + EFX_HIGH_BIT(field)) + +#define EFX_DWORD_FIELD(dword, field) \ + EFX_EXTRACT_DWORD(dword, EFX_LOW_BIT(field), \ + EFX_HIGH_BIT(field)) + +#define EFX_OWORD_IS_ZERO64(oword) \ + (((oword).u64[0] | (oword).u64[1]) == (__force __le64) 0) + +#define EFX_QWORD_IS_ZERO64(qword) \ + (((qword).u64[0]) == (__force __le64) 0) + +#define EFX_OWORD_IS_ZERO32(oword) \ + (((oword).u32[0] | (oword).u32[1] | (oword).u32[2] | (oword).u32[3]) \ + == (__force __le32) 0) + +#define EFX_QWORD_IS_ZERO32(qword) \ + (((qword).u32[0] | (qword).u32[1]) == (__force __le32) 0) + +#define EFX_DWORD_IS_ZERO(dword) \ + (((dword).u32[0]) == (__force __le32) 0) + +#define EFX_OWORD_IS_ALL_ONES64(oword) \ + (((oword).u64[0] & (oword).u64[1]) == ~((__force __le64) 0)) + +#define EFX_QWORD_IS_ALL_ONES64(qword) \ + ((qword).u64[0] == ~((__force __le64) 0)) + +#define EFX_OWORD_IS_ALL_ONES32(oword) \ + (((oword).u32[0] & (oword).u32[1] & (oword).u32[2] & (oword).u32[3]) \ + == ~((__force __le32) 0)) + +#define EFX_QWORD_IS_ALL_ONES32(qword) \ + (((qword).u32[0] & (qword).u32[1]) == ~((__force __le32) 0)) + +#define EFX_DWORD_IS_ALL_ONES(dword) \ + ((dword).u32[0] == ~((__force __le32) 0)) + +#if BITS_PER_LONG == 64 +#define EFX_OWORD_FIELD EFX_OWORD_FIELD64 +#define EFX_QWORD_FIELD EFX_QWORD_FIELD64 +#define EFX_OWORD_IS_ZERO EFX_OWORD_IS_ZERO64 +#define EFX_QWORD_IS_ZERO EFX_QWORD_IS_ZERO64 +#define EFX_OWORD_IS_ALL_ONES EFX_OWORD_IS_ALL_ONES64 +#define EFX_QWORD_IS_ALL_ONES EFX_QWORD_IS_ALL_ONES64 +#else +#define EFX_OWORD_FIELD EFX_OWORD_FIELD32 +#define 
EFX_QWORD_FIELD EFX_QWORD_FIELD32 +#define EFX_OWORD_IS_ZERO EFX_OWORD_IS_ZERO32 +#define EFX_QWORD_IS_ZERO EFX_QWORD_IS_ZERO32 +#define EFX_OWORD_IS_ALL_ONES EFX_OWORD_IS_ALL_ONES32 +#define EFX_QWORD_IS_ALL_ONES EFX_QWORD_IS_ALL_ONES32 +#endif + +/* + * Construct bit field portion + * + * Creates the portion of the bit field [low,high) that lies within + * the range [min,max). + */ +#define EFX_INSERT_NATIVE64(min, max, low, high, value) \ + (((low > max) || (high < min)) ? 0 : \ + ((low > min) ? \ + (((u64) (value)) << (low - min)) : \ + (((u64) (value)) >> (min - low)))) + +#define EFX_INSERT_NATIVE32(min, max, low, high, value) \ + (((low > max) || (high < min)) ? 0 : \ + ((low > min) ? \ + (((u32) (value)) << (low - min)) : \ + (((u32) (value)) >> (min - low)))) + +#define EFX_INSERT_NATIVE(min, max, low, high, value) \ + ((((max - min) >= 32) || ((high - low) >= 32)) ? \ + EFX_INSERT_NATIVE64(min, max, low, high, value) : \ + EFX_INSERT_NATIVE32(min, max, low, high, value)) + +/* + * Construct bit field portion + * + * Creates the portion of the named bit field that lies within the + * range [min,max). + */ +#define EFX_INSERT_FIELD_NATIVE(min, max, field, value) \ + EFX_INSERT_NATIVE(min, max, EFX_LOW_BIT(field), \ + EFX_HIGH_BIT(field), value) + +/* + * Construct bit field + * + * Creates the portion of the named bit fields that lie within the + * range [min,max). + */ +#define EFX_INSERT_FIELDS_NATIVE(min, max, \ + field1, value1, \ + field2, value2, \ + field3, value3, \ + field4, value4, \ + field5, value5, \ + field6, value6, \ + field7, value7, \ + field8, value8, \ + field9, value9, \ + field10, value10, \ + field11, value11, \ + field12, value12, \ + field13, value13, \ + field14, value14, \ + field15, value15, \ + field16, value16, \ + field17, value17, \ + field18, value18, \ + field19, value19) \ + (EFX_INSERT_FIELD_NATIVE((min), (max), field1, (value1)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field2, (value2)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field3, (value3)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field4, (value4)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field5, (value5)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field6, (value6)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field7, (value7)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field8, (value8)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field9, (value9)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field10, (value10)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field11, (value11)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field12, (value12)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field13, (value13)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field14, (value14)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field15, (value15)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field16, (value16)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field17, (value17)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field18, (value18)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field19, (value19))) + +#define EFX_INSERT_FIELDS64(...) \ + cpu_to_le64(EFX_INSERT_FIELDS_NATIVE(__VA_ARGS__)) + +#define EFX_INSERT_FIELDS32(...) \ + cpu_to_le32(EFX_INSERT_FIELDS_NATIVE(__VA_ARGS__)) + +#define EFX_POPULATE_OWORD64(oword, ...) do { \ + (oword).u64[0] = EFX_INSERT_FIELDS64(0, 63, __VA_ARGS__); \ + (oword).u64[1] = EFX_INSERT_FIELDS64(64, 127, __VA_ARGS__); \ + } while (0) + +#define EFX_POPULATE_QWORD64(qword, ...) 
do { \ + (qword).u64[0] = EFX_INSERT_FIELDS64(0, 63, __VA_ARGS__); \ + } while (0) + +#define EFX_POPULATE_OWORD32(oword, ...) do { \ + (oword).u32[0] = EFX_INSERT_FIELDS32(0, 31, __VA_ARGS__); \ + (oword).u32[1] = EFX_INSERT_FIELDS32(32, 63, __VA_ARGS__); \ + (oword).u32[2] = EFX_INSERT_FIELDS32(64, 95, __VA_ARGS__); \ + (oword).u32[3] = EFX_INSERT_FIELDS32(96, 127, __VA_ARGS__); \ + } while (0) + +#define EFX_POPULATE_QWORD32(qword, ...) do { \ + (qword).u32[0] = EFX_INSERT_FIELDS32(0, 31, __VA_ARGS__); \ + (qword).u32[1] = EFX_INSERT_FIELDS32(32, 63, __VA_ARGS__); \ + } while (0) + +#define EFX_POPULATE_DWORD(dword, ...) do { \ + (dword).u32[0] = EFX_INSERT_FIELDS32(0, 31, __VA_ARGS__); \ + } while (0) + +#if BITS_PER_LONG == 64 +#define EFX_POPULATE_OWORD EFX_POPULATE_OWORD64 +#define EFX_POPULATE_QWORD EFX_POPULATE_QWORD64 +#else +#define EFX_POPULATE_OWORD EFX_POPULATE_OWORD32 +#define EFX_POPULATE_QWORD EFX_POPULATE_QWORD32 +#endif + +/* Populate an octword field with various numbers of arguments */ +#define EFX_POPULATE_OWORD_19 EFX_POPULATE_OWORD +#define EFX_POPULATE_OWORD_18(oword, ...) \ + EFX_POPULATE_OWORD_19(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_17(oword, ...) \ + EFX_POPULATE_OWORD_18(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_16(oword, ...) \ + EFX_POPULATE_OWORD_17(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_15(oword, ...) \ + EFX_POPULATE_OWORD_16(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_14(oword, ...) \ + EFX_POPULATE_OWORD_15(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_13(oword, ...) \ + EFX_POPULATE_OWORD_14(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_12(oword, ...) \ + EFX_POPULATE_OWORD_13(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_11(oword, ...) \ + EFX_POPULATE_OWORD_12(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_10(oword, ...) \ + EFX_POPULATE_OWORD_11(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_9(oword, ...) \ + EFX_POPULATE_OWORD_10(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_8(oword, ...) \ + EFX_POPULATE_OWORD_9(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_7(oword, ...) \ + EFX_POPULATE_OWORD_8(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_6(oword, ...) \ + EFX_POPULATE_OWORD_7(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_5(oword, ...) \ + EFX_POPULATE_OWORD_6(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_4(oword, ...) \ + EFX_POPULATE_OWORD_5(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_3(oword, ...) \ + EFX_POPULATE_OWORD_4(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_2(oword, ...) \ + EFX_POPULATE_OWORD_3(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_1(oword, ...) \ + EFX_POPULATE_OWORD_2(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_ZERO_OWORD(oword) \ + EFX_POPULATE_OWORD_1(oword, EFX_DUMMY_FIELD, 0) +#define EFX_SET_OWORD(oword) \ + EFX_POPULATE_OWORD_4(oword, \ + EFX_DWORD_0, 0xffffffff, \ + EFX_DWORD_1, 0xffffffff, \ + EFX_DWORD_2, 0xffffffff, \ + EFX_DWORD_3, 0xffffffff) + +/* Populate a quadword field with various numbers of arguments */ +#define EFX_POPULATE_QWORD_19 EFX_POPULATE_QWORD +#define EFX_POPULATE_QWORD_18(qword, ...) \ + EFX_POPULATE_QWORD_19(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_17(qword, ...) 
\ + EFX_POPULATE_QWORD_18(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_16(qword, ...) \ + EFX_POPULATE_QWORD_17(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_15(qword, ...) \ + EFX_POPULATE_QWORD_16(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_14(qword, ...) \ + EFX_POPULATE_QWORD_15(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_13(qword, ...) \ + EFX_POPULATE_QWORD_14(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_12(qword, ...) \ + EFX_POPULATE_QWORD_13(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_11(qword, ...) \ + EFX_POPULATE_QWORD_12(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_10(qword, ...) \ + EFX_POPULATE_QWORD_11(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_9(qword, ...) \ + EFX_POPULATE_QWORD_10(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_8(qword, ...) \ + EFX_POPULATE_QWORD_9(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_7(qword, ...) \ + EFX_POPULATE_QWORD_8(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_6(qword, ...) \ + EFX_POPULATE_QWORD_7(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_5(qword, ...) \ + EFX_POPULATE_QWORD_6(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_4(qword, ...) \ + EFX_POPULATE_QWORD_5(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_3(qword, ...) \ + EFX_POPULATE_QWORD_4(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_2(qword, ...) \ + EFX_POPULATE_QWORD_3(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_1(qword, ...) \ + EFX_POPULATE_QWORD_2(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_ZERO_QWORD(qword) \ + EFX_POPULATE_QWORD_1(qword, EFX_DUMMY_FIELD, 0) +#define EFX_SET_QWORD(qword) \ + EFX_POPULATE_QWORD_2(qword, \ + EFX_DWORD_0, 0xffffffff, \ + EFX_DWORD_1, 0xffffffff) + +/* Populate a dword field with various numbers of arguments */ +#define EFX_POPULATE_DWORD_19 EFX_POPULATE_DWORD +#define EFX_POPULATE_DWORD_18(dword, ...) \ + EFX_POPULATE_DWORD_19(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_17(dword, ...) \ + EFX_POPULATE_DWORD_18(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_16(dword, ...) \ + EFX_POPULATE_DWORD_17(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_15(dword, ...) \ + EFX_POPULATE_DWORD_16(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_14(dword, ...) \ + EFX_POPULATE_DWORD_15(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_13(dword, ...) \ + EFX_POPULATE_DWORD_14(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_12(dword, ...) \ + EFX_POPULATE_DWORD_13(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_11(dword, ...) \ + EFX_POPULATE_DWORD_12(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_10(dword, ...) \ + EFX_POPULATE_DWORD_11(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_9(dword, ...) \ + EFX_POPULATE_DWORD_10(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_8(dword, ...) \ + EFX_POPULATE_DWORD_9(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_7(dword, ...) \ + EFX_POPULATE_DWORD_8(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_6(dword, ...) 
\ + EFX_POPULATE_DWORD_7(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_5(dword, ...) \ + EFX_POPULATE_DWORD_6(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_4(dword, ...) \ + EFX_POPULATE_DWORD_5(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_3(dword, ...) \ + EFX_POPULATE_DWORD_4(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_2(dword, ...) \ + EFX_POPULATE_DWORD_3(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_1(dword, ...) \ + EFX_POPULATE_DWORD_2(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_ZERO_DWORD(dword) \ + EFX_POPULATE_DWORD_1(dword, EFX_DUMMY_FIELD, 0) +#define EFX_SET_DWORD(dword) \ + EFX_POPULATE_DWORD_1(dword, EFX_DWORD_0, 0xffffffff) + +/* + * Modify a named field within an already-populated structure. Used + * for read-modify-write operations. + * + */ +#define EFX_INVERT_OWORD(oword) do { \ + (oword).u64[0] = ~((oword).u64[0]); \ + (oword).u64[1] = ~((oword).u64[1]); \ + } while (0) + +#define EFX_AND_OWORD(oword, from, mask) \ + do { \ + (oword).u64[0] = (from).u64[0] & (mask).u64[0]; \ + (oword).u64[1] = (from).u64[1] & (mask).u64[1]; \ + } while (0) + +#define EFX_AND_QWORD(qword, from, mask) \ + (qword).u64[0] = (from).u64[0] & (mask).u64[0] + +#define EFX_OR_OWORD(oword, from, mask) \ + do { \ + (oword).u64[0] = (from).u64[0] | (mask).u64[0]; \ + (oword).u64[1] = (from).u64[1] | (mask).u64[1]; \ + } while (0) + +#define EFX_INSERT64(min, max, low, high, value) \ + cpu_to_le64(EFX_INSERT_NATIVE(min, max, low, high, value)) + +#define EFX_INSERT32(min, max, low, high, value) \ + cpu_to_le32(EFX_INSERT_NATIVE(min, max, low, high, value)) + +#define EFX_INPLACE_MASK64(min, max, low, high) \ + EFX_INSERT64(min, max, low, high, EFX_MASK64((high) + 1 - (low))) + +#define EFX_INPLACE_MASK32(min, max, low, high) \ + EFX_INSERT32(min, max, low, high, EFX_MASK32((high) + 1 - (low))) + +#define EFX_SET_OWORD64(oword, low, high, value) do { \ + (oword).u64[0] = (((oword).u64[0] \ + & ~EFX_INPLACE_MASK64(0, 63, low, high)) \ + | EFX_INSERT64(0, 63, low, high, value)); \ + (oword).u64[1] = (((oword).u64[1] \ + & ~EFX_INPLACE_MASK64(64, 127, low, high)) \ + | EFX_INSERT64(64, 127, low, high, value)); \ + } while (0) + +#define EFX_SET_QWORD64(qword, low, high, value) do { \ + (qword).u64[0] = (((qword).u64[0] \ + & ~EFX_INPLACE_MASK64(0, 63, low, high)) \ + | EFX_INSERT64(0, 63, low, high, value)); \ + } while (0) + +#define EFX_SET_OWORD32(oword, low, high, value) do { \ + (oword).u32[0] = (((oword).u32[0] \ + & ~EFX_INPLACE_MASK32(0, 31, low, high)) \ + | EFX_INSERT32(0, 31, low, high, value)); \ + (oword).u32[1] = (((oword).u32[1] \ + & ~EFX_INPLACE_MASK32(32, 63, low, high)) \ + | EFX_INSERT32(32, 63, low, high, value)); \ + (oword).u32[2] = (((oword).u32[2] \ + & ~EFX_INPLACE_MASK32(64, 95, low, high)) \ + | EFX_INSERT32(64, 95, low, high, value)); \ + (oword).u32[3] = (((oword).u32[3] \ + & ~EFX_INPLACE_MASK32(96, 127, low, high)) \ + | EFX_INSERT32(96, 127, low, high, value)); \ + } while (0) + +#define EFX_SET_QWORD32(qword, low, high, value) do { \ + (qword).u32[0] = (((qword).u32[0] \ + & ~EFX_INPLACE_MASK32(0, 31, low, high)) \ + | EFX_INSERT32(0, 31, low, high, value)); \ + (qword).u32[1] = (((qword).u32[1] \ + & ~EFX_INPLACE_MASK32(32, 63, low, high)) \ + | EFX_INSERT32(32, 63, low, high, value)); \ + } while (0) + +#define EFX_SET_DWORD32(dword, low, high, value) do { \ + (dword).u32[0] = (((dword).u32[0] \ + & ~EFX_INPLACE_MASK32(0, 31, low, high)) \ + | 
EFX_INSERT32(0, 31, low, high, value)); \ + } while (0) + +#define EFX_SET_OWORD_FIELD64(oword, field, value) \ + EFX_SET_OWORD64(oword, EFX_LOW_BIT(field), \ + EFX_HIGH_BIT(field), value) + +#define EFX_SET_QWORD_FIELD64(qword, field, value) \ + EFX_SET_QWORD64(qword, EFX_LOW_BIT(field), \ + EFX_HIGH_BIT(field), value) + +#define EFX_SET_OWORD_FIELD32(oword, field, value) \ + EFX_SET_OWORD32(oword, EFX_LOW_BIT(field), \ + EFX_HIGH_BIT(field), value) + +#define EFX_SET_QWORD_FIELD32(qword, field, value) \ + EFX_SET_QWORD32(qword, EFX_LOW_BIT(field), \ + EFX_HIGH_BIT(field), value) + +#define EFX_SET_DWORD_FIELD(dword, field, value) \ + EFX_SET_DWORD32(dword, EFX_LOW_BIT(field), \ + EFX_HIGH_BIT(field), value) + + + +#if BITS_PER_LONG == 64 +#define EFX_SET_OWORD_FIELD EFX_SET_OWORD_FIELD64 +#define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD64 +#else +#define EFX_SET_OWORD_FIELD EFX_SET_OWORD_FIELD32 +#define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD32 +#endif + +/* Static initialiser */ +#define EFX_OWORD32(a, b, c, d) \ + { .u32 = { cpu_to_le32(a), cpu_to_le32(b), \ + cpu_to_le32(c), cpu_to_le32(d) } } + +#endif /* EFX_BITFIELD_H */ diff --git a/src/drivers/net/ethernet/sfc/debugfs.c b/src/drivers/net/ethernet/sfc/debugfs.c new file mode 100644 index 0000000..106cd0d --- /dev/null +++ b/src/drivers/net/ethernet/sfc/debugfs.c @@ -0,0 +1,1237 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2006-2017 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/dcache.h> +#include <linux/seq_file.h> +#include <linux/rtnetlink.h> +#include <linux/slab.h> +#include "net_driver.h" +#include "efx.h" +#include "debugfs.h" +#include "nic.h" +#ifdef CONFIG_SFC_VDPA +#include "ef100_vdpa.h" +#include "mcdi_filters.h" +#endif + + +/* Parameter definition bound to a structure - each file has one of these */ +struct efx_debugfs_bound_param { + const struct efx_debugfs_parameter *param; + void *(*get_struct)(void *, unsigned int); + void *ref; + unsigned int index; +}; + +/* Maximum length for a name component or symlink target */ +#define EFX_DEBUGFS_NAME_LEN 32 + + +/* Top-level debug directory ([/sys/kernel]/debug/sfc) */ +static struct dentry *efx_debug_root; + +/* "cards" directory ([/sys/kernel]/debug/sfc/cards) */ +static struct dentry *efx_debug_cards; + +/* Sequential file interface to bound parameters */ + +static int efx_debugfs_seq_show(struct seq_file *file, void *v) +{ + struct efx_debugfs_bound_param *binding = file->private; + void *structure; + int rc; + + rtnl_lock(); + structure = binding->get_struct(binding->ref, binding->index); + if (structure) + rc = binding->param->reader(file, + structure + binding->param->offset); + else + rc = -EINVAL; + rtnl_unlock(); + return rc; +} + +static int efx_debugfs_open(struct inode *inode, struct file *file) +{ + return single_open(file, efx_debugfs_seq_show, inode->i_private); +} + + +static struct file_operations efx_debugfs_file_ops = { + .owner = THIS_MODULE, + .open = efx_debugfs_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release +}; + +/* Functions for printing various types of parameter. 
*/ +int efx_debugfs_read_ushort(struct seq_file *file, void *data) +{ + seq_printf(file, "%u\n", *(unsigned short int *)data); + return 0; +} + +int efx_debugfs_read_uint(struct seq_file *file, void *data) +{ + seq_printf(file, "%#x\n", *(unsigned int *)data); + return 0; +} + +int efx_debugfs_read_int(struct seq_file *file, void *data) +{ + seq_printf(file, "%d\n", *(int *)data); + return 0; +} + +int efx_debugfs_read_ulong(struct seq_file *file, void *data) +{ + seq_printf(file, "%lu\n", *(unsigned long *)data); + return 0; +} + +int efx_debugfs_read_atomic(struct seq_file *file, void *data) +{ + unsigned int value = atomic_read((atomic_t *) data); + + seq_printf(file, "%#x\n", value); + return 0; +} + +int efx_debugfs_read_u64(struct seq_file *file, void *data) +{ + unsigned long long value = *((u64 *) data); + + seq_printf(file, "%llu\n", value); + return 0; +} + +#ifdef CONFIG_SFC_VDPA +int efx_debugfs_read_x64(struct seq_file *file, void *data) +{ + unsigned long long value = *((u64 *) data); + + seq_printf(file, "0x%llx\n", value); + return 0; +} +#endif + +int efx_debugfs_read_bool(struct seq_file *file, void *data) +{ + seq_printf(file, "%d\n", *(bool *)data); + return 0; +} + +static int efx_debugfs_read_int_mode(struct seq_file *file, void *data) +{ + unsigned int value = *(enum efx_int_mode *) data; + + seq_printf(file, "%d => %s\n", value, + STRING_TABLE_LOOKUP(value, efx_interrupt_mode)); + return 0; +} + +#define EFX_INT_MODE_PARAMETER(container_type, parameter) \ + EFX_PARAMETER(container_type, parameter, \ + enum efx_int_mode, efx_debugfs_read_int_mode) + +static int efx_debugfs_read_loop_mode(struct seq_file *file, void *data) +{ + unsigned int value = *(enum efx_loopback_mode *)data; + + seq_printf(file, "%d => %s\n", value, + STRING_TABLE_LOOKUP(value, efx_loopback_mode)); + return 0; +} + +#define EFX_LOOPBACK_MODE_PARAMETER(container_type, parameter) \ + EFX_PARAMETER(container_type, parameter, \ + enum efx_loopback_mode, efx_debugfs_read_loop_mode) + +static const char *const nic_state_names[] = { + [STATE_UNINIT] = "UNINIT", + [STATE_PROBED] = "PROBED", + [STATE_NET_UP] = "READY", + [STATE_NET_DOWN] = "DOWN", + [STATE_DISABLED] = "DISABLED", + [STATE_NET_UP | STATE_RECOVERY] = "READY_RECOVERY", + [STATE_NET_DOWN | STATE_RECOVERY] = "DOWN_RECOVERY", + [STATE_PROBED | STATE_RECOVERY] = "PROBED_RECOVERY", + [STATE_NET_UP | STATE_FROZEN] = "READY_FROZEN", + [STATE_NET_DOWN | STATE_FROZEN] = "DOWN_FROZEN", +}; +static const unsigned int nic_state_max = ARRAY_SIZE(nic_state_names); + +static int efx_debugfs_read_nic_state(struct seq_file *file, void *data) +{ + unsigned int value = *(enum nic_state *)data; + + seq_printf(file, "%d => %s\n", value, + STRING_TABLE_LOOKUP(value, nic_state)); + return 0; +} + +#define EFX_NIC_STATE_PARAMETER(container_type, parameter) \ + EFX_PARAMETER(container_type, parameter, \ + enum nic_state, efx_debugfs_read_nic_state) + +int efx_debugfs_read_string(struct seq_file *file, void *data) +{ + seq_printf(file, "%s\n", (const char *)data); + return 0; +} + + +/** + * efx_init_debugfs_files - create parameter-files in a debugfs directory + * @parent: Containing directory + * @params: Pointer to zero-terminated parameter definition array + * @ignore: Bitmask of array entries to ignore + * @get_struct: Obtain the structure containing parameters + * @ref: Opaque reference passed into the @get_struct call + * @struct_index: Index passed into the @get_struct call + * + * Add parameter-files to the given debugfs directory. 
+ * + * Return: a negative error code or 0 on success. + */ +static int +efx_init_debugfs_files(struct dentry *parent, + const struct efx_debugfs_parameter *params, u64 ignore, + void *(*get_struct)(void *, unsigned int), + void *ref, unsigned int struct_index) +{ + struct efx_debugfs_bound_param *binding; + unsigned int pos; + + for (pos = 0; params[pos].name; pos++) { + struct dentry *entry; + + if ((1ULL << pos) & ignore) + continue; + + binding = kmalloc(sizeof(*binding), GFP_KERNEL); + if (!binding) + goto err; + binding->param = &params[pos]; + binding->get_struct = get_struct; + binding->ref = ref; + binding->index = struct_index; + + entry = debugfs_create_file(params[pos].name, S_IRUGO, parent, + binding, &efx_debugfs_file_ops); + if (!entry) { + kfree(binding); + goto err; + } + } + + return 0; + +err: + while (pos--) { + if ((1ULL << pos) & ignore) + continue; + + debugfs_lookup_and_remove(params[pos].name, parent); + } + return -ENOMEM; +} + +/** + * efx_init_debugfs_netdev - create debugfs sym-links for net device + * @net_dev: Net device + * + * Create sym-links named after @net_dev to the debugfs directories for the + * corresponding NIC and port. The sym-links must be cleaned up using + * efx_fini_debugfs_netdev(). + * + * Return: a negative error code or 0 on success. + */ +int efx_init_debugfs_netdev(struct net_device *net_dev) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + char name[EFX_DEBUGFS_NAME_LEN]; + char target[EFX_DEBUGFS_NAME_LEN]; + int rc = -ENAMETOOLONG; + size_t len; + + if (snprintf(name, sizeof(name), "nic_%s", net_dev->name) >= + sizeof(name)) + return -ENAMETOOLONG; + if (snprintf(target, sizeof(target), "cards/%s", pci_name(efx->pci_dev)) + >= sizeof(target)) + return -ENAMETOOLONG; + efx->debug_symlink = debugfs_create_symlink(name, + efx_debug_root, target); + if (IS_ERR_OR_NULL(efx->debug_symlink)) { + rc = efx->debug_symlink ? PTR_ERR(efx->debug_symlink) : -ENOMEM; + return rc; + } + + if (snprintf(name, sizeof(name), "if_%s", net_dev->name) >= + sizeof(name)) + goto err; + len = snprintf(target, sizeof(target), + "cards/%s/port0", pci_name(efx->pci_dev)); + if (len >= sizeof(target)) + goto err; + efx->debug_port_symlink = debugfs_create_symlink(name, + efx_debug_root, + target); + if (IS_ERR_OR_NULL(efx->debug_port_symlink)) { + rc = efx->debug_port_symlink ? + PTR_ERR(efx->debug_port_symlink) : + -ENOMEM; + goto err; + } + + return 0; + +err: + debugfs_remove(efx->debug_symlink); + return rc; +} + +/** + * efx_fini_debugfs_netdev - remove debugfs sym-links for net device + * @net_dev: Net device + * + * Remove sym-links created for @net_dev by efx_init_debugfs_netdev().
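+ * + * For example, efx_update_debugfs_netdev() below brackets a rename with + * this call under the symlink mutex: + * + *	mutex_lock(&efx->debugfs_symlink_mutex); + *	efx_fini_debugfs_netdev(efx->net_dev); + *	efx_init_debugfs_netdev(efx->net_dev); + *	mutex_unlock(&efx->debugfs_symlink_mutex);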
+ */ +void efx_fini_debugfs_netdev(struct net_device *net_dev) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + + debugfs_remove(efx->debug_port_symlink); + efx->debug_port_symlink = NULL; + debugfs_remove(efx->debug_symlink); + efx->debug_symlink = NULL; +} + +void efx_update_debugfs_netdev(struct efx_nic *efx) +{ + mutex_lock(&efx->debugfs_symlink_mutex); + if (efx->debug_symlink) + efx_fini_debugfs_netdev(efx->net_dev); + efx_init_debugfs_netdev(efx->net_dev); + mutex_unlock(&efx->debugfs_symlink_mutex); +} + +/* Per-port parameters */ +static const struct efx_debugfs_parameter efx_debugfs_port_parameters[] = { + EFX_NAMED_PARAMETER(enabled, struct efx_nic, port_enabled, + bool, efx_debugfs_read_bool), +#if defined(EFX_USE_KCOMPAT) && !defined(NETIF_F_LRO) + EFX_BOOL_PARAMETER(struct efx_nic, lro_enabled), +#endif +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_NDO_SET_FEATURES) && !defined(EFX_HAVE_EXT_NDO_SET_FEATURES) + EFX_BOOL_PARAMETER(struct efx_nic, rx_checksum_enabled), +#endif + EFX_NAMED_PARAMETER(link_up, struct efx_nic, link_state.up, + bool, efx_debugfs_read_bool), + EFX_NAMED_PARAMETER(link_fd, struct efx_nic, link_state.fd, + bool, efx_debugfs_read_bool), + EFX_NAMED_PARAMETER(link_speed, struct efx_nic, link_state.speed, + unsigned int, efx_debugfs_read_uint), + EFX_U64_PARAMETER(struct efx_nic, loopback_modes), + EFX_LOOPBACK_MODE_PARAMETER(struct efx_nic, loopback_mode), + EFX_UINT_PARAMETER(struct efx_nic, phy_type), + EFX_STRING_PARAMETER(struct efx_nic, phy_name), + EFX_UINT_PARAMETER(struct efx_nic, n_link_state_changes), + EFX_ULONG_PARAMETER(struct efx_nic, supported_bitmap), + EFX_ULONG_PARAMETER(struct efx_nic, guaranteed_bitmap), + {NULL}, +}; + +static void *efx_debugfs_get_same(void *ref, unsigned int index) +{ + return ref; +} + +/** + * efx_extend_debugfs_port - add parameter-files to directory for port + * @efx: Efx NIC + * @structure: Structure containing parameters + * @ignore: Bitmask of structure elements to ignore + * @params: Pointer to zero-terminated parameter definition array + * + * Add parameter-files to the debugfs directory for @efx. This is intended + * for PHY-specific parameters. The files must be cleaned up using + * efx_trim_debugfs_port(). + * + * Return: a negative error code or 0 on success. + */ +int efx_extend_debugfs_port(struct efx_nic *efx, + void *structure, u64 ignore, + const struct efx_debugfs_parameter *params) +{ + if (WARN_ON(!efx->debug_port_dir)) + return -ENOENT; + + return efx_init_debugfs_files(efx->debug_port_dir, params, ignore, + efx_debugfs_get_same, structure, 0); +} + +/** + * efx_trim_debugfs_port - remove parameter-files from directory for port + * @efx: Efx NIC + * @params: Pointer to zero-terminated parameter definition array + * + * Remove parameter-files previously added to the debugfs directory + * for @efx using efx_extend_debugfs_port(). 
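+ * + * A typical PHY caller passes the same table to both halves (the + * phy_debugfs_params table here is a hypothetical example): + * + *	efx_extend_debugfs_port(efx, phy_data, 0, phy_debugfs_params); + *	... + *	efx_trim_debugfs_port(efx, phy_debugfs_params);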
+ */ +void efx_trim_debugfs_port(struct efx_nic *efx, + const struct efx_debugfs_parameter *params) +{ + struct dentry *dir = efx->debug_port_dir; + + if (dir) { + const struct efx_debugfs_parameter *field; + for (field = params; field->name; field++) + debugfs_lookup_and_remove(field->name, dir); + } +} + +#ifdef CONFIG_SFC_VDPA + +/* vDPA device MAC address */ +static int efx_debugfs_read_vdpa_mac(struct seq_file *file, void *data) +{ + struct ef100_vdpa_nic *vdpa_nic = data; + + seq_printf(file, "%pM\n", vdpa_nic->mac_address); + return 0; +} + +static int ef100_vdpa_debugfs_read_filter_list(struct seq_file *file, void *data) +{ + struct ef100_vdpa_nic *vdpa_nic = data; + + return efx_debugfs_read_filter_list(file, vdpa_nic->efx); +} + +/* Per vdpa parameters */ +static const struct efx_debugfs_parameter efx_debugfs_vdpa_parameters[] = { + EFX_UINT_PARAMETER(struct ef100_vdpa_nic, vdpa_state), + EFX_UINT_PARAMETER(struct ef100_vdpa_nic, pf_index), + EFX_UINT_PARAMETER(struct ef100_vdpa_nic, vf_index), + EFX_X64_PARAMETER(struct ef100_vdpa_nic, features), + EFX_UINT_PARAMETER(struct ef100_vdpa_nic, max_queue_pairs), + _EFX_RAW_PARAMETER(mac_address, efx_debugfs_read_vdpa_mac), + _EFX_RAW_PARAMETER(filters, ef100_vdpa_debugfs_read_filter_list), + {NULL}, +}; + +/** + * efx_init_debugfs_vdpa - create debugfs directories for vdpa device + * @vdpa: ef100_vdpa_nic + * + * Create subdirectories of @vdpa debugfs directory for the corresponding + * vDPA device. The symlinks must be cleaned up using + * efx_fini_debugfs_vdpa(). + * + * Return: a negative error code or 0 on success. + */ +int efx_init_debugfs_vdpa(struct ef100_vdpa_nic *vdpa) +{ + char name[EFX_DEBUGFS_NAME_LEN]; + int rc = 0; + + if (snprintf(name, sizeof(name), EFX_VDPA_NAME(vdpa)) >= sizeof(name)) + goto err_len; + vdpa->debug_dir = debugfs_create_dir(name, efx_debug_root); + if (IS_ERR(vdpa->debug_dir)) { + rc = PTR_ERR(vdpa->debug_dir); + vdpa->debug_dir = NULL; + goto err; + } + if (!vdpa->debug_dir) + goto err_mem; + + /* Create files */ + rc = efx_init_debugfs_files(vdpa->debug_dir, + efx_debugfs_vdpa_parameters, 0, + efx_debugfs_get_same, vdpa, 0); + if (rc) + goto err; + + return 0; + + err_len: + rc = -ENAMETOOLONG; + goto err; +err_mem: + rc = -ENOMEM; + err: + efx_fini_debugfs_vdpa(vdpa); + return rc; +} + +/** + * efx_fini_debugfs_vdpa - remove debugfs directory for vDPA + * @vdpa: ef100_vdpa_nic device + * + * Remove debugfs directory created for @vdpa by efx_init_debugfs_vdpa(). + */ +void efx_fini_debugfs_vdpa(struct ef100_vdpa_nic *vdpa) +{ + debugfs_remove_recursive(vdpa->debug_dir); + vdpa->debug_dir = NULL; +} + +/* Per vring parameters */ +static const +struct efx_debugfs_parameter efx_debugfs_vdpa_vring_parameters[] = { + EFX_UINT_PARAMETER(struct ef100_vdpa_vring_info, size), + EFX_USHORT_PARAMETER(struct ef100_vdpa_vring_info, vring_state), + EFX_UINT_PARAMETER(struct ef100_vdpa_vring_info, last_avail_idx), + EFX_UINT_PARAMETER(struct ef100_vdpa_vring_info, last_used_idx), + EFX_UINT_PARAMETER(struct ef100_vdpa_vring_info, doorbell_offset), + EFX_STRING_PARAMETER(struct ef100_vdpa_vring_info, msix_name), + {NULL}, +}; + +/** + * efx_init_debugfs_vdpa_vring - create debugfs directories for vdpa vring + * @vdpa: ef100_vdpa_nic + * @vdpa_vring: ef100_vdpa_vring_info + * @idx: index of the vring + * + * Create subdirectories of @vdpa_vring debugfs directory for the + * corresponding vDPA device, vring with index idx. The debugfs must be + * cleaned up using efx_fini_debugfs_vdpa_vring(). 
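+ * + * The directory is created beneath the parent vDPA device's debugfs node, + * so the resulting layout is of the form (names illustrative only): + * + *	[/sys/kernel]/debug/sfc/<vdpa-dev>/<vring>/size + *	[/sys/kernel]/debug/sfc/<vdpa-dev>/<vring>/vring_state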
+ * + * Return: a negative error code or 0 on success. + */ +int efx_init_debugfs_vdpa_vring(struct ef100_vdpa_nic *vdpa, + struct ef100_vdpa_vring_info *vdpa_vring, + u16 idx) +{ + char name[EFX_DEBUGFS_NAME_LEN]; + int rc = 0; + + if (snprintf(name, sizeof(name), EFX_VDPA_VRING_NAME(idx)) + >= sizeof(name)) + goto err_len; + dev_info(&vdpa->vdpa_dev.dev, "vdpa vring device name: %s\n", name); + vdpa_vring->debug_dir = debugfs_create_dir(name, vdpa->debug_dir); + if (IS_ERR(vdpa_vring->debug_dir)) { + rc = PTR_ERR(vdpa_vring->debug_dir); + vdpa_vring->debug_dir = NULL; + goto err; + } + if (!vdpa_vring->debug_dir) + goto err_mem; + + /* Create files */ + rc = efx_init_debugfs_files(vdpa_vring->debug_dir, + efx_debugfs_vdpa_vring_parameters, 0, + efx_debugfs_get_same, vdpa_vring, 0); + if (rc) + goto err; + + return 0; + + err_len: + rc = -ENAMETOOLONG; + goto err; +err_mem: + rc = -ENOMEM; + err: + efx_fini_debugfs_vdpa_vring(vdpa_vring); + return rc; +} + +/** + * efx_fini_debugfs_vdpa_vring - remove debugfs directory for vring + * @vdpa_vring: ef100_vdpa_vring_info + * + * Remove debugfs created for @vdpa_vring by efx_init_debugfs_vdpa_vring(). + */ +void efx_fini_debugfs_vdpa_vring(struct ef100_vdpa_vring_info *vdpa_vring) +{ + debugfs_remove_recursive(vdpa_vring->debug_dir); + vdpa_vring->debug_dir = NULL; +} +#endif + +/* Per-TX-queue parameters */ +static const struct efx_debugfs_parameter efx_debugfs_tx_queue_parameters[] = { + EFX_UINT_PARAMETER(struct efx_tx_queue, queue), + EFX_UINT_PARAMETER(struct efx_tx_queue, insert_count), + EFX_UINT_PARAMETER(struct efx_tx_queue, write_count), + EFX_UINT_PARAMETER(struct efx_tx_queue, read_count), + EFX_UINT_PARAMETER(struct efx_tx_queue, tso_bursts), + EFX_UINT_PARAMETER(struct efx_tx_queue, tso_long_headers), + EFX_UINT_PARAMETER(struct efx_tx_queue, tso_packets), + EFX_UINT_PARAMETER(struct efx_tx_queue, tso_version), + EFX_UINT_PARAMETER(struct efx_tx_queue, pushes), + EFX_UINT_PARAMETER(struct efx_tx_queue, doorbell_notify_comp), + EFX_UINT_PARAMETER(struct efx_tx_queue, doorbell_notify_tx), + EFX_UINT_PARAMETER(struct efx_tx_queue, csum_offload), + EFX_BOOL_PARAMETER(struct efx_tx_queue, timestamping), + EFX_U64_PARAMETER(struct efx_tx_queue, tx_bytes), + EFX_ULONG_PARAMETER(struct efx_tx_queue, tx_packets), + {NULL}, +}; + +static void *efx_debugfs_get_tx_queue(void *ref, unsigned int index) +{ + struct efx_nic *efx = ref; + + return efx_get_tx_queue_from_index(efx, index); +} + +static void efx_fini_debugfs_tx_queue(struct efx_tx_queue *tx_queue); + +/** + * efx_init_debugfs_tx_queue - create debugfs directory for TX queue + * @tx_queue: Efx TX queue + * + * Create a debugfs directory containing parameter-files for @tx_queue. + * The directory must be cleaned up using efx_fini_debugfs_tx_queue(). + * + * Return: a negative error code or 0 on success.
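+ * + * Example: besides the parameter files, each queue directory gains + * "channel" and "port" symlinks, so a queue can also be reached via its + * channel (paths illustrative): + * + *	cards/<pci-addr>/txq_0/insert_count + *	cards/<pci-addr>/txq_0/channel -> ../channel_0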
+ */ +static int efx_init_debugfs_tx_queue(struct efx_tx_queue *tx_queue) +{ + char name[EFX_DEBUGFS_NAME_LEN]; + char target[EFX_DEBUGFS_NAME_LEN]; + int rc; + + /* Create directory */ + if (snprintf(name, sizeof(name), EFX_TX_QUEUE_NAME(tx_queue)) + >= sizeof(name)) + goto err_len; + tx_queue->debug_dir = debugfs_create_dir(name, + tx_queue->efx->debug_dir); + if (IS_ERR(tx_queue->debug_dir)) { + rc = PTR_ERR(tx_queue->debug_dir); + tx_queue->debug_dir = NULL; + goto err; + } + if (!tx_queue->debug_dir) + goto err_mem; + + /* Create files */ + rc = efx_init_debugfs_files(tx_queue->debug_dir, + efx_debugfs_tx_queue_parameters, 0, + efx_debugfs_get_tx_queue, tx_queue->efx, + tx_queue->queue); + if (rc) + goto err; + + /* Create symlink to channel */ + if (snprintf(target, sizeof(target), + "../" EFX_CHANNEL_NAME(tx_queue->channel)) >= + sizeof(target)) + goto err_len; + if (!debugfs_create_symlink("channel", tx_queue->debug_dir, target)) + goto err_mem; + + /* Create symlink to port */ + if (!debugfs_create_symlink("port", tx_queue->debug_dir, "../port0")) + goto err_mem; + + return 0; + + err_len: + rc = -ENAMETOOLONG; + goto err; + err_mem: + rc = -ENOMEM; + err: + efx_fini_debugfs_tx_queue(tx_queue); + return rc; +} + +/** + * efx_fini_debugfs_tx_queue - remove debugfs directory for TX queue + * @tx_queue: Efx TX queue + * + * Remove directory created for @tx_queue by efx_init_debugfs_tx_queue(). + */ +static void efx_fini_debugfs_tx_queue(struct efx_tx_queue *tx_queue) +{ + debugfs_remove_recursive(tx_queue->debug_dir); + tx_queue->debug_dir = NULL; +} + +/* Per-RX-queue parameters */ +static const struct efx_debugfs_parameter efx_debugfs_rx_queue_parameters[] = { + EFX_UINT_PARAMETER(struct efx_rx_queue, added_count), + EFX_UINT_PARAMETER(struct efx_rx_queue, removed_count), + EFX_UINT_PARAMETER(struct efx_rx_queue, max_fill), + EFX_UINT_PARAMETER(struct efx_rx_queue, fast_fill_trigger), + EFX_UINT_PARAMETER(struct efx_rx_queue, min_fill), + EFX_UINT_PARAMETER(struct efx_rx_queue, recycle_count), +#if !defined(EFX_NOT_UPSTREAM) || defined(EFX_RX_PAGE_SHARE) + EFX_UINT_PARAMETER(struct efx_rx_queue, page_add), + EFX_UINT_PARAMETER(struct efx_rx_queue, page_recycle_count), + EFX_UINT_PARAMETER(struct efx_rx_queue, page_recycle_failed), + EFX_UINT_PARAMETER(struct efx_rx_queue, page_recycle_full), + EFX_UINT_PARAMETER(struct efx_rx_queue, page_repost_count), +#endif +#if defined(EFX_NOT_UPSTREAM) && defined(EFX_USE_SFC_LRO) + EFX_UINT_PARAMETER(struct efx_rx_queue, ssr.n_merges), + EFX_UINT_PARAMETER(struct efx_rx_queue, ssr.n_bursts), + EFX_UINT_PARAMETER(struct efx_rx_queue, ssr.n_slow_start), + EFX_UINT_PARAMETER(struct efx_rx_queue, ssr.n_misorder), + EFX_UINT_PARAMETER(struct efx_rx_queue, ssr.n_too_many), + EFX_UINT_PARAMETER(struct efx_rx_queue, ssr.n_new_stream), + EFX_UINT_PARAMETER(struct efx_rx_queue, ssr.n_drop_idle), + EFX_UINT_PARAMETER(struct efx_rx_queue, ssr.n_drop_closed), +#endif + EFX_UINT_PARAMETER(struct efx_rx_queue, slow_fill_count), + EFX_UINT_PARAMETER(struct efx_rx_queue, n_rx_tobe_disc), + EFX_UINT_PARAMETER(struct efx_rx_queue, n_rx_ip_hdr_chksum_err), + EFX_UINT_PARAMETER(struct efx_rx_queue, n_rx_tcp_udp_chksum_err), + EFX_UINT_PARAMETER(struct efx_rx_queue, n_rx_outer_ip_hdr_chksum_err), + EFX_UINT_PARAMETER(struct efx_rx_queue, n_rx_outer_tcp_udp_chksum_err), + EFX_UINT_PARAMETER(struct efx_rx_queue, n_rx_inner_ip_hdr_chksum_err), + EFX_UINT_PARAMETER(struct efx_rx_queue, n_rx_inner_tcp_udp_chksum_err), + EFX_UINT_PARAMETER(struct efx_rx_queue, 
n_rx_eth_crc_err), + EFX_UINT_PARAMETER(struct efx_rx_queue, n_rx_mcast_mismatch), + EFX_UINT_PARAMETER(struct efx_rx_queue, n_rx_frm_trunc), + EFX_UINT_PARAMETER(struct efx_rx_queue, n_rx_overlength), + EFX_UINT_PARAMETER(struct efx_rx_queue, n_rx_nodesc_trunc), + {NULL}, +}; + +static void *efx_debugfs_get_rx_queue(void *ref, unsigned int index) +{ + return efx_channel_get_rx_queue(efx_get_channel(ref, index)); +} + +static void efx_fini_debugfs_rx_queue(struct efx_rx_queue *rx_queue); + +/** + * efx_init_debugfs_rx_queue - create debugfs directory for RX queue + * @rx_queue: Efx RX queue + * + * Create a debugfs directory containing parameter-files for @rx_queue. + * The directory must be cleaned up using efx_fini_debugfs_rx_queue(). + * + * Return: a negative error code or 0 on success. + */ +static int efx_init_debugfs_rx_queue(struct efx_rx_queue *rx_queue) +{ + char name[EFX_DEBUGFS_NAME_LEN]; + char target[EFX_DEBUGFS_NAME_LEN]; + int rc; + + /* Create directory */ + if (snprintf(name, sizeof(name), EFX_RX_QUEUE_NAME(rx_queue)) + >= sizeof(name)) + goto err_len; + rx_queue->debug_dir = debugfs_create_dir(name, + rx_queue->efx->debug_dir); + if (IS_ERR(rx_queue->debug_dir)) { + rc = PTR_ERR(rx_queue->debug_dir); + rx_queue->debug_dir = NULL; + goto err; + } + if (!rx_queue->debug_dir) + goto err_mem; + + /* Create files */ + rc = efx_init_debugfs_files(rx_queue->debug_dir, + efx_debugfs_rx_queue_parameters, 0, + efx_debugfs_get_rx_queue, rx_queue->efx, + efx_rx_queue_index(rx_queue)); + if (rc) + goto err; + + /* Create symlink to channel */ + if (snprintf(target, sizeof(target), + "../" EFX_CHANNEL_NAME(efx_rx_queue_channel(rx_queue))) >= + sizeof(target)) + goto err_len; + if (!debugfs_create_symlink("channel", rx_queue->debug_dir, target)) + goto err_mem; + + return 0; + + err_len: + rc = -ENAMETOOLONG; + goto err; + err_mem: + rc = -ENOMEM; + err: + efx_fini_debugfs_rx_queue(rx_queue); + return rc; +} + +/** + * efx_fini_debugfs_rx_queue - remove debugfs directory for RX queue + * @rx_queue: Efx RX queue + * + * Remove directory created for @rx_queue by efx_init_debugfs_rx_queue(). + */ +static void efx_fini_debugfs_rx_queue(struct efx_rx_queue *rx_queue) +{ + debugfs_remove_recursive(rx_queue->debug_dir); + rx_queue->debug_dir = NULL; +} + +/* Per-channel parameters */ +static const struct efx_debugfs_parameter efx_debugfs_channel_parameters[] = { + EFX_BOOL_PARAMETER(struct efx_channel, enabled), + EFX_INT_PARAMETER(struct efx_channel, irq), + EFX_UINT_PARAMETER(struct efx_channel, irq_moderation_us), + EFX_UINT_PARAMETER(struct efx_channel, eventq_read_ptr), + {NULL}, +}; + +static void *efx_debugfs_get_channel(void *ref, unsigned int index) +{ + return efx_get_channel(ref, index); +} + +static void efx_fini_debugfs_channel(struct efx_channel *channel); + +/** + * efx_init_debugfs_channel - create debugfs directory for channel + * @channel: Efx channel + * + * Create a debugfs directory containing parameter-files for @channel. + * The directory must be cleaned up using efx_fini_debugfs_channel(). + * + * Return: a negative error code or 0 on success. 
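+ * + * Example: every counter in efx_debugfs_rx_queue_parameters becomes one + * file under the queue directory, e.g. (path illustrative): + * + *	cards/<pci-addr>/rxq_0/n_rx_tobe_disc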
+ */ +static int efx_init_debugfs_channel(struct efx_channel *channel) +{ + char name[EFX_DEBUGFS_NAME_LEN]; + int rc; + + /* Create directory */ + if (snprintf(name, sizeof(name), EFX_CHANNEL_NAME(channel)) + >= sizeof(name)) + goto err_len; + channel->debug_dir = debugfs_create_dir(name, channel->efx->debug_dir); + if (IS_ERR(channel->debug_dir)) { + rc = PTR_ERR(channel->debug_dir); + channel->debug_dir = NULL; + goto err; + } + if (!channel->debug_dir) + goto err_mem; + + /* Create files */ + rc = efx_init_debugfs_files(channel->debug_dir, + efx_debugfs_channel_parameters, 0, + efx_debugfs_get_channel, channel->efx, + channel->channel); + if (rc) + goto err; + + return 0; + + err_len: + rc = -ENAMETOOLONG; + goto err; + err_mem: + rc = -ENOMEM; + err: + netif_err(channel->efx, drv, channel->efx->net_dev, + "Unable to create debugfs channel file (error %d)\n", rc); + efx_fini_debugfs_channel(channel); + return rc; +} + +/** + * efx_fini_debugfs_channel - remove debugfs directory for channel + * @channel: Efx channel + * + * Remove directory created for @channel by efx_init_debugfs_channel(). + */ +static void efx_fini_debugfs_channel(struct efx_channel *channel) +{ + debugfs_remove_recursive(channel->debug_dir); + channel->debug_dir = NULL; +} + +/* Per-NIC parameters */ +static const struct efx_debugfs_parameter efx_debugfs_nic_parameters[] = { + EFX_UINT_PARAMETER(struct efx_nic, rx_dma_len), + EFX_UINT_PARAMETER(struct efx_nic, rx_buffer_order), + EFX_UINT_PARAMETER(struct efx_nic, rx_buffer_truesize), + EFX_INT_MODE_PARAMETER(struct efx_nic, interrupt_mode), + EFX_NIC_STATE_PARAMETER(struct efx_nic, state), + {NULL}, +}; + +/* Per-NIC error counts */ +static const +struct efx_debugfs_parameter efx_debugfs_nic_error_parameters[] = { + EFX_ATOMIC_PARAMETER(struct efx_nic_errors, missing_event), + EFX_ATOMIC_PARAMETER(struct efx_nic_errors, rx_reset), + EFX_ATOMIC_PARAMETER(struct efx_nic_errors, rx_desc_fetch), + EFX_ATOMIC_PARAMETER(struct efx_nic_errors, tx_desc_fetch), + EFX_ATOMIC_PARAMETER(struct efx_nic_errors, spurious_tx), + {NULL}, +}; + +/** + * efx_init_debugfs_channels - create debugfs directories for NIC channels + * @efx: Efx NIC + * + * Create subdirectories of @efx's debugfs directory for all the channels, + * RX queues and TX queues used by this driver. The subdirectories must be + * cleaned up using efx_fini_debugfs_channels(). + * + * Return: a negative error code or 0 on success. + */ +int efx_init_debugfs_channels(struct efx_nic *efx) +{ + struct efx_channel *channel; + struct efx_rx_queue *rx_queue; + struct efx_tx_queue *tx_queue; + int rc; + + efx_for_each_channel(channel, efx) { + rc = efx_init_debugfs_channel(channel); + if (rc) + goto err; + + efx_for_each_channel_rx_queue(rx_queue, channel) { + rc = efx_init_debugfs_rx_queue(rx_queue); + if (rc) + goto err; + } + + efx_for_each_channel_tx_queue(tx_queue, channel) { + rc = efx_init_debugfs_tx_queue(tx_queue); + if (rc) + goto err; + } + } + + return 0; + + err: + efx_fini_debugfs_channels(efx); + return rc; +} + +/** + * efx_fini_debugfs_channels - remove debugfs directories for NIC queues + * @efx: Efx NIC + * + * Remove subdirectories of @efx's debugfs directory created by + * efx_init_debugfs_channels(). 
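+ * + * Together with efx_init_debugfs_channels() this maintains a per-card + * tree of the form (names illustrative): + * + *	cards/<pci-addr>/channel_0/ + *	cards/<pci-addr>/rxq_0/ + *	cards/<pci-addr>/txq_0/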
+ */ +void efx_fini_debugfs_channels(struct efx_nic *efx) +{ + struct efx_channel *channel; + struct efx_rx_queue *rx_queue; + struct efx_tx_queue *tx_queue; + + efx_for_each_channel(channel, efx) { + efx_for_each_channel_tx_queue(tx_queue, channel) + efx_fini_debugfs_tx_queue(tx_queue); + + efx_for_each_channel_rx_queue(rx_queue, channel) + efx_fini_debugfs_rx_queue(rx_queue); + + efx_fini_debugfs_channel(channel); + } +} + +/** + * efx_init_debugfs_nic - create debugfs directory for NIC + * @efx: Efx NIC + * + * Create debugfs directory containing parameter-files for @efx, + * and a subdirectory "errors" containing per-NIC error counts. + * The directories must be cleaned up using efx_fini_debugfs_nic(). + * + * Return: a negative error code or 0 on success. + */ +int efx_init_debugfs_nic(struct efx_nic *efx) +{ + int rc; + + /* Create directory */ + efx->debug_dir = debugfs_create_dir(pci_name(efx->pci_dev), + efx_debug_cards); + if (IS_ERR(efx->debug_dir)) { + rc = PTR_ERR(efx->debug_dir); + efx->debug_dir = NULL; + goto err; + } + if (!efx->debug_dir) + goto err_mem; + + /* Create errors directory */ + efx->errors.debug_dir = debugfs_create_dir("errors", efx->debug_dir); + if (IS_ERR(efx->errors.debug_dir)) { + rc = PTR_ERR(efx->errors.debug_dir); + efx->errors.debug_dir = NULL; + goto err; + } + if (!efx->errors.debug_dir) + goto err_mem; + + /* Create port directory */ + efx->debug_port_dir = debugfs_create_dir("port0", efx->debug_dir); + if (IS_ERR(efx->debug_port_dir)) { + rc = PTR_ERR(efx->debug_port_dir); + efx->debug_port_dir = NULL; + goto err; + } + if (!efx->debug_port_dir) + goto err_mem; + + /* Create files */ + rc = efx_init_debugfs_files(efx->debug_dir, + efx_debugfs_nic_parameters, 0, + efx_debugfs_get_same, efx, 0); + if (rc) + goto err; + rc = efx_init_debugfs_files(efx->errors.debug_dir, + efx_debugfs_nic_error_parameters, 0, + efx_debugfs_get_same, &efx->errors, 0); + if (rc) + goto err; + rc = efx_init_debugfs_files(efx->debug_port_dir, + efx_debugfs_port_parameters, 0, + efx_debugfs_get_same, efx, 0); + if (rc) + goto err; + + return 0; + + err_mem: + rc = -ENOMEM; + err: + efx_fini_debugfs_nic(efx); + return rc; +} + +/** + * efx_fini_debugfs_nic - remove debugfs directories for NIC + * @efx: Efx NIC + * + * Remove debugfs directories created for @efx by efx_init_debugfs_nic(). + */ +void efx_fini_debugfs_nic(struct efx_nic *efx) +{ + debugfs_remove_recursive(efx->debug_port_dir); + efx->debug_port_dir = NULL; + debugfs_remove_recursive(efx->errors.debug_dir); + efx->errors.debug_dir = NULL; + debugfs_remove_recursive(efx->debug_dir); + efx->debug_dir = NULL; +} + +/** + * efx_init_debugfs - create debugfs directories for sfc driver + * + * Create debugfs directories "sfc" and "sfc/cards". This must be + * called before any of the other functions that create debugfs + * directories. The directories must be cleaned up using + * efx_fini_debugfs(). + * + * Return: a negative error code or 0 on success. 
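+ * + * Example: module init code is expected to call this first and unwind on + * failure: + * + *	rc = efx_init_debugfs(); + *	if (rc) + *		return rc;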
+ */ +int efx_init_debugfs(void) +{ + int rc; + + /* Create top-level directory */ + efx_debug_root = debugfs_create_dir(KBUILD_MODNAME, NULL); + if (!efx_debug_root) { + pr_err("debugfs_create_dir %s failed.\n", KBUILD_MODNAME); + rc = -ENOMEM; + goto err; + } else if (IS_ERR(efx_debug_root)) { + rc = PTR_ERR(efx_debug_root); + pr_err("debugfs_create_dir %s failed, rc=%d.\n", + KBUILD_MODNAME, rc); + goto err; + } + + /* Create "cards" directory */ + efx_debug_cards = debugfs_create_dir("cards", efx_debug_root); + if (!efx_debug_cards) { + printk(KERN_ERR "debugfs_create_dir cards failed.\n"); + rc = -ENOMEM; + goto err; + } else if (IS_ERR(efx_debug_cards)) { + rc = PTR_ERR(efx_debug_cards); + printk(KERN_ERR "debugfs_create_dir cards failed, rc=%d.\n", + rc); + goto err; + } + + return 0; + + err: + efx_fini_debugfs(); + return rc; +} + +/** + * efx_fini_debugfs - remove debugfs directories for sfc driver + * + * Remove directories created by efx_init_debugfs(). + */ +void efx_fini_debugfs(void) +{ + debugfs_remove_recursive(efx_debug_root); + efx_debug_cards = NULL; + efx_debug_root = NULL; +} + +#ifdef EFX_NOT_UPSTREAM +#if IS_MODULE(CONFIG_SFC_DRIVERLINK) +int efx_debugfs_read_kernel_blocked(struct seq_file *file, void *data) +{ + unsigned int i; + bool *kernel_blocked = data; + + for (i = 0; i < EFX_DL_FILTER_BLOCK_KERNEL_MAX; i++) + seq_printf(file, "%u:%d\n", i, kernel_blocked[i]); + return 0; +} +#endif +#endif + +void efx_debugfs_print_filter(char *s, size_t l, struct efx_filter_spec *spec) +{ + u32 ip[4]; + int p = snprintf(s, l, "match=%#x,pri=%d,flags=%#x,q=%d", + spec->match_flags, spec->priority, spec->flags, + spec->dmaq_id); + + if (spec->stack_id) + p += snprintf(s + p, l - p, ",stack=%#x", + spec->stack_id); + if (spec->vport_id) + p += snprintf(s + p, l - p, ",vport=%#x", + spec->vport_id); + + if (spec->flags & EFX_FILTER_FLAG_RX_RSS) { + if (spec->rss_context == EFX_FILTER_RSS_CONTEXT_DEFAULT) + p += snprintf(s + p, l - p, ",rss=def"); + else + p += snprintf(s + p, l - p, ",rss=%#x", + spec->rss_context); + } + + if (spec->match_flags & EFX_FILTER_MATCH_OUTER_VID) + p += snprintf(s + p, l - p, + ",ovid=%d", ntohs(spec->outer_vid)); + if (spec->match_flags & EFX_FILTER_MATCH_INNER_VID) + p += snprintf(s + p, l - p, + ",ivid=%d", ntohs(spec->inner_vid)); + if (spec->match_flags & EFX_FILTER_MATCH_ENCAP_TYPE) + p += snprintf(s + p, l - p, + ",encap=%d", spec->encap_type); + if (spec->match_flags & EFX_FILTER_MATCH_ENCAP_TNI) + p += snprintf(s + p, l - p, + ",tni=%#x", spec->tni); + if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC) + p += snprintf(s + p, l - p, + ",lmac=%02x:%02x:%02x:%02x:%02x:%02x", + spec->loc_mac[0], spec->loc_mac[1], + spec->loc_mac[2], spec->loc_mac[3], + spec->loc_mac[4], spec->loc_mac[5]); + if (spec->match_flags & EFX_FILTER_MATCH_REM_MAC) + p += snprintf(s + p, l - p, + ",rmac=%02x:%02x:%02x:%02x:%02x:%02x", + spec->rem_mac[0], spec->rem_mac[1], + spec->rem_mac[2], spec->rem_mac[3], + spec->rem_mac[4], spec->rem_mac[5]); + if (spec->match_flags & EFX_FILTER_MATCH_OUTER_LOC_MAC) + p += snprintf(s + p, l - p, + ",olmac=%02x:%02x:%02x:%02x:%02x:%02x", + spec->outer_loc_mac[0], spec->outer_loc_mac[1], + spec->outer_loc_mac[2], spec->outer_loc_mac[3], + spec->outer_loc_mac[4], spec->outer_loc_mac[5]); + if (spec->match_flags & EFX_FILTER_MATCH_ETHER_TYPE) + p += snprintf(s + p, l - p, + ",ether=%#x", ntohs(spec->ether_type)); + if (spec->match_flags & EFX_FILTER_MATCH_IP_PROTO) + p += snprintf(s + p, l - p, + ",ippr=%#x", spec->ip_proto); + if 
(spec->match_flags & EFX_FILTER_MATCH_LOC_HOST) { + if (ntohs(spec->ether_type) == ETH_P_IP) { + ip[0] = (__force u32) spec->loc_host[0]; + p += snprintf(s + p, l - p, + ",lip=%d.%d.%d.%d", + ip[0] & 0xff, + (ip[0] >> 8) & 0xff, + (ip[0] >> 16) & 0xff, + (ip[0] >> 24) & 0xff); + } else if (ntohs(spec->ether_type) == ETH_P_IPV6) { + ip[0] = (__force u32) spec->loc_host[0]; + ip[1] = (__force u32) spec->loc_host[1]; + ip[2] = (__force u32) spec->loc_host[2]; + ip[3] = (__force u32) spec->loc_host[3]; + p += snprintf(s + p, l - p, + ",lip=%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x", + ip[0] & 0xffff, + (ip[0] >> 16) & 0xffff, + ip[1] & 0xffff, + (ip[1] >> 16) & 0xffff, + ip[2] & 0xffff, + (ip[2] >> 16) & 0xffff, + ip[3] & 0xffff, + (ip[3] >> 16) & 0xffff); + } else { + p += snprintf(s + p, l - p, ",lip=?"); + } + } + if (spec->match_flags & EFX_FILTER_MATCH_REM_HOST) { + if (ntohs(spec->ether_type) == ETH_P_IP) { + ip[0] = (__force u32) spec->rem_host[0]; + p += snprintf(s + p, l - p, + ",rip=%d.%d.%d.%d", + ip[0] & 0xff, + (ip[0] >> 8) & 0xff, + (ip[0] >> 16) & 0xff, + (ip[0] >> 24) & 0xff); + } else if (ntohs(spec->ether_type) == ETH_P_IPV6) { + ip[0] = (__force u32) spec->rem_host[0]; + ip[1] = (__force u32) spec->rem_host[1]; + ip[2] = (__force u32) spec->rem_host[2]; + ip[3] = (__force u32) spec->rem_host[3]; + p += snprintf(s + p, l - p, + ",rip=%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x", + ip[0] & 0xffff, + (ip[0] >> 16) & 0xffff, + ip[1] & 0xffff, + (ip[1] >> 16) & 0xffff, + ip[2] & 0xffff, + (ip[2] >> 16) & 0xffff, + ip[3] & 0xffff, + (ip[3] >> 16) & 0xffff); + } else { + p += snprintf(s + p, l - p, ",rip=?"); + } + } + if (spec->match_flags & EFX_FILTER_MATCH_LOC_PORT) + p += snprintf(s + p, l - p, + ",lport=%d", ntohs(spec->loc_port)); + if (spec->match_flags & EFX_FILTER_MATCH_REM_PORT) + p += snprintf(s + p, l - p, + ",rport=%d", ntohs(spec->rem_port)); + if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) + p += snprintf(s + p, l - p, ",%s", + spec->loc_mac[0] ? "mc" : "uc"); +} + diff --git a/src/drivers/net/ethernet/sfc/debugfs.h b/src/drivers/net/ethernet/sfc/debugfs.h new file mode 100644 index 0000000..1a793ab --- /dev/null +++ b/src/drivers/net/ethernet/sfc/debugfs.h @@ -0,0 +1,225 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2006-2017 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
+ */ + +#ifndef EFX_DEBUGFS_H +#define EFX_DEBUGFS_H +#ifdef CONFIG_SFC_VDPA +#include "ef100_vdpa.h" +#endif + +struct seq_file; + +struct efx_debugfs_parameter { + const char *name; + size_t offset; + int (*reader)(struct seq_file *, void *); +}; + +#ifdef CONFIG_DEBUG_FS +int efx_init_debugfs_netdev(struct net_device *net_dev); +void efx_fini_debugfs_netdev(struct net_device *net_dev); +void efx_update_debugfs_netdev(struct efx_nic *efx); +int efx_init_debugfs_nic(struct efx_nic *efx); +void efx_fini_debugfs_nic(struct efx_nic *efx); +int efx_init_debugfs_channels(struct efx_nic *efx); +void efx_fini_debugfs_channels(struct efx_nic *efx); +int efx_init_debugfs(void); +void efx_fini_debugfs(void); +int efx_extend_debugfs_port(struct efx_nic *efx, + void *context, u64 ignore, + const struct efx_debugfs_parameter *params); +void efx_trim_debugfs_port(struct efx_nic *efx, + const struct efx_debugfs_parameter *params); +#ifdef CONFIG_SFC_VDPA +int efx_init_debugfs_vdpa(struct ef100_vdpa_nic *vdpa); +void efx_fini_debugfs_vdpa(struct ef100_vdpa_nic *vdpa); +int efx_init_debugfs_vdpa_vring(struct ef100_vdpa_nic *vdpa, + struct ef100_vdpa_vring_info *vdpa_vring, + u16 idx); +void efx_fini_debugfs_vdpa_vring(struct ef100_vdpa_vring_info *vdpa_vring); +#endif + +/* Helpers for handling debugfs entry reads */ +int efx_debugfs_read_ushort(struct seq_file *file, void *data); +int efx_debugfs_read_uint(struct seq_file *, void *); +int efx_debugfs_read_ulong(struct seq_file *, void *); +int efx_debugfs_read_string(struct seq_file *, void *); +int efx_debugfs_read_int(struct seq_file *, void *); +int efx_debugfs_read_atomic(struct seq_file *, void *); +int efx_debugfs_read_u64(struct seq_file *, void *); +int efx_debugfs_read_bool(struct seq_file *, void *); +#ifdef CONFIG_SFC_VDPA +int efx_debugfs_read_x64(struct seq_file *, void *); +#endif + +/* Handy macros for filling out parameters */ + +/* Initialiser for a struct efx_debugfs_parameter without type-checking */ +#define _EFX_RAW_PARAMETER(_name, reader_function) { \ + .name = #_name, \ + .offset = 0, \ + .reader = reader_function, \ +} + +/* Initialiser for a struct efx_debugfs_parameter without type-checking */ +#define _EFX_PARAMETER(container_type, parameter, reader_function) { \ + .name = #parameter, \ + .offset = offsetof(container_type, parameter), \ + .reader = reader_function, \ +} + +/* Initialiser for a struct efx_debugfs_parameter with type-checking */ +#define EFX_PARAMETER(container_type, parameter, field_type, \ + reader_function) { \ + .name = #parameter, \ + .offset = ((((field_type *) 0) == \ + &((container_type *) 0)->parameter) ? \ + offsetof(container_type, parameter) : \ + offsetof(container_type, parameter)), \ + .reader = reader_function, \ +} + +/* Likewise, but the file name is not taken from the field name */ +#define EFX_NAMED_PARAMETER(_name, container_type, parameter, field_type, \ + reader_function) { \ + .name = #_name, \ + .offset = ((((field_type *) 0) == \ + &((container_type *) 0)->parameter) ? \ + offsetof(container_type, parameter) : \ + offsetof(container_type, parameter)), \ + .reader = reader_function, \ +} + +/* A string parameter (string embedded in the structure) */ +#define EFX_STRING_PARAMETER(container_type, parameter) { \ + .name = #parameter, \ + .offset = ((((char *) 0) == \ + ((container_type *) 0)->parameter) ? 
\ + offsetof(container_type, parameter) : \ + offsetof(container_type, parameter)), \ + .reader = efx_debugfs_read_string, \ +} + +/* An unsigned short parameter */ +#define EFX_USHORT_PARAMETER(container_type, parameter) \ + EFX_PARAMETER(container_type, parameter, \ + unsigned short, efx_debugfs_read_ushort) + +/* An unsigned integer parameter */ +#define EFX_UINT_PARAMETER(container_type, parameter) \ + EFX_PARAMETER(container_type, parameter, \ + unsigned int, efx_debugfs_read_uint) + +/* An unsigned long integer parameter */ +#define EFX_ULONG_PARAMETER(container_type, parameter) \ + EFX_PARAMETER(container_type, parameter, \ + unsigned long, efx_debugfs_read_ulong) + +/* A u64 parameter */ +#define EFX_U64_PARAMETER(container_type, parameter) \ + EFX_PARAMETER(container_type, parameter, \ + u64, efx_debugfs_read_u64) + +/* A u64 hex parameter */ +#define EFX_X64_PARAMETER(container_type, parameter) \ + EFX_PARAMETER(container_type, parameter, \ + u64, efx_debugfs_read_x64) + +/* An atomic_t parameter */ +#define EFX_ATOMIC_PARAMETER(container_type, parameter) \ + EFX_PARAMETER(container_type, parameter, \ + atomic_t, efx_debugfs_read_atomic) + +/* An integer parameter */ +#define EFX_INT_PARAMETER(container_type, parameter) \ + EFX_PARAMETER(container_type, parameter, \ + int, efx_debugfs_read_int) + +#define EFX_BOOL_PARAMETER(container_type, parameter) \ + EFX_PARAMETER(container_type, parameter, \ + bool, efx_debugfs_read_bool) + +/* utility functions common between farch and ef10 */ +void efx_debugfs_print_filter(char *s, size_t l, struct efx_filter_spec *spec); +#ifdef EFX_NOT_UPSTREAM +int efx_debugfs_read_kernel_blocked(struct seq_file *file, void *data); +#endif + +#else /* !CONFIG_DEBUG_FS */ + +static inline int efx_init_debugfs_netdev(struct net_device *net_dev) +{ + return 0; +} +static inline void efx_fini_debugfs_netdev(struct net_device *net_dev) {} + +static inline void efx_update_debugfs_netdev(struct efx_nic *efx) {} + +static inline int efx_init_debugfs_port(struct efx_nic *efx) +{ + return 0; +} +static inline void efx_fini_debugfs_port(struct efx_nic *efx) {} +static inline int efx_init_debugfs_nic(struct efx_nic *efx) +{ + return 0; +} +static inline void efx_fini_debugfs_nic(struct efx_nic *efx) {} +static inline int efx_init_debugfs_channels(struct efx_nic *efx) +{ + return 0; +} +static inline void efx_fini_debugfs_channels(struct efx_nic *efx) {} +static inline int efx_init_debugfs(void) +{ + return 0; +} +static inline void efx_fini_debugfs(void) {} + +static inline +int efx_extend_debugfs_port(struct efx_nic *efx, + void *context, u64 ignore, + const struct efx_debugfs_parameter *params) +{ + return 0; +} + +static inline +void efx_trim_debugfs_port(struct efx_nic *efx, + const struct efx_debugfs_parameter *params) +{ +} + +#ifdef CONFIG_SFC_VDPA +static inline int efx_init_debugfs_vdpa(struct ef100_vdpa_nic *vdpa) +{ + return 0; +} + +static inline void efx_fini_debugfs_vdpa(struct ef100_vdpa_nic *vdpa) +{ +} + +static inline +int efx_init_debugfs_vdpa_vring(struct ef100_vdpa_nic *vdpa, + struct ef100_vdpa_vring_info *vdpa_vring, + u16 idx) +{ + return 0; +} + +static inline +void efx_fini_debugfs_vdpa_vring(struct ef100_vdpa_vring_info *vdpa_vring) +{ +} +#endif +#endif /* CONFIG_DEBUG_FS */ + +#endif /* EFX_DEBUGFS_H */ diff --git a/src/drivers/net/ethernet/sfc/driverlink.c b/src/drivers/net/ethernet/sfc/driverlink.c new file mode 100644 index 0000000..eb14ecf --- /dev/null +++ b/src/drivers/net/ethernet/sfc/driverlink.c @@ -0,0 +1,468 @@ 
+/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005 Fen Systems Ltd. + * Copyright 2005-2017 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/jiffies.h> +#include <linux/netdevice.h> +#include <linux/rtnetlink.h> +#include "net_driver.h" + +/* Global lists are protected by rtnl_lock */ + +/* List of all registered drivers */ +static LIST_HEAD(efx_driver_list); + +/* List of all registered devices. Protected by the rtnl_lock */ +static LIST_HEAD(efx_nic_list); + +/** + * struct efx_dl_handle - Internal extension of efx_dev. + * @efx_dev: driverlink device handle exported to consumers + * @nic_node: per-device list node + * @driver_node: per-driver list node + * @publish_count: Number of external stacks that have published this handle + * @block_kernel_count: Number of times client has requested each kernel block, + * indexed by enum efx_dl_filter_block_kernel_type + */ +struct efx_dl_handle { + struct efx_dl_device efx_dev; + struct list_head nic_node; + struct list_head driver_node; + unsigned int publish_count; + unsigned int block_kernel_count[EFX_DL_FILTER_BLOCK_KERNEL_MAX]; +}; + +static struct efx_dl_handle *efx_dl_handle(struct efx_dl_device *efx_dev) +{ + return container_of(efx_dev, struct efx_dl_handle, efx_dev); +} + +/* Warn if a driverlink call takes longer than 1 second */ +#define EFX_DL_DURATION_WARN (1 * HZ) +/* Onload probe verifies a SW license, which can take >1s. See SFC bug 62649 */ +#define EFX_DL_DURATION_PROBE_WARN (3 * HZ) + +#define _EFX_DL_CHECK_DURATION(duration, limit, label) \ + do { \ + if ((duration) > (limit) && \ + !efx_dev->nic->ops->hw_unavailable(efx_dev)) \ + netif_warn(efx_dev->nic, drv, efx_dev->nic->net_dev, \ + "%s: driverlink " label " took %ums\n", \ + efx_dev->driver->name, \ + jiffies_to_msecs(duration)); \ + } while (0) +#define EFX_DL_CHECK_DURATION(duration, label) \ + _EFX_DL_CHECK_DURATION(duration, EFX_DL_DURATION_WARN, label) +#define EFX_DL_CHECK_DURATION_PROBE(duration, label) \ + _EFX_DL_CHECK_DURATION(duration, EFX_DL_DURATION_PROBE_WARN, label) + +/* Remove an Efx device, and call the driver's remove() callback if + * present. The caller must hold rtnl_lock. */ +static void efx_dl_del_device(struct efx_dl_device *efx_dev) +{ + struct efx_dl_handle *efx_handle = efx_dl_handle(efx_dev); + struct efx_dl_driver *driver = efx_dev->driver; + struct efx_dl_nic *nic = efx_dev->nic; + unsigned int type; + u64 before, after, duration; + + netif_info(nic, drv, nic->net_dev, + "%s driverlink client unregistering\n", + efx_dev->driver->name); + + before = get_jiffies_64(); + + if (driver->remove) + driver->remove(efx_dev); + while (efx_handle->publish_count) + efx_dl_unpublish(efx_dev); + + after = get_jiffies_64(); + duration = after - before; + EFX_DL_CHECK_DURATION(duration, "remove()"); + + list_del(&efx_handle->driver_node); + + /* Disable and then re-enable NAPI when removing the efx interface from + * the list to prevent a race with read access from NAPI context; + * napi_disable() ensures that NAPI is no longer running when it + * returns. Also internally lock NAPI while disabled to prevent + * busy-polling.
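+ * + * The pause -> list_del -> resume sequence below therefore guarantees + * that a NAPI-context reader never observes a half-removed list node.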
+ */ + nic->ops->pause(efx_dev); + list_del(&efx_handle->nic_node); + nic->ops->resume(efx_dev); + + /* Remove this client's kernel blocks */ + for (type = 0; type < EFX_DL_FILTER_BLOCK_KERNEL_MAX; type++) + while (efx_handle->block_kernel_count[type]) + efx_dl_filter_unblock_kernel(efx_dev, type); + + kfree(efx_handle); +} + +/* Attempt to probe the given device with the driver, creating a + * new &struct efx_dl_device. If the probe routine returns an error, + * then the &struct efx_dl_device is destroyed */ +static void efx_dl_try_add_device(struct efx_dl_nic *nic, + struct efx_dl_driver *driver) +{ + struct efx_dl_handle *efx_handle; + struct efx_dl_handle *ex_efx_handle; + struct efx_dl_device *efx_dev; + struct net_device *net_dev = nic->net_dev; + int rc = 0; + bool added = false; + u64 before, after, duration; + + /* First check if device is supported by driver */ + if ((nic->pci_dev->device & 0x0f00) >= 0x0b00 && + !(driver->flags & EFX_DL_DRIVER_CHECKS_MEDFORD2_VI_STRIDE)) { + netif_info(nic, drv, net_dev, + "%s driverlink client skipped: does not support X2 adapters\n", + driver->name); + return; + } + + efx_handle = kzalloc(sizeof(*efx_handle), GFP_KERNEL); + if (!efx_handle) { + rc = -ENOMEM; + goto fail; + } + efx_dev = &efx_handle->efx_dev; + efx_dev->driver = driver; + efx_dev->nic = nic; + efx_dev->pci_dev = nic->pci_dev; + INIT_LIST_HEAD(&efx_handle->nic_node); + INIT_LIST_HEAD(&efx_handle->driver_node); + + before = get_jiffies_64(); + + if (!(driver->flags & EFX_DL_DRIVER_NO_PUBLISH)) + rc = efx_dl_publish(efx_dev); + if (rc) + goto fail; + + rc = driver->probe(efx_dev, net_dev, nic->dl_info, ""); + + after = get_jiffies_64(); + duration = after - before; +#if defined(EFX_WORKAROUND_62649) + EFX_DL_CHECK_DURATION_PROBE(duration, "probe()"); +#else + EFX_DL_CHECK_DURATION(duration, "probe()"); +#endif + + if (rc) + goto fail_unpublish; + + /* Rather than just add to the end of the list, + * find the point that is at the end of the desired priority level + * and insert there. This will ensure that remove() callbacks are + * called in the reverse of the order of insertion. + */ + + list_for_each_entry(ex_efx_handle, &nic->device_list, nic_node) { + if (ex_efx_handle->efx_dev.driver->priority > + driver->priority) { + list_add_tail(&efx_handle->nic_node, + &ex_efx_handle->nic_node); + added = true; + break; + } + } + if (!added) + list_add_tail(&efx_handle->nic_node, &nic->device_list); + + list_add_tail(&efx_handle->driver_node, &driver->device_list); + + netif_info(nic, drv, net_dev, + "%s driverlink client registered\n", driver->name); + return; + +fail_unpublish: + efx_dl_unpublish(efx_dev); +fail: + netif_info(nic, drv, net_dev, + "%s driverlink client skipped\n", driver->name); + + kfree(efx_handle); +} + +/* Unregister a driver from the driverlink layer, calling the + * driver's remove() callback for every attached device */ +void efx_dl_unregister_driver(struct efx_dl_driver *driver) +{ + struct efx_dl_handle *efx_handle, *efx_handle_n; + + printk(KERN_INFO "Efx driverlink unregistering %s driver\n", + driver->name); + + rtnl_lock(); + + list_for_each_entry_safe(efx_handle, efx_handle_n, + &driver->device_list, driver_node) + efx_dl_del_device(&efx_handle->efx_dev); + + list_del(&driver->driver_node); + + rtnl_unlock(); +} +EXPORT_SYMBOL(efx_dl_unregister_driver); + +/* Register a new driver with the driverlink layer. The driver's + * probe routine will be called for every attached nic. 
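+ * + * A client typically registers from its own module init (the my_dl_driver + * structure here is a hypothetical example): + * + *	static struct efx_dl_driver my_dl_driver = { + *		.name = "my_client", + *		.priority = EFX_DL_EV_MED, + *		.probe = my_probe, + *		.remove = my_remove, + *	}; + * + *	rc = efx_dl_register_driver(&my_dl_driver);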
*/ +int efx_dl_register_driver(struct efx_dl_driver *driver) +{ + struct efx_dl_nic *nic; + + if (driver->flags & EFX_DL_DRIVER_REQUIRES_MINOR_VER && + driver->minor_ver > EFX_DRIVERLINK_API_VERSION_MINOR) { + pr_err("Efx driverlink: %s requires API %d.%d, %s has %d.%d\n", + driver->name, EFX_DRIVERLINK_API_VERSION, + driver->minor_ver, KBUILD_MODNAME, + EFX_DRIVERLINK_API_VERSION, + EFX_DRIVERLINK_API_VERSION_MINOR); + return -EPERM; + } + driver->flags |= EFX_DL_DRIVER_SUPPORTS_MINOR_VER; + + printk(KERN_INFO "Efx driverlink registering %s driver\n", + driver->name); + + INIT_LIST_HEAD(&driver->driver_node); + INIT_LIST_HEAD(&driver->device_list); + + rtnl_lock(); + + list_add_tail(&driver->driver_node, &efx_driver_list); + + list_for_each_entry(nic, &efx_nic_list, nic_node) + efx_dl_try_add_device(nic, driver); + + rtnl_unlock(); + return 0; +} +EXPORT_SYMBOL(efx_dl_register_driver); + +void efx_dl_unregister_nic(struct efx_dl_nic *nic) +{ + struct efx_dl_handle *efx_handle, *efx_handle_n; + + ASSERT_RTNL(); + + list_for_each_entry_safe_reverse(efx_handle, efx_handle_n, + &nic->device_list, + nic_node) + efx_dl_del_device(&efx_handle->efx_dev); + + list_del_init(&nic->nic_node); +} +EXPORT_SYMBOL(efx_dl_unregister_nic); + +void efx_dl_register_nic(struct efx_dl_nic *nic) +{ + struct efx_dl_driver *driver; + + ASSERT_RTNL(); + + netif_info(nic, drv, nic->net_dev, "driverlink registering nic\n"); + + list_add_tail(&nic->nic_node, &efx_nic_list); + + list_for_each_entry(driver, &efx_driver_list, driver_node) + efx_dl_try_add_device(nic, driver); +} +EXPORT_SYMBOL(efx_dl_register_nic); + +bool efx_dl_netdev_is_ours(const struct net_device *net_dev) +{ + struct efx_dl_nic *nic; + + list_for_each_entry(nic, &efx_nic_list, nic_node) + if (nic->net_dev == net_dev) + return true; + + return false; +} +EXPORT_SYMBOL(efx_dl_netdev_is_ours); + +struct efx_dl_device *efx_dl_dev_from_netdev(const struct net_device *net_dev, + struct efx_dl_driver *driver) +{ + struct efx_dl_handle *handle; + + list_for_each_entry(handle, &driver->device_list, driver_node) + if (handle->efx_dev.nic->net_dev == net_dev) + return &handle->efx_dev; + + return NULL; +} +EXPORT_SYMBOL(efx_dl_dev_from_netdev); + +int efx_dl_publish(struct efx_dl_device *efx_dev) +{ + struct efx_dl_handle *handle = efx_dl_handle(efx_dev); + int rc = handle->publish_count ? 
0 : + efx_dev->nic->ops->publish(efx_dev); + + if (!rc) + ++handle->publish_count; + return rc; +} + +void efx_dl_unpublish(struct efx_dl_device *efx_dev) +{ + struct efx_dl_handle *handle = efx_dl_handle(efx_dev); + + if (handle->publish_count && --handle->publish_count == 0) + efx_dev->nic->ops->unpublish(efx_dev); +} + +/* Suspend ready for reset, calling the reset_suspend() callback of every + * registered driver */ +void efx_dl_reset_suspend(struct efx_dl_nic *nic) +{ + struct efx_dl_handle *efx_handle; + struct efx_dl_device *efx_dev; + + ASSERT_RTNL(); + + list_for_each_entry_reverse(efx_handle, + &nic->device_list, + nic_node) { + efx_dev = &efx_handle->efx_dev; + if (efx_dev->driver->reset_suspend) { + u64 before, after, duration; + + before = get_jiffies_64(); + + efx_dev->driver->reset_suspend(efx_dev); + + after = get_jiffies_64(); + duration = after - before; + EFX_DL_CHECK_DURATION(duration, "reset_suspend()"); + } + } +} +EXPORT_SYMBOL(efx_dl_reset_suspend); + +/* Resume after a reset, calling the resume() callback of every registered + * driver */ +void efx_dl_reset_resume(struct efx_dl_nic *nic, int ok) +{ + struct efx_dl_handle *efx_handle; + struct efx_dl_device *efx_dev; + + ASSERT_RTNL(); + + list_for_each_entry(efx_handle, &nic->device_list, + nic_node) { + efx_dev = &efx_handle->efx_dev; + if (efx_dev->driver->reset_resume) { + u64 before, after, duration; + + before = get_jiffies_64(); + + efx_dev->driver->reset_resume(efx_dev, ok); + + after = get_jiffies_64(); + duration = after - before; + EFX_DL_CHECK_DURATION(duration, "reset_resume()"); + } + } +} +EXPORT_SYMBOL(efx_dl_reset_resume); + +int efx_dl_handle_event(struct efx_dl_nic *nic, void *event, int budget) +{ + struct efx_dl_handle *efx_handle; + struct efx_dl_device *efx_dev; + + list_for_each_entry(efx_handle, &nic->device_list, nic_node) { + efx_dev = &efx_handle->efx_dev; + if (efx_dev->driver->handle_event ) { + u64 before, after, duration; + int rc; + + before = get_jiffies_64(); + + rc = efx_dev->driver->handle_event(efx_dev, + event, budget); + + after = get_jiffies_64(); + duration = after - before; + EFX_DL_CHECK_DURATION(duration, "handle_event()"); + + if (rc >= 0 ) + return rc > budget ? budget : rc; + } + } + + return -EINVAL; +} +EXPORT_SYMBOL(efx_dl_handle_event); + +int efx_dl_filter_block_kernel(struct efx_dl_device *dl_dev, + enum efx_dl_filter_block_kernel_type type) +{ + struct efx_dl_handle *handle = efx_dl_handle(dl_dev); + int rc = 0; + + if (handle->block_kernel_count[type] == 0) + rc = dl_dev->nic->ops->filter_block_kernel(dl_dev, type); + if (!rc) + ++handle->block_kernel_count[type]; + return rc; +} +EXPORT_SYMBOL(efx_dl_filter_block_kernel); + +void efx_dl_filter_unblock_kernel(struct efx_dl_device *dl_dev, + enum efx_dl_filter_block_kernel_type type) +{ + struct efx_dl_handle *handle = efx_dl_handle(dl_dev); + + if (handle->block_kernel_count[type] == 0) { + WARN_ON(1); + return; + } + + if (--handle->block_kernel_count[type] == 0) + dl_dev->nic->ops->filter_unblock_kernel(dl_dev, type); +} +EXPORT_SYMBOL(efx_dl_filter_unblock_kernel); + +#define stringify2(x) #x +#define stringify(x) stringify2(x) +#define DRIVERLINK_API_VERSION stringify(EFX_DRIVERLINK_API_VERSION) "." 
\ + stringify(EFX_DRIVERLINK_API_VERSION_MINOR) + +static int __init efx_dl_init_module(void) +{ + printk(KERN_INFO "Solarflare driverlink driver v" EFX_DRIVER_VERSION + " API v" DRIVERLINK_API_VERSION "\n"); + + return 0; +} + +static void __exit efx_dl_exit_module(void) +{ + printk(KERN_INFO "Solarflare driverlink driver unloading\n"); +} + +module_init(efx_dl_init_module); +module_exit(efx_dl_exit_module); + +MODULE_AUTHOR("Solarflare Communications"); +MODULE_DESCRIPTION("Solarflare driverlink driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(EFX_DRIVER_VERSION); + diff --git a/src/drivers/net/ethernet/sfc/driverlink_api.h b/src/drivers/net/ethernet/sfc/driverlink_api.h new file mode 100644 index 0000000..573cb8e --- /dev/null +++ b/src/drivers/net/ethernet/sfc/driverlink_api.h @@ -0,0 +1,981 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2005-2017 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#ifndef EFX_DRIVERLINK_API_H +#define EFX_DRIVERLINK_API_H + +#include <linux/list.h> +#include <linux/types.h> +#include "filter.h" + +/* Forward declarations */ +struct pci_dev; +struct net_device; +struct efx_dl_device; +struct efx_dl_device_info; + +/* Driverlink API source compatibility version. This is incremented + * whenever a definition is added, removed or changed such that a + * client might need to guard its use with a compile-time check. It + * is not used for binary compatibility checking, as that is done by + * kbuild and the module loader using symbol versions. + */ +#define EFX_DRIVERLINK_API_VERSION 33 +#define EFX_DRIVERLINK_API_VERSION_MINOR_MAX 1 + +/* If the client didn't define their VERSION_MINOR, default to 0 */ +#ifndef EFX_DRIVERLINK_API_VERSION_MINOR +#define EFX_DRIVERLINK_API_VERSION_MINOR 0 +#endif + +/** + * enum efx_dl_ev_prio - Driverlink client's priority level for event handling + * @EFX_DL_EV_HIGH: Client driver wants to handle events first + * @EFX_DL_EV_MED: Client driver is not particular about priority + * @EFX_DL_EV_LOW: Client driver wants to handle events last + */ +enum efx_dl_ev_prio { + EFX_DL_EV_HIGH = 0, + EFX_DL_EV_MED, + EFX_DL_EV_LOW, +}; + +/** + * enum efx_dl_driver_flags - flags for Driverlink client driver behaviour + * @EFX_DL_DRIVER_REQUIRES_MINOR_VER: Set by client drivers to indicate the + * minor_ver entry is present in their struct. Defined from API 22.1. + * @EFX_DL_DRIVER_SUPPORTS_MINOR_VER: Set by the device driver to + * indicate the minor version supplied by the client is supported. + * @EFX_DL_DRIVER_CHECKS_MEDFORD2_VI_STRIDE: Set by client drivers that + * promise to use the VI stride and memory BAR supplied by the net + * driver on Medford2. If this flag is not set, the client driver + * will not be probed for Medford2 (or newer) NICs. Defined from API 22.5. + * @EFX_DL_DRIVER_NO_PUBLISH: Set by client drivers to indicate they are only + * interested in querying interfaces and are not a stack. + * This will cause the driver not to allocate resources for them, but + * they cannot insert filters or create queues.
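+ * + * A query-only client might therefore set, for example: + * + *	.flags = EFX_DL_DRIVER_NO_PUBLISH | + *		 EFX_DL_DRIVER_CHECKS_MEDFORD2_VI_STRIDE,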
+ */ +enum efx_dl_driver_flags { + EFX_DL_DRIVER_REQUIRES_MINOR_VER = 0x2, + EFX_DL_DRIVER_SUPPORTS_MINOR_VER = 0x4, + EFX_DL_DRIVER_CHECKS_MEDFORD2_VI_STRIDE = 0x8, + EFX_DL_DRIVER_NO_PUBLISH = 0x10, +}; + +/** + * struct efx_dl_driver - Driverlink client driver + * @name: Name of the driver + * @priority: Priority of this driver in event handling + * @flags: Flags describing driver behaviour. Defined from API version 8. + * @probe: Called when device added + * The client should use the @dev_info linked list to determine + * if it wishes to attach to this device. (@silicon_rev is a + * dummy parameter.) + * Called in process context, rtnl_lock held. + * @remove: Called when device removed + * The client must ensure it has finished all operations with this + * device before returning from this method. + * Called in process context, rtnl_lock held. + * @reset_suspend: Called before device is reset + * Called immediately before a hardware reset. The client must stop all + * hardware processing before returning from this method. Callbacks will + * be inactive when this method is called. + * Called in process context, rtnl_lock held. + * @reset_resume: Called after device is reset + * Called after a hardware reset. If @ok is true, the client should + * restore state and resume normal operations. If @ok is false, the + * client should abandon use of the hardware resources. remove() will + * still be called. + * Called in process context, rtnl_lock held. + * @handle_event: Called when an event on a single-function port may + * need to be handled by a client. May be %NULL if the client + * driver does not handle events. Returns %true if the event is + * recognised and handled, else %false. If multiple clients + * registered for a device implement this operation, they will be + * called in priority order from high to low, until one returns + * %true. Called in NAPI context. + * @minor_ver: API minor version, checked by the driver if @flags contains + * %EFX_DL_DRIVER_REQUIRES_MINOR_VER. + * + * A driverlink client defines and initializes as many instances of + * efx_dl_driver as required, registering each one with + * efx_dl_register_driver(). + * + * Prior to API version 7, only one driver with non-null @handle_event + * could be registered for each device. The @priority field was not + * defined and the return type of @handle_event was void. + */ +struct efx_dl_driver { +/* public: */ + const char *name; + enum efx_dl_ev_prio priority; + enum efx_dl_driver_flags flags; + + int (*probe)(struct efx_dl_device *efx_dev, + const struct net_device *net_dev, + const struct efx_dl_device_info *dev_info, + const char *silicon_rev); + void (*remove)(struct efx_dl_device *efx_dev); + void (*reset_suspend)(struct efx_dl_device *efx_dev); + void (*reset_resume)(struct efx_dl_device *efx_dev, int ok); + int (*handle_event)(struct efx_dl_device *efx_dev, + void *p_event, int budget); + + unsigned int minor_ver; + +/* private: */ + struct list_head driver_node; + struct list_head device_list; +}; + +/** + * enum efx_dl_device_info_type - Device information identifier. + * + * Used to identify each item in the &struct efx_dl_device_info linked list + * provided to each driverlink client in the probe() @dev_info member. + * Value 4 is reserved; it was used for AOE. + * + * @EFX_DL_HASH_INSERTION: Information type is &struct efx_dl_hash_insertion + * @EFX_DL_MCDI_RESOURCES: Obsolete and unused. + * @EFX_DL_EF10_RESOURCES: Information type is &struct efx_dl_ef10_resources. + * Defined from API version 9.
+ * @EFX_DL_IRQ_RESOURCES: Information type is &struct efx_dl_irq_resources.
+ *	Defined from API version 27.
+ */
+enum efx_dl_device_info_type {
+	EFX_DL_HASH_INSERTION = 1,
+	EFX_DL_MCDI_RESOURCES = 3,
+	EFX_DL_EF10_RESOURCES = 5,
+	EFX_DL_IRQ_RESOURCES = 6,
+};
+
+/**
+ * struct efx_dl_device_info - device information structure
+ *
+ * @next: Link to next structure, if any
+ * @type: Type code for this structure
+ */
+struct efx_dl_device_info {
+	struct efx_dl_device_info *next;
+	enum efx_dl_device_info_type type;
+};
+
+/**
+ * enum efx_dl_hash_type_flags - Hash insertion type flags
+ *
+ * @EFX_DL_HASH_TOEP_TCPIP4: Toeplitz hash of TCP/IPv4 4-tuple
+ * @EFX_DL_HASH_TOEP_IP4: Toeplitz hash of IPv4 addresses
+ * @EFX_DL_HASH_TOEP_TCPIP6: Toeplitz hash of TCP/IPv6 4-tuple
+ * @EFX_DL_HASH_TOEP_IP6: Toeplitz hash of IPv6 addresses
+ */
+enum efx_dl_hash_type_flags {
+	EFX_DL_HASH_TOEP_TCPIP4 = 0x1,
+	EFX_DL_HASH_TOEP_IP4 = 0x2,
+	EFX_DL_HASH_TOEP_TCPIP6 = 0x4,
+	EFX_DL_HASH_TOEP_IP6 = 0x8,
+};
+
+/**
+ * struct efx_dl_hash_insertion - Hash insertion behaviour
+ *
+ * @hdr: Resource linked list header
+ * @data_offset: Offset of packet data relative to start of buffer
+ * @hash_offset: Offset of hash relative to start of buffer
+ * @flags: Flags for hash type(s) enabled
+ */
+struct efx_dl_hash_insertion {
+	struct efx_dl_device_info hdr;
+	unsigned int data_offset;
+	unsigned int hash_offset;
+	enum efx_dl_hash_type_flags flags;
+};
+
+/**
+ * enum efx_dl_ef10_resource_flags - EF10 resource information flags.
+ * @EFX_DL_EF10_USE_MSI: Port is initialised to use MSI/MSI-X interrupts.
+ *	EF10 supports traditional legacy interrupts and MSI/MSI-X
+ *	interrupts. The choice is made at run time by the sfc driver, and
+ *	notified to the clients by this enumeration.
+ *
+ * Flags that describe hardware variations for the current EF10 or
+ * Siena device.
+ */
+enum efx_dl_ef10_resource_flags {
+	EFX_DL_EF10_USE_MSI = 0x2,
+};
+
+/**
+ * struct efx_dl_ef10_resources - EF10 resource information
+ *
+ * @hdr: Resource linked list header
+ * @vi_base: Absolute index of first VI in this function. This may change
+ *	after a reset. Clients that cache this value will need to update
+ *	the cached value in their reset_resume() function.
+ * @vi_min: Relative index of first available VI
+ * @vi_lim: Relative index of last available VI + 1
+ * @timer_quantum_ns: Timer quantum (nominal period between timer ticks)
+ *	for wakeup timers, in nanoseconds.
+ * @rss_channel_count: Number of receive channels used for RSS.
+ * @rx_channel_count: Number of receive channels available for use.
+ * @flags: Resource information flags.
+ * @vi_shift: Shift value for absolute VI number computation.
+ * @vi_stride: size in bytes of a single VI.
+ * @mem_bar: PCIe memory BAR index.
+ */
+struct efx_dl_ef10_resources {
+	struct efx_dl_device_info hdr;
+	unsigned int vi_base;
+	unsigned int vi_min;
+	unsigned int vi_lim;
+	unsigned int timer_quantum_ns;
+	unsigned int rss_channel_count;
+	enum efx_dl_ef10_resource_flags flags;
+	unsigned int rx_channel_count;
+	unsigned int vi_shift;
+	unsigned int vi_stride;
+	unsigned int mem_bar;
+};
+
+/**
+ * struct efx_dl_irq_resources - interrupt resource information
+ *
+ * @hdr: Resource linked list header
+ * @flags: currently none
+ * @n_ranges: Number of entries in irq_ranges. Must be > 0.
+ * @int_prime: Address of the INT_PRIME register. + * @channel_base: Base channel number. + * @irq_ranges: Array of interrupts, specified as base vector + range. + * @irq_ranges.vector: Interrupt base vector. + * @irq_ranges.range: Interrupt range starting at @irq_ranges.vector. + */ +struct efx_dl_irq_resources { + struct efx_dl_device_info hdr; + u16 flags; + u16 n_ranges; + void __iomem *int_prime; + int channel_base; + struct efx_dl_irq_range { + int vector; + int range; + } irq_ranges[1]; +}; + +/** + * enum efx_dl_filter_block_kernel_type - filter types + * @EFX_DL_FILTER_BLOCK_KERNEL_UCAST: Unicast + * @EFX_DL_FILTER_BLOCK_KERNEL_MCAST: Multicast + */ +enum efx_dl_filter_block_kernel_type { + EFX_DL_FILTER_BLOCK_KERNEL_UCAST = 0, + EFX_DL_FILTER_BLOCK_KERNEL_MCAST, + /** @EFX_DL_FILTER_BLOCK_KERNEL_MAX: Limit of enum values */ + EFX_DL_FILTER_BLOCK_KERNEL_MAX, +}; + +/** + * struct efx_dl_ops - Operations for driverlink clients to use + * on a driverlink nic. + * @hw_unavailable: Return %true if hardware is uninitialised or disabled. + * @pause: Pause NAPI processing in driver + * @resume: Resume NAPI processing in driver + * @schedule_reset: Schedule a driver reset + * @rss_flags_default: Get default RSS flags + * @rss_context_new: Allocate an RSS context + * @rss_context_set: Push RSS context settings to NIC + * @rss_context_free: Free an RSS context + * @filter_insert: Insert an RX filter + * @filter_remove: Remove an RX filter + * @filter_redirect: Redirect an RX filter + * @vport_new: Allocate a vport + * @vport_free: free a vport + * @init_txq: Initialise a TX queue + * @init_rxq: Initialise an RX queue + * @set_multicast_loopback_suppression: Configure if multicast loopback + * traffic should be suppressed. + * @filter_block_kernel: Block kernel handling of traffic type + * @filter_unblock_kernel: Unblock kernel handling of traffic type + * @mcdi_rpc: Issue an MCDI command to firmware + * @publish: Allocate resources and bring interface up + * @unpublish: Release resources + * @dma_xlate: Translate DMA buffer addresses + * @mcdi_rpc_client: Issue an @mcdi_rpc for a given client ID. 
+ * @client_alloc: Allocate a dynamic client ID
+ * @client_free: Free a dynamic client ID
+ * @vi_set_user: Set VI user to client ID
+ */
+struct efx_dl_ops {
+	bool (*hw_unavailable)(struct efx_dl_device *efx_dev);
+	void (*pause)(struct efx_dl_device *efx_dev);
+	void (*resume)(struct efx_dl_device *efx_dev);
+	void (*schedule_reset)(struct efx_dl_device *efx_dev);
+	u32 (*rss_flags_default)(struct efx_dl_device *efx_dev);
+	int (*rss_context_new)(struct efx_dl_device *efx_dev,
+			       const u32 *indir, const u8 *key,
+			       u32 flags, u8 num_queues,
+			       u32 *rss_context);
+	int (*rss_context_set)(struct efx_dl_device *efx_dev,
+			       const u32 *indir, const u8 *key,
+			       u32 flags, u32 rss_context);
+	int (*rss_context_free)(struct efx_dl_device *efx_dev,
+				u32 rss_context);
+	int (*filter_insert)(struct efx_dl_device *efx_dev,
+			     const struct efx_filter_spec *spec,
+			     bool replace_equal);
+	int (*filter_remove)(struct efx_dl_device *efx_dev, int filter_id);
+	int (*filter_redirect)(struct efx_dl_device *efx_dev,
+			       int filter_id, int rxq_i, u32 *rss_context,
+			       int stack_id);
+	int (*vport_new)(struct efx_dl_device *efx_dev, u16 vlan,
+			 bool vlan_restrict);
+	int (*vport_free)(struct efx_dl_device *efx_dev, u16 port_id);
+	int (*init_txq)(struct efx_dl_device *efx_dev, u32 client_id,
+			dma_addr_t *dma_addrs,
+			int n_dma_addrs, u16 vport_id, u8 stack_id, u32 owner_id,
+			bool timestamp, u8 crc_mode, bool tcp_udp_only,
+			bool tcp_csum_dis, bool ip_csum_dis, bool inner_tcp_csum,
+			bool inner_ip_csum, bool buff_mode, bool pacer_bypass,
+			bool ctpio, bool ctpio_uthresh, bool m2m_d2c, u32 instance,
+			u32 label, u32 target_evq, u32 num_entries);
+	int (*init_rxq)(struct efx_dl_device *efx_dev, u32 client_id,
+			dma_addr_t *dma_addrs,
+			int n_dma_addrs, u16 vport_id, u8 stack_id,
+			u32 owner_id, u8 crc_mode, bool timestamp,
+			bool hdr_split, bool buff_mode, bool rx_prefix,
+			u8 dma_mode, u32 instance, u32 label, u32 target_evq,
+			u32 num_entries, u8 ps_buf_size, bool force_rx_merge,
+			int ef100_rx_buffer_size);
+	int (*set_multicast_loopback_suppression)(struct efx_dl_device *efx_dev,
+						  bool suppress, u16 vport_id,
+						  u8 stack_id);
+	int (*filter_block_kernel)(struct efx_dl_device *dl_dev,
+				   enum efx_dl_filter_block_kernel_type block);
+	void (*filter_unblock_kernel)(struct efx_dl_device *dl_dev,
+				      enum efx_dl_filter_block_kernel_type type);
+	int (*mcdi_rpc)(struct efx_dl_device *dl_dev,
+			unsigned int cmd, size_t inlen, size_t outlen,
+			size_t *outlen_actual,
+			const u8 *inbuf, u8 *outbuf);
+	int (*publish)(struct efx_dl_device *dl_dev);
+	void (*unpublish)(struct efx_dl_device *dl_dev);
+	long (*dma_xlate)(struct efx_dl_device *dl_dev, const dma_addr_t *src,
+			  dma_addr_t *dst, unsigned int n);
+	int (*mcdi_rpc_client)(struct efx_dl_device *dl_dev, u32 client_id,
+			       unsigned int cmd, size_t inlen, size_t outlen,
+			       size_t *outlen_actual,
+			       u32 *inbuf, u32 *outbuf);
+	int (*client_alloc)(struct efx_dl_device *dl_dev, u32 parent, u32 *id);
+	int (*client_free)(struct efx_dl_device *dl_dev, u32 id);
+	int (*vi_set_user)(struct efx_dl_device *dl_dev, u32 vi_instance,
+			   u32 client_id);
+};
+
+/**
+ * struct efx_dl_nic - An Efx driverlink nic.
+ *
+ * @pci_dev: Underlying PCI function
+ * @net_dev: Underlying net device
+ * @ops: Operations struct for this nic
+ * @dl_info: hardware parameters
+ * @msg_enable: netdev log level for netif_* messages.
+ * + * @nic_node: node for global nic list + * @device_list: Head of list of efx_dl_devices + * for this nic + */ +struct efx_dl_nic { + struct pci_dev *pci_dev; + struct net_device *net_dev; + struct efx_dl_ops *ops; + + struct efx_dl_device_info *dl_info; + + int msg_enable; + + /* private */ + struct list_head nic_node; + struct list_head device_list; +}; + +/** + * struct efx_dl_device - An Efx driverlink device. + * There is one of these for each (driver, nic) tuple. + * + * @pci_dev: Underlying PCI function + * @priv: Driver private data + * Driverlink clients can use this to store a pointer to their + * internal per-device data structure. Each (driver, device) + * tuple has a separate &struct efx_dl_device, so clients can use + * this @priv field independently. + * @driver: Efx driverlink driver for this device + * @nic: Efx driverlink nic for this device + */ +struct efx_dl_device { + struct pci_dev *pci_dev; + void *priv; + struct efx_dl_driver *driver; + struct efx_dl_nic *nic; +}; + +/** + * efx_dl_unregister_driver() - Unregister a client driver + * @driver: Driver operations structure + * + * This acquires the rtnl_lock and therefore must be called from + * process context. + */ +void efx_dl_unregister_driver(struct efx_dl_driver *driver); + +/* Include API version number in symbol used for efx_dl_register_driver + * and efx_dl_register_nic + */ +#define efx_dl_stringify_1(x, y) x ## y +#define efx_dl_stringify_2(x, y) efx_dl_stringify_1(x, y) +#define efx_dl_register_driver \ + efx_dl_stringify_2(efx_dl_register_driver_api_ver_, \ + EFX_DRIVERLINK_API_VERSION) +#define efx_dl_register_nic \ + efx_dl_stringify_2(efx_dl_register_nic_api_ver_, \ + EFX_DRIVERLINK_API_VERSION) + +/** + * efx_dl_register_driver() - Register a client driver + * @driver: Driver operations structure + * + * This acquires the rtnl_lock and therefore must be called from + * process context. + * + * Return: a negative error code or 0 on success. + */ +int efx_dl_register_driver(struct efx_dl_driver *driver); + +/** + * efx_dl_netdev_is_ours() - Check whether netdevice is a driverlink nic + * @net_dev: Net device to be checked + * + * Return: %true if the netdevice is a driverlink nic, otherwise %false. + */ +bool efx_dl_netdev_is_ours(const struct net_device *net_dev); + +/** + * efx_dl_dev_from_netdev() - Find Driverlink device structure for net device + * @net_dev: Net device to be checked + * @driver: Driver structure for the device to be found + * + * Caller must hold the rtnl_lock. + * + * Return: &struct efx_dl_device on success, otherwise %NULL. + */ +struct efx_dl_device * +efx_dl_dev_from_netdev(const struct net_device *net_dev, + struct efx_dl_driver *driver); + +/* Schedule a reset without grabbing any locks */ +static inline void efx_dl_schedule_reset(struct efx_dl_device *efx_dev) +{ + efx_dev->nic->ops->schedule_reset(efx_dev); +} + +int efx_dl_publish(struct efx_dl_device *efx_dev); +void efx_dl_unpublish(struct efx_dl_device *efx_dev); + +/** + * efx_dl_rss_flags_default() - return appropriate default RSS flags for NIC + * @efx_dev: NIC on which to act + * + * Return: default RSS flags. 
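+ *
+ * Example (sketch; error handling elided and "num_queues" hypothetical):
+ *	u32 flags = efx_dl_rss_flags_default(efx_dev);
+ *	u32 rss_context;
+ *
+ *	rc = efx_dl_rss_context_new(efx_dev, NULL, NULL, flags,
+ *				    num_queues, &rss_context);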
+ */ +static inline u32 efx_dl_rss_flags_default(struct efx_dl_device *efx_dev) +{ + return efx_dev->nic->ops->rss_flags_default(efx_dev); +} + +/** + * efx_dl_rss_context_new() - allocate and configure a new RSS context + * @efx_dev: NIC on which to act + * @indir: initial indirection table, or %NULL to use default + * @key: initial hashing key, or %NULL to use default + * @flags: initial hashing flags (as defined by MCDI) + * @num_queues: number of queues spanned by this context, in the range 1-64 + * @rss_context: location to store user_id of newly allocated RSS context + * + * Return: a negative error code or 0 on success. + */ +static inline int efx_dl_rss_context_new(struct efx_dl_device *efx_dev, + const u32 *indir, const u8 *key, + u32 flags, u8 num_queues, + u32 *rss_context) +{ + return efx_dev->nic->ops->rss_context_new(efx_dev, indir, key, flags, + num_queues, rss_context); +} + +/** + * efx_dl_rss_context_set() - update the configuration of existing RSS context + * @efx_dev: NIC on which to act + * @indir: new indirection table, or %NULL for no change + * @key: new hashing key, or %NULL for no change + * @flags: new hashing flags (as defined by MCDI) + * @rss_context: user_id of RSS context on which to act. Should be a value + * previously written by efx_dl_rss_context_new(). + * + * Return: a negative error code or 0 on success. + */ +static inline int efx_dl_rss_context_set(struct efx_dl_device *efx_dev, + const u32 *indir, const u8 *key, + u32 flags, u32 rss_context) +{ + return efx_dev->nic->ops->rss_context_set(efx_dev, indir, key, flags, + rss_context); +} + +/** + * efx_dl_rss_context_free() - remove an existing RSS context + * @efx_dev: NIC on which to act + * @rss_context: user_id of RSS context to be removed. Should be a value + * previously written by efx_dl_rss_context_new(). + * + * Return: a negative error code or 0 on success. + */ +static inline int efx_dl_rss_context_free(struct efx_dl_device *efx_dev, + u32 rss_context) +{ + return efx_dev->nic->ops->rss_context_free(efx_dev, rss_context); +} + +/* From API version 23, @spec->rss_context is a user_id allocated by the driver, + * as per comments in filter.h. In older Driverlink versions, it was an MCFW- + * facing ID, but that behaviour caused problems around resets (bug 74758). + */ +static inline int efx_dl_filter_insert(struct efx_dl_device *efx_dev, + const struct efx_filter_spec *spec, + bool replace_equal) +{ + return efx_dev->nic->ops->filter_insert(efx_dev, spec, replace_equal); +} + +static inline int efx_dl_filter_remove(struct efx_dl_device *efx_dev, + int filter_id) +{ + return efx_dev->nic->ops->filter_remove(efx_dev, filter_id); +} + +/** + * efx_dl_filter_redirect() - update the queue for an existing RX filter + * @efx_dev: NIC in which to update the filter + * @filter_id: ID of filter, as returned by @efx_dl_filter_insert + * @rxq_i: Index of RX queue + * @stack_id: ID of stack, as returned by @efx_dl_filter_insert + * + * If filter previously had %EFX_FILTER_FLAG_RX_RSS and an associated RSS + * context, the flag will be cleared and the RSS context deassociated. (This + * behaviour is new in API version 23 and is only supported by EF10; farch will + * return -EINVAL in this case.) + * + * Return: a negative error code or 0 on success. 
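+ *
+ * Example (sketch; a filter previously inserted with efx_dl_filter_insert()
+ * is moved to a hypothetical queue "new_rxq_i"):
+ *	filter_id = efx_dl_filter_insert(efx_dev, &spec, false);
+ *	...
+ *	rc = efx_dl_filter_redirect(efx_dev, filter_id, new_rxq_i, stack_id);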
+ */ +static inline int efx_dl_filter_redirect(struct efx_dl_device *efx_dev, + int filter_id, int rxq_i, int stack_id) +{ + return efx_dev->nic->ops->filter_redirect(efx_dev, filter_id, rxq_i, + NULL, stack_id); +} + +/** + * efx_dl_filter_redirect_rss() - update the queue and RSS context + * for an existing RX filter + * @efx_dev: NIC in which to update the filter + * @filter_id: ID of filter, as returned by @efx_dl_filter_insert + * @rxq_i: Index of RX queue + * @rss_context: user_id of RSS context. Either a value supplied by + * efx_dl_rss_context_new(), or 0 to use default RSS context. + * @stack_id: ID of stack, as returned by @efx_dl_filter_insert + * + * If filter previously did not have %EFX_FILTER_FLAG_RX_RSS, it will be set + * (EF10) or -EINVAL will be returned (farch). + * + * Return: a negative error code or 0 on success. + */ +static inline int efx_dl_filter_redirect_rss(struct efx_dl_device *efx_dev, + int filter_id, int rxq_i, + u32 rss_context, int stack_id) +{ + return efx_dev->nic->ops->filter_redirect(efx_dev, filter_id, rxq_i, + &rss_context, stack_id); +} + +/** + * efx_dl_vport_new() - allocate and configure a new vport + * @efx_dev: NIC on which to act + * @vlan: VID of VLAN to place this vport on, or %EFX_FILTER_VID_UNSPEC for none + * @vlan_restrict: as per corresponding flag in MCDI + * + * Return: user_id of new vport, or negative error. + */ +static inline int efx_dl_vport_new(struct efx_dl_device *efx_dev, + u16 vlan, bool vlan_restrict) +{ + return efx_dev->nic->ops->vport_new(efx_dev, vlan, vlan_restrict); +} + +/** + * efx_dl_vport_free() - remove an existing vport + * @efx_dev: NIC on which to act + * @port_id: user_id of vport to be removed. Should be a value previously + * returned by efx_dl_vport_new(). + * + * Return: a negative error code or 0 on success. + */ +static inline int efx_dl_vport_free(struct efx_dl_device *efx_dev, u16 port_id) +{ + return efx_dev->nic->ops->vport_free(efx_dev, port_id); +} + + +/** + * efx_dl_init_txq() - initialise a TXQ + * @efx_dev: NIC on which to act + * @client_id: as per MCDI + * @dma_addrs: array of DMA addresses of buffer space for TX descriptors + * @n_dma_addrs: count of elements in @dma_addrs + * @vport_id: user_id of vport (returned by efx_dl_vport_new(), or 0) + * @stack_id: stack ID to OR into bits 23-16 of HW vport ID + * @owner_id: Owner ID to use if in buffer mode (zero if physical) + * @timestamp: flag as per MCDI + * @crc_mode: as per MCDI + * @tcp_udp_only: flag as per MCDI + * @tcp_csum_dis: disable TCP/UDP checksum offload + * @ip_csum_dis: disable IP header checksum offload + * @inner_tcp_csum: enable inner TCP/UDP checksum offload + * @inner_ip_csum: enable inner IP header checksum offload + * @buff_mode: flag as per MCDI + * @pacer_bypass: flag as per MCDI + * @ctpio: flag as per MCDI + * @ctpio_uthresh: flag as per MCDI + * @m2m_d2c: flag as per MCDI + * @instance: as per MCDI + * @label: queue label to put in events + * @target_evq: the EVQ to send events to + * @num_entries: size of the TX ring in entries + * + * Available from API version 25.1. + * + * Return: a negative error code or 0 on success. 
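+ *
+ * Example (sketch; a plain checksum-offloading queue on the default vport,
+ * with the remaining identifiers hypothetical and every optional feature
+ * flag left false):
+ *	rc = efx_dl_init_txq(efx_dev, client_id, dma_addrs, n_dma_addrs,
+ *			     0, 0, owner_id, false, 0, false, false, false,
+ *			     false, false, false, false, false, false, false,
+ *			     instance, label, evq, 512);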
+ */
+static inline
+int efx_dl_init_txq(struct efx_dl_device *efx_dev, u32 client_id,
+		    dma_addr_t *dma_addrs,
+		    int n_dma_addrs, u16 vport_id, u8 stack_id, u32 owner_id,
+		    bool timestamp, u8 crc_mode, bool tcp_udp_only,
+		    bool tcp_csum_dis, bool ip_csum_dis, bool inner_tcp_csum,
+		    bool inner_ip_csum, bool buff_mode, bool pacer_bypass,
+		    bool ctpio, bool ctpio_uthresh, bool m2m_d2c, u32 instance,
+		    u32 label, u32 target_evq, u32 num_entries)
+{
+	return efx_dev->nic->ops->init_txq(efx_dev, client_id,
+					   dma_addrs, n_dma_addrs,
+					   vport_id, stack_id, owner_id,
+					   timestamp, crc_mode, tcp_udp_only,
+					   tcp_csum_dis, ip_csum_dis,
+					   inner_tcp_csum, inner_ip_csum,
+					   buff_mode, pacer_bypass, ctpio,
+					   ctpio_uthresh, m2m_d2c, instance, label,
+					   target_evq, num_entries);
+}
+
+/**
+ * efx_dl_init_rxq() - initialise an RXQ
+ * @efx_dev: NIC on which to act
+ * @client_id: as per MCDI
+ * @dma_addrs: array of DMA addresses of buffer space for RX descriptors
+ * @n_dma_addrs: count of elements in @dma_addrs
+ * @vport_id: user_id of vport (returned by efx_dl_vport_new(), or 0)
+ * @stack_id: stack ID to OR into bits 23-16 of HW vport ID
+ * @owner_id: Owner ID to use if in buffer mode (zero if physical)
+ * @crc_mode: as per MCDI
+ * @timestamp: flag as per MCDI
+ * @hdr_split: flag as per MCDI
+ * @buff_mode: flag as per MCDI
+ * @rx_prefix: flag as per MCDI
+ * @dma_mode: enum as per MCDI
+ * @instance: as per MCDI
+ * @label: queue label to put in events
+ * @target_evq: the EVQ to send events to
+ * @num_entries: size of the RX ring in entries
+ * @ps_buf_size: enum as per MCDI (PACKED_STREAM_BUFF_SIZE)
+ * @force_rx_merge: flag as per MCDI
+ * @ef100_rx_buffer_size: RX buffer size (in bytes) for EF100.
+ *
+ * Available from API version 25.1.
+ *
+ * Return: a negative error code or 0 on success.
+ */
+static inline
+int efx_dl_init_rxq(struct efx_dl_device *efx_dev, u32 client_id,
+		    dma_addr_t *dma_addrs,
+		    int n_dma_addrs, u16 vport_id, u8 stack_id, u32 owner_id,
+		    u8 crc_mode, bool timestamp, bool hdr_split, bool buff_mode,
+		    bool rx_prefix, u8 dma_mode, u32 instance, u32 label,
+		    u32 target_evq, u32 num_entries, u8 ps_buf_size,
+		    bool force_rx_merge, int ef100_rx_buffer_size)
+{
+	return efx_dev->nic->ops->init_rxq(efx_dev, client_id,
+					   dma_addrs, n_dma_addrs,
+					   vport_id, stack_id, owner_id,
+					   crc_mode, timestamp, hdr_split, buff_mode,
+					   rx_prefix, dma_mode, instance, label,
+					   target_evq, num_entries, ps_buf_size,
+					   force_rx_merge, ef100_rx_buffer_size);
+}
+
+/**
+ * efx_dl_set_multicast_loopback_suppression() - configure multicast loopback
+ * @efx_dev: NIC on which to act
+ * @suppress: whether to suppress multicast loopback
+ * @vport_id: user_id of vport (returned by efx_dl_vport_new(), or 0)
+ * @stack_id: stack ID to OR into bits 23-16 of HW vport ID
+ *
+ * Available from API version 25.1.
+ *
+ * Return: a negative error code or 0 on success.
+ */
+static inline
+int efx_dl_set_multicast_loopback_suppression(struct efx_dl_device *efx_dev,
+					      bool suppress, u16 vport_id,
+					      u8 stack_id)
+{
+	return efx_dev->nic->ops->set_multicast_loopback_suppression(efx_dev,
+			suppress, vport_id, stack_id);
+}
+
+/**
+ * efx_dl_filter_block_kernel - Block the kernel from receiving packets
+ * @dl_dev: Driverlink client device context
+ * @type: Type (unicast or multicast) of kernel block to insert
+ *
+ * This increments the kernel block count for the client. So long as
+ * any client has a non-zero count, all filters with priority HINT or
+ * AUTO will be removed (or pointed to a drop queue). The kernel
+ * stack and upper devices will not receive packets except through
+ * explicit configuration (e.g. ethtool -U or PTP on Siena). The net
+ * driver's loopback self-test will also fail.
+ *
+ * Return: a negative error code or 0 on success.
+ */
+int efx_dl_filter_block_kernel(struct efx_dl_device *dl_dev,
+			       enum efx_dl_filter_block_kernel_type type);
+
+/**
+ * efx_dl_filter_unblock_kernel - Reverse efx_dl_filter_block_kernel()
+ * @dl_dev: Driverlink client device context
+ * @type: Type (unicast or multicast) of kernel block to remove
+ *
+ * This decrements the kernel block count for the client.
+ */
+void efx_dl_filter_unblock_kernel(struct efx_dl_device *dl_dev,
+				  enum efx_dl_filter_block_kernel_type type);
+
+/**
+ * efx_dl_mcdi_rpc - Issue an MCDI command and wait for completion
+ * @dl_dev: Driverlink client device context
+ * @cmd: Command type number
+ * @inbuf: Command parameters
+ * @inlen: Length of command parameters, in bytes. Must be a multiple
+ *	of 4 and no greater than %MC_SMEM_PDU_LEN.
+ * @outbuf: Response buffer. May be %NULL if @outlen is 0.
+ * @outlen: Length of response buffer, in bytes. If the actual
+ *	response is longer than @outlen & ~3, it will be truncated
+ *	to that length.
+ * @outlen_actual: Pointer through which to return the actual response
+ *	length. May be %NULL if this is not needed.
+ *
+ * This function may sleep and therefore must be called in process
+ * context. Defined from API version 6.
+ *
+ * Return: a negative error code or 0 on success.
+ */
+static inline int efx_dl_mcdi_rpc(struct efx_dl_device *dl_dev,
+				  unsigned int cmd, size_t inlen, size_t outlen,
+				  size_t *outlen_actual,
+				  const u8 *inbuf, u8 *outbuf)
+{
+	return dl_dev->nic->ops->mcdi_rpc(dl_dev, cmd, inlen,
+					  outlen, outlen_actual,
+					  inbuf, outbuf);
+}
+
+/**
+ * efx_dl_dma_xlate - Translate local DMA addresses to QDMA addresses
+ * @dl_dev: Driverlink client device context
+ * @src: Input array of local DMA addresses to translate, e.g. from
+ *	dma_map_single
+ * @dst: Output array of translated DMA addresses, for use in descriptors.
+ *	src and dst may be the same pointer (but no other overlaps are
+ *	permitted).
+ * @n: Number of elements in src and dst arrays.
+ *
+ * The return value is the number of initial array elements which were
+ * successfully translated, i.e. if it is less than n then src[rc] is an
+ * address which the NIC cannot access via DMA. Translation will still
+ * continue after an error is encountered; all untranslatable addresses will
+ * have their dst value set to (dma_addr_t)-1. Defined from API version 30.
+ *
+ * Return: Number of successfully translated addresses.
+ */
+static inline long efx_dl_dma_xlate(struct efx_dl_device *dl_dev,
+				    const dma_addr_t *src,
+				    dma_addr_t *dst, unsigned int n)
+{
+	return dl_dev->nic->ops->dma_xlate(dl_dev, src, dst, n);
+}
+
+/**
+ * efx_dl_mcdi_rpc_client - issue an MCDI command on a non-base client
+ * @dl_dev: Driverlink client device context
+ * @client_id: A dynamic client ID created by efx_dl_client_alloc() on
+ *	which to send this MCDI command
+ * @cmd: Command type number
+ * @inlen: Length of command parameters, in bytes. Must be a multiple
+ *	of 4 and no greater than %MC_SMEM_PDU_LEN.
+ * @outlen: Length of response buffer, in bytes. If the actual
+ *	response is longer than @outlen & ~3, it will be truncated
+ *	to that length.
+ * @outlen_actual: Pointer through which to return the actual response
+ *	length. May be %NULL if this is not needed.
+ * @inbuf: Command parameters + * @outbuf: Response buffer. May be %NULL if @outlen is 0. + * + * This is a superset of efx_dl_mcdi_rpc(), adding @client_id. + * + * The caller must provide space for 12 additional bytes (beyond inlen) in the + * memory at inbuf; inbuf may be modified in-situ. This function may sleep and + * therefore must be called in process context. Defined from API version 30. + * + * Return: a negative error code or 0 on success. + */ +static inline int efx_dl_mcdi_rpc_client(struct efx_dl_device *dl_dev, + u32 client_id, unsigned int cmd, + size_t inlen, size_t outlen, + size_t *outlen_actual, + u32 *inbuf, u32 *outbuf) +{ + return dl_dev->nic->ops->mcdi_rpc_client(dl_dev, client_id, cmd, + inlen, outlen, outlen_actual, + inbuf, outbuf); +} + +/** + * efx_dl_client_alloc - create a new dynamic client + * @dl_dev: Driverlink client device context + * @parent: Client ID of the owner of the new client. MC_CMD_CLIENT_ID_SELF + * to make the base function the owner. + * @id: Out. The allocated client ID. + * + * Use efx_dl_client_free to release the client. Available from API version 30. + * + * Return: a negative error code or 0 on success. + */ +static inline int efx_dl_client_alloc(struct efx_dl_device *dl_dev, + u32 parent, u32 *id) +{ + return dl_dev->nic->ops->client_alloc(dl_dev, parent, id); +} + +/** + * efx_dl_client_free - destroy a dynamic client object + * @dl_dev: Driverlink client device context + * @id: The dynamic client ID to destroy + * + * See efx_dl_client_alloc. Available from API version 30. + * + * Return: a negative error code or 0 on success. + */ +static inline int efx_dl_client_free(struct efx_dl_device *dl_dev, u32 id) +{ + return dl_dev->nic->ops->client_free(dl_dev, id); +} + +/** + * efx_dl_vi_set_user - reassign a VI to a dynamic client + * @dl_dev: Driverlink client device context + * @vi_instance: The (relative) VI number to reassign + * @client_id: Dynamic client ID to be the new user. MC_CMD_CLIENT_ID_SELF to + * assign back to the base function. + * + * See efx_dl_client_alloc. VIs may only be moved to/from the base function + * and a dynamic client; to reassign from one dynamic client to another they + * must go via the base function first. Available from API version 30. + * + * Return: a negative error code or 0 on success. + */ +static inline int efx_dl_vi_set_user(struct efx_dl_device *dl_dev, + u32 vi_instance, u32 client_id) +{ + return dl_dev->nic->ops->vi_set_user(dl_dev, vi_instance, client_id); +} + +/** + * efx_dl_for_each_device_info_matching - iterate an efx_dl_device_info list + * @_dev_info: Pointer to first &struct efx_dl_device_info + * @_type: Type code to look for + * @_info_type: Structure type corresponding to type code + * @_field: Name of &struct efx_dl_device_info field in the type + * @_p: Iterator variable + * + * Example: + * struct efx_dl_ef10_resources *res; + * efx_dl_for_each_device_info_matching(dev_info, EFX_DL_EF10_RESOURCES, + * struct efx_dl_ef10_resources, + * hdr, res) { + * if (res->flags & EFX_DL_EF10_USE_MSI) + * .... 
+ * }
+ */
+#define efx_dl_for_each_device_info_matching(_dev_info, _type,		\
+					     _info_type, _field, _p)	\
+	for ((_p) = container_of((_dev_info), _info_type, _field);	\
+	     (_p) != NULL;						\
+	     (_p) = container_of((_p)->_field.next, _info_type, _field))\
+		if ((_p)->_field.type != _type)				\
+			continue;					\
+		else
+
+/**
+ * efx_dl_search_device_info - search an efx_dl_device_info list
+ * @_dev_info: Pointer to first &struct efx_dl_device_info
+ * @_type: Type code to look for
+ * @_info_type: Structure type corresponding to type code
+ * @_field: Name of &struct efx_dl_device_info member in this type
+ * @_p: Result variable
+ *
+ * Example:
+ *	struct efx_dl_ef10_resources *res;
+ *	efx_dl_search_device_info(dev_info, EFX_DL_EF10_RESOURCES,
+ *				  struct efx_dl_ef10_resources, hdr, res);
+ *	if (res)
+ *		....
+ */
+#define efx_dl_search_device_info(_dev_info, _type, _info_type,	\
+				  _field, _p)				\
+	efx_dl_for_each_device_info_matching((_dev_info), (_type),	\
+					     _info_type, _field, (_p))	\
+		break;
+
+void efx_dl_register_nic(struct efx_dl_nic *device);
+void efx_dl_unregister_nic(struct efx_dl_nic *device);
+
+void efx_dl_reset_suspend(struct efx_dl_nic *device);
+void efx_dl_reset_resume(struct efx_dl_nic *device, int ok);
+
+int efx_dl_handle_event(struct efx_dl_nic *device, void *event, int budget);
+
+#endif /* EFX_DRIVERLINK_API_H */
+
diff --git a/src/drivers/net/ethernet/sfc/dump.c b/src/drivers/net/ethernet/sfc/dump.c
new file mode 100644
index 0000000..25796f1
--- /dev/null
+++ b/src/drivers/net/ethernet/sfc/dump.c
@@ -0,0 +1,488 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2015 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "net_driver.h"
+#include "dump.h"
+#include "mcdi.h"
+#include "mcdi_pcol.h"
+#include "nic.h"
+#include "efx_ioctl.h"
+
+#define DUMPSPEC_FP_DEFAULT NULL
+#define DUMPSPEC_MAX_SIZE (64 * 1024)
+#define DUMPFILE_MAX_SIZE (5 * 1024 * 1024)
+#define DUMPFILE_MAX_PAGES (DUMPFILE_MAX_SIZE / MLI_PAGE_SIZE)
+
+#define DH_NIDENT 16
+#define DH_IDENT_INIT "\x7fSFDUMP\x0\x0\x0\x0\x0\x0\x0\x0\x0"
+#define DH_CLEAR_INIT "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
+
+#define MLI_PAGE_SIZE 4096
+#define PAGE_N_ENTRIES (MLI_PAGE_SIZE / sizeof(uint64_t))
+#define IPAGES(npages_) DIV_ROUND_UP(npages_, PAGE_N_ENTRIES)
+
+/**
+ * struct dump_header - Header for the dump buffer
+ * @ident: Dump header identifier
+ * @type: Dump header type (e.g. file or spec)
+ * @version: Dump header version
+ * @arch: Dump header architecture (e.g. EF10)
+ * @dh_size: Dump header size, in bytes
+ * @sh_offset: Section header offset
+ * @sh_entsize: Section header size, in bytes
+ * @sh_count: Number of section headers
+ * @dumpfile_size: Dump buffer size, in bytes
+ */
+struct dump_header {
+	unsigned char ident[DH_NIDENT];
+	uint32_t type;
+	uint32_t version; /* increase means incompatible changes */
+	uint32_t arch;
+	uint32_t dh_size;
+	uint32_t sh_offset;
+	uint32_t sh_entsize; /* sizeof (struct section_header) */
+	uint32_t sh_count;
+	uint32_t dumpfile_size;
+};
+
+/**
+ * struct dump_location - Location of dumpfile or dumpspec
+ * @location: Dump location (e.g.
default or custom) + * @buffer: Dump buffer address + * @root_dma_handle: Dump buffer MLI root handle physical address + * @mli_depth: Dump buffer MLI depth + * @size: Dump buffer size, in bytes + */ +struct dump_location { + unsigned int location; + void *buffer; + uint64_t root_dma_handle; + int mli_depth; + size_t size; +}; + +/** + * struct efx_dump_data - Context for dump data + * @dumpspec: Dumpspec location + * @dumpfile: Dumpfile location + * @addr: Dumpfile buffer address + * @dma_addr: Dumpfile buffer physical address + * @total_pages: Number of pages needed for dump + * @enabled: Flag for dump enable + */ +struct efx_dump_data { + struct dump_location dumpspec; + struct dump_location dumpfile; + void **addr; + dma_addr_t *dma_addr; + size_t total_pages; + bool enabled; +}; + +static int efx_dump_get_boot_status(struct efx_nic *efx, + unsigned long *boot_offset, + unsigned long *flags) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_BOOT_STATUS_OUT_LEN); + size_t outlen; + int rc; + + rc = efx_mcdi_rpc(efx, MC_CMD_GET_BOOT_STATUS, NULL, 0, + outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + if (outlen < sizeof(outbuf)) + return -EIO; + + if (boot_offset) + *boot_offset = MCDI_DWORD(outbuf, + GET_BOOT_STATUS_OUT_BOOT_OFFSET); + if (flags) + *flags = MCDI_DWORD(outbuf, GET_BOOT_STATUS_OUT_FLAGS); + + return 0; +} + +static int efx_dump_do(struct efx_nic *efx, + const struct dump_location *dumpspec, + const struct dump_location *dumpfile, + size_t *dumpfile_size) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_DUMP_DO_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_DUMP_DO_OUT_LEN); + size_t outlen; + int rc; + + MCDI_SET_DWORD(inbuf, DUMP_DO_IN_DUMPSPEC_SRC, dumpspec->location); + if (dumpspec->location == MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM) { + MCDI_SET_DWORD(inbuf, + DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_TYPE, + MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY_MLI); + MCDI_SET_DWORD(inbuf, + DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_SIZE, + dumpspec->size); + MCDI_SET_DWORD(inbuf, + DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO, + (dumpspec->root_dma_handle >> 0) & 0xffffffff); + MCDI_SET_DWORD(inbuf, + DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI, + (dumpspec->root_dma_handle >> 32) & 0xffffffff); + MCDI_SET_DWORD(inbuf, + DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH, + dumpspec->mli_depth); + } + MCDI_SET_DWORD(inbuf, DUMP_DO_IN_DUMPFILE_DST, dumpfile->location); + if (dumpfile->location == MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM) { + MCDI_SET_DWORD(inbuf, + DUMP_DO_IN_DUMPFILE_DST_CUSTOM_TYPE, + MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY_MLI); + MCDI_SET_DWORD(inbuf, + DUMP_DO_IN_DUMPFILE_DST_CUSTOM_SIZE, + dumpfile->size); + MCDI_SET_DWORD(inbuf, + DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO, + (dumpfile->root_dma_handle >> 0) & 0xffffffff); + MCDI_SET_DWORD(inbuf, + DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI, + (dumpfile->root_dma_handle >> 32) & 0xffffffff); + MCDI_SET_DWORD(inbuf, + DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH, + dumpfile->mli_depth); + } + rc = efx_mcdi_rpc(efx, MC_CMD_DUMP_DO, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + if (outlen < sizeof(outbuf)) + return -EIO; + + *dumpfile_size = MCDI_DWORD(outbuf, DUMP_DO_OUT_DUMPFILE_SIZE); + + return 0; +} + +static int efx_dump_config_unsolicited(struct efx_nic *efx, + const struct dump_location *dumpspec, + const struct dump_location *dumpfile, + bool enabled) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_LEN); + 
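+	/* The custom-location encoding below mirrors efx_dump_do(): a
+	 * host-memory MLI region is described by its type, its total size,
+	 * the DMA address of its root indirection page (as LO/HI dwords)
+	 * and its indirection depth.
+	 */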
+	MCDI_SET_DWORD(inbuf, DUMP_CONFIGURE_UNSOLICITED_IN_ENABLE, enabled);
+	MCDI_SET_DWORD(inbuf, DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC,
+		       dumpspec->location);
+	if (dumpspec->location == MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM) {
+		MCDI_SET_DWORD(inbuf,
+			       DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_TYPE,
+			       MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY_MLI);
+		MCDI_SET_DWORD(inbuf,
+			       DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_SIZE,
+			       dumpspec->size);
+		MCDI_SET_DWORD(inbuf,
+			       DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO,
+			       (dumpspec->root_dma_handle >> 0) & 0xffffffff);
+		MCDI_SET_DWORD(inbuf,
+			       DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI,
+			       (dumpspec->root_dma_handle >> 32) & 0xffffffff);
+		MCDI_SET_DWORD(inbuf,
+			       DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH,
+			       dumpspec->mli_depth);
+	}
+	MCDI_SET_DWORD(inbuf, DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST,
+		       dumpfile->location);
+	if (dumpfile->location == MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM) {
+		MCDI_SET_DWORD(inbuf,
+			       DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_TYPE,
+			       MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY_MLI);
+		MCDI_SET_DWORD(inbuf,
+			       DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_SIZE,
+			       dumpfile->size);
+		MCDI_SET_DWORD(inbuf,
+			       DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO,
+			       (dumpfile->root_dma_handle >> 0) & 0xffffffff);
+		MCDI_SET_DWORD(inbuf,
+			       DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI,
+			       (dumpfile->root_dma_handle >> 32) & 0xffffffff);
+		MCDI_SET_DWORD(inbuf,
+			       DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH,
+			       dumpfile->mli_depth);
+	}
+	return efx_mcdi_rpc(efx, MC_CMD_DUMP_CONFIGURE_UNSOLICITED, inbuf,
+			    sizeof(inbuf), NULL, 0, NULL);
+}
+
+static void efx_dump_free_buffer(struct efx_nic *efx)
+{
+	struct efx_dump_data *dump_data = efx->dump_data;
+	size_t pgn;
+
+	if (!dump_data)
+		return;
+
+	/* Free all MLI pages */
+	if (dump_data->addr && dump_data->dma_addr) {
+		for (pgn = 0; pgn < dump_data->total_pages; pgn++) {
+			if (dump_data->addr[pgn])
+				dma_free_coherent(&efx->pci_dev->dev,
+						  MLI_PAGE_SIZE,
+						  dump_data->addr[pgn],
+						  dump_data->dma_addr[pgn]);
+		}
+	}
+	if (dump_data->addr) {
+		kfree(dump_data->addr);
+		dump_data->addr = NULL;
+	}
+	if (dump_data->dma_addr) {
+		kfree(dump_data->dma_addr);
+		dump_data->dma_addr = NULL;
+	}
+	dump_data->dumpfile.location = 0;
+	dump_data->dumpfile.buffer = NULL;
+	dump_data->dumpfile.root_dma_handle = 0;
+	dump_data->dumpfile.mli_depth = 0;
+	dump_data->dumpfile.size = 0;
+	dump_data->total_pages = 0;
+}
+
+static int efx_dump_alloc_buffer(struct efx_nic *efx)
+{
+	struct efx_dump_data *dump_data = efx->dump_data;
+	size_t level_pages;
+	size_t pgn;
+	int mli_depth;
+
+	/* Check if dump buffer already allocated */
+	if (dump_data->dumpfile.buffer)
+		return 0;
+
+	/* Calculate total number of pages needed and MLI depth */
+	mli_depth = 0;
+	pgn = 0;
+	for (level_pages = DUMPFILE_MAX_PAGES; level_pages > 1;
+	     level_pages = IPAGES(level_pages)) {
+		mli_depth++;
+		pgn += level_pages;
+	}
+
+	dump_data->total_pages = pgn + 1;
+	netif_dbg(efx, drv, efx->net_dev, "total_pages=%zd, mli_depth=%d\n",
+		  dump_data->total_pages, mli_depth);
+
+	/* Allocate all MLI pages */
+	dump_data->addr = kcalloc(dump_data->total_pages,
+				  sizeof(void *), GFP_KERNEL);
+	if (!dump_data->addr)
+		return -ENOMEM;
+	dump_data->dma_addr = kcalloc(dump_data->total_pages,
+
sizeof(dma_addr_t), GFP_KERNEL); + if (!dump_data->dma_addr) { + efx_dump_free_buffer(efx); + return -ENOMEM; + } + for (pgn = 0; pgn < dump_data->total_pages; pgn++) { + dump_data->addr[pgn] = + dma_alloc_coherent(&efx->pci_dev->dev, MLI_PAGE_SIZE, + &dump_data->dma_addr[pgn], + GFP_KERNEL); + if (!dump_data->addr[pgn]) { + efx_dump_free_buffer(efx); + return -ENOMEM; + } + memset(dump_data->addr[pgn], 0, MLI_PAGE_SIZE); + } + + /* Populate multi-level indirection (MLI) pages */ + pgn = 0; + for (level_pages = DUMPFILE_MAX_PAGES; level_pages > 1; + level_pages = IPAGES(level_pages)) { + __le64 *entries; + size_t s; + + for (s = 0; s < level_pages; s++) { + entries = dump_data-> + addr[pgn+level_pages+s/PAGE_N_ENTRIES]; + entries[s%PAGE_N_ENTRIES] = cpu_to_le64(dump_data-> + dma_addr[pgn+s]); + } + pgn += level_pages; + } + dump_data->dumpfile.location = MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM; + dump_data->dumpfile.buffer = dump_data->addr[0]; + dump_data->dumpfile.root_dma_handle = dump_data->dma_addr[pgn]; + dump_data->dumpfile.mli_depth = mli_depth; + dump_data->dumpfile.size = DUMPFILE_MAX_SIZE; + + /* Clear dump identifier to prepare for next dump */ + memcpy(dump_data->dumpfile.buffer, DH_CLEAR_INIT, DH_NIDENT); + + return 0; +} + +int efx_dump_init(struct efx_nic *efx) +{ + struct efx_dump_data *dump_data; + + dump_data = kzalloc(sizeof(*dump_data), GFP_KERNEL); + if (!dump_data) + return -ENOMEM; + efx->dump_data = dump_data; + + return 0; +} + +void efx_dump_fini(struct efx_nic *efx) +{ + struct efx_dump_data *dump_data = efx->dump_data; + + efx_dump_free_buffer(efx); + kfree(dump_data); + efx->dump_data = NULL; +} + +int efx_dump_reset(struct efx_nic *efx) +{ + struct efx_dump_data *dump_data = efx->dump_data; + struct ethtool_dump val; + int rc; + + if (!dump_data) + return 0; + + if (dump_data->enabled) { + dump_data->enabled = false; + val.flag = EFX_DUMP_ENABLE; + rc = efx_dump_set(efx, &val); + if (rc) + return rc; + } + + return 0; +} + +int efx_dump_get_flag(struct efx_nic *efx, struct ethtool_dump *dump) +{ + struct efx_dump_data *dump_data = efx->dump_data; + struct dump_header *dump_header = dump_data->dumpfile.buffer; + size_t dumpfile_size; + + /* Get dump size */ + dumpfile_size = 0; + if (dump_header) { + if (memcmp(dump_header, DH_IDENT_INIT, DH_NIDENT) == 0) + dumpfile_size = dump_header->dumpfile_size; + else if (memcmp(dump_header, DH_CLEAR_INIT, DH_NIDENT) != 0) + netif_warn(efx, drv, efx->net_dev, + "Invalid dump header detected\n"); + } + + dump->version = 0; + dump->flag = dump_data->enabled; + dump->len = dumpfile_size; + + return 0; +} + +int efx_dump_get_data(struct efx_nic *efx, struct ethtool_dump *dump, + void *buffer) +{ + struct efx_dump_data *dump_data = efx->dump_data; + struct dump_header *dump_header = dump_data->dumpfile.buffer; + size_t dumpfile_size; + uint8_t *ptr; + size_t pgn; + + /* Get dump size */ + dumpfile_size = 0; + if (dump_header) { + if (memcmp(dump_header, DH_IDENT_INIT, DH_NIDENT) == 0) + dumpfile_size = dump_header->dumpfile_size; + else if (memcmp(dump_header, DH_CLEAR_INIT, DH_NIDENT) != 0) + netif_warn(efx, drv, efx->net_dev, + "Invalid dump header detected\n"); + } else { + netif_warn(efx, drv, efx->net_dev, "Dumping not enabled\n"); + return -EINVAL; + } + + /* Copy dump data and clear identifier to prepare for next dump */ + ptr = buffer; + for (pgn = 0; + (ptr - (uint8_t *)buffer) < dumpfile_size; + pgn++, ptr += MLI_PAGE_SIZE) + memcpy(ptr, dump_data->addr[pgn], MLI_PAGE_SIZE); + + memcpy(dump_data->dumpfile.buffer, 
DH_CLEAR_INIT, DH_NIDENT);
+
+	return 0;
+}
+
+int efx_dump_set(struct efx_nic *efx, struct ethtool_dump *val)
+{
+	struct efx_dump_data *dump_data = efx->dump_data;
+	unsigned long boot_offset = 0;
+	size_t dumpfile_size;
+	int rc;
+
+	if ((val->flag != EFX_DUMP_DISABLE) &&
+	    (val->flag != EFX_DUMP_ENABLE) &&
+	    (val->flag != EFX_DUMP_FORCE))
+		return -EINVAL;
+
+	if ((val->flag == EFX_DUMP_ENABLE) ||
+	    (val->flag == EFX_DUMP_FORCE)) {
+		rc = efx_dump_alloc_buffer(efx);
+		if (rc)
+			return rc;
+	}
+
+	/* Initialize dumpspec */
+	efx_dump_get_boot_status(efx, &boot_offset, NULL);
+	if (boot_offset == MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_NULL) {
+		netif_err(efx, drv, efx->net_dev,
+			  "MC wasn't booted; the default dumpspec "
+			  "is unlikely to exist.\n");
+		return -EIO;
+	}
+	dump_data->dumpspec.location = MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_DEFAULT;
+
+	/* Send MC dump command */
+	if ((val->flag == EFX_DUMP_DISABLE) ||
+	    (val->flag == EFX_DUMP_ENABLE)) {
+		if (dump_data->enabled != (val->flag == EFX_DUMP_ENABLE)) {
+			rc = efx_dump_config_unsolicited(
+					efx, &dump_data->dumpspec,
+					&dump_data->dumpfile,
+					val->flag == EFX_DUMP_ENABLE);
+			if (rc)
+				return rc;
+
+			dump_data->enabled = (val->flag == EFX_DUMP_ENABLE);
+		}
+
+	} else if (val->flag == EFX_DUMP_FORCE) {
+		rc = efx_dump_do(efx, &dump_data->dumpspec,
+				 &dump_data->dumpfile, &dumpfile_size);
+		if (rc)
+			return rc;
+	}
+
+	if (val->flag == EFX_DUMP_DISABLE)
+		efx_dump_free_buffer(efx);
+
+	return 0;
+}
+
diff --git a/src/drivers/net/ethernet/sfc/dump.h b/src/drivers/net/ethernet/sfc/dump.h
new file mode 100644
index 0000000..c6e2bed
--- /dev/null
+++ b/src/drivers/net/ethernet/sfc/dump.h
@@ -0,0 +1,40 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2015 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_DUMP_H
+#define EFX_DUMP_H
+
+#ifdef CONFIG_SFC_DUMP
+
+#define EFX_DUMP_DISABLE 0x0
+#define EFX_DUMP_ENABLE 0x1
+#define EFX_DUMP_FORCE 0x2
+
+struct efx_dump_data;
+struct ethtool_dump;
+
+int efx_dump_init(struct efx_nic *efx);
+void efx_dump_fini(struct efx_nic *efx);
+int efx_dump_reset(struct efx_nic *efx);
+int efx_dump_get_flag(struct efx_nic *efx, struct ethtool_dump *dump);
+int efx_dump_get_data(struct efx_nic *efx, struct ethtool_dump *dump,
+		      void *buffer);
+int efx_dump_set(struct efx_nic *efx, struct ethtool_dump *val);
+
+#else /* !CONFIG_SFC_DUMP */
+
+static inline int efx_dump_init(struct efx_nic *efx)
+{
+	return 0;
+}
+static inline void efx_dump_fini(struct efx_nic *efx) {}
+
+#endif /* CONFIG_SFC_DUMP */
+
+#endif /* EFX_DUMP_H */
diff --git a/src/drivers/net/ethernet/sfc/ef10.c b/src/drivers/net/ethernet/sfc/ef10.c
new file mode 100644
index 0000000..0c40230
--- /dev/null
+++ b/src/drivers/net/ethernet/sfc/ef10.c
@@ -0,0 +1,6162 @@
+/***************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2012-2017 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "net_driver.h"
+#include "ef10_regs.h"
+#include "io.h"
+#include "mcdi.h"
+#include "mcdi_pcol.h"
+#include "mcdi_port.h"
+#include "mcdi_port_common.h"
+#include "nic.h"
+#include "mcdi_filters.h"
+#include "mcdi_functions.h"
+#include "efx_common.h"
+#include "efx_channels.h"
+#include "rx_common.h"
+#include "tx_common.h"
+#include "workarounds.h"
+#include "selftest.h"
+#include "sriov.h"
+#include "ef10_sriov.h"
+#include <linux/in.h>
+#include <linux/jhash.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_UDP_TUNNEL_NIC_INFO)
+#include <net/udp_tunnel.h>
+#endif
+#ifdef EFX_NOT_UPSTREAM
+#include "efx_ioctl.h"
+#endif
+#include <linux/moduleparam.h>
+#include "debugfs.h"
+
+#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_HW_ENC_FEATURES)
+#undef EFX_USE_OVERLAY_TX_CSUM
+#endif
+
+static unsigned int tx_push_max_fill = 0xffffffff;
+module_param(tx_push_max_fill, uint, 0444);
+MODULE_PARM_DESC(tx_push_max_fill,
+		 "[SFC9100-family] Only use Tx push when the queue is below "
+		 "this fill level; 0=>never push, 1=>push when empty; "
+		 "default is to always push");
+
+static bool tx_coalesce_doorbell = false;
+module_param(tx_coalesce_doorbell, bool, 0444);
+MODULE_PARM_DESC(tx_coalesce_doorbell,
+		 "[SFC9100-family] Coalesce notification to NIC of pending TX "
+		 "data; when set, this option sets tx_push_max_fill=0; "
+		 "default=N");
+
+static bool multicast_chaining = true;
+module_param(multicast_chaining, bool, 0444);
+MODULE_PARM_DESC(multicast_chaining,
+		 "[SFC9100-family] Enable multicast filter chaining in "
+		 "firmware; default=Y");
+
+#ifdef EFX_NOT_UPSTREAM
+static bool monitor_hw_available = false;
+module_param(monitor_hw_available, bool, 0644);
+MODULE_PARM_DESC(monitor_hw_available,
+		 "[SFC9100-family] Check hardware availability during periodic "
+		 "monitor; default=N");
+#endif
+
+#ifdef EFX_NOT_UPSTREAM
+static bool tx_non_csum_queue = false;
+module_param(tx_non_csum_queue, bool, 0644);
+MODULE_PARM_DESC(tx_non_csum_queue,
+		 "[SFC9100-family] Allocate dedicated TX queues for traffic "
+		 "not requiring checksum offload; default=N");
+#endif
+
+#ifdef EFX_NOT_UPSTREAM
+/* A fixed key for RSS that has been tested and found to provide good
+ * spreading behaviour. It also has the desirable property of being
+ * symmetric.
+ */
+static const u8 efx_rss_fixed_key[40] = {
+	0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
+	0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
+	0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
+	0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
+	0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
+};
+
+static bool efx_rss_use_fixed_key = true;
+module_param_named(rss_use_fixed_key, efx_rss_use_fixed_key, bool, 0444);
+MODULE_PARM_DESC(rss_use_fixed_key, "Use a fixed RSS hash key, "
+		 "tested for reliable spreading across channels");
+#endif
+
+/* Hardware control for EF10 architecture including 'Huntington'.
*/ + +#define EFX_EF10_DRVGEN_EV 7 +enum { + EFX_EF10_TEST = 1, + EFX_EF10_REFILL, +#ifdef EFX_NOT_UPSTREAM + EFX_EF10_RERING_RX_DOORBELL, +#endif +}; +#define EFX_EF10_DRVGEN_MAGIC(_code, _data) ((_code) | ((_data) << 8)) +#define EFX_EF10_DRVGEN_CODE(_magic) ((_magic) & 0xff) +#define EFX_EF10_DRVGEN_DATA(_magic) ((_magic) >> 8) + +#ifdef EFX_NOT_UPSTREAM +#define EF10_ONLOAD_PF_VIS 240 +#define EF10_ONLOAD_VF_VIS 0 +#endif + +static bool efx_ef10_hw_unavailable(struct efx_nic *efx); +static void _efx_ef10_rx_write(struct efx_rx_queue *rx_queue); + +#ifdef CONFIG_SFC_SRIOV +static void efx_ef10_vf_update_stats_work(struct work_struct *data); +#endif +#ifdef EFX_NOT_UPSTREAM +static struct efx_tx_queue * +efx_ef10_select_tx_queue_non_csum(struct efx_channel *channel, + struct sk_buff *skb); +#endif +static struct efx_tx_queue * +efx_ef10_select_tx_queue(struct efx_channel *channel, struct sk_buff *skb); +#ifdef EFX_USE_OVERLAY_TX_CSUM +#ifdef EFX_NOT_UPSTREAM +static struct efx_tx_queue * +efx_ef10_select_tx_queue_non_csum_overlay(struct efx_channel *channel, + struct sk_buff *skb); +#endif +static struct efx_tx_queue * +efx_ef10_select_tx_queue_overlay(struct efx_channel *channel, + struct sk_buff *skb); +#endif /* EFX_USE_OVERLAY_TX_CSUM */ + +static u8 *efx_ef10_mcdi_buf(struct efx_nic *efx, u8 bufid, + dma_addr_t *dma_addr) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + + if (dma_addr) + *dma_addr = nic_data->mcdi_buf.dma_addr + + bufid * ALIGN(MCDI_BUF_LEN, 256); + return nic_data->mcdi_buf.addr + bufid * ALIGN(MCDI_BUF_LEN, 256); +} + +static int efx_ef10_get_warm_boot_count(struct efx_nic *efx) +{ + efx_dword_t reg; + + efx_readd(efx, ®, ER_DZ_BIU_MC_SFT_STATUS); + + if (EFX_DWORD_FIELD(reg, EFX_DWORD_0) == 0xffffffff) { + netif_err(efx, hw, efx->net_dev, "Hardware unavailable\n"); + efx->state = STATE_DISABLED; + return -ENETDOWN; + } else { + return EFX_DWORD_FIELD(reg, EFX_WORD_1) == 0xb007 ? + EFX_DWORD_FIELD(reg, EFX_WORD_0) : -EIO; + } +} + +/* On all EF10s up to and including SFC9220 (Medford1), all PFs use BAR 0 for + * I/O space and BAR 2(&3) for memory. On SFC9250 (Medford2), there is no I/O + * bar; PFs use BAR 0/1 for memory. + */ +static unsigned int efx_ef10_pf_mem_bar(struct efx_nic *efx) +{ + switch (efx->pci_dev->device) { + case 0x0b03: /* SFC9250 PF */ + return 0; + default: + return 2; + } +} + +#if defined(CONFIG_SFC_SRIOV) +/* All VFs use BAR 0/1 for memory */ +static unsigned int efx_ef10_vf_mem_bar(struct efx_nic *efx) +{ + return 0; +} +#endif + +static unsigned int efx_ef10_initial_mem_map_size(struct efx_nic *efx) +{ + /* For the initial memory mapping, map only one (minimum-size) VI's- + * worth. This is enough to get the VI-independent registers. + */ +#ifdef EFX_NOT_UPSTREAM + /* ...but is not so much as to interfere with pre-existing mappings of + * portions of the BAR held by Onload. 
+ */ +#endif + return EFX_DEFAULT_VI_STRIDE; +} + +static unsigned int efx_ef10_bar_size(struct efx_nic *efx) +{ + int bar; + + bar = efx->type->mem_bar(efx); + return resource_size(&efx->pci_dev->resource[bar]); +} + +static bool efx_ef10_is_vf(struct efx_nic *efx) +{ + return efx->type->is_vf; +} + +static int efx_ef10_init_datapath_caps(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_V4_OUT_LEN); + struct efx_ef10_nic_data *nic_data = efx->nic_data; + size_t outlen; + int rc; + + BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0); + + rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0, + outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + if (outlen < MC_CMD_GET_CAPABILITIES_OUT_LEN) { + pci_err(efx->pci_dev, + "unable to read datapath firmware capabilities\n"); + return -EIO; + } + + nic_data->datapath_caps = + MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1); + + if (outlen >= MC_CMD_GET_CAPABILITIES_V2_OUT_LEN) { + nic_data->datapath_caps2 = MCDI_DWORD(outbuf, + GET_CAPABILITIES_V2_OUT_FLAGS2); + nic_data->piobuf_size = MCDI_WORD(outbuf, + GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF); +#ifdef EFX_NOT_UPSTREAM + /* Does the largest sw-possible PIO packet fit inside a hw + * PIO buffer? + */ + EFX_WARN_ON_PARANOID(nic_data->piobuf_size < ER_DZ_TX_PIOBUF_SIZE); +#endif + } else { + nic_data->datapath_caps2 = 0; + nic_data->piobuf_size = ER_DZ_TX_PIOBUF_SIZE; + } + + if (!efx_ef10_has_cap(nic_data->datapath_caps, RX_PREFIX_LEN_14)) { + pci_err(efx->pci_dev, + "current firmware does not support an RX prefix\n"); + return -ENODEV; + } + + if (outlen >= MC_CMD_GET_CAPABILITIES_V3_OUT_LEN) { + u8 vi_window_mode = MCDI_BYTE(outbuf, + GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE); + + rc = efx_mcdi_window_mode_to_stride(efx, vi_window_mode); + if (rc) + return rc; + } else { + /* keep default VI stride */ + pci_dbg(efx->pci_dev, + "firmware did not report VI window mode, assuming vi_stride = %u\n", + efx->vi_stride); + } + + if (outlen >= MC_CMD_GET_CAPABILITIES_V4_OUT_LEN) { + efx->num_mac_stats = MCDI_WORD(outbuf, + GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS); + pci_dbg(efx->pci_dev, + "firmware reports num_mac_stats = %u\n", + efx->num_mac_stats); + } else { + /* leave num_mac_stats as the default value, MC_CMD_MAC_NSTATS */ + pci_dbg(efx->pci_dev, + "firmware did not report num_mac_stats, assuming %u\n", + efx->num_mac_stats); + } + + return 0; +} + +static void efx_ef10_read_licensed_features(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_LICENSING_V3_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_LICENSING_V3_OUT_LEN); + struct efx_ef10_nic_data *nic_data = efx->nic_data; + size_t outlen; + int rc, try; + + MCDI_SET_DWORD(inbuf, LICENSING_V3_IN_OP, + MC_CMD_LICENSING_V3_IN_OP_REPORT_LICENSE); + for (try=0; try < 15; try++) { + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_LICENSING_V3, inbuf, + sizeof(inbuf), outbuf, sizeof(outbuf), + &outlen); + if (rc != -EAGAIN) + break; + /* It takes a long time to verify the license on an 8xxx + * series. + */ + msleep(200); + } + if (!rc && (outlen >= MC_CMD_LICENSING_V3_OUT_LEN)) { + nic_data->licensed_features = MCDI_QWORD(outbuf, + LICENSING_V3_OUT_LICENSED_FEATURES); + return; + } + if (rc != -MC_CMD_ERR_ENOSYS) + efx_mcdi_display_error(efx, MC_CMD_LICENSING_V3, + MC_CMD_LICENSING_V3_IN_LEN, outbuf, + outlen, rc); + + + /* LICENSING_V3 will fail on older firmwares, so fall back to + * LICENSED_APP_STATE. 
+ */ + BUILD_BUG_ON(MC_CMD_GET_LICENSED_APP_STATE_IN_LEN > + MC_CMD_LICENSING_V3_IN_LEN); + BUILD_BUG_ON(MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN > + MC_CMD_LICENSING_V3_OUT_LEN); + + MCDI_SET_DWORD(inbuf, GET_LICENSED_APP_STATE_IN_APP_ID, + LICENSED_APP_ID_PTP); + rc = efx_mcdi_rpc(efx, MC_CMD_GET_LICENSED_APP_STATE, inbuf, + MC_CMD_GET_LICENSED_APP_STATE_IN_LEN, + outbuf, MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN, + &outlen); + if (rc || (outlen < MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN)) + return; + + if (MCDI_QWORD(outbuf, GET_LICENSED_APP_STATE_OUT_STATE) == + MC_CMD_GET_LICENSED_APP_STATE_OUT_LICENSED) + nic_data->licensed_features |= + (1 << LICENSED_V3_FEATURES_TX_TIMESTAMPS_LBN); +} + +static int efx_ef10_get_sysclk_freq(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLOCK_OUT_LEN); + int rc; + + rc = efx_mcdi_rpc(efx, MC_CMD_GET_CLOCK, NULL, 0, + outbuf, sizeof(outbuf), NULL); + if (rc) + return rc; + rc = MCDI_DWORD(outbuf, GET_CLOCK_OUT_SYS_FREQ); + return rc > 0 ? rc : -ERANGE; +} + +static int efx_ef10_get_timer_workarounds(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + unsigned int implemented; + unsigned int enabled; + int rc; + + nic_data->workaround_35388 = false; + nic_data->workaround_61265 = false; + + rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled); + + if (rc == -ENOSYS) { + /* Firmware without GET_WORKAROUNDS - not a problem. */ + rc = 0; + } else if (rc == 0) { + /* Bug61265 workaround is always enabled if implemented. */ + if (enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG61265) + nic_data->workaround_61265 = true; + + if (enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG35388) { + nic_data->workaround_35388 = true; + } else if (implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG35388) { + /* Workaround is implemented but not enabled. + * Try to enable it. + */ + rc = efx_mcdi_set_workaround(efx, + MC_CMD_WORKAROUND_BUG35388, + true, NULL); + if (rc == 0) + nic_data->workaround_35388 = true; + /* If we failed to set the workaround just carry on. */ + rc = 0; + } + } + + pci_dbg(efx->pci_dev, + "workaround for bug 35388 is %sabled\n", + nic_data->workaround_35388 ? "en" : "dis"); + pci_dbg(efx->pci_dev, + "workaround for bug 61265 is %sabled\n", + nic_data->workaround_61265 ? 
"en" : "dis"); + + return rc; +} + +static void efx_ef10_process_timer_config(struct efx_nic *efx, + const efx_dword_t *data) +{ + unsigned int max_count; + + if (EFX_EF10_WORKAROUND_61265(efx)) { + efx->timer_quantum_ns = MCDI_DWORD(data, + GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_STEP_NS); + efx->timer_max_ns = MCDI_DWORD(data, + GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_MAX_NS); + } else if (EFX_EF10_WORKAROUND_35388(efx)) { + efx->timer_quantum_ns = MCDI_DWORD(data, + GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_NS_PER_COUNT); + max_count = MCDI_DWORD(data, + GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_MAX_COUNT); + efx->timer_max_ns = max_count * efx->timer_quantum_ns; + } else { + efx->timer_quantum_ns = MCDI_DWORD(data, + GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_NS_PER_COUNT); + max_count = MCDI_DWORD(data, + GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_MAX_COUNT); + efx->timer_max_ns = max_count * efx->timer_quantum_ns; + } + + pci_dbg(efx->pci_dev, + "got timer properties from MC: quantum %u ns; max %u ns\n", + efx->timer_quantum_ns, efx->timer_max_ns); +} + +static int efx_ef10_get_timer_config(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN); + int rc; + + rc = efx_ef10_get_timer_workarounds(efx); + if (rc) + return rc; + + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_EVQ_TMR_PROPERTIES, NULL, 0, + outbuf, sizeof(outbuf), NULL); + + if (rc == 0) { + efx_ef10_process_timer_config(efx, outbuf); + } else if (rc == -ENOSYS || rc == -EPERM) { + /* Not available - fall back to Huntington defaults. */ + unsigned int quantum; + + rc = efx_ef10_get_sysclk_freq(efx); + if (rc < 0) + return rc; + + quantum = 1536000 / rc; /* 1536 cycles */ + efx->timer_quantum_ns = quantum; + efx->timer_max_ns = efx->type->timer_period_max * quantum; + rc = 0; + } else { + efx_mcdi_display_error(efx, MC_CMD_GET_EVQ_TMR_PROPERTIES, + MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN, + NULL, 0, rc); + } + + return rc; +} + +static int efx_ef10_get_mac_address_pf(struct efx_nic *efx, u8 *mac_address) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN); + size_t outlen; + int rc; + + BUILD_BUG_ON(MC_CMD_GET_MAC_ADDRESSES_IN_LEN != 0); + + rc = efx_mcdi_rpc(efx, MC_CMD_GET_MAC_ADDRESSES, NULL, 0, + outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + if (outlen < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN) + return -EIO; + + ether_addr_copy(mac_address, + MCDI_PTR(outbuf, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE)); +#ifdef EFX_NOT_UPSTREAM + if (mac_address[0] & 2) + netif_warn(efx, probe, efx->net_dev, + "static config does not include a global MAC address pool; using local address\n"); +#endif + return 0; +} + +#if defined(CONFIG_SFC_SRIOV) +static int efx_ef10_get_mac_address_vf(struct efx_nic *efx, u8 *mac_address) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX); + size_t outlen; + int num_addrs, rc; + + MCDI_SET_DWORD(inbuf, VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID, + EVB_PORT_ID_ASSIGNED); + rc = efx_mcdi_rpc(efx, MC_CMD_VPORT_GET_MAC_ADDRESSES, inbuf, + sizeof(inbuf), outbuf, sizeof(outbuf), &outlen); + + if (rc) + return rc; + if (outlen < MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN) + return -EIO; + + num_addrs = MCDI_DWORD(outbuf, + VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT); + + WARN_ON(num_addrs != 1); + + ether_addr_copy(mac_address, + MCDI_PTR(outbuf, VPORT_GET_MAC_ADDRESSES_OUT_MACADDR)); + + return 0; +} +#endif + +#ifdef CONFIG_SFC_SRIOV +static int efx_vf_parent(struct efx_nic *efx, struct efx_nic **efx_pf) +{ + struct 
pci_dev *pci_dev_pf = pci_physfn(efx->pci_dev); + int rc = 0; + + /* By default succeed without a parent PF */ + *efx_pf = NULL; + + /* Succeed if this is a PF already, or if there is no parent PF. + * Fail if the parent is not an sfc device. + */ + if (!pci_dev_pf || (efx->pci_dev == pci_dev_pf)) + rc = 0; + else if (!pci_dev_pf->dev.driver || + (pci_dev_pf->dev.driver->owner != THIS_MODULE)) + rc = -EBUSY; + else + *efx_pf = pci_get_drvdata(pci_dev_pf); + + return rc; +} +#endif + +int efx_ef10_vswitch_alloc(struct efx_nic *efx, unsigned int port_id, + unsigned int vswitch_type) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_VSWITCH_ALLOC_IN_LEN); + int rc; + + MCDI_SET_DWORD(inbuf, VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID, port_id); + MCDI_SET_DWORD(inbuf, VSWITCH_ALLOC_IN_TYPE, vswitch_type); + MCDI_SET_DWORD(inbuf, VSWITCH_ALLOC_IN_NUM_VLAN_TAGS, 2); + MCDI_POPULATE_DWORD_1(inbuf, VSWITCH_ALLOC_IN_FLAGS, + VSWITCH_ALLOC_IN_FLAG_AUTO_PORT, 0); + + /* Quietly try to allocate 2 VLAN tags */ + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_VSWITCH_ALLOC, inbuf, sizeof(inbuf), + NULL, 0, NULL); + + /* If 2 VLAN tags are too many, revert to trying with 1 VLAN tag */ + if (rc == -EPROTO) { + MCDI_SET_DWORD(inbuf, VSWITCH_ALLOC_IN_NUM_VLAN_TAGS, 1); + rc = efx_mcdi_rpc(efx, MC_CMD_VSWITCH_ALLOC, inbuf, + sizeof(inbuf), NULL, 0, NULL); + } else if (rc) { + efx_mcdi_display_error(efx, MC_CMD_VSWITCH_ALLOC, + MC_CMD_VSWITCH_ALLOC_IN_LEN, NULL, 0, + rc); + } + return rc; +} + +int efx_ef10_vswitch_free(struct efx_nic *efx, unsigned int port_id) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_VSWITCH_FREE_IN_LEN); + + MCDI_SET_DWORD(inbuf, VSWITCH_FREE_IN_UPSTREAM_PORT_ID, port_id); + + return efx_mcdi_rpc(efx, MC_CMD_VSWITCH_FREE, inbuf, sizeof(inbuf), + NULL, 0, NULL); +} + +int efx_ef10_vport_alloc(struct efx_nic *efx, u16 vlan, bool vlan_restrict, + unsigned int *port_id_out) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ALLOC_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_VPORT_ALLOC_OUT_LEN); + size_t outlen; + int rc; + + EFX_WARN_ON_PARANOID(!port_id_out); + + /* we only ever want a single level of vports, so parent is ASSIGNED */ + MCDI_SET_DWORD(inbuf, VPORT_ALLOC_IN_UPSTREAM_PORT_ID, + EVB_PORT_ID_ASSIGNED); + /* we also only ever want NORMAL type, the others are obsolete */ + MCDI_SET_DWORD(inbuf, VPORT_ALLOC_IN_TYPE, + MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL); + MCDI_SET_DWORD(inbuf, VPORT_ALLOC_IN_NUM_VLAN_TAGS, + (vlan != EFX_FILTER_VID_UNSPEC)); + MCDI_POPULATE_DWORD_2(inbuf, VPORT_ALLOC_IN_FLAGS, + VPORT_ALLOC_IN_FLAG_AUTO_PORT, 0, + VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT, vlan_restrict); + if (vlan != EFX_FILTER_VID_UNSPEC) + MCDI_POPULATE_DWORD_1(inbuf, VPORT_ALLOC_IN_VLAN_TAGS, + VPORT_ALLOC_IN_VLAN_TAG_0, vlan); + + rc = efx_mcdi_rpc(efx, MC_CMD_VPORT_ALLOC, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + if (outlen < MC_CMD_VPORT_ALLOC_OUT_LEN) + return -EIO; + + *port_id_out = MCDI_DWORD(outbuf, VPORT_ALLOC_OUT_VPORT_ID); + return 0; +} + +int efx_ef10_vport_free(struct efx_nic *efx, unsigned int port_id) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_FREE_IN_LEN); + + MCDI_SET_DWORD(inbuf, VPORT_FREE_IN_VPORT_ID, port_id); + + return efx_mcdi_rpc(efx, MC_CMD_VPORT_FREE, inbuf, sizeof(inbuf), + NULL, 0, NULL); +} + +int efx_ef10_vadaptor_query(struct efx_nic *efx, unsigned int port_id, + u32 *port_flags, u32 *vadaptor_flags, + unsigned int *vlan_tags) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_QUERY_IN_LEN); + MCDI_DECLARE_BUF(outbuf, 
MC_CMD_VADAPTOR_QUERY_OUT_LEN); + size_t outlen; + int rc; + + if (efx_ef10_has_cap(nic_data->datapath_caps, VADAPTOR_QUERY)) { + MCDI_SET_DWORD(inbuf, VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID, + port_id); + + rc = efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_QUERY, + inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + + if (outlen < sizeof(outbuf)) { + rc = -EIO; + return rc; + } + } + + if (port_flags) + *port_flags = MCDI_DWORD(outbuf, VADAPTOR_QUERY_OUT_PORT_FLAGS); + if (vadaptor_flags) + *vadaptor_flags = + MCDI_DWORD(outbuf, VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS); + if (vlan_tags) + *vlan_tags = + MCDI_DWORD(outbuf, + VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS); + + return 0; +} + +int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_ALLOC_IN_LEN); + + MCDI_SET_DWORD(inbuf, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id); + MCDI_POPULATE_DWORD_1(inbuf, VADAPTOR_ALLOC_IN_FLAGS, + VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED, + 1); + return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_ALLOC, inbuf, sizeof(inbuf), + NULL, 0, NULL); +} + +int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_FREE_IN_LEN); + + MCDI_SET_DWORD(inbuf, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id); + return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_FREE, inbuf, sizeof(inbuf), + NULL, 0, NULL); +} + +int efx_ef10_evb_port_assign(struct efx_nic *efx, unsigned int port_id, + unsigned int vf_fn) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_EVB_PORT_ASSIGN_IN_LEN); + struct efx_ef10_nic_data *nic_data = efx->nic_data; + + MCDI_SET_DWORD(inbuf, EVB_PORT_ASSIGN_IN_PORT_ID, port_id); + MCDI_POPULATE_DWORD_2(inbuf, EVB_PORT_ASSIGN_IN_FUNCTION, + EVB_PORT_ASSIGN_IN_PF, nic_data->pf_index, + EVB_PORT_ASSIGN_IN_VF, vf_fn); + + return efx_mcdi_rpc(efx, MC_CMD_EVB_PORT_ASSIGN, inbuf, sizeof(inbuf), + NULL, 0, NULL); +} + +int efx_ef10_vport_add_mac(struct efx_nic *efx, unsigned int port_id, const u8 *mac) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN); + + MCDI_SET_DWORD(inbuf, VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID, port_id); + ether_addr_copy(MCDI_PTR(inbuf, VPORT_ADD_MAC_ADDRESS_IN_MACADDR), mac); + + return efx_mcdi_rpc(efx, MC_CMD_VPORT_ADD_MAC_ADDRESS, inbuf, + sizeof(inbuf), NULL, 0, NULL); +} + +int efx_ef10_vport_del_mac(struct efx_nic *efx, unsigned int port_id, const u8 *mac) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN); + + MCDI_SET_DWORD(inbuf, VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID, port_id); + ether_addr_copy(MCDI_PTR(inbuf, VPORT_DEL_MAC_ADDRESS_IN_MACADDR), mac); + + return efx_mcdi_rpc(efx, MC_CMD_VPORT_DEL_MAC_ADDRESS, inbuf, + sizeof(inbuf), NULL, 0, NULL); +} + +#ifdef EFX_USE_PIO + +static void efx_ef10_free_piobufs(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + MCDI_DECLARE_BUF(inbuf, MC_CMD_FREE_PIOBUF_IN_LEN); + unsigned int i; + int rc; + + BUILD_BUG_ON(MC_CMD_FREE_PIOBUF_OUT_LEN != 0); + + for (i = 0; i < nic_data->n_piobufs; i++) { + MCDI_SET_DWORD(inbuf, FREE_PIOBUF_IN_PIOBUF_HANDLE, + nic_data->piobuf_handle[i]); + rc = efx_mcdi_rpc(efx, MC_CMD_FREE_PIOBUF, inbuf, sizeof(inbuf), + NULL, 0, NULL); + if (unlikely(rc && rc != -ENETDOWN && + !efx_ef10_hw_unavailable(efx))) + netif_warn(efx, probe, efx->net_dev, + "Failed to free PIO buffers: %d\n", rc); + } + + nic_data->n_piobufs = 0; +} + +static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + 
MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_PIOBUF_OUT_LEN); + unsigned int i; + size_t outlen; + int rc = 0; + + BUILD_BUG_ON(MC_CMD_ALLOC_PIOBUF_IN_LEN != 0); + + for (i = 0; i < n; i++) { + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_ALLOC_PIOBUF, NULL, 0, + outbuf, sizeof(outbuf), &outlen); + if (rc == -EPERM) { + netif_info(efx, probe, efx->net_dev, "no PIO support\n"); + break; + } else if (rc) { + /* Don't display the MC error if we didn't have space + * for a VF. + */ + if (!(efx_ef10_is_vf(efx) && rc == -ENOSPC)) + efx_mcdi_display_error(efx, MC_CMD_ALLOC_PIOBUF, + 0, outbuf, outlen, rc); + break; + } + if (outlen < MC_CMD_ALLOC_PIOBUF_OUT_LEN) { + rc = -EIO; + break; + } + nic_data->piobuf_handle[i] = + MCDI_DWORD(outbuf, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE); + netif_dbg(efx, probe, efx->net_dev, + "allocated PIO buffer %u handle %x\n", i, + nic_data->piobuf_handle[i]); + } + + nic_data->n_piobufs = i; + if (rc) + efx_ef10_free_piobufs(efx); + return rc; +} + +static int efx_ef10_link_piobufs(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + MCDI_DECLARE_BUF(inbuf, MC_CMD_LINK_PIOBUF_IN_LEN); + struct efx_channel *channel; + struct efx_tx_queue *tx_queue; + unsigned int offset, index; + int rc; + + BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_OUT_LEN != 0); + BUILD_BUG_ON(MC_CMD_UNLINK_PIOBUF_OUT_LEN != 0); + + /* Link a buffer to each VI in the write-combining mapping */ + for (index = 0; index < nic_data->n_piobufs; ++index) { + MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_PIOBUF_HANDLE, + nic_data->piobuf_handle[index]); + MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_TXQ_INSTANCE, + nic_data->pio_write_vi_base + index); + rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF, + inbuf, MC_CMD_LINK_PIOBUF_IN_LEN, + NULL, 0, NULL); + if (rc) { + netif_err(efx, drv, efx->net_dev, + "failed to link VI %u to PIO buffer %u (%d)\n", + nic_data->pio_write_vi_base + index, index, + rc); + goto fail; + } + netif_dbg(efx, probe, efx->net_dev, + "linked VI %u to PIO buffer %u\n", + nic_data->pio_write_vi_base + index, index); + } + + /* Link a buffer to each TX queue */ + efx_for_each_channel(channel, efx) { + if (!efx_channel_has_tx_queues(channel)) + continue; + + efx_for_each_channel_tx_queue(tx_queue, channel) { + /* We assign the PIO buffers to queues in + * reverse order to allow for the following + * special case. + */ + offset = ((efx->tx_channel_offset + + efx_tx_channels(efx) - + tx_queue->channel->channel - 1) * + efx_piobuf_size); + index = offset / nic_data->piobuf_size; + offset = offset % nic_data->piobuf_size; + + /* When the host page size is 4K, the first + * host page in the WC mapping may be within + * the same VI page as the last TX queue. We + * can only link one buffer to each VI. 
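+ * That VI already has a buffer linked by the loop above, so skip + * the extra LINK_PIOBUF request for it below.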
+ */ + if (tx_queue->queue == nic_data->pio_write_vi_base) { + BUG_ON(index != 0); + rc = 0; + } else { + MCDI_SET_DWORD(inbuf, + LINK_PIOBUF_IN_PIOBUF_HANDLE, + nic_data->piobuf_handle[index]); + MCDI_SET_DWORD(inbuf, + LINK_PIOBUF_IN_TXQ_INSTANCE, + tx_queue->queue); + rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF, + inbuf, MC_CMD_LINK_PIOBUF_IN_LEN, + NULL, 0, NULL); + } + + if (rc) { + /* This is non-fatal; the TX path just + * won't use PIO for this queue + */ + netif_err(efx, drv, efx->net_dev, + "failed to link VI %u to PIO buffer %u (%d)\n", + tx_queue->queue, index, rc); + tx_queue->piobuf = NULL; + + if (rc == -ENETDOWN) + goto fail; + } else { + tx_queue->piobuf = + nic_data->pio_write_base + + index * efx->vi_stride + offset; + tx_queue->piobuf_offset = offset; + netif_dbg(efx, probe, efx->net_dev, + "linked VI %u to PIO buffer %u offset %x addr %p\n", + tx_queue->queue, index, + tx_queue->piobuf_offset, + tx_queue->piobuf); + } + } + } + + return 0; + +fail: + /* inbuf was defined for MC_CMD_LINK_PIOBUF. We can use the same + * buffer for MC_CMD_UNLINK_PIOBUF because it's shorter. + */ + BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_IN_LEN < MC_CMD_UNLINK_PIOBUF_IN_LEN); + while (index--) { + MCDI_SET_DWORD(inbuf, UNLINK_PIOBUF_IN_TXQ_INSTANCE, + nic_data->pio_write_vi_base + index); + efx_mcdi_rpc(efx, MC_CMD_UNLINK_PIOBUF, + inbuf, MC_CMD_UNLINK_PIOBUF_IN_LEN, + NULL, 0, NULL); + } + return rc; +} + +static void efx_ef10_forget_old_piobufs(struct efx_nic *efx) +{ + struct efx_channel *channel; + struct efx_tx_queue *tx_queue; + + /* All our existing PIO buffers went away */ + efx_for_each_channel(channel, efx) + efx_for_each_channel_tx_queue(tx_queue, channel) + tx_queue->piobuf = NULL; +} + +#else /* !EFX_USE_PIO */ + +static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n) +{ + return n == 0 ? 0 : -ENOBUFS; +} + +static int efx_ef10_link_piobufs(struct efx_nic *efx) +{ + return 0; +} + +static void efx_ef10_free_piobufs(struct efx_nic *efx) +{ +} + +static void efx_ef10_forget_old_piobufs(struct efx_nic *efx) +{ +} + +#endif /* EFX_USE_PIO */ + +static int efx_ef10_alloc_vis(struct efx_nic *efx, + unsigned int min_vis, unsigned int max_vis) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + + return efx_mcdi_alloc_vis(efx, min_vis, max_vis, +#if defined(EFX_NOT_UPSTREAM) && IS_MODULE(CONFIG_SFC_DRIVERLINK) + &efx->ef10_resources.vi_base, + &efx->ef10_resources.vi_shift, +#else + NULL, NULL, +#endif + &nic_data->n_allocated_vis); +} + +static void efx_ef10_free_resources(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + + efx_mcdi_free_vis(efx); + + if (nic_data->wc_membase) { + iounmap(nic_data->wc_membase); + nic_data->wc_membase = NULL; + } + + if (!nic_data->must_restore_piobufs) + efx_ef10_free_piobufs(efx); +} + +static int efx_ef10_dimension_resources(struct efx_nic *efx) +{ +#ifdef EFX_NOT_UPSTREAM +#if IS_MODULE(CONFIG_SFC_DRIVERLINK) + struct efx_dl_ef10_resources *res = &efx->ef10_resources; +#endif +#endif + struct efx_ef10_nic_data *nic_data = efx->nic_data; + unsigned int uc_mem_map_size, wc_mem_map_size; + unsigned int channel_vis, pio_write_vi_base, max_vis; + unsigned int tx_channels, tx_vis, rx_vis, pio_vis; + unsigned int rx_vis_per_queue; + void __iomem *membase; + unsigned int min_vis; + int rc; + + rx_vis_per_queue = efx_tx_vi_spreading(efx) ? 2 : 1; + + /* A VI (virtual interface) consists of three queues: rx, tx and + * event. 
It's possible for any event queue to service any rx or tx + * queue, and multiple queues can be serviced by a single event queue. + */ + min_vis = max3(efx->tx_queues_per_channel, + separate_tx_channels ? 2u : 1u, /* Event queues */ + rx_vis_per_queue); + + rx_vis = efx_rx_channels(efx) * rx_vis_per_queue; + tx_channels = efx_tx_channels(efx); + if (efx->max_tx_channels && efx->max_tx_channels < tx_channels) { + netif_dbg(efx, drv, efx->net_dev, + "Reducing TX channels from %u to %u\n", + tx_channels, efx->max_tx_channels); + tx_channels = efx->max_tx_channels; + } + tx_vis = tx_channels * efx->tx_queues_per_channel; + + tx_vis += efx_xdp_channels(efx) * efx->xdp_tx_per_channel; + + channel_vis = max(rx_vis, tx_vis); + if (efx->max_vis && efx->max_vis < channel_vis) { + netif_dbg(efx, drv, efx->net_dev, + "Reducing channel VIs from %u to %u\n", + channel_vis, efx->max_vis); + channel_vis = efx->max_vis; + } + +#ifdef EFX_USE_PIO + /* Try to allocate PIO buffers if wanted and if the full + * number of PIO buffers would be sufficient to allocate one + * copy-buffer per TX channel. Failure is non-fatal, as there + * are only a small number of PIO buffers shared between all + * functions of the controller. + */ + if (efx_piobuf_size != 0 && + nic_data->piobuf_size / efx_piobuf_size * EF10_TX_PIOBUF_COUNT >= + efx_tx_channels(efx)) { + unsigned int n_piobufs = + DIV_ROUND_UP(efx_tx_channels(efx), + nic_data->piobuf_size / efx_piobuf_size); + + rc = efx_ef10_alloc_piobufs(efx, n_piobufs); + if (rc == -ENOSPC) + netif_dbg(efx, probe, efx->net_dev, + "out of PIO buffers; cannot allocate more\n"); + else if (rc == -EPERM) + netif_dbg(efx, probe, efx->net_dev, + "not permitted to allocate PIO buffers\n"); + else if (rc) + netif_err(efx, probe, efx->net_dev, + "failed to allocate PIO buffers (%d)\n", rc); + else + netif_dbg(efx, probe, efx->net_dev, + "allocated %u PIO buffers\n", n_piobufs); + } +#else + nic_data->n_piobufs = 0; +#endif + + /* PIO buffers should be mapped with write-combining enabled, + * and we want to make single UC and WC mappings rather than + * several of each (in fact that's the only option if host + * page size is >4K). So we may allocate some extra VIs just + * for writing PIO buffers through. + * + * The UC mapping contains (channel_vis - 1) complete VIs and the + * first 4K of the next VI. Then the WC mapping begins with + * the remainder of this last VI. + */ + uc_mem_map_size = PAGE_ALIGN((channel_vis - 1) * efx->vi_stride + + ER_DZ_TX_PIOBUF); + if (nic_data->n_piobufs) { + /* pio_write_vi_base rounds down to give the number of complete + * VIs inside the UC mapping. + */ + pio_write_vi_base = uc_mem_map_size / efx->vi_stride; + wc_mem_map_size = (PAGE_ALIGN((pio_write_vi_base + + nic_data->n_piobufs) * + efx->vi_stride) - + uc_mem_map_size); + pio_vis = pio_write_vi_base + nic_data->n_piobufs; + } else { + pio_write_vi_base = 0; + wc_mem_map_size = 0; + pio_vis = 0; + } + max_vis = max(pio_vis, channel_vis); + +#ifdef EFX_NOT_UPSTREAM + max_vis += efx_target_num_vis >= 0 ? + efx_target_num_vis : + efx_ef10_is_vf(efx) ? 
EF10_ONLOAD_VF_VIS + : EF10_ONLOAD_PF_VIS; + if (efx->max_vis && efx->max_vis < max_vis) { + netif_dbg(efx, drv, efx->net_dev, + "reducing max VIs requested from %u to %u\n", + max_vis, efx->max_vis); + max_vis = efx->max_vis; + } +#endif + + max_vis = max(min_vis, max_vis); + /* In case the last attached driver failed to free VIs, do it now */ + rc = efx_mcdi_free_vis(efx); + if (rc != 0) + return rc; + + rc = efx_ef10_alloc_vis(efx, min_vis, max_vis); + if (rc != 0) { + netif_err(efx, drv, efx->net_dev, + "Could not allocate %u VIs.\n", min_vis); + return rc; + } + + if (nic_data->n_allocated_vis < channel_vis) { + netif_info(efx, drv, efx->net_dev, + "Could not allocate enough VIs to satisfy RSS requirements. Performance may be impaired.\n"); + /* We didn't get the VIs to populate our channels. We could keep + * what we got but then we'd have more interrupts than we need. + * Instead calculate new max_channels and restart. + */ + efx->max_vis = efx->max_channels = nic_data->n_allocated_vis; + efx->max_tx_channels = + nic_data->n_allocated_vis / efx->tx_queues_per_channel; + + efx_mcdi_free_vis(efx); + return -EAGAIN; + } + + /* If we didn't get enough VIs to map all the PIO buffers, free the + * PIO buffers. + */ + if (nic_data->n_piobufs && + nic_data->n_allocated_vis < + pio_write_vi_base + nic_data->n_piobufs) { + netif_dbg(efx, probe, efx->net_dev, + "%u VIs are not sufficient to map %u PIO buffers\n", + nic_data->n_allocated_vis, nic_data->n_piobufs); + efx_ef10_free_piobufs(efx); + pio_write_vi_base = 0; + wc_mem_map_size = 0; + } + + /* Extend the original UC mapping of the memory BAR */ +#if defined(EFX_USE_KCOMPAT) + membase = efx_ioremap(efx->membase_phys, uc_mem_map_size); +#else + membase = ioremap(efx->membase_phys, uc_mem_map_size); +#endif + if (!membase) { + netif_err(efx, probe, efx->net_dev, + "could not extend memory BAR to %x\n", + uc_mem_map_size); + return -ENOMEM; + } + iounmap(efx->membase); + efx->membase = membase; + + /* Set up the WC mapping if needed */ + if (wc_mem_map_size) { + nic_data->wc_membase = ioremap_wc(efx->membase_phys + + uc_mem_map_size, + wc_mem_map_size); + if (!nic_data->wc_membase) { + netif_err(efx, probe, efx->net_dev, + "could not allocate WC mapping of size %x\n", + wc_mem_map_size); + return -ENOMEM; + } + nic_data->pio_write_vi_base = pio_write_vi_base; + nic_data->pio_write_base = + nic_data->wc_membase + + (pio_write_vi_base * efx->vi_stride + ER_DZ_TX_PIOBUF - + uc_mem_map_size); + + rc = efx_ef10_link_piobufs(efx); + if (rc) + efx_ef10_free_piobufs(efx); + if (rc == -ENETDOWN) + return rc; + } + + netif_dbg(efx, probe, efx->net_dev, + "memory BAR at %llx (virtual %p+%x UC, %p+%x WC)\n", + (unsigned long long)efx->membase_phys, efx->membase, + uc_mem_map_size, nic_data->wc_membase, wc_mem_map_size); + +#ifdef EFX_NOT_UPSTREAM +#if IS_MODULE(CONFIG_SFC_DRIVERLINK) + res->vi_min = DIV_ROUND_UP(uc_mem_map_size + wc_mem_map_size, + efx->vi_stride); + res->vi_lim = nic_data->n_allocated_vis; + res->timer_quantum_ns = efx->timer_quantum_ns; + res->rss_channel_count = efx->rss_spread; + res->rx_channel_count = efx_rx_channels(efx); + res->flags |= EFX_DL_EF10_USE_MSI; + res->vi_stride = efx->vi_stride; + res->mem_bar = efx->type->mem_bar(efx); + + efx->dl_nic.dl_info = &res->hdr; +#endif +#endif + return 0; +} + +#ifdef EFX_NOT_UPSTREAM +static struct efx_tx_queue * +efx_ef10_select_tx_queue_non_csum(struct efx_channel *channel, + struct sk_buff *skb) +{ + return &channel->tx_queues[skb->ip_summed ? 
EFX_TXQ_TYPE_CSUM_OFFLOAD : + EFX_TXQ_TYPE_NO_OFFLOAD]; +} +#endif + +static void efx_ef10_set_tx_queue_csum(struct efx_tx_queue *tx_queue, + unsigned int txq_type) +{ + if (tx_queue->csum_offload != txq_type) { + bool outer_csum = txq_type & EFX_TXQ_TYPE_CSUM_OFFLOAD; + bool inner_csum = txq_type & EFX_TXQ_TYPE_INNER_CSUM_OFFLOAD; + bool tso_v2 = ((tx_queue->tso_version == 2) && + !tx_queue->timestamping); + struct efx_tx_buffer *buffer = + efx_tx_queue_get_insert_buffer(tx_queue); + + buffer->flags = EFX_TX_BUF_OPTION; + buffer->len = buffer->unmap_len = 0; + EFX_POPULATE_QWORD_7(buffer->option, + ESF_DZ_TX_DESC_IS_OPT, true, + ESF_DZ_TX_OPTION_TYPE, + ESE_DZ_TX_OPTION_DESC_CRC_CSUM, + ESF_DZ_TX_OPTION_UDP_TCP_CSUM, + outer_csum, + ESF_DZ_TX_OPTION_IP_CSUM, + outer_csum && !tso_v2, + ESF_DZ_TX_OPTION_INNER_UDP_TCP_CSUM, + inner_csum, + ESF_DZ_TX_OPTION_INNER_IP_CSUM, + inner_csum && !tso_v2, + ESF_DZ_TX_TIMESTAMP, + tx_queue->timestamping); + + ++tx_queue->insert_count; + tx_queue->csum_offload = txq_type; + } +} + +static struct efx_tx_queue * +efx_ef10_select_tx_queue(struct efx_channel *channel, + struct sk_buff *skb) +{ + unsigned int txq_type = skb->ip_summed == CHECKSUM_PARTIAL ? + EFX_TXQ_TYPE_CSUM_OFFLOAD : + EFX_TXQ_TYPE_NO_OFFLOAD; + struct efx_tx_queue *tx_queue = &channel->tx_queues[0]; + + efx_ef10_set_tx_queue_csum(tx_queue, txq_type); + + return tx_queue; +} + +#ifdef EFX_USE_OVERLAY_TX_CSUM +static unsigned int efx_ef10_select_tx_queue_type(struct sk_buff *skb) +{ + unsigned int txq_type = EFX_TXQ_TYPE_NO_OFFLOAD; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return txq_type; + + if (skb->encapsulation && + skb_checksum_start_offset(skb) == + skb_inner_transport_offset(skb)) { + /* we only advertise features for IPv4 and IPv6 checksums on + * encapsulated packets, so if the checksum is for the inner + * packet, it must be one of them; no further checking required. + */ + txq_type = EFX_TXQ_TYPE_INNER_CSUM_OFFLOAD; + +#if !defined (EFX_USE_KCOMPAT) || defined (EFX_HAVE_GSO_UDP_TUNNEL_CSUM) + /* Do we also need to offload the outer header checksum? */ + if (skb_shinfo(skb)->gso_segs > 1 && +#if !defined (EFX_USE_KCOMPAT) || defined (EFX_HAVE_GSO_PARTIAL) + !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && +#endif + (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) + txq_type |= EFX_TXQ_TYPE_CSUM_OFFLOAD; +#endif + return txq_type; + } + + /* similarly, we only advertise features for IPv4 and IPv6 checksums, + * so it must be one of them. No need for further checks. + */ + return EFX_TXQ_TYPE_CSUM_OFFLOAD; +} + +#ifdef EFX_NOT_UPSTREAM +static struct efx_tx_queue * +efx_ef10_select_tx_queue_non_csum_overlay(struct efx_channel *channel, + struct sk_buff *skb) +{ + unsigned int txq_type = efx_ef10_select_tx_queue_type(skb); + + EFX_WARN_ON_PARANOID(txq_type > channel->tx_queue_count); + + return &channel->tx_queues[txq_type]; +} +#endif + +static struct efx_tx_queue * +efx_ef10_select_tx_queue_overlay(struct efx_channel *channel, + struct sk_buff *skb) +{ + unsigned int txq_type = efx_ef10_select_tx_queue_type(skb); + /* map no offload and overlay offload to second queue, which + * will be reconfigured with option descriptors if needed. 
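+ * (efx_ef10_set_tx_queue_csum() pushes a fresh option descriptor + * whenever the checksum type of the shared queue changes.)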
+ */ + static const unsigned int txq_map[] = { + [EFX_TXQ_TYPE_CSUM_OFFLOAD] = EFX_TXQ_TYPE_CSUM_OFFLOAD, + [EFX_TXQ_TYPE_NO_OFFLOAD] = EFX_TXQ_TYPE_NO_OFFLOAD, + [EFX_TXQ_TYPE_INNER_CSUM_OFFLOAD] = EFX_TXQ_TYPE_NO_OFFLOAD, + [EFX_TXQ_TYPE_BOTH_CSUM_OFFLOAD] = EFX_TXQ_TYPE_NO_OFFLOAD, + }; + struct efx_tx_queue *tx_queue = &channel->tx_queues[txq_map[txq_type]]; + + efx_ef10_set_tx_queue_csum(tx_queue, txq_type); + + return tx_queue; +} +#endif + +static void efx_ef10_fini_nic(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + + kfree(nic_data->mc_stats); + nic_data->mc_stats = NULL; + + if (!efx_ptp_uses_separate_channel(efx) && + !efx_ptp_use_mac_tx_timestamps(efx)) + efx_ptp_remove(efx); +} + +static int efx_ef10_init_nic(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + u32 old_datapath_caps = nic_data->datapath_caps; +#if !defined(EFX_USE_KCOMPAT) || (defined(EFX_HAVE_HW_ENC_FEATURES) && \ + defined(EFX_HAVE_NDO_FEATURES_CHECK)) +#ifdef EFX_USE_OVERLAY_TX_CSUM + netdev_features_t hw_enc_features; +#endif +#endif + int rc; + + if (nic_data->must_check_datapath_caps) { + rc = efx_ef10_init_datapath_caps(efx); + if (rc) + return rc; + nic_data->must_check_datapath_caps = false; + } + + if (nic_data->datapath_caps != old_datapath_caps) + efx_mcdi_filter_probe_supported_filters(efx); + + if (nic_data->must_realloc_vis) { + /* We cannot let the number of VIs change now */ + rc = efx_ef10_alloc_vis(efx, nic_data->n_allocated_vis, + nic_data->n_allocated_vis); + if (rc) + return rc; + nic_data->must_realloc_vis = false; + } + + nic_data->mc_stats = kmalloc_array(efx->num_mac_stats, sizeof(__le64), + GFP_KERNEL); + if (!nic_data->mc_stats) + return -ENOMEM; + + if (nic_data->must_reprobe_sensors) { + efx_mcdi_mon_remove(efx); + efx_mcdi_mon_probe(efx); + nic_data->must_reprobe_sensors = false; + } + + /* Don't fail init if RSS setup doesn't work, except that EAGAIN needs + * passing up. + */ + rc = efx->type->rx_push_rss_config(efx, false, + efx->rss_context.rx_indir_table, NULL); + if (rc == -EAGAIN) + return rc; + + if (nic_data->must_restore_piobufs && nic_data->n_piobufs) { + rc = efx_ef10_alloc_piobufs(efx, nic_data->n_piobufs); + if (rc == 0) { + rc = efx_ef10_link_piobufs(efx); + if (rc) + efx_ef10_free_piobufs(efx); + } + + /* Log an error on failure, but this is non-fatal. + * Permission errors are less important - we've presumably + * had the PIO buffer licence removed. 
+ */ + if (rc == -EPERM) + netif_dbg(efx, drv, efx->net_dev, + "not permitted to restore PIO buffers\n"); + else if (rc) + netif_err(efx, drv, efx->net_dev, + "failed to restore PIO buffers (%d)\n", rc); + nic_data->must_restore_piobufs = false; + } + +#if !defined(EFX_USE_KCOMPAT) || (defined(EFX_HAVE_HW_ENC_FEATURES) && \ + defined(EFX_HAVE_NDO_FEATURES_CHECK)) +#ifdef EFX_USE_OVERLAY_TX_CSUM + hw_enc_features = 0; + + /* add encapsulated checksum offload features */ + if (efx_ef10_has_cap(nic_data->datapath_caps, VXLAN_NVGRE) && + !efx_ef10_is_vf(efx)) + hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; +#ifdef EFX_CAN_SUPPORT_ENCAP_TSO + /* add encapsulated TSO features */ + if (efx_ef10_has_cap(nic_data->datapath_caps2, TX_TSO_V2_ENCAP)) { + netdev_features_t encap_tso_features; + + encap_tso_features = NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE | + NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM; + + hw_enc_features |= encap_tso_features | NETIF_F_TSO; + efx->net_dev->features |= encap_tso_features; + } +#endif + + efx->net_dev->hw_enc_features = hw_enc_features; +#endif +#endif + + if (!efx_ptp_uses_separate_channel(efx) && + !efx_ptp_use_mac_tx_timestamps(efx)) + efx_ptp_probe(efx, NULL); + + return 0; +} + +static void efx_ef10_reset_mc_allocations(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; +#ifdef CONFIG_SFC_SRIOV + unsigned int i; +#endif + + efx_mcdi_filter_table_reset_mc_allocations(efx); + efx->rss_context.context_id = EFX_MCDI_RSS_CONTEXT_INVALID; + efx_ef10_forget_old_piobufs(efx); + + /* Driver-created vswitches and vports must be re-created */ + nic_data->must_probe_vswitching = true; + efx->vport.vport_id = EVB_PORT_ID_ASSIGNED; +#ifdef CONFIG_SFC_SRIOV + if (nic_data->vf) + for (i = 0; i < nic_data->vf_count; i++) + nic_data->vf[i].vport_id = 0; +#endif + + /* All our allocations have been reset */ + if (!efx_net_allocated(efx->state)) + return; + + nic_data->must_realloc_vis = true; + nic_data->must_restore_piobufs = true; + efx->stats_initialised = false; +} + +static enum reset_type efx_ef10_map_reset_reason(enum reset_type reason) +{ + if (reason == RESET_TYPE_MC_FAILURE) + return RESET_TYPE_DATAPATH; + else if (reason < RESET_TYPE_MAX_METHOD || + reason == RESET_TYPE_MCDI_TIMEOUT) + return reason; + else + return RESET_TYPE_ALL; +} + +static int efx_ef10_map_reset_flags(u32 *flags) +{ + enum { + EF10_RESET_PORT = ((ETH_RESET_MAC | ETH_RESET_PHY) << + ETH_RESET_SHARED_SHIFT), + EF10_RESET_MC = ((ETH_RESET_DMA | ETH_RESET_FILTER | + ETH_RESET_OFFLOAD | ETH_RESET_MAC | + ETH_RESET_PHY | ETH_RESET_MGMT) << + ETH_RESET_SHARED_SHIFT) + }; + + /* We assume for now that our PCI function is permitted to + * reset everything. 
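+ * A reset mapped here also resets the MAC, so the standalone + * ETH_RESET_MAC bit is consumed along with the shared flags.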
+ */ + + if ((*flags & EF10_RESET_MC) == EF10_RESET_MC) { + *flags &= ~EF10_RESET_MC; + *flags &= ~ETH_RESET_MAC; + return RESET_TYPE_WORLD; + } + + if ((*flags & EF10_RESET_PORT) == EF10_RESET_PORT) { + *flags &= ~EF10_RESET_PORT; + *flags &= ~ETH_RESET_MAC; + return RESET_TYPE_ALL; + } + + /* no invisible reset implemented */ + + return -EINVAL; +} + +static int efx_ef10_reset(struct efx_nic *efx, enum reset_type reset_type) +{ +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_UDP_TUNNEL_NIC_INFO) + struct efx_ef10_nic_data *nic_data = efx->nic_data; +#endif + int rc; + +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_UDP_TUNNEL_NIC_INFO) + /* Make sure any UDP tunnel work has finished, else it could cause more + * MC reboots during reset_up + */ + flush_work(&nic_data->udp_tunnel_work); +#endif + + rc = efx_mcdi_reset(efx, reset_type); + + efx->last_reset = jiffies; + + /* Unprivileged functions return -EPERM, but need to return success + * here so that the datapath is brought back up. + */ + if (reset_type == RESET_TYPE_WORLD && rc == -EPERM) + rc = 0; + + /* If it was a port reset, trigger reallocation of MC resources. + * Note that on an MC reset nothing needs to be done now because we'll + * detect the MC reset later and handle it then. + * For an FLR, we never get an MC reset event, but the MC has reset all + * resources assigned to us, so we have to trigger reallocation now. + */ + if ((reset_type == RESET_TYPE_ALL || + reset_type == RESET_TYPE_RECOVER_OR_ALL || + reset_type == RESET_TYPE_MCDI_TIMEOUT) && !rc) + efx_ef10_reset_mc_allocations(efx); + return rc; +} + +static bool efx_ef10_hw_unavailable(struct efx_nic *efx) +{ + if (efx->state == STATE_DISABLED || efx_recovering(efx->state)) + return true; + + /* we just haven't initialised I/O yet. */ + if (!efx->membase) + return false; + + if (_efx_readd(efx, ER_DZ_BIU_MC_SFT_STATUS) == + cpu_to_le32(0xffffffff)) { + netif_err(efx, hw, efx->net_dev, "Hardware unavailable\n"); + efx->state = STATE_DISABLED; + return true; + } + + return false; +} + +#ifdef EFX_NOT_UPSTREAM +static int efx_ef10_rx_defer_ring_rx_doorbell(struct efx_rx_queue *rx_queue) +{ + struct efx_channel *channel = efx_rx_queue_channel(rx_queue); + MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN); + efx_qword_t event; + size_t outlen; + u32 magic; + + magic = EFX_EF10_DRVGEN_MAGIC(EFX_EF10_RERING_RX_DOORBELL, + efx_rx_queue_index(rx_queue)); + EFX_POPULATE_QWORD_2(event, + ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV, + ESF_DZ_EV_DATA, magic); + + MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel); + + /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has + * already swapped the data to little-endian order. 
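+ * A plain memcpy() preserves that byte order.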
+ */ + memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0], + sizeof(efx_qword_t)); + + return efx_mcdi_rpc_quiet(channel->efx, MC_CMD_DRIVER_EVENT, + inbuf, sizeof(inbuf), NULL, 0, &outlen); +} +#endif + +static void efx_ef10_monitor(struct efx_nic *efx) +{ +#if defined(EFX_NOT_UPSTREAM) + if (monitor_hw_available && efx_ef10_hw_unavailable(efx)) { + if (efx->link_state.up) { + efx->link_state.up = false; + efx_link_status_changed(efx); + } + } +#endif +#if defined(CONFIG_EEH) + { +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_EEH_DEV_CHECK_FAILURE) + struct eeh_dev *eehdev = pci_dev_to_eeh_dev(efx->pci_dev); + + eeh_dev_check_failure(eehdev); +#else + struct pci_dev *pcidev = efx->pci_dev; + struct device_node *dn = pci_device_to_OF_node(pcidev); + + eeh_dn_check_failure(dn, pcidev); +#endif + } +#endif + +#ifdef EFX_NOT_UPSTREAM + if (EFX_WORKAROUND_59975(efx)) { + /* re-ring the RX doorbell. Should be harmless */ + struct efx_channel *channel; + int rc = 0; + + efx_for_each_channel(channel, efx) { + struct efx_rx_queue *rxq; + + efx_for_each_channel_rx_queue(rxq, channel) { + if (rxq->removed_count != 0) + continue; + rc = efx_ef10_rx_defer_ring_rx_doorbell(rxq); + if (rc != 0) + break; + } + if (rc != 0) + break; + } + } +#endif +} + +#define EF10_DMA_STAT(ext_name, mcdi_name) \ + [EF10_STAT_ ## ext_name] = \ + { #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name } +#define EF10_DMA_INVIS_STAT(int_name, mcdi_name) \ + [EF10_STAT_ ## int_name] = \ + { NULL, 64, 8 * MC_CMD_MAC_ ## mcdi_name } +#define EF10_OTHER_STAT(ext_name) \ + [EF10_STAT_ ## ext_name] = { #ext_name, 0, 0 } + +static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = { + EF10_DMA_STAT(port_tx_bytes, TX_BYTES), + EF10_DMA_STAT(port_tx_packets, TX_PKTS), + EF10_DMA_STAT(port_tx_pause, TX_PAUSE_PKTS), + EF10_DMA_STAT(port_tx_control, TX_CONTROL_PKTS), + EF10_DMA_STAT(port_tx_unicast, TX_UNICAST_PKTS), + EF10_DMA_STAT(port_tx_multicast, TX_MULTICAST_PKTS), + EF10_DMA_STAT(port_tx_broadcast, TX_BROADCAST_PKTS), + EF10_DMA_STAT(port_tx_lt64, TX_LT64_PKTS), + EF10_DMA_STAT(port_tx_64, TX_64_PKTS), + EF10_DMA_STAT(port_tx_65_to_127, TX_65_TO_127_PKTS), + EF10_DMA_STAT(port_tx_128_to_255, TX_128_TO_255_PKTS), + EF10_DMA_STAT(port_tx_256_to_511, TX_256_TO_511_PKTS), + EF10_DMA_STAT(port_tx_512_to_1023, TX_512_TO_1023_PKTS), + EF10_DMA_STAT(port_tx_1024_to_15xx, TX_1024_TO_15XX_PKTS), + EF10_DMA_STAT(port_tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS), + EF10_DMA_STAT(port_rx_bytes, RX_BYTES), + EF10_DMA_INVIS_STAT(port_rx_bytes_minus_good_bytes, RX_BAD_BYTES), + EF10_OTHER_STAT(port_rx_good_bytes), + EF10_OTHER_STAT(port_rx_bad_bytes), + EF10_DMA_STAT(port_rx_packets, RX_PKTS), + EF10_DMA_STAT(port_rx_good, RX_GOOD_PKTS), + EF10_DMA_STAT(port_rx_bad, RX_BAD_FCS_PKTS), + EF10_DMA_STAT(port_rx_pause, RX_PAUSE_PKTS), + EF10_DMA_STAT(port_rx_control, RX_CONTROL_PKTS), + EF10_DMA_STAT(port_rx_unicast, RX_UNICAST_PKTS), + EF10_DMA_STAT(port_rx_multicast, RX_MULTICAST_PKTS), + EF10_DMA_STAT(port_rx_broadcast, RX_BROADCAST_PKTS), + EF10_DMA_STAT(port_rx_lt64, RX_UNDERSIZE_PKTS), + EF10_DMA_STAT(port_rx_64, RX_64_PKTS), + EF10_DMA_STAT(port_rx_65_to_127, RX_65_TO_127_PKTS), + EF10_DMA_STAT(port_rx_128_to_255, RX_128_TO_255_PKTS), + EF10_DMA_STAT(port_rx_256_to_511, RX_256_TO_511_PKTS), + EF10_DMA_STAT(port_rx_512_to_1023, RX_512_TO_1023_PKTS), + EF10_DMA_STAT(port_rx_1024_to_15xx, RX_1024_TO_15XX_PKTS), + EF10_DMA_STAT(port_rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS), + EF10_DMA_STAT(port_rx_gtjumbo, RX_GTJUMBO_PKTS), + 
EF10_DMA_STAT(port_rx_bad_gtjumbo, RX_JABBER_PKTS), + EF10_DMA_STAT(port_rx_overflow, RX_OVERFLOW_PKTS), + EF10_DMA_STAT(port_rx_align_error, RX_ALIGN_ERROR_PKTS), + EF10_DMA_STAT(port_rx_length_error, RX_LENGTH_ERROR_PKTS), + EF10_DMA_STAT(port_rx_nodesc_drops, RX_NODESC_DROPS), + EFX_GENERIC_SW_STAT(rx_nodesc_trunc), + EFX_GENERIC_SW_STAT(rx_noskb_drops), + EF10_DMA_STAT(port_rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW), + EF10_DMA_STAT(port_rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW), + EF10_DMA_STAT(port_rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL), + EF10_DMA_STAT(port_rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL), + EF10_DMA_STAT(port_rx_pm_trunc_qbb, PM_TRUNC_QBB), + EF10_DMA_STAT(port_rx_pm_discard_qbb, PM_DISCARD_QBB), + EF10_DMA_STAT(port_rx_pm_discard_mapping, PM_DISCARD_MAPPING), + EF10_DMA_STAT(port_rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS), + EF10_DMA_STAT(port_rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS), + EF10_DMA_STAT(port_rx_dp_streaming_packets, RXDP_STREAMING_PKTS), + EF10_DMA_STAT(port_rx_dp_hlb_fetch, RXDP_HLB_FETCH_CONDITIONS), + EF10_DMA_STAT(port_rx_dp_hlb_wait, RXDP_HLB_WAIT_CONDITIONS), + EF10_DMA_STAT(rx_unicast, VADAPTER_RX_UNICAST_PACKETS), + EF10_DMA_STAT(rx_unicast_bytes, VADAPTER_RX_UNICAST_BYTES), + EF10_DMA_STAT(rx_multicast, VADAPTER_RX_MULTICAST_PACKETS), + EF10_DMA_STAT(rx_multicast_bytes, VADAPTER_RX_MULTICAST_BYTES), + EF10_DMA_STAT(rx_broadcast, VADAPTER_RX_BROADCAST_PACKETS), + EF10_DMA_STAT(rx_broadcast_bytes, VADAPTER_RX_BROADCAST_BYTES), + EF10_DMA_STAT(rx_bad, VADAPTER_RX_BAD_PACKETS), + EF10_DMA_STAT(rx_bad_bytes, VADAPTER_RX_BAD_BYTES), + EF10_DMA_STAT(rx_overflow, VADAPTER_RX_OVERFLOW), + EF10_DMA_STAT(tx_unicast, VADAPTER_TX_UNICAST_PACKETS), + EF10_DMA_STAT(tx_unicast_bytes, VADAPTER_TX_UNICAST_BYTES), + EF10_DMA_STAT(tx_multicast, VADAPTER_TX_MULTICAST_PACKETS), + EF10_DMA_STAT(tx_multicast_bytes, VADAPTER_TX_MULTICAST_BYTES), + EF10_DMA_STAT(tx_broadcast, VADAPTER_TX_BROADCAST_PACKETS), + EF10_DMA_STAT(tx_broadcast_bytes, VADAPTER_TX_BROADCAST_BYTES), + EF10_DMA_STAT(tx_bad, VADAPTER_TX_BAD_PACKETS), + EF10_DMA_STAT(tx_bad_bytes, VADAPTER_TX_BAD_BYTES), + EF10_DMA_STAT(tx_overflow, VADAPTER_TX_OVERFLOW), + EF10_DMA_STAT(fec_uncorrected_errors, FEC_UNCORRECTED_ERRORS), + EF10_DMA_STAT(fec_corrected_errors, FEC_CORRECTED_ERRORS), + EF10_DMA_STAT(fec_corrected_symbols_lane0, FEC_CORRECTED_SYMBOLS_LANE0), + EF10_DMA_STAT(fec_corrected_symbols_lane1, FEC_CORRECTED_SYMBOLS_LANE1), + EF10_DMA_STAT(fec_corrected_symbols_lane2, FEC_CORRECTED_SYMBOLS_LANE2), + EF10_DMA_STAT(fec_corrected_symbols_lane3, FEC_CORRECTED_SYMBOLS_LANE3), + EF10_DMA_STAT(ctpio_vi_busy_fallback, CTPIO_VI_BUSY_FALLBACK), + EF10_DMA_STAT(ctpio_long_write_success, CTPIO_LONG_WRITE_SUCCESS), + EF10_DMA_STAT(ctpio_missing_dbell_fail, CTPIO_MISSING_DBELL_FAIL), + EF10_DMA_STAT(ctpio_overflow_fail, CTPIO_OVERFLOW_FAIL), + EF10_DMA_STAT(ctpio_underflow_fail, CTPIO_UNDERFLOW_FAIL), + EF10_DMA_STAT(ctpio_timeout_fail, CTPIO_TIMEOUT_FAIL), + EF10_DMA_STAT(ctpio_noncontig_wr_fail, CTPIO_NONCONTIG_WR_FAIL), + EF10_DMA_STAT(ctpio_frm_clobber_fail, CTPIO_FRM_CLOBBER_FAIL), + EF10_DMA_STAT(ctpio_invalid_wr_fail, CTPIO_INVALID_WR_FAIL), + EF10_DMA_STAT(ctpio_vi_clobber_fallback, CTPIO_VI_CLOBBER_FALLBACK), + EF10_DMA_STAT(ctpio_unqualified_fallback, CTPIO_UNQUALIFIED_FALLBACK), + EF10_DMA_STAT(ctpio_runt_fallback, CTPIO_RUNT_FALLBACK), + EF10_DMA_STAT(ctpio_success, CTPIO_SUCCESS), + EF10_DMA_STAT(ctpio_fallback, CTPIO_FALLBACK), + EF10_DMA_STAT(ctpio_poison, 
CTPIO_POISON), + EF10_DMA_STAT(ctpio_erase, CTPIO_ERASE), +}; + +static void efx_ef10_common_stat_mask(unsigned long *mask) +{ + __set_bit(EF10_STAT_port_tx_bytes, mask); + __set_bit(EF10_STAT_port_tx_packets, mask); + __set_bit(EF10_STAT_port_tx_pause, mask); + __set_bit(EF10_STAT_port_tx_unicast, mask); + __set_bit(EF10_STAT_port_tx_multicast, mask); + __set_bit(EF10_STAT_port_tx_broadcast, mask); + __set_bit(EF10_STAT_port_rx_bytes, mask); + __set_bit(EF10_STAT_port_rx_bytes_minus_good_bytes, mask); + __set_bit(EF10_STAT_port_rx_good_bytes, mask); + __set_bit(EF10_STAT_port_rx_bad_bytes, mask); + __set_bit(EF10_STAT_port_rx_packets, mask); + __set_bit(EF10_STAT_port_rx_good, mask); + __set_bit(EF10_STAT_port_rx_bad, mask); + __set_bit(EF10_STAT_port_rx_pause, mask); + __set_bit(EF10_STAT_port_rx_control, mask); + __set_bit(EF10_STAT_port_rx_unicast, mask); + __set_bit(EF10_STAT_port_rx_multicast, mask); + __set_bit(EF10_STAT_port_rx_broadcast, mask); + __set_bit(EF10_STAT_port_rx_lt64, mask); + __set_bit(EF10_STAT_port_rx_64, mask); + __set_bit(EF10_STAT_port_rx_65_to_127, mask); + __set_bit(EF10_STAT_port_rx_128_to_255, mask); + __set_bit(EF10_STAT_port_rx_256_to_511, mask); + __set_bit(EF10_STAT_port_rx_512_to_1023, mask); + __set_bit(EF10_STAT_port_rx_1024_to_15xx, mask); + __set_bit(EF10_STAT_port_rx_15xx_to_jumbo, mask); + __set_bit(EF10_STAT_port_rx_gtjumbo, mask); + __set_bit(EF10_STAT_port_rx_bad_gtjumbo, mask); + __set_bit(EF10_STAT_port_rx_overflow, mask); + __set_bit(EF10_STAT_port_rx_nodesc_drops, mask); + __set_bit(GENERIC_STAT_rx_nodesc_trunc, mask); + __set_bit(GENERIC_STAT_rx_noskb_drops, mask); +} + +/* On 7000 series NICs, these statistics are only provided by the 10G MAC. + * For a 10G/40G switchable port we do not expose these because they might + * not include all the packets they should. + * On 8000 series NICs these statistics are always provided. + */ +static void efx_ef10_10g_only_stat_mask(unsigned long *mask) +{ + __set_bit(EF10_STAT_port_tx_control, mask); + __set_bit(EF10_STAT_port_tx_lt64, mask); + __set_bit(EF10_STAT_port_tx_64, mask); + __set_bit(EF10_STAT_port_tx_65_to_127, mask); + __set_bit(EF10_STAT_port_tx_128_to_255, mask); + __set_bit(EF10_STAT_port_tx_256_to_511, mask); + __set_bit(EF10_STAT_port_tx_512_to_1023, mask); + __set_bit(EF10_STAT_port_tx_1024_to_15xx, mask); + __set_bit(EF10_STAT_port_tx_15xx_to_jumbo, mask); +} + +/* These statistics are only provided by the 40G MAC. For a 10G/40G + * switchable port we do expose these because the errors will otherwise + * be silent. + */ +static void efx_ef10_40g_extra_stat_mask(unsigned long *mask) +{ + __set_bit(EF10_STAT_port_rx_align_error, mask); + __set_bit(EF10_STAT_port_rx_length_error, mask); +} + +/* These statistics are only provided if the firmware supports the + * capability PM_AND_RXDP_COUNTERS. 
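+ * efx_ef10_get_stat_mask() checks that capability before setting + * these bits.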
+ */ +static void efx_ef10_pm_and_rxdp_stat_mask(unsigned long *mask) +{ + __set_bit(EF10_STAT_port_rx_pm_trunc_bb_overflow, mask); + __set_bit(EF10_STAT_port_rx_pm_discard_bb_overflow, mask); + __set_bit(EF10_STAT_port_rx_pm_trunc_vfifo_full, mask); + __set_bit(EF10_STAT_port_rx_pm_discard_vfifo_full, mask); + __set_bit(EF10_STAT_port_rx_pm_trunc_qbb, mask); + __set_bit(EF10_STAT_port_rx_pm_discard_qbb, mask); + __set_bit(EF10_STAT_port_rx_pm_discard_mapping, mask); + __set_bit(EF10_STAT_port_rx_dp_q_disabled_packets, mask); + __set_bit(EF10_STAT_port_rx_dp_di_dropped_packets, mask); + __set_bit(EF10_STAT_port_rx_dp_streaming_packets, mask); + __set_bit(EF10_STAT_port_rx_dp_hlb_fetch, mask); + __set_bit(EF10_STAT_port_rx_dp_hlb_wait, mask); +} + +/* These statistics are only provided if the NIC supports MC_CMD_MAC_STATS_V2, + * indicated by returning a value >= MC_CMD_MAC_NSTATS_V2 in + * MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS. + */ +static void efx_ef10_fec_stat_mask(unsigned long *mask) +{ + __set_bit(EF10_STAT_fec_uncorrected_errors, mask); + __set_bit(EF10_STAT_fec_corrected_errors, mask); + __set_bit(EF10_STAT_fec_corrected_symbols_lane0, mask); + __set_bit(EF10_STAT_fec_corrected_symbols_lane1, mask); + __set_bit(EF10_STAT_fec_corrected_symbols_lane2, mask); + __set_bit(EF10_STAT_fec_corrected_symbols_lane3, mask); +} + +/* These statistics are only provided if the NIC supports MC_CMD_MAC_STATS_V3, + * indicated by returning a value >= MC_CMD_MAC_NSTATS_V3 in + * MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS. + * These bits are in the second u64 of the raw mask. + */ +static void efx_ef10_ctpio_stat_mask(unsigned long *mask) +{ + __set_bit(EF10_STAT_ctpio_vi_busy_fallback, mask); + __set_bit(EF10_STAT_ctpio_long_write_success, mask); + __set_bit(EF10_STAT_ctpio_missing_dbell_fail, mask); + __set_bit(EF10_STAT_ctpio_overflow_fail, mask); + __set_bit(EF10_STAT_ctpio_underflow_fail, mask); + __set_bit(EF10_STAT_ctpio_timeout_fail, mask); + __set_bit(EF10_STAT_ctpio_noncontig_wr_fail, mask); + __set_bit(EF10_STAT_ctpio_frm_clobber_fail, mask); + __set_bit(EF10_STAT_ctpio_invalid_wr_fail, mask); + __set_bit(EF10_STAT_ctpio_vi_clobber_fallback, mask); + __set_bit(EF10_STAT_ctpio_unqualified_fallback, mask); + __set_bit(EF10_STAT_ctpio_runt_fallback, mask); + __set_bit(EF10_STAT_ctpio_success, mask); + __set_bit(EF10_STAT_ctpio_fallback, mask); + __set_bit(EF10_STAT_ctpio_poison, mask); + __set_bit(EF10_STAT_ctpio_erase, mask); +} + +static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + u32 port_caps = efx_mcdi_phy_get_caps(efx); + unsigned int i; + + efx_ef10_common_stat_mask(mask); + if (efx->mcdi->fn_flags & (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL)) { + if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) { + efx_ef10_40g_extra_stat_mask(mask); + /* 8000 series have everything even at 40G */ + if (efx_ef10_has_cap(nic_data->datapath_caps2, + MAC_STATS_40G_TX_SIZE_BINS)) + efx_ef10_10g_only_stat_mask(mask); + } else { + efx_ef10_10g_only_stat_mask(mask); + } + + if (efx_ef10_has_cap(nic_data->datapath_caps, PM_AND_RXDP_COUNTERS)) + efx_ef10_pm_and_rxdp_stat_mask(mask); + } + + /* Only show vadaptor stats when EVB capability is present */ + if (efx_ef10_has_cap(nic_data->datapath_caps, EVB)) + for (i = EF10_STAT_port_COUNT; i < EF10_STAT_V1_COUNT; i++) + __set_bit(i, mask); + + /* Only show FEC stats when NIC supports MC_CMD_MAC_STATS_V2 */ + if (efx->num_mac_stats >= 
MC_CMD_MAC_NSTATS_V2) + efx_ef10_fec_stat_mask(mask); + + /* CTPIO stats appear in V3. Only show them on devices that actually + * support CTPIO. Although this driver doesn't use CTPIO others might, + * and we may be reporting the stats for the underlying port. + */ + if ((efx->num_mac_stats >= MC_CMD_MAC_NSTATS_V3) && + efx_ef10_has_cap(nic_data->datapath_caps2, CTPIO)) + efx_ef10_ctpio_stat_mask(mask); +} + +static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names) +{ + DECLARE_BITMAP(mask, EF10_STAT_COUNT) = {}; + + efx_ef10_get_stat_mask(efx, mask); + return efx_nic_describe_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, + mask, names); +} + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_ETHTOOL_FECSTATS) +static void efx_ef10_get_fec_stats(struct efx_nic *efx, + struct ethtool_fec_stats *fec_stats) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + DECLARE_BITMAP(mask, EF10_STAT_COUNT) = {}; + u64 *stats = nic_data->stats; + + efx_ef10_get_stat_mask(efx, mask); + if (test_bit(EF10_STAT_fec_corrected_errors, mask)) + fec_stats->corrected_blocks.total = + stats[EF10_STAT_fec_corrected_errors]; + if (test_bit(EF10_STAT_fec_uncorrected_errors, mask)) + fec_stats->uncorrectable_blocks.total = + stats[EF10_STAT_fec_uncorrected_errors]; +} +#endif + +static bool efx_use_vadaptor_stats(struct efx_nic *efx) +{ +#ifdef CONFIG_SFC_SRIOV + struct efx_ef10_nic_data *nic_data = efx->nic_data; +#endif + + if (!(efx->mcdi->fn_flags & + (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL))) + return true; +#ifdef CONFIG_SFC_SRIOV + if (efx->type->is_vf || nic_data->vf_count) + return true; +#endif + + return false; +} + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_USE_NETDEV_STATS64) +static size_t efx_ef10_update_stats_common(struct efx_nic *efx, u64 *full_stats, + struct rtnl_link_stats64 *core_stats) +#else +static size_t efx_ef10_update_stats_common(struct efx_nic *efx, u64 *full_stats, + struct net_device_stats *core_stats) +#endif +{ + DECLARE_BITMAP(mask, EF10_STAT_COUNT) = {}; + struct efx_ef10_nic_data *nic_data = efx->nic_data; + u64 *stats = nic_data->stats; + size_t stats_count = 0, index; + + efx_ef10_get_stat_mask(efx, mask); + + if (full_stats) { + for_each_set_bit(index, mask, EF10_STAT_COUNT) { + if (efx_ef10_stat_desc[index].name) { + *full_stats++ = stats[index]; + ++stats_count; + } + } + } + + if (!core_stats) + return stats_count; + + if (efx_use_vadaptor_stats(efx)) { + core_stats->rx_packets = stats[EF10_STAT_rx_unicast] + + stats[EF10_STAT_rx_multicast] + + stats[EF10_STAT_rx_broadcast]; + core_stats->tx_packets = stats[EF10_STAT_tx_unicast] + + stats[EF10_STAT_tx_multicast] + + stats[EF10_STAT_tx_broadcast]; + core_stats->rx_bytes = stats[EF10_STAT_rx_unicast_bytes] + + stats[EF10_STAT_rx_multicast_bytes] + + stats[EF10_STAT_rx_broadcast_bytes]; + core_stats->tx_bytes = stats[EF10_STAT_tx_unicast_bytes] + + stats[EF10_STAT_tx_multicast_bytes] + + stats[EF10_STAT_tx_broadcast_bytes]; + core_stats->rx_dropped = stats[GENERIC_STAT_rx_nodesc_trunc] + + stats[GENERIC_STAT_rx_noskb_drops]; + core_stats->multicast = stats[EF10_STAT_rx_multicast]; + core_stats->rx_crc_errors = stats[EF10_STAT_rx_bad]; + core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow]; + core_stats->rx_errors = core_stats->rx_crc_errors; + core_stats->tx_errors = stats[EF10_STAT_tx_bad]; + } else { + /* Use port stats. 
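+ * We only get here when this function has link control and no VFs + * are in use, so the port-wide MAC counters cover our own traffic. 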
*/ + core_stats->rx_packets = stats[EF10_STAT_port_rx_packets]; + core_stats->tx_packets = stats[EF10_STAT_port_tx_packets]; + core_stats->rx_bytes = stats[EF10_STAT_port_rx_bytes]; + core_stats->tx_bytes = stats[EF10_STAT_port_tx_bytes]; + core_stats->rx_dropped = stats[EF10_STAT_port_rx_nodesc_drops] + + stats[GENERIC_STAT_rx_nodesc_trunc] + + stats[GENERIC_STAT_rx_noskb_drops]; + core_stats->multicast = stats[EF10_STAT_port_rx_multicast]; + core_stats->rx_length_errors = + stats[EF10_STAT_port_rx_gtjumbo] + + stats[EF10_STAT_port_rx_length_error]; + core_stats->rx_crc_errors = stats[EF10_STAT_port_rx_bad]; + core_stats->rx_frame_errors = + stats[EF10_STAT_port_rx_align_error]; + core_stats->rx_fifo_errors = stats[EF10_STAT_port_rx_overflow]; + core_stats->rx_errors = (core_stats->rx_length_errors + + core_stats->rx_crc_errors + + core_stats->rx_frame_errors); + } + return stats_count; +} + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_USE_NETDEV_STATS64) +static size_t efx_ef10_update_stats_pf(struct efx_nic *efx, u64 *full_stats, + struct rtnl_link_stats64 *core_stats) +#else +static size_t efx_ef10_update_stats_pf(struct efx_nic *efx, u64 *full_stats, + struct net_device_stats *core_stats) +#endif + __acquires(efx->stats_lock) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + DECLARE_BITMAP(mask, EF10_STAT_COUNT) = {}; + u64 *stats = nic_data->stats; + + spin_lock_bh(&efx->stats_lock); + + efx_ef10_get_stat_mask(efx, mask); + + efx_nic_copy_stats(efx, nic_data->mc_stats); + efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, + mask, stats, efx->mc_initial_stats, nic_data->mc_stats); + + /* Update derived statistics */ + efx_nic_fix_nodesc_drop_stat(efx, + &stats[EF10_STAT_port_rx_nodesc_drops]); + /* MC Firmware reads RX_BYTES and RX_GOOD_BYTES from the MAC. + * It then calculates RX_BAD_BYTES and DMAs it to us with RX_BYTES. + * We report these as port_rx_ stats. We are not given RX_GOOD_BYTES. + * Here we calculate port_rx_good_bytes. + */ + stats[EF10_STAT_port_rx_good_bytes] = + stats[EF10_STAT_port_rx_bytes] - + stats[EF10_STAT_port_rx_bytes_minus_good_bytes]; + + /* The asynchronous reads used to calculate RX_BAD_BYTES in + * MC Firmware are done such that we should not see an increase in + * RX_BAD_BYTES when a good packet has arrived. Unfortunately this + * does mean that the stat can decrease at times. Here we do not + * update the stat unless it has increased or has gone to zero + * (In the case of the NIC rebooting). + * Please see Bug 33781 for a discussion of why things work this way. 
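+ * efx_update_diff_stat() below implements this: the stored value + * only ever moves forward or resets to zero.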
+ */ + efx_update_diff_stat(&stats[EF10_STAT_port_rx_bad_bytes], + stats[EF10_STAT_port_rx_bytes_minus_good_bytes]); + efx_update_sw_stats(efx, stats); + + return efx_ef10_update_stats_common(efx, full_stats, core_stats); +} + +static void efx_ef10_pull_stats_pf(struct efx_nic *efx) +{ + efx_mcdi_mac_pull_stats(efx); + if (!efx->stats_initialised) { + efx_reset_sw_stats(efx); + efx_ptp_reset_stats(efx); + efx_nic_reset_stats(efx); + efx->stats_initialised = true; + } +} + +#ifdef CONFIG_SFC_SRIOV +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_USE_NETDEV_STATS64) +static size_t efx_ef10_update_stats_vf(struct efx_nic *efx, u64 *full_stats, + struct rtnl_link_stats64 *core_stats) +#else +static size_t efx_ef10_update_stats_vf(struct efx_nic *efx, u64 *full_stats, + struct net_device_stats *core_stats) +#endif + __acquires(efx->stats_lock) +{ + /* MCDI is required to update statistics, but it can't be used + * here since the dev_base_lock rwlock may be held for reading + * outside, while EVQ/CPU0 may be waiting for the write lock with + * bottom-halves disabled (i.e. NAPI cannot run to process the + * MCDI completion). + */ + + /* Serialize access to nic_data->stats */ + spin_lock_bh(&efx->stats_lock); + /* The lock should be held on return */ + return efx_ef10_update_stats_common(efx, full_stats, core_stats); +} + +static void efx_ef10_vf_schedule_stats_work(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + + schedule_delayed_work(&nic_data->vf_stats_work, + msecs_to_jiffies(efx->stats_period_ms)); +} + +static void efx_ef10_start_stats_vf(struct efx_nic *efx) +{ + efx_ef10_vf_schedule_stats_work(efx); +} + +static void efx_ef10_stop_stats_vf(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + + cancel_delayed_work_sync(&nic_data->vf_stats_work); +} + +static void efx_ef10_pull_stats_vf(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN); + DECLARE_BITMAP(mask, EF10_STAT_COUNT) = {}; + __le64 generation_start, generation_end; + u32 dma_len = efx->num_mac_stats * sizeof(u64); + struct efx_buffer stats_buf; + __le64 *dma_stats; + u64 *stats = nic_data->stats; + int rc; + + efx_ef10_get_stat_mask(efx, mask); + + if (efx_nic_alloc_buffer(efx, &stats_buf, dma_len, GFP_KERNEL)) + return; + + dma_stats = stats_buf.addr; + dma_stats[efx->num_mac_stats - 1] = EFX_MC_STATS_GENERATION_INVALID; + + MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, stats_buf.dma_addr); + MCDI_POPULATE_DWORD_1(inbuf, MAC_STATS_IN_CMD, + MAC_STATS_IN_DMA, 1); + MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len); + MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, EVB_PORT_ID_ASSIGNED); + + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf), + NULL, 0, NULL); + + if (rc) { + /* Expect ENOENT if DMA queues have not been set up */ + if (rc != -ENOENT || atomic_read(&efx->active_queues)) + efx_mcdi_display_error(efx, MC_CMD_MAC_STATS, + sizeof(inbuf), NULL, 0, rc); + goto out; + } + + generation_end = dma_stats[efx->num_mac_stats - 1]; + if (generation_end == EFX_MC_STATS_GENERATION_INVALID) { + WARN_ON_ONCE(1); + goto out; + } + + if (!efx->stats_initialised) { + efx_ptp_reset_stats(efx); + efx_reset_sw_stats(efx); + memcpy(efx->mc_initial_stats, dma_stats, + efx->num_mac_stats * sizeof(u64)); + efx->stats_initialised = true; + } + + /* Take the lock; stats must be updated while holding it */ + spin_lock_bh(&efx->stats_lock); + + efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask, + stats, 
efx->mc_initial_stats, dma_stats); + rmb(); + generation_start = dma_stats[MC_CMD_MAC_GENERATION_START]; + if (generation_end != generation_start) + goto out_unlock; + + efx_update_sw_stats(efx, stats); + +out_unlock: + spin_unlock_bh(&efx->stats_lock); +out: + efx_nic_free_buffer(efx, &stats_buf); +} + +static void efx_ef10_vf_update_stats_work(struct work_struct *data) +{ + struct efx_ef10_nic_data *nic_data = + container_of(data, struct efx_ef10_nic_data, vf_stats_work.work); + + efx_ef10_pull_stats_vf(nic_data->efx); + efx_ef10_vf_schedule_stats_work(nic_data->efx); +} +#endif + +static void efx_ef10_push_irq_moderation(struct efx_channel *channel) +{ + struct efx_nic *efx = channel->efx; + unsigned int mode, usecs; + efx_dword_t timer_cmd; + + if (channel->irq_moderation_us) { + mode = 3; + usecs = channel->irq_moderation_us; + } else { + mode = 0; + usecs = 0; + } + + if (EFX_EF10_WORKAROUND_61265(efx)) { + MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_EVQ_TMR_IN_LEN); + unsigned int ns = usecs * 1000; + + MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_INSTANCE, + channel->channel); + MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS, ns); + MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS, ns); + MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_MODE, mode); + + efx_mcdi_rpc_async(efx, MC_CMD_SET_EVQ_TMR, + inbuf, sizeof(inbuf), NULL, 0); + } else if (EFX_EF10_WORKAROUND_35388(efx)) { + unsigned int ticks = efx_usecs_to_ticks(efx, usecs); + + EFX_POPULATE_DWORD_3(timer_cmd, ERF_DD_EVQ_IND_TIMER_FLAGS, + EFE_DD_EVQ_IND_TIMER_FLAGS, + ERF_DD_EVQ_IND_TIMER_MODE, mode, + ERF_DD_EVQ_IND_TIMER_VAL, ticks); + efx_writed_page(efx, &timer_cmd, ER_DD_EVQ_INDIRECT, + channel->channel); + } else { + unsigned int ticks = efx_usecs_to_ticks(efx, usecs); + + EFX_POPULATE_DWORD_3(timer_cmd, ERF_DZ_TC_TIMER_MODE, mode, + ERF_DZ_TC_TIMER_VAL, ticks, + ERF_FZ_TC_TMR_REL_VAL, ticks); + efx_writed_page(efx, &timer_cmd, ER_DZ_EVQ_TMR, + channel->channel); + } +} + +#ifdef CONFIG_SFC_SRIOV +static void efx_ef10_get_wol_vf(struct efx_nic *efx, struct ethtool_wolinfo *wol) {} + +static int efx_ef10_set_wol_vf(struct efx_nic *efx, u32 type) +{ + return -EOPNOTSUPP; +} +#endif + +static void efx_ef10_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol) +{ + wol->supported = 0; + wol->wolopts = 0; + memset(&wol->sopass, 0, sizeof(wol->sopass)); +} + +static int efx_ef10_set_wol(struct efx_nic *efx, u32 type) +{ + if (type != 0) + return -EINVAL; + return 0; +} + +static void efx_ef10_mcdi_request(struct efx_nic *efx, u8 bufid, + const efx_dword_t *hdr, size_t hdr_len, + const efx_dword_t *sdu, size_t sdu_len) +{ + dma_addr_t dma_addr; + u8 *pdu = efx_ef10_mcdi_buf(efx, bufid, &dma_addr); + + memcpy(pdu, hdr, hdr_len); + memcpy(pdu + hdr_len, sdu, sdu_len); + wmb(); + + /* The hardware provides 'low' and 'high' (doorbell) registers + * for passing the 64-bit address of an MCDI request to + * firmware. However the dwords are swapped by firmware. The + * least significant bits of the doorbell are then 0 for all + * MCDI requests due to alignment. 
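+ * + * Worked example (added for exposition, not from the original driver): + * for dma_addr == 0x0000001234567800 the writes below put the high + * dword 0x00000012 in ER_DZ_MC_DB_LWRD and the low dword 0x34567800 in + * ER_DZ_MC_DB_HWRD; the least significant bits of the low dword are + * zero because of the MCDI buffer's alignment, as noted above.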
+ */ + _efx_writed(efx, cpu_to_le32((u64)dma_addr >> 32), ER_DZ_MC_DB_LWRD); + _efx_writed(efx, cpu_to_le32((u32)dma_addr), ER_DZ_MC_DB_HWRD); +} + +static bool efx_ef10_mcdi_poll_response(struct efx_nic *efx, u8 bufid) +{ + const efx_dword_t hdr = + *(const efx_dword_t *)(efx_ef10_mcdi_buf(efx, bufid, NULL)); + + rmb(); + return EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE); +} + +static void +efx_ef10_mcdi_read_response(struct efx_nic *efx, u8 bufid, + efx_dword_t *outbuf, size_t offset, size_t outlen) +{ + const u8 *pdu = efx_ef10_mcdi_buf(efx, bufid, NULL); + + memcpy(outbuf, pdu + offset, outlen); +} + +static void efx_ef10_mcdi_reboot_detected(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + + efx->current_reset = jiffies; + + /* All our allocations have been reset */ + efx_ef10_reset_mc_allocations(efx); + + /* The datapath firmware might have been changed */ + nic_data->must_check_datapath_caps = true; + + /* MAC statistics have been cleared on the NIC; clear the local + * statistic that we update with efx_update_diff_stat(). + */ + nic_data->stats[EF10_STAT_port_rx_bad_bytes] = 0; + + /* The set of available sensors might have changed */ + nic_data->must_reprobe_sensors = true; +} + +static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + int rc; + + rc = efx_ef10_get_warm_boot_count(efx); + if (rc < 0) { + /* The firmware is presumably in the process of + * rebooting. However, we are supposed to report each + * reboot just once, so we must only do that once we + * can read and store the updated warm boot count. + */ + return 0; + } + + if (rc == nic_data->warm_boot_count) + return 0; + + nic_data->warm_boot_count = rc; + + return -EIO; +} + +/* Record the warm boot count at the start of BIST. + * If efx_ef10_mcdi_poll_reboot sees a reboot after the function enters BIST + * mode but before the scheduled MC_BIST reset actually begins, then the warm + * boot count increase will be recorded before the reset. This means that + * efx_wait_for_bist_end will timeout waiting for an increase in the warm boot + * count which has already happened. By comparing with the warm boot count at + * the start of BIST mode we remove this race condition. + */ +static void efx_ef10_mcdi_record_bist_event(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + + nic_data->bist_warm_boot_count = nic_data->warm_boot_count; +} + +/* Poll for a warm boot count increase as compared with the count when the + * function entered BIST mode. + */ +static int efx_ef10_mcdi_poll_bist_end(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + int rc; + + rc = efx_ef10_get_warm_boot_count(efx); + if (rc < 0) { + /* The firmware is presumably in the process of + * rebooting. However, we are supposed to report each + * reboot just once, so we must only do that once we + * can read and store the updated warm boot count. + */ + return 0; + } + + if (rc == nic_data->bist_warm_boot_count) + return 0; + + return -EIO; +} + +/* Get an MCDI buffer + * + * The caller is responsible for preventing racing by holding the + * MCDI iface_lock. 
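+ * + * Illustration (added for exposition, not from the original driver): + * with mcdi_buf_use == 0x3 (buffers 0 and 1 busy), ffz() returns 2 and + * buffer 2 is claimed via set_bit(); once every buffer's bit is set, + * ffz() returns an index >= EF10_NUM_MCDI_BUFFERS and the claim fails. + * Typical usage (illustrative sketch): + *	u8 bufid; + *	if (efx_ef10_mcdi_get_buf(efx, &bufid)) { + *		/* issue the request */ + *		efx_ef10_mcdi_put_buf(efx, bufid); + *	}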
+ */ +static bool efx_ef10_mcdi_get_buf(struct efx_nic *efx, u8 *bufid) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + + *bufid = ffz(nic_data->mcdi_buf_use); + if (*bufid < EF10_NUM_MCDI_BUFFERS) { + set_bit(*bufid, &nic_data->mcdi_buf_use); + return true; + } + + return false; +} + +/* Return an MCDI buffer */ +static void efx_ef10_mcdi_put_buf(struct efx_nic *efx, u8 bufid) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + + EFX_WARN_ON_PARANOID(bufid >= EF10_NUM_MCDI_BUFFERS); + EFX_WARN_ON_PARANOID(!test_bit(bufid, &nic_data->mcdi_buf_use)); + + clear_bit(bufid, &nic_data->mcdi_buf_use); +} + +/* Handle an MSI interrupt + * + * Handle an MSI hardware interrupt. This routine schedules event + * queue processing. No interrupt acknowledgement cycle is necessary. + * Also, we never need to check that the interrupt is for us, since + * MSI interrupts cannot be shared. + */ +static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id) +{ + struct efx_msi_context *context = dev_id; + struct efx_nic *efx = context->efx; + struct efx_channel *channel; + + netif_vdbg(efx, intr, efx->net_dev, + "IRQ %d on CPU %d\n", irq, raw_smp_processor_id()); + + if (likely(READ_ONCE(efx->irq_soft_enabled))) { + /* Note test interrupts */ + if (context->index == efx->irq_level) + efx->last_irq_cpu = raw_smp_processor_id(); + + /* Schedule processing of the channel */ + channel = efx_get_channel(efx, context->index); + efx_schedule_channel_irq(channel); + } + + return IRQ_HANDLED; +} + +static int efx_ef10_irq_test_generate(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_TRIGGER_INTERRUPT_IN_LEN); + + if (efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG41750, true, + NULL) == 0) + return -EOPNOTSUPP; + + BUILD_BUG_ON(MC_CMD_TRIGGER_INTERRUPT_OUT_LEN != 0); + + MCDI_SET_DWORD(inbuf, TRIGGER_INTERRUPT_IN_INTR_LEVEL, efx->irq_level); + return efx_mcdi_rpc_quiet(efx, MC_CMD_TRIGGER_INTERRUPT, + inbuf, sizeof(inbuf), NULL, 0, NULL); +} + +static int efx_ef10_tx_probe(struct efx_tx_queue *tx_queue) +{ + return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd, + (tx_queue->ptr_mask + 1) * + sizeof(efx_qword_t), + GFP_KERNEL); +} + +/* This writes to the TX_DESC_WPTR and also pushes data. If multiple + * descriptors are queued, subsequent descriptors will then be DMA-fetched. + */ +static inline void efx_ef10_push_tx_desc(struct efx_tx_queue *tx_queue, + const efx_qword_t *txd) +{ + unsigned int write_ptr; + efx_oword_t reg; + + write_ptr = tx_queue->write_count & tx_queue->ptr_mask; + EFX_POPULATE_OWORD_1(reg, ERF_DZ_TX_DESC_WPTR, write_ptr); + reg.qword[0] = *txd; + efx_writeo_page(tx_queue->efx, &reg, + ER_DZ_TX_DESC_UPD, tx_queue->queue); + tx_queue->notify_count = tx_queue->write_count; +} + +/* Add Firmware-Assisted TSO v2 option descriptors to a queue.
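+ * + * Summary (added for exposition, not from the original driver): two + * option descriptors are emitted per TSO packet - FATSO2A carrying the + * inner IP ID and TCP sequence number, and FATSO2B carrying the outer + * IP ID and the MSS. With EFX_TSO2_MAX_HDRLEN == 208 the patched IP + * total length below is 0x10000 - 208 = 0xff30 (65328).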
+ */ +static int efx_ef10_tx_tso_desc(struct efx_tx_queue *tx_queue, + struct sk_buff *skb, + bool *data_mapped) +{ + struct efx_tx_buffer *buffer; + u16 inner_ipv4_id = 0; + u16 outer_ipv4_id = 0; + struct tcphdr *tcp; + struct iphdr *ip; + u16 ip_tot_len; + u32 seqnum; + u32 mss; + + EFX_WARN_ON_ONCE_PARANOID(tx_queue->tso_version != 2); + + mss = skb_shinfo(skb)->gso_size; + + if (unlikely(mss < 4)) { + WARN_ONCE(1, "MSS of %u is too small for TSO v2\n", mss); + return -EINVAL; + } + +#ifdef EFX_NOT_UPSTREAM + if (efx_skb_encapsulation(skb)) { +#else + if (skb->encapsulation) { +#endif +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_CAN_SUPPORT_ENCAP_TSO) + if (!tx_queue->tso_encap) + return -EINVAL; + ip = ip_hdr(skb); + if (ip->version == 4) + outer_ipv4_id = ntohs(ip->id); + + ip = inner_ip_hdr(skb); + tcp = inner_tcp_hdr(skb); +#else + /* Insufficient information in the SKB to support this */ + return -EINVAL; +#endif + } else { + ip = ip_hdr(skb); + tcp = tcp_hdr(skb); + } + + /* 8000-series EF10 hardware requires that IP Total Length be + * greater than or equal to the value it will have in each segment + * (which is at most mss + 208 + TCP header length), but also less + * than (0x10000 - inner_network_header). Otherwise the TCP + * checksum calculation will be broken for encapsulated packets. + * We fill in ip->tot_len with 0xff30, which should satisfy the + * first requirement unless the MSS is ridiculously large (which + * should be impossible as the driver max MTU is 9216); it is + * guaranteed to satisfy the second as we only attempt TSO if + * inner_network_header <= 208. + */ + ip_tot_len = 0x10000 - EFX_TSO2_MAX_HDRLEN; + EFX_WARN_ON_ONCE_PARANOID(mss + EFX_TSO2_MAX_HDRLEN + + (tcp->doff << 2u) > ip_tot_len); + + if (ip->version == 4) { + ip->tot_len = htons(ip_tot_len); + ip->check = 0; + inner_ipv4_id = ntohs(ip->id); + } else { + ((struct ipv6hdr *)ip)->payload_len = htons(ip_tot_len); + } + + seqnum = ntohl(tcp->seq); + + buffer = efx_tx_queue_get_insert_buffer(tx_queue); + + buffer->flags = EFX_TX_BUF_OPTION; + buffer->len = 0; + buffer->unmap_len = 0; + EFX_POPULATE_QWORD_5(buffer->option, + ESF_DZ_TX_DESC_IS_OPT, 1, + ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_TSO, + ESF_DZ_TX_TSO_OPTION_TYPE, + ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A, + ESF_DZ_TX_TSO_IP_ID, inner_ipv4_id, + ESF_DZ_TX_TSO_TCP_SEQNO, seqnum + ); + ++tx_queue->insert_count; + + buffer = efx_tx_queue_get_insert_buffer(tx_queue); + + buffer->flags = EFX_TX_BUF_OPTION; + buffer->len = 0; + buffer->unmap_len = 0; + EFX_POPULATE_QWORD_5(buffer->option, + ESF_DZ_TX_DESC_IS_OPT, 1, + ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_TSO, + ESF_DZ_TX_TSO_OPTION_TYPE, + ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B, + ESF_DZ_TX_TSO_OUTER_IPID, outer_ipv4_id, + ESF_DZ_TX_TSO_TCP_MSS, mss + ); + ++tx_queue->insert_count; + + return 0; +} + +static int efx_ef10_tx_init(struct efx_tx_queue *tx_queue) +{ + struct efx_channel *channel = tx_queue->channel; + struct efx_nic *efx = tx_queue->efx; + struct efx_ef10_nic_data *nic_data = efx->nic_data; + efx_qword_t *txd; + bool tso_v2 = false; + int rc; + bool outer_csum_offload = + tx_queue->csum_offload & EFX_TXQ_TYPE_CSUM_OFFLOAD; + bool inner_csum_offload = + tx_queue->csum_offload & EFX_TXQ_TYPE_INNER_CSUM_OFFLOAD; + + EFX_WARN_ON_PARANOID(inner_csum_offload && + !efx_ef10_has_cap(nic_data->datapath_caps, VXLAN_NVGRE)); + + /* Only allow TX timestamping if we have the license for it. 
*/ + if (!(nic_data->licensed_features & + (1 << LICENSED_V3_FEATURES_TX_TIMESTAMPS_LBN))) { + tx_queue->timestamping = false; +#ifdef CONFIG_SFC_PTP + /* Disable sync events on this channel. */ + if (efx->type->ptp_set_ts_sync_events) + efx->type->ptp_set_ts_sync_events(efx, false, false); +#endif + } + +#ifdef EFX_NOT_UPSTREAM + /* TSOv2 is a limited resource that can only be configured on a limited + * number of queues. On a queue with no checksum offloads, TSO cannot + * possibly be a thing, so we don't enable it there. However, normally + * we use the same queue for NO_OFFLOAD and for INNER_CSUM_OFFLOAD, and + * it's initialised as a NO_OFFLOAD. So it's only when + * tx_non_csum_queue is set (meaning we have separate NO_OFFLOAD and + * INNER_CSUM_OFFLOAD queues) that the NO_OFFLOAD queue doesn't want a + * TSOv2 context. + */ + + if (!(tx_non_csum_queue && + tx_queue->csum_offload == EFX_TXQ_TYPE_NO_OFFLOAD)) +#endif + /* TSOv2 cannot be used with Hardware timestamping, and is never needed + * for XDP tx. */ + if (!tx_queue->timestamping && !tx_queue->xdp_tx && +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_SOCK) +#if defined(CONFIG_XDP_SOCKETS) + !efx_is_xsk_tx_queue(tx_queue) && +#endif +#endif + efx_ef10_has_cap(nic_data->datapath_caps2, TX_TSO_V2)) { + tso_v2 = true; + netif_dbg(efx, hw, efx->net_dev, "Using TSOv2 for channel %u\n", + channel->channel); + } + + rc = efx_mcdi_tx_init(tx_queue, tso_v2); + if (rc) + goto fail; + + channel->tx_coalesce_doorbell = tx_coalesce_doorbell; + + /* A previous user of this TX queue might have set us up the + * bomb by writing a descriptor to the TX push collector but + * not the doorbell. (Each collector belongs to a port, not a + * queue or function, so cannot easily be reset.) We must + * attempt to push a no-op descriptor in its place. 
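+ * + * (Added for exposition, not from the original driver: the no-op pushed + * below is the CRC_CSUM option descriptor built in this function; an + * option descriptor carries offload flags rather than a buffer address + * and length, so the stale collector entry it overwrites can no longer + * trigger a spurious data fetch.)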
+ */ + tx_queue->buffer[0].flags = EFX_TX_BUF_OPTION; + tx_queue->insert_count = 1; + txd = efx_tx_desc(tx_queue, 0); + EFX_POPULATE_QWORD_7(*txd, + ESF_DZ_TX_DESC_IS_OPT, true, + ESF_DZ_TX_OPTION_TYPE, + ESE_DZ_TX_OPTION_DESC_CRC_CSUM, + ESF_DZ_TX_OPTION_UDP_TCP_CSUM, + outer_csum_offload, + ESF_DZ_TX_OPTION_IP_CSUM, + outer_csum_offload && !tso_v2, + ESF_DZ_TX_OPTION_INNER_UDP_TCP_CSUM, + inner_csum_offload, + ESF_DZ_TX_OPTION_INNER_IP_CSUM, + inner_csum_offload && !tso_v2, + ESF_DZ_TX_TIMESTAMP, + tx_queue->timestamping); + tx_queue->write_count = 1; + + if (tso_v2) { + tx_queue->handle_tso = efx_ef10_tx_tso_desc; + tx_queue->tso_version = 2; + + if (efx_ef10_has_cap(nic_data->datapath_caps2, TX_TSO_V2_ENCAP)) + tx_queue->tso_encap = true; + } else if (efx_ef10_has_cap(nic_data->datapath_caps, TX_TSO)) { + tx_queue->tso_version = 1; + } + + wmb(); + efx_ef10_push_tx_desc(tx_queue, txd); + + return 0; + +fail: + if (rc != -ENETDOWN && rc != -EAGAIN) + netdev_WARN(efx->net_dev, "failed to initialise TXQ %d\n", + tx_queue->queue); + + return rc; +} + +static void efx_ef10_notify_tx_desc(struct efx_tx_queue *tx_queue) +{ + unsigned int write_ptr; + efx_dword_t reg; + + write_ptr = tx_queue->write_count & tx_queue->ptr_mask; + EFX_POPULATE_DWORD_1(reg, ERF_DZ_TX_DESC_WPTR_DWORD, write_ptr); + efx_writed_page(tx_queue->efx, &reg, + ER_DZ_TX_DESC_UPD_DWORD, tx_queue->queue); + tx_queue->notify_count = tx_queue->write_count; + tx_queue->notify_jiffies = jiffies; +} + +#define EFX_EF10_MAX_TX_DESCRIPTOR_LEN 0x3fff + +static unsigned int efx_ef10_tx_limit_len(struct efx_tx_queue *tx_queue, + dma_addr_t dma_addr, unsigned int len) +{ + if (len > EFX_EF10_MAX_TX_DESCRIPTOR_LEN) { + /* If we need to break across multiple descriptors we should + * stop at a page boundary. This assumes the length limit is + * greater than the page size. + */ + dma_addr_t end = dma_addr + EFX_EF10_MAX_TX_DESCRIPTOR_LEN; + + BUILD_BUG_ON(EFX_EF10_MAX_TX_DESCRIPTOR_LEN < EFX_PAGE_SIZE); + len = (end & (~(EFX_PAGE_SIZE - 1))) - dma_addr; + } + + return len; +} + +static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue) +{ + unsigned int old_write_count = tx_queue->write_count; + unsigned int new_write_count = old_write_count; + struct efx_tx_buffer *buffer; + bool prohibit_push = false; + unsigned int write_ptr; + efx_qword_t *txd; + + tx_queue->xmit_pending = false; + if (unlikely(tx_queue->write_count == tx_queue->insert_count)) + return; + + /* Some firmware versions don't like TSO descriptors in a tx push. */ + write_ptr = new_write_count & tx_queue->ptr_mask; + buffer = &tx_queue->buffer[write_ptr]; + if (buffer->flags & EFX_TX_BUF_OPTION && + EFX_QWORD_FIELD(buffer->option, ESF_DZ_TX_OPTION_TYPE) == + ESE_DZ_TX_OPTION_DESC_TSO) + prohibit_push = true; + /* We've already got the write_ptr and buffer, so jump ahead.
*/ + goto got_buffer; + + do { + write_ptr = new_write_count & tx_queue->ptr_mask; + buffer = &tx_queue->buffer[write_ptr]; +got_buffer: + txd = efx_tx_desc(tx_queue, write_ptr); + ++new_write_count; + + /* Create TX descriptor ring entry */ + if (buffer->flags & EFX_TX_BUF_OPTION) { + *txd = buffer->option; + if (EFX_QWORD_FIELD(*txd, ESF_DZ_TX_OPTION_TYPE) == 1) { + /* PIO descriptor */ + tx_queue->packet_write_count = new_write_count; + } + } else { + tx_queue->packet_write_count = new_write_count; + BUILD_BUG_ON(EFX_TX_BUF_CONT != 1); + EFX_POPULATE_QWORD_3( + *txd, + ESF_DZ_TX_KER_CONT, + buffer->flags & EFX_TX_BUF_CONT, + ESF_DZ_TX_KER_BYTE_CNT, buffer->len, + ESF_DZ_TX_KER_BUF_ADDR, buffer->dma_addr); + } + } while (new_write_count != tx_queue->insert_count); + + wmb(); /* Ensure descriptors are written before they are fetched */ + + tx_queue->write_count = new_write_count; + + /* Only the first descriptor can be pushed, so don't need to consider + * whether multiple descriptors are queued. + */ + if (!prohibit_push && + likely(old_write_count - tx_queue->read_count < tx_push_max_fill)) { + txd = efx_tx_desc(tx_queue, + old_write_count & tx_queue->ptr_mask); + efx_ef10_push_tx_desc(tx_queue, txd); + ++tx_queue->pushes; + } else { + /* The write_count above must be updated before reading + * channel->holdoff_doorbell to avoid a race with the + * completion path, so ensure these operations are not + * re-ordered. This also flushes the update of write_count + * back into the cache. + */ + smp_mb(); + + /* If the completion path is running and module option + * to enable coalescing is set we let the completion path + * handle the doorbell ping. + */ + if (!tx_queue->channel->holdoff_doorbell) { + /* Completion handler not running so send out */ + efx_ef10_notify_tx_desc(tx_queue); + ++tx_queue->doorbell_notify_tx; + } + } +} + +/* Maximum number of descriptors required for a single SKB */ +static unsigned int efx_ef10_tx_max_skb_descs(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + unsigned int max_descs; + + /* In all cases we assume that we don't need to fragment a descriptor + * due to a DMA boundary for a single output packet. + */ + BUILD_BUG_ON(EFX_MAX_MTU > EFX_EF10_MAX_TX_DESCRIPTOR_LEN); + + if (efx_ef10_has_cap(nic_data->datapath_caps, TX_TSO)) { + /* We need a header, option and payload descriptor for each + * output segment we might produce. + */ + max_descs = EFX_TSO_MAX_SEGS * 3 + MAX_SKB_FRAGS; + } else if (efx_ef10_has_cap(nic_data->datapath_caps2, TX_TSO_V2)) { + /* TSOv2 is limited to a certain number of queues, so we + * have to allow for a possible fallback to GSO. We make + * the assumption that GSO will segment things sensibly, + * so we allow for each possible output segment: + * - 1 DMA descriptor for the header + * - 1 DMA descriptor for the payload + * - additional DMA descriptors to allow for a fragment + * boundary in the middle of an output segment + * + * This is similar to the TSOv1 case above, but without + * the additional option descriptor. + */ + max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS; + } else { + /* We don't need to consider boundaries here - our maximum + * packet size is less than EFX_EF10_MAX_TX_DESCRIPTOR_LEN, + * as checked above. 
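+ * + * Worked example (added for exposition, not from the original driver), + * assuming the usual EFX_TSO_MAX_SEGS == 100 and MAX_SKB_FRAGS == 17: + * TSOv1 needs 100 * 3 + 17 = 317 descriptors, the TSOv2/GSO fallback + * needs 100 * 2 + 17 = 217, and the plain case below needs just 17.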
+ */ + max_descs = MAX_SKB_FRAGS; + } + + return max_descs; +} + +#ifdef CONFIG_DEBUG_FS +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_UDP_TUNNEL_NIC_INFO) +static int efx_debugfs_udp_tunnels(struct seq_file *file, void *data) +{ + struct efx_nic *efx = data; + struct efx_ef10_nic_data *nic_data = efx->nic_data; + struct efx_udp_tunnel *tnl; + char typebuf[8]; + unsigned int i; + + spin_lock_bh(&nic_data->udp_tunnels_lock); + + for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i) { + tnl = nic_data->udp_tunnels + i; + efx_get_udp_tunnel_type_name(tnl->type, typebuf, + sizeof(typebuf)); + if (tnl->count) + seq_printf(file, "%d (%s) (%u %c%c)\n", + ntohs(tnl->port), typebuf, tnl->count, + tnl->adding ? 'A' : '-', + tnl->removing ? 'R' : '-'); + } + spin_unlock_bh(&nic_data->udp_tunnels_lock); + return 0; +} +#endif + +static int efx_debugfs_read_netdev_uc_addr(struct seq_file *file, void *data) +{ + struct net_device *net_dev = data; + struct netdev_hw_addr *uc; + unsigned int i = 0; + + netdev_for_each_uc_addr(uc, net_dev) { + seq_printf(file, "%d - %pM\n", i, uc->addr); + i++; + } + return 0; +} + +static int efx_debugfs_read_netdev_mc_addr(struct seq_file *file, void *data) +{ + struct net_device *net_dev = data; +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_NET_DEVICE_MC) + struct netdev_hw_addr *mc; +#else + struct dev_mc_list *mc; +#endif + unsigned int i = 0; + + netdev_for_each_mc_addr(mc, net_dev) { +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_NET_DEVICE_MC) + seq_printf(file, "%d - %pM\n", i, mc->addr); +#else + seq_printf(file, "%d - %pM\n", i, mc->dmi_addr); +#endif + i++; + } + return 0; +} + +static int efx_debugfs_read_netdev_uc_count(struct seq_file *file, void *data) +{ + struct net_device *net_dev = data; + + seq_printf(file, "%d\n", netdev_uc_count(net_dev)); + return 0; +} + +static int efx_debugfs_read_netdev_mc_count(struct seq_file *file, void *data) +{ + struct net_device *net_dev = data; + + seq_printf(file, "%d\n", netdev_mc_count(net_dev)); + return 0; +} + +static int efx_debugfs_read_netdev_flags(struct seq_file *file, void *data) +{ + struct net_device *net_dev = data; + + seq_printf(file, "%#x promisc=%d\n", net_dev->flags, + net_dev->flags & IFF_PROMISC); + return 0; +} + +static int efx_debugfs_read_netdev_dev_addr(struct seq_file *file, void *data) +{ + struct net_device *net_dev = data; + + seq_printf(file, "%pM\n", net_dev->dev_addr); + return 0; +} + +static const struct efx_debugfs_parameter efx_debugfs[] = { +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_UDP_TUNNEL_NIC_INFO) + _EFX_RAW_PARAMETER(udp_tunnels, efx_debugfs_udp_tunnels), +#endif + {NULL}, +}; + +static const struct efx_debugfs_parameter netdev_debugfs[] = { + _EFX_RAW_PARAMETER(netdev_uc_addr, efx_debugfs_read_netdev_uc_addr), + _EFX_RAW_PARAMETER(netdev_mc_addr, efx_debugfs_read_netdev_mc_addr), + _EFX_RAW_PARAMETER(netdev_uc_count, efx_debugfs_read_netdev_uc_count), + _EFX_RAW_PARAMETER(netdev_mc_count, efx_debugfs_read_netdev_mc_count), + _EFX_RAW_PARAMETER(netdev_flags, efx_debugfs_read_netdev_flags), + _EFX_RAW_PARAMETER(netdev_dev_addr, efx_debugfs_read_netdev_dev_addr), + {NULL}, +}; +#else /* CONFIG_DEBUG_FS */ +static const struct efx_debugfs_parameter efx_debugfs[] = { + {NULL} +}; + +static const struct efx_debugfs_parameter netdev_debugfs[] = { + {NULL} +}; +#endif /* CONFIG_DEBUG_FS */ + +static int efx_ef10_probe_multicast_chaining(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + unsigned int enabled, implemented; + bool 
want_workaround_26807; + int rc; + + rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled); + if (rc == -ENOSYS) { + /* GET_WORKAROUNDS was implemented before this workaround, + * thus it must be unavailable in this firmware. + */ + nic_data->workaround_26807 = false; + return 0; + } + if (rc) + return rc; + want_workaround_26807 = multicast_chaining && + (implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807); + nic_data->workaround_26807 = + !!(enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807); + + if (want_workaround_26807 && !nic_data->workaround_26807) { + unsigned int flags; + + rc = efx_mcdi_set_workaround(efx, + MC_CMD_WORKAROUND_BUG26807, + true, &flags); + if (!rc) { + if (flags & + 1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN) { + netif_info(efx, drv, efx->net_dev, + "other functions on NIC have been reset\n"); + + /* With MCFW v4.6.x and earlier, the + * boot count will have incremented, + * so re-read the warm_boot_count + * value now to ensure this function + * doesn't think it has changed next + * time it checks. + */ + rc = efx_ef10_get_warm_boot_count(efx); + if (rc >= 0) { + nic_data->warm_boot_count = rc; + rc = 0; + } + } + nic_data->workaround_26807 = true; + } else if (rc == -EPERM) { + rc = 0; + } + } + return rc; +} + +static int efx_ef10_filter_table_probe(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + bool additional_rss = efx_ef10_has_cap(nic_data->datapath_caps, + ADDITIONAL_RSS_MODES); + + return efx_mcdi_filter_table_probe(efx, additional_rss); +} + +static int efx_ef10_filter_table_init(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + bool encap = efx_ef10_has_cap(nic_data->datapath_caps, VXLAN_NVGRE); + int rc = efx_ef10_probe_multicast_chaining(efx); + + if (rc) + return rc; + + rc = efx_mcdi_filter_table_init(efx, nic_data->workaround_26807, + encap); + if (rc) + return rc; + + efx_extend_debugfs_port(efx, efx, 0, efx_debugfs); + efx_extend_debugfs_port(efx, efx->net_dev, 0, netdev_debugfs); + return 0; +} + +static int efx_ef10_filter_table_up(struct efx_nic *efx) +{ + int rc; + + down_write(&efx->filter_sem); + rc = efx_mcdi_filter_table_up(efx); + up_write(&efx->filter_sem); + return rc; +} + +static void efx_ef10_filter_table_down(struct efx_nic *efx) +{ + down_write(&efx->filter_sem); + efx_mcdi_filter_table_down(efx); + up_write(&efx->filter_sem); +} + +static void efx_ef10_filter_table_fini(struct efx_nic *efx) +{ + efx_trim_debugfs_port(efx, efx_debugfs); + efx_trim_debugfs_port(efx, netdev_debugfs); + efx_mcdi_filter_table_fini(efx); +} + +static int efx_ef10_pf_rx_push_rss_config(struct efx_nic *efx, bool user, + const u32 *rx_indir_table, + const u8 *key) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + + if (efx_ef10_has_cap(nic_data->datapath_caps, RX_RSS_LIMITED)) + return -EOPNOTSUPP; + return efx_mcdi_rx_push_rss_config(efx, user, rx_indir_table, key); +} + +#ifdef CONFIG_SFC_SRIOV +static int efx_ef10_vf_rx_push_rss_config(struct efx_nic *efx, bool user, + const u32 *rx_indir_table, + const u8 *key) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + + if (efx_ef10_has_cap(nic_data->datapath_caps, RX_RSS_LIMITED)) + return -EOPNOTSUPP; + + /* on EF10 we're limited on RSS contexts, so do not push an exclusive + * context, only accept a shared.
+ */ + if (user) + return -EOPNOTSUPP; + if (efx->rss_context.context_id != EFX_MCDI_RSS_CONTEXT_INVALID) + return 0; + + return efx_mcdi_rx_push_shared_rss_config(efx, NULL); +} +#endif + +static int efx_ef10_rx_init(struct efx_rx_queue *rx_queue) +{ + bool want_outer_classes = false; + +#if defined(EFX_NOT_UPSTREAM) && defined(EFX_USE_FAKE_VLAN_RX_ACCEL) + struct efx_nic *efx = rx_queue->efx; + struct efx_ef10_nic_data *nic_data = efx->nic_data; + + /* Outer classes (ETH_TAG_CLASS to be more precise) are required + * for Rx VLAN stripping offload. + */ + want_outer_classes = efx_ef10_has_cap(nic_data->datapath_caps, + VXLAN_NVGRE) || + efx_ef10_has_cap(nic_data->datapath_caps2, + L3XUDP_SUPPORT); +#endif + + return efx_mcdi_rx_init(rx_queue, want_outer_classes); +} + +/* This creates an entry in the RX descriptor queue */ +static inline void +efx_ef10_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index) +{ + struct efx_rx_buffer *rx_buf; + efx_qword_t *rxd; + + rxd = efx_rx_desc(rx_queue, index); + rx_buf = efx_rx_buffer(rx_queue, index); + EFX_POPULATE_QWORD_2(*rxd, + ESF_DZ_RX_KER_BYTE_CNT, rx_buf->len, + ESF_DZ_RX_KER_BUF_ADDR, rx_buf->dma_addr); +} + +static void _efx_ef10_rx_write(struct efx_rx_queue *rx_queue) +{ + struct efx_nic *efx = rx_queue->efx; + unsigned int write_count; + efx_dword_t reg; + + /* Firmware requires that RX_DESC_WPTR be a multiple of 8 */ + write_count = rx_queue->added_count & ~7; + while (rx_queue->notified_count != write_count) { + efx_ef10_build_rx_desc( + rx_queue, + rx_queue->notified_count & rx_queue->ptr_mask); + ++rx_queue->notified_count; + } + + wmb(); + EFX_POPULATE_DWORD_1(reg, ERF_DZ_RX_DESC_WPTR, + write_count & rx_queue->ptr_mask); + efx_writed_page(efx, &reg, ER_DZ_RX_DESC_UPD, + efx_rx_queue_instance(rx_queue)); +} + +static void efx_ef10_rx_write(struct efx_rx_queue *rx_queue) +{ + unsigned int write_count; + + /* Firmware requires that RX_DESC_WPTR be a multiple of 8 */ + write_count = rx_queue->added_count & ~7; + if (rx_queue->notified_count == write_count) + return; + + _efx_ef10_rx_write(rx_queue); +} + +static int efx_ef10_rx_defer_refill(struct efx_rx_queue *rx_queue) +{ + struct efx_channel *channel = efx_rx_queue_channel(rx_queue); + MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN); + efx_qword_t event; + size_t outlen; + u32 magic; + + magic = EFX_EF10_DRVGEN_MAGIC(EFX_EF10_REFILL, + efx_rx_queue_index(rx_queue)); + EFX_POPULATE_QWORD_2(event, + ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV, + ESF_DZ_EV_DATA, magic); + + MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel); + + /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has + * already swapped the data to little-endian order.
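+ * + * (Added for exposition, not from the original driver: the + * EFX_POPULATE_* macros store fields already converted with + * cpu_to_le*, so the plain memcpy() below preserves the little-endian + * wire format on both LE and BE hosts, whereas MCDI_SET_QWORD() would + * convert the value a second time and corrupt it on big-endian + * machines.)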
+ */ + memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0], + sizeof(efx_qword_t)); + + return efx_mcdi_rpc(channel->efx, MC_CMD_DRIVER_EVENT, + inbuf, sizeof(inbuf), NULL, 0, &outlen); +} + +static int efx_ef10_ev_init(struct efx_channel *channel) +{ + struct efx_nic *efx = channel->efx; + struct efx_ef10_nic_data *nic_data = efx->nic_data; + bool use_v2 = efx_ef10_has_cap(nic_data->datapath_caps2, INIT_EVQ_V2); + bool cut_thru = !efx_ef10_has_cap(nic_data->datapath_caps, RX_BATCHING); + + return efx_mcdi_ev_init(channel, cut_thru, use_v2); +} + +static void efx_ef10_handle_rx_wrong_queue(struct efx_rx_queue *rx_queue, + unsigned int rx_queue_label) +{ + struct efx_nic *efx = rx_queue->efx; + + netif_info(efx, hw, efx->net_dev, + "rx event arrived on queue %d labeled as queue %u\n", + efx_rx_queue_index(rx_queue), rx_queue_label); + + efx_schedule_reset(efx, RESET_TYPE_DISABLE); +} + +static void +efx_ef10_handle_rx_bad_lbits(struct efx_rx_queue *rx_queue, + unsigned int actual, unsigned int expected) +{ + unsigned int dropped = (actual - expected) & rx_queue->ptr_mask; + struct efx_nic *efx = rx_queue->efx; + + netif_info(efx, hw, efx->net_dev, + "dropped %d events (index=%d expected=%d)\n", + dropped, actual, expected); + + atomic_inc(&efx->errors.missing_event); + efx_schedule_reset(efx, RESET_TYPE_DATAPATH); +} + +/* partially received RX was aborted. clean up. */ +static void efx_ef10_handle_rx_abort(struct efx_rx_queue *rx_queue) +{ + unsigned int rx_desc_ptr; + + netif_dbg(rx_queue->efx, hw, rx_queue->efx->net_dev, + "scattered RX aborted (dropping %u buffers)\n", + rx_queue->scatter_n); + + rx_desc_ptr = rx_queue->removed_count & rx_queue->ptr_mask; + + if (rx_queue->scatter_n) + efx_rx_packet(rx_queue, rx_desc_ptr, rx_queue->scatter_n, + 0, EFX_RX_PKT_DISCARD); + + rx_queue->removed_count += rx_queue->scatter_n; + rx_queue->scatter_n = 0; + rx_queue->scatter_len = 0; + ++rx_queue->n_rx_nodesc_trunc; +} + +static u16 +efx_ef10_handle_rx_event_errors(struct efx_rx_queue *rx_queue, + unsigned int n_packets, + unsigned int rx_encap_hdr, + unsigned int rx_l3_class, + unsigned int rx_l4_class, + const efx_qword_t *event) +{ + struct efx_nic *efx = rx_queue->efx; + bool handled = false; + + if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_ECRC_ERR)) { +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_ETHTOOL_FCS) + if (!efx->forward_fcs) { +#else + if (!(efx->net_dev->features & NETIF_F_RXALL)) { +#endif + if (!efx->loopback_selftest) + rx_queue->n_rx_eth_crc_err += n_packets; + return EFX_RX_PKT_DISCARD; + } + handled = true; + } + if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_IPCKSUM_ERR)) { + if (unlikely(rx_encap_hdr != ESE_EZ_ENCAP_HDR_VXLAN && + rx_l3_class != ESE_DZ_L3_CLASS_IP4 && + rx_l3_class != ESE_DZ_L3_CLASS_IP4_FRAG && + rx_l3_class != ESE_DZ_L3_CLASS_IP6 && + rx_l3_class != ESE_DZ_L3_CLASS_IP6_FRAG)) + netdev_WARN(efx->net_dev, + "invalid class for RX_IPCKSUM_ERR: event=" + EFX_QWORD_FMT "\n", + EFX_QWORD_VAL(*event)); + if (!efx->loopback_selftest) + *(rx_encap_hdr ? 
+ &rx_queue->n_rx_outer_ip_hdr_chksum_err : + &rx_queue->n_rx_ip_hdr_chksum_err) += n_packets; + return 0; + } + if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_TCPUDP_CKSUM_ERR)) { + if (unlikely(rx_encap_hdr != ESE_EZ_ENCAP_HDR_VXLAN && + ((rx_l3_class != ESE_DZ_L3_CLASS_IP4 && + rx_l3_class != ESE_DZ_L3_CLASS_IP6) || + (rx_l4_class != ESE_FZ_L4_CLASS_TCP && + rx_l4_class != ESE_FZ_L4_CLASS_UDP)))) + netdev_WARN(efx->net_dev, + "invalid class for RX_TCPUDP_CKSUM_ERR: event=" + EFX_QWORD_FMT "\n", + EFX_QWORD_VAL(*event)); + if (!efx->loopback_selftest) + *(rx_encap_hdr ? + &rx_queue->n_rx_outer_tcp_udp_chksum_err : + &rx_queue->n_rx_tcp_udp_chksum_err) += n_packets; + return 0; + } + if (EFX_QWORD_FIELD(*event, ESF_EZ_RX_IP_INNER_CHKSUM_ERR)) { + if (unlikely(!rx_encap_hdr)) + netdev_WARN(efx->net_dev, + "invalid encapsulation type for RX_IP_INNER_CHKSUM_ERR: event=" + EFX_QWORD_FMT "\n", + EFX_QWORD_VAL(*event)); + else if (unlikely(rx_l3_class != ESE_DZ_L3_CLASS_IP4 && + rx_l3_class != ESE_DZ_L3_CLASS_IP4_FRAG && + rx_l3_class != ESE_DZ_L3_CLASS_IP6 && + rx_l3_class != ESE_DZ_L3_CLASS_IP6_FRAG)) + netdev_WARN(efx->net_dev, + "invalid class for RX_IP_INNER_CHKSUM_ERR: event=" + EFX_QWORD_FMT "\n", + EFX_QWORD_VAL(*event)); + if (!efx->loopback_selftest) + rx_queue->n_rx_inner_ip_hdr_chksum_err += n_packets; + return 0; + } + if (EFX_QWORD_FIELD(*event, ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR)) { + if (unlikely(!rx_encap_hdr)) + netdev_WARN(efx->net_dev, + "invalid encapsulation type for RX_TCP_UDP_INNER_CHKSUM_ERR: event=" + EFX_QWORD_FMT "\n", + EFX_QWORD_VAL(*event)); + else if (unlikely((rx_l3_class != ESE_DZ_L3_CLASS_IP4 && + rx_l3_class != ESE_DZ_L3_CLASS_IP6) || + (rx_l4_class != ESE_FZ_L4_CLASS_TCP && + rx_l4_class != ESE_FZ_L4_CLASS_UDP))) + netdev_WARN(efx->net_dev, + "invalid class for RX_TCP_UDP_INNER_CHKSUM_ERR: event=" + EFX_QWORD_FMT "\n", + EFX_QWORD_VAL(*event)); + if (!efx->loopback_selftest) + rx_queue->n_rx_inner_tcp_udp_chksum_err += n_packets; + return 0; + } + + WARN_ON(!handled); /* No error bits were recognised */ + return 0; +} + +static int efx_ef10_handle_rx_event(struct efx_channel *channel, + const efx_qword_t *event) +{ + unsigned int rx_bytes, next_ptr_lbits, rx_queue_label, rx_l4_class; + unsigned int rx_l3_class, rx_encap_hdr; +#if defined(EFX_NOT_UPSTREAM) && defined(EFX_USE_FAKE_VLAN_RX_ACCEL) + unsigned int rx_eth_tag_class; +#endif + unsigned int n_descs, n_packets, i; + struct efx_nic *efx = channel->efx; + struct efx_ef10_nic_data *nic_data = efx->nic_data; + struct efx_rx_queue *rx_queue; + efx_qword_t errors; + bool rx_cont; + u16 flags = 0; + + if (unlikely(READ_ONCE(efx->reset_pending))) + return 0; + + /* Basic packet information */ + rx_bytes = EFX_QWORD_FIELD(*event, ESF_DZ_RX_BYTES); + next_ptr_lbits = EFX_QWORD_FIELD(*event, ESF_DZ_RX_DSC_PTR_LBITS); + rx_queue_label = EFX_QWORD_FIELD(*event, ESF_DZ_RX_QLABEL); +#if defined(EFX_NOT_UPSTREAM) && defined(EFX_USE_FAKE_VLAN_RX_ACCEL) + rx_eth_tag_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_ETH_TAG_CLASS); +#endif + rx_l3_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L3_CLASS); + rx_l4_class = EFX_QWORD_FIELD(*event, ESF_FZ_RX_L4_CLASS); + rx_cont = EFX_QWORD_FIELD(*event, ESF_DZ_RX_CONT); + rx_encap_hdr = (efx_ef10_has_cap(nic_data->datapath_caps, VXLAN_NVGRE) || + efx_ef10_has_cap(nic_data->datapath_caps2, L3XUDP_SUPPORT)) ? 
+ EFX_QWORD_FIELD(*event, ESF_EZ_RX_ENCAP_HDR) : + ESE_EZ_ENCAP_HDR_NONE; + + + if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_DROP_EVENT)) + netdev_WARN(efx->net_dev, "saw RX_DROP_EVENT: event=" + EFX_QWORD_FMT "\n", + EFX_QWORD_VAL(*event)); + + rx_queue = efx_channel_get_rx_queue(channel); + + if (unlikely(rx_queue_label != efx_rx_queue_index(rx_queue) % + (1 << ESF_DZ_RX_QLABEL_WIDTH))) + efx_ef10_handle_rx_wrong_queue(rx_queue, rx_queue_label); + + n_descs = ((next_ptr_lbits - rx_queue->removed_count) & + ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1)); + + if (n_descs != rx_queue->scatter_n + 1) { + struct efx_ef10_nic_data *nic_data = efx->nic_data; + + /* detect rx abort */ + if (unlikely(n_descs == rx_queue->scatter_n)) { + if (rx_queue->scatter_n == 0 || rx_bytes != 0) + netdev_WARN(efx->net_dev, + "invalid RX abort: scatter_n=%u event=" + EFX_QWORD_FMT "\n", + rx_queue->scatter_n, + EFX_QWORD_VAL(*event)); + efx_ef10_handle_rx_abort(rx_queue); + return 0; + } + + /* Check that RX completion merging is valid, i.e. + * the current firmware supports it and this is a + * non-scattered packet. + */ + if (!efx_ef10_has_cap(nic_data->datapath_caps, RX_BATCHING) || + rx_queue->scatter_n != 0 || rx_cont) { + efx_ef10_handle_rx_bad_lbits( + rx_queue, next_ptr_lbits, + (rx_queue->removed_count + + rx_queue->scatter_n + 1) & + ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1)); + return 0; + } + + /* Merged completion for multiple non-scattered packets */ + rx_queue->scatter_n = 1; + rx_queue->scatter_len = 0; + n_packets = n_descs; + ++rx_queue->n_rx_merge_events; + rx_queue->n_rx_merge_packets += n_packets; + flags |= EFX_RX_PKT_PREFIX_LEN; + } else { + ++rx_queue->scatter_n; + rx_queue->scatter_len += rx_bytes; + if (rx_cont) + return 0; + n_packets = 1; + } + + EFX_POPULATE_QWORD_5(errors, ESF_DZ_RX_ECRC_ERR, 1, + ESF_DZ_RX_IPCKSUM_ERR, 1, + ESF_DZ_RX_TCPUDP_CKSUM_ERR, 1, + ESF_EZ_RX_IP_INNER_CHKSUM_ERR, 1, + ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR, 1); + EFX_AND_QWORD(errors, *event, errors); + if (unlikely(!EFX_QWORD_IS_ZERO(errors))) { + flags |= efx_ef10_handle_rx_event_errors(rx_queue, n_packets, + rx_encap_hdr, + rx_l3_class, rx_l4_class, + event); + } else { + bool tcpudp = rx_l4_class == ESE_FZ_L4_CLASS_TCP || + rx_l4_class == ESE_FZ_L4_CLASS_UDP; + + if (rx_l3_class == ESE_DZ_L3_CLASS_IP4) + flags |= EFX_RX_PKT_IPV4; + else if (rx_l3_class == ESE_DZ_L3_CLASS_IP6) + flags |= EFX_RX_PKT_IPV6; + + switch (rx_encap_hdr) { + case ESE_EZ_ENCAP_HDR_VXLAN: /* VxLAN, GENEVE or L3xUDP */ + flags |= EFX_RX_PKT_CSUMMED; /* outer UDP csum */ + if (tcpudp) + flags |= EFX_RX_PKT_CSUM_LEVEL; /* inner L4 */ + break; + case ESE_EZ_ENCAP_HDR_NONE: + if (rx_l4_class == ESE_FZ_L4_CLASS_TCP) + flags |= EFX_RX_PKT_TCP; + fallthrough; + case ESE_EZ_ENCAP_HDR_GRE: + if (tcpudp) + flags |= EFX_RX_PKT_CSUMMED; + break; + default: + netdev_WARN(efx->net_dev, + "unknown encapsulation type: event=" + EFX_QWORD_FMT "\n", + EFX_QWORD_VAL(*event)); + } + } + +#if defined(EFX_NOT_UPSTREAM) && defined(EFX_USE_FAKE_VLAN_RX_ACCEL) + if (rx_eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN1 || + rx_eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN2) + flags |= EFX_RX_PKT_VLAN; +#endif + + channel->irq_mod_score += 2 * n_packets; + + /* XXX for bug 37073 */ + if (rx_queue->added_count - rx_queue->removed_count < n_descs) + netdev_WARN(efx->net_dev, + "invalid completion index: added_count=%#x removed_count=%#x n_packets=%u scatter_n=%u\n", + rx_queue->added_count, rx_queue->removed_count, + n_packets, rx_queue->scatter_n); + + /* Handle received 
packet(s) */ + for (i = 0; i < n_packets; i++) { + efx_rx_packet(rx_queue, + rx_queue->removed_count & rx_queue->ptr_mask, + rx_queue->scatter_n, rx_queue->scatter_len, + flags); + rx_queue->removed_count += rx_queue->scatter_n; + } + + rx_queue->scatter_n = 0; + rx_queue->scatter_len = 0; + + return n_packets; +} + +static u32 efx_ef10_extract_event_ts(efx_qword_t *event) +{ + u32 tstamp; + + tstamp = EFX_QWORD_FIELD(*event, TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI); + tstamp <<= 16; + tstamp |= EFX_QWORD_FIELD(*event, TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO); + + return tstamp; +} + +static void +efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) +{ + struct efx_nic *efx = channel->efx; + struct efx_tx_queue *tx_queue; + unsigned int tx_ev_desc_ptr; + unsigned int tx_ev_q_label; + unsigned int tx_ev_type; + u64 ts_part; + + if (unlikely(READ_ONCE(efx->reset_pending))) + return; + + if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT))) + return; + + /* Get the transmit queue */ + tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL); + tx_queue = efx_channel_get_tx_queue(channel, tx_ev_q_label); + + if (!tx_queue->timestamping) { + tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, ESF_DZ_TX_DESCR_INDX); + efx_xmit_done(tx_queue, tx_ev_desc_ptr & tx_queue->ptr_mask); + return; + } + + /* Transmit timestamps are only available for 8XXX series. They result + * in up to three events per packet. These occur in order, and are: + * - the normal completion event (may be omitted) + * - the low part of the timestamp + * - the high part of the timestamp + * + * It's possible for multiple completion events to appear before the + * corresponding timestamps. So we can for example get: + * COMP N + * COMP N+1 + * TS_LO N + * TS_HI N + * TS_LO N+1 + * TS_HI N+1 + * + * In addition it's also possible for the adjacent completions to be + * merged, so we may not see COMP N above. As such, the completion + * events are not very useful here. + * + * Each part of the timestamp is itself split across two 16 bit + * fields in the event. + */ + tx_ev_type = EFX_QWORD_FIELD(*event, ESF_EZ_TX_SOFT1); + + switch (tx_ev_type) { + case TX_TIMESTAMP_EVENT_TX_EV_COMPLETION: + /* Ignore this event - see above. */ + break; + + case TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO: + ts_part = efx_ef10_extract_event_ts(event); + tx_queue->completed_timestamp_minor = ts_part; + break; + + case TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_HI: + ts_part = efx_ef10_extract_event_ts(event); + tx_queue->completed_timestamp_major = ts_part; + efx_xmit_done_single(tx_queue); + break; + + default: + netif_err(efx, hw, efx->net_dev, + "channel %d unknown tx event type %d (data " + EFX_QWORD_FMT ")\n", + channel->channel, tx_ev_type, + EFX_QWORD_VAL(*event)); + break; + } +} + +static int +efx_ef10_handle_driver_event(struct efx_channel *channel, + efx_qword_t *event, int budget) +{ + struct efx_nic *efx = channel->efx; + int subcode; + + subcode = EFX_QWORD_FIELD(*event, ESF_DZ_DRV_SUB_CODE); + + switch (subcode) { + case ESE_DZ_DRV_TIMER_EV: + case ESE_DZ_DRV_WAKE_UP_EV: +#ifdef EFX_NOT_UPSTREAM +#if IS_MODULE(CONFIG_SFC_DRIVERLINK) + return efx_dl_handle_event(&efx->dl_nic, event, budget); +#endif +#endif + break; + case ESE_DZ_DRV_START_UP_EV: + /* event queue init complete. ok. 
*/ + break; + default: + netif_err(efx, hw, efx->net_dev, + "channel %d unknown driver event type %d" + " (data " EFX_QWORD_FMT ")\n", + channel->channel, subcode, + EFX_QWORD_VAL(*event)); + return -EINVAL; + } + return 0; +} + +static bool efx_ef10_port_process_event(struct efx_channel *channel, + efx_qword_t *event, int *rc, int budget) +{ + struct efx_nic *efx = channel->efx; + int code = EFX_QWORD_FIELD(*event, MCDI_EVENT_CODE); + + switch (code) { + case MCDI_EVENT_CODE_SENSOREVT: + efx_mcdi_sensor_event(efx, event); + return true; + } + + return false; +} + +static void efx_ef10_handle_driver_generated_event(struct efx_channel *channel, + efx_qword_t *event) +{ + struct efx_nic *efx = channel->efx; + struct efx_rx_queue *rx_queue; + u32 subcode; + + subcode = EFX_QWORD_FIELD(*event, EFX_DWORD_0); + + switch (EFX_EF10_DRVGEN_CODE(subcode)) { + case EFX_EF10_TEST: + channel->event_test_cpu = raw_smp_processor_id(); + break; + case EFX_EF10_REFILL: + /* The queue must be empty, so we won't receive any rx + * events, so efx_process_channel() won't refill the + * queue. Refill it here + */ + efx_for_each_channel_rx_queue(rx_queue, channel) + if (EFX_EF10_DRVGEN_DATA(subcode) == + efx_rx_queue_index(rx_queue)) + efx_fast_push_rx_descriptors(rx_queue, true); + break; +#ifdef EFX_NOT_UPSTREAM + case EFX_EF10_RERING_RX_DOORBELL: + /* For workaround 59975 */ + efx_for_each_channel_rx_queue(rx_queue, channel) + if (EFX_EF10_DRVGEN_DATA(subcode) == + efx_rx_queue_index(rx_queue)) + _efx_ef10_rx_write(rx_queue); + break; +#endif + default: + netif_err(efx, hw, efx->net_dev, + "channel %d unknown driver event type %u" + " (data " EFX_QWORD_FMT ")\n", + channel->channel, (unsigned int) subcode, + EFX_QWORD_VAL(*event)); + } +} + +static int efx_ef10_ev_process(struct efx_channel *channel, int quota) +{ + struct efx_nic *efx = channel->efx; + efx_qword_t cacheline[L1_CACHE_BYTES / sizeof(efx_qword_t)]; + efx_qword_t event, *p_event, *cl_base, *old_cl_base = NULL; + bool fresh = false; + unsigned int read_ptr, cr_ptr; + int ev_code; + int spent = 0; + int rc; + + read_ptr = channel->eventq_read_ptr; + + EFX_WARN_ON_ONCE_PARANOID(!IS_ALIGNED((uintptr_t)channel->eventq.addr, + L1_CACHE_BYTES)); + + for (;;) { + p_event = efx_event(channel, read_ptr); + /* We read a whole cacheline at a time, to minimise cache + * eviction when hardware and software are in the same + * cacheline and DDIO is not present + */ + cl_base = (efx_qword_t *)round_down((uintptr_t)p_event, L1_CACHE_BYTES); + if (cl_base != old_cl_base) { + /* memcpy could conceivably copy in bytes, which could + * lead to holes if the NIC is writing at the same time. 
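+ * (Added for exposition, not from the original driver: a byte-granular + * copy could observe an 8-byte event that the NIC is writing + * concurrently, yielding a mix of stale and fresh bytes that matches + * neither the old nor the new event.)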
+ * So use open-coded qword copy + */ + for (cr_ptr = p_event - cl_base; + cr_ptr < ARRAY_SIZE(cacheline); cr_ptr++) + cacheline[cr_ptr] = cl_base[cr_ptr]; + old_cl_base = cl_base; + fresh = true; + } + event = cacheline[p_event - cl_base]; + + if (!efx_event_present(&event)) { + if (fresh) + break; + /* re-read the cacheline, in case the NIC wrote more + * events while we were handling the ones we read before + */ + old_cl_base = NULL; + continue; + } + fresh = false; + + EFX_SET_QWORD(*p_event); + + ++read_ptr; + + ev_code = EFX_QWORD_FIELD(event, ESF_DZ_EV_CODE); + + netif_vdbg(efx, drv, efx->net_dev, + "processing event on %d " EFX_QWORD_FMT "\n", + channel->channel, EFX_QWORD_VAL(event)); + + switch (ev_code) { + case ESE_DZ_EV_CODE_RX_EV: + spent += efx_ef10_handle_rx_event(channel, &event); + if (spent >= quota) { + /* XXX can we split a merged event to + * avoid going over-quota? + */ + spent = quota; + goto out; + } + break; + case ESE_DZ_EV_CODE_TX_EV: + efx_ef10_handle_tx_event(channel, &event); + break; + case ESE_DZ_EV_CODE_MCDI_EV: + rc = 0; + if (!efx_mcdi_process_event(channel, &event) && + !efx_mcdi_port_process_event_common(channel, + &event, &rc, quota - spent) && + !efx_mcdi_port_process_event(channel, &event, + &rc, quota - spent) && + !efx_ef10_port_process_event(channel, &event, + &rc, quota - spent)) + netif_err(efx, hw, efx->net_dev, + "Unknown MCDI event " EFX_QWORD_FMT "\n", + EFX_QWORD_VAL(event)); + + if (rc > 0) + spent += rc; + else if (rc < 0) + spent++; + if (spent >= quota) + goto out; + break; + case ESE_DZ_EV_CODE_DRIVER_EV: + rc = efx_ef10_handle_driver_event(channel, &event, + quota - spent); + if (rc > 0) + spent += rc; + else if (rc < 0) + spent++; + if (spent >= quota) + goto out; + break; + case EFX_EF10_DRVGEN_EV: + efx_ef10_handle_driver_generated_event(channel, &event); + break; + default: + netif_err(efx, hw, efx->net_dev, + "channel %d unknown event type %d" + " (data " EFX_QWORD_FMT ")\n", + channel->channel, ev_code, + EFX_QWORD_VAL(event)); + } + } + +out: + channel->eventq_read_ptr = read_ptr; + return spent; +} + +static bool efx_ef10_ev_mcdi_pending(struct efx_channel *channel) +{ + unsigned int read_ptr; + efx_qword_t event; + int ev_code; + + read_ptr = channel->eventq_read_ptr; + for (;;) { + event = *efx_event(channel, read_ptr++); + if (!efx_event_present(&event)) + return false; + ev_code = EFX_QWORD_FIELD(event, ESF_DZ_EV_CODE); + if (ev_code == ESE_DZ_EV_CODE_MCDI_EV) + return true; + } +} + +static void efx_ef10_ev_read_ack(struct efx_channel *channel) +{ + struct efx_nic *efx = channel->efx; + efx_dword_t rptr; + + if (EFX_EF10_WORKAROUND_35388(efx)) { + BUILD_BUG_ON(EFX_MIN_EVQ_SIZE < + (1 << ERF_DD_EVQ_IND_RPTR_WIDTH)); + BUILD_BUG_ON(EFX_MAX_EVQ_SIZE > + (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH)); + + EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS, + EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH, + ERF_DD_EVQ_IND_RPTR, + (channel->eventq_read_ptr & + channel->eventq_mask) >> + ERF_DD_EVQ_IND_RPTR_WIDTH); + efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT, + channel->channel); + EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS, + EFE_DD_EVQ_IND_RPTR_FLAGS_LOW, + ERF_DD_EVQ_IND_RPTR, + channel->eventq_read_ptr & + ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1)); + efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT, + channel->channel); + } else { + EFX_POPULATE_DWORD_1(rptr, ERF_DZ_EVQ_RPTR, + channel->eventq_read_ptr & + channel->eventq_mask); + efx_writed_page(efx, &rptr, ER_DZ_EVQ_RPTR, channel->channel); + } +} + +static void 
efx_ef10_ev_test_generate(struct efx_channel *channel) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN); + struct efx_nic *efx = channel->efx; + efx_qword_t event; + int rc; + + EFX_POPULATE_QWORD_2(event, + ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV, + ESF_DZ_EV_DATA, EFX_EF10_TEST); + + MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel); + + /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has + * already swapped the data to little-endian order. + */ + memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0], + sizeof(efx_qword_t)); + + rc = efx_mcdi_rpc(efx, MC_CMD_DRIVER_EVENT, inbuf, sizeof(inbuf), + NULL, 0, NULL); + if (rc && (rc != -ENETDOWN)) + goto fail; + + return; + +fail: + WARN_ON(true); + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); +} + +static int efx_ef10_fini_dmaq(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + + /* If the MC has just rebooted, the TX/RX queues will have already been + * torn down, but efx->active_queues needs to be set to zero. + */ + if (nic_data->must_realloc_vis) { + atomic_set(&efx->active_queues, 0); + return 0; + } + + return efx_fini_dmaq(efx); +} + +static void efx_ef10_prepare_flr(struct efx_nic *efx) +{ + atomic_set(&efx->active_queues, 0); +} + +static int efx_ef10_vport_set_mac_address(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + u8 mac_old[ETH_ALEN]; + int rc, rc2; + + /* Only reconfigure a PF-created vport */ + if (is_zero_ether_addr(nic_data->vport_mac)) + return 0; + + efx_device_detach_sync(efx); + efx_net_stop(efx->net_dev); + down_write(&efx->filter_sem); + efx_mcdi_filter_table_down(efx); + efx_mcdi_filter_table_fini(efx); + up_write(&efx->filter_sem); + + rc = efx_ef10_vadaptor_free(efx, efx->vport.vport_id); + if (rc) + goto restore_filters; + + ether_addr_copy(mac_old, nic_data->vport_mac); + rc = efx_ef10_vport_del_mac(efx, efx->vport.vport_id, + nic_data->vport_mac); + if (rc) + goto restore_vadaptor; + + rc = efx_ef10_vport_add_mac(efx, efx->vport.vport_id, + efx->net_dev->dev_addr); + if (!rc) { + ether_addr_copy(nic_data->vport_mac, efx->net_dev->dev_addr); + } else { + rc2 = efx_ef10_vport_add_mac(efx, efx->vport.vport_id, mac_old); + if (rc2) { + /* Failed to add original MAC, so clear vport_mac */ + eth_zero_addr(nic_data->vport_mac); + goto reset_nic; + } + } + +restore_vadaptor: + rc2 = efx_ef10_vadaptor_alloc(efx, efx->vport.vport_id); + if (rc2) + goto reset_nic; +restore_filters: + mutex_lock(&efx->mac_lock); + down_write(&efx->filter_sem); + rc2 = efx_ef10_filter_table_init(efx); + if (!rc2) + rc2 = efx_ef10_filter_table_up(efx); + up_write(&efx->filter_sem); + mutex_unlock(&efx->mac_lock); + if (rc2) + goto reset_nic; + + rc2 = efx_net_open(efx->net_dev); + if (rc2) + goto reset_nic; + + efx_device_attach_if_not_resetting(efx); + + return rc; + +reset_nic: + netif_err(efx, drv, efx->net_dev, + "Failed to restore when changing MAC address - scheduling reset\n"); + efx_schedule_reset(efx, RESET_TYPE_DATAPATH); + + return rc ? rc : rc2; +} + +static int efx_ef10_set_mac_address(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_SET_MAC_IN_LEN); + bool was_enabled = efx->port_enabled; + int rc; + +#ifdef CONFIG_SFC_SRIOV + /* If this function is a VF and we have access to the parent PF, + * then use the PF control path to attempt to change the VF MAC address. 
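+ * + * Summary of the fallbacks below (added for exposition, not from the + * original driver): first ask the parent PF to set the VF MAC; then + * issue MC_CMD_VADAPTOR_SET_MAC directly; on -EBUSY retry after tearing + * down queues and filters; and on -ENOSYS (PF only) fall back to + * reconfiguring the vport.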
+ */ + if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) { + struct efx_nic *efx_pf = pci_get_drvdata(efx->pci_dev->physfn); + struct efx_ef10_nic_data *nic_data = efx->nic_data; + u8 mac[ETH_ALEN]; + bool reset; + + ether_addr_copy(mac, efx->net_dev->dev_addr); + + rc = efx_ef10_sriov_set_vf_mac(efx_pf, nic_data->vf_index, + mac, &reset); + if (!rc) + return 0; + + netif_dbg(efx, drv, efx->net_dev, + "Updating VF mac via PF failed (%d), %s\n", + rc, + reset ? "resulting in reset; aborting" : + "setting directly"); + if (reset) + return rc; + } +#endif + + ether_addr_copy(MCDI_PTR(inbuf, VADAPTOR_SET_MAC_IN_MACADDR), + efx->net_dev->dev_addr); + MCDI_SET_DWORD(inbuf, VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID, + efx->vport.vport_id); + + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_VADAPTOR_SET_MAC, inbuf, + sizeof(inbuf), NULL, 0, NULL); + + if (!rc) { + down_write(&efx->filter_sem); + efx_mcdi_filter_table_restore(efx); + up_write(&efx->filter_sem); + } else if (rc == -EBUSY) { + /* The firmware does not allow VADAPTOR_SET_MAC without first + * tearing down the queues and filters, so try again after + * tearing them down. This path will fail in the presence of + * the Onload stack. + */ + efx_device_detach_sync(efx); + efx_net_stop(efx->net_dev); + + mutex_lock(&efx->mac_lock); + down_write(&efx->filter_sem); + efx_mcdi_filter_table_down(efx); + efx_ef10_filter_table_fini(efx); + + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_VADAPTOR_SET_MAC, inbuf, + sizeof(inbuf), NULL, 0, NULL); + + efx_ef10_filter_table_init(efx); + efx_ef10_filter_table_up(efx); + up_write(&efx->filter_sem); + mutex_unlock(&efx->mac_lock); + + if (was_enabled) + efx_net_open(efx->net_dev); + efx_device_attach_if_not_resetting(efx); + } + + if (rc == -EPERM) { + netif_err(efx, drv, efx->net_dev, + "Cannot change MAC address; use sfboot to enable mac-spoofing on this interface\n"); + } else if (rc == -ENOSYS && !efx_ef10_is_vf(efx)) { + /* If the active MCFW does not support MC_CMD_VADAPTOR_SET_MAC + * fall back to the method of changing the MAC address on the + * vport. This only applies to PFs because such versions of + * MCFW do not support VFs. + */ + rc = efx_ef10_vport_set_mac_address(efx); + } else if (rc) { + efx_mcdi_display_error(efx, MC_CMD_VADAPTOR_SET_MAC, + sizeof(inbuf), NULL, 0, rc); + } + + return rc; +} + +static int efx_ef10_mac_reconfigure(struct efx_nic *efx, bool mtu_only) +{ + int rc; + struct efx_ef10_nic_data *nic_data = efx->nic_data; + + WARN_ON(!mutex_is_locked(&efx->mac_lock)); + + efx_mcdi_filter_sync_rx_mode(efx); + + if (mtu_only && + efx_ef10_has_cap(nic_data->datapath_caps, SET_MAC_ENHANCED)) + return efx_mcdi_set_mtu(efx); + + rc = efx_mcdi_set_mac(efx); + if (rc == -EPERM && efx_ef10_is_vf(efx)) + return 0; + + return rc; +} + +static unsigned int efx_ef10_mcdi_rpc_timeout(struct efx_nic *efx, + unsigned int cmd) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + + switch (cmd) { + case MC_CMD_POLL_BIST: + case MC_CMD_NVRAM_ERASE: + case MC_CMD_LICENSING_V3: + case MC_CMD_NVRAM_UPDATE_FINISH: + /* Potentially longer running commands. */ + if (efx_ef10_has_cap(nic_data->datapath_caps2, + NVRAM_UPDATE_REPORT_VERIFY_RESULT)) + return MCDI_RPC_LONG_TIMEOUT; + fallthrough; + default: + /* Some things take longer shortly after a reset.
*/ + if (time_before(jiffies, + efx->last_reset + MCDI_RPC_POST_RST_TIME)) + return MCDI_RPC_POST_RST_TIME; + return MCDI_RPC_TIMEOUT; + } +} + +static int efx_ef10_start_bist(struct efx_nic *efx, u32 bist_type) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN); + + MCDI_SET_DWORD(inbuf, START_BIST_IN_TYPE, bist_type); + return efx_mcdi_rpc(efx, MC_CMD_START_BIST, inbuf, sizeof(inbuf), + NULL, 0, NULL); +} + +/* MC BISTs follow a different poll mechanism to phy BISTs. + * The BIST is done in the poll handler on the MC, and the MCDI command + * will block until the BIST is done. + */ +static int efx_ef10_poll_bist(struct efx_nic *efx) +{ + int rc; + MCDI_DECLARE_BUF(outbuf, MC_CMD_POLL_BIST_OUT_LEN); + size_t outlen; + u32 result; + + rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0, + outbuf, sizeof(outbuf), &outlen); + if (rc != 0) + return rc; + + if (outlen < MC_CMD_POLL_BIST_OUT_LEN) + return -EIO; + + result = MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT); + switch (result) { + case MC_CMD_POLL_BIST_PASSED: + netif_dbg(efx, hw, efx->net_dev, "BIST passed.\n"); + return 0; + case MC_CMD_POLL_BIST_TIMEOUT: + netif_err(efx, hw, efx->net_dev, "BIST timed out\n"); + return -EIO; + case MC_CMD_POLL_BIST_FAILED: + netif_err(efx, hw, efx->net_dev, "BIST failed.\n"); + return -EIO; + default: + netif_err(efx, hw, efx->net_dev, + "BIST returned unknown result %u", result); + return -EIO; + } +} + +static int efx_ef10_run_bist(struct efx_nic *efx, u32 bist_type) +{ + int rc; + + netif_dbg(efx, drv, efx->net_dev, "starting BIST type %u\n", bist_type); + + rc = efx_ef10_start_bist(efx, bist_type); + if (rc != 0) + return rc; + + return efx_ef10_poll_bist(efx); +} + +static int +efx_ef10_test_chip(struct efx_nic *efx, struct efx_self_tests *tests) +{ + int rc, rc2; + + efx_reset_down(efx, RESET_TYPE_WORLD); + + rc = efx_mcdi_rpc(efx, MC_CMD_ENABLE_OFFLINE_BIST, + NULL, 0, NULL, 0, NULL); + if (rc != 0) + goto out; + + tests->memory = efx_ef10_run_bist(efx, MC_CMD_MC_MEM_BIST) ? -1 : 1; + tests->registers = efx_ef10_run_bist(efx, MC_CMD_REG_BIST) ? -1 : 1; + + rc = efx_mcdi_reset(efx, RESET_TYPE_WORLD); + +out: + if (rc == -EPERM) + rc = 0; + rc2 = efx_reset_up(efx, RESET_TYPE_WORLD, rc == 0); + return rc ? 
rc : rc2; +} + +#ifdef CONFIG_SFC_MTD + +struct efx_ef10_nvram_type_info { + u16 type, type_mask; + u8 port; + const char *name; +}; + +static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = { + { NVRAM_PARTITION_TYPE_MC_FIRMWARE, 0, 0, "sfc_mcfw" }, + { NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 0, 0, "sfc_mcfw_backup" }, + { NVRAM_PARTITION_TYPE_EXPANSION_ROM, 0, 0, "sfc_exp_rom" }, + { NVRAM_PARTITION_TYPE_STATIC_CONFIG, 0, 0, "sfc_static_cfg" }, + { NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 0, 0, "sfc_dynamic_cfg" }, + { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 0, 0, "sfc_exp_rom_cfg" }, + { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1, 0, 1, "sfc_exp_rom_cfg" }, + { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2, 0, 2, "sfc_exp_rom_cfg" }, + { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 0, 3, "sfc_exp_rom_cfg" }, + { NVRAM_PARTITION_TYPE_LICENSE, 0, 0, "sfc_license" }, + { NVRAM_PARTITION_TYPE_PHY_MIN, 0xff, 0, "sfc_phy_fw" }, + { NVRAM_PARTITION_TYPE_FPGA, 0, 0, "sfc_fpga" }, + { NVRAM_PARTITION_TYPE_FPGA_BACKUP, 0, 0, "sfc_fpgadiag" }, + { NVRAM_PARTITION_TYPE_FC_FIRMWARE, 0, 0, "sfc_fcfw" }, + { NVRAM_PARTITION_TYPE_MUM_FIRMWARE, 0, 0, "sfc_mumfw" }, + { NVRAM_PARTITION_TYPE_EXPANSION_UEFI, 0, 0, "sfc_uefi" }, + { NVRAM_PARTITION_TYPE_DYNCONFIG_DEFAULTS, 0, 0, "sfc_dynamic_cfg_dflt" }, + { NVRAM_PARTITION_TYPE_ROMCONFIG_DEFAULTS, 0, 0, "sfc_exp_rom_cfg_dflt" }, + { NVRAM_PARTITION_TYPE_STATUS, 0, 0, "sfc_status" }, + { NVRAM_PARTITION_TYPE_BUNDLE, 0, 0, "sfc_bundle" }, + { NVRAM_PARTITION_TYPE_BUNDLE_METADATA, 0, 0, "sfc_bundle_metadata" } + + +}; +#define EF10_NVRAM_PARTITION_COUNT ARRAY_SIZE(efx_ef10_nvram_types) + +static int efx_ef10_mtd_probe_partition(struct efx_nic *efx, + struct efx_mtd_partition *part, + unsigned int type, + unsigned long *found) +{ + const struct efx_ef10_nvram_type_info *info; + size_t size, erase_size, write_size; + int type_idx = 0; + bool protected; + int rc; + + for (type_idx = 0; ; type_idx++) { + if (type_idx == EF10_NVRAM_PARTITION_COUNT) + return -ENODEV; + info = efx_ef10_nvram_types + type_idx; + if ((type & ~info->type_mask) == info->type) + break; + } + if (info->port != efx_port_num(efx)) + return -ENODEV; + + rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &write_size, + &protected); + if (rc) + return rc; + if (protected && !efx_allow_nvconfig_writes && + (type != NVRAM_PARTITION_TYPE_DYNCONFIG_DEFAULTS && + type != NVRAM_PARTITION_TYPE_ROMCONFIG_DEFAULTS)) + return -ENODEV; /* hide it */ + if (protected) + erase_size = 0; /* Protected partitions are read-only */ + + /* If we've already exposed a partition of this type, hide this + * duplicate. All operations on MTDs are keyed by the type anyway, + * so we can't act on the duplicate. + */ + if (__test_and_set_bit(type_idx, found)) + return -EEXIST; + + part->nvram_type = type; + + rc = efx_mcdi_nvram_metadata(efx, type, &part->fw_subtype, NULL, NULL, + 0); + if (rc) { + /* The metadata command fails if the dynamic config is empty. + * In this situation we want to block a firmware upgrade until + * the problem has been fixed. 
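+ * Reporting the 0xff sentinel as the MC firmware subtype below
+ * should ensure that update tooling never matches it against a
+ * real board type; other partition types simply report subtype 0.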
+ */ + if (type == NVRAM_PARTITION_TYPE_MC_FIRMWARE) + part->fw_subtype = 0xff; + else + part->fw_subtype = 0; + } + + part->dev_type_name = "EF10 NVRAM manager"; + part->type_name = info->name; + + part->mtd.type = MTD_NORFLASH; + part->mtd.flags = MTD_CAP_NORFLASH; + part->mtd.size = size; + part->mtd.erasesize = erase_size; + if (!erase_size) + part->mtd.flags |= MTD_NO_ERASE; + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_USE_MTD_WRITESIZE) + part->mtd.writesize = write_size; +#else + part->writesize = write_size; +#endif + + return 0; +} + +static int efx_ef10_mtd_probe(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX); + DECLARE_BITMAP(found, EF10_NVRAM_PARTITION_COUNT) = { 0 }; + struct efx_mtd_partition *parts; + size_t outlen, n_parts_total, i, n_parts; + unsigned int type; + int rc; + + ASSERT_RTNL(); + + BUILD_BUG_ON(MC_CMD_NVRAM_PARTITIONS_IN_LEN != 0); + rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_PARTITIONS, NULL, 0, + outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + if (outlen < MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN) + return -EIO; + + n_parts_total = MCDI_DWORD(outbuf, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS); + if (n_parts_total > + MCDI_VAR_ARRAY_LEN(outlen, NVRAM_PARTITIONS_OUT_TYPE_ID)) + return -EIO; + + parts = kcalloc(n_parts_total, sizeof(*parts), GFP_KERNEL); + if (!parts) + return -ENOMEM; + + n_parts = 0; + for (i = 0; i < n_parts_total; i++) { + type = MCDI_ARRAY_DWORD(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID, + i); + rc = efx_ef10_mtd_probe_partition(efx, &parts[n_parts], type, + found); + if (rc == -EEXIST || rc == -ENODEV) + continue; + if (rc) + goto fail_free; + n_parts++; + } + + /* Once we've passed parts to efx_mtd_add it becomes responsible for + * freeing it. + */ + return efx_mtd_add(efx, parts, n_parts); + +fail_free: + kfree(parts); + return rc; +} + +#endif /* CONFIG_SFC_MTD */ + +#ifdef CONFIG_SFC_PTP + +static void efx_ef10_ptp_write_host_time(struct efx_nic *efx, u32 host_time) +{ + _efx_writed(efx, cpu_to_le32(host_time), ER_DZ_MC_DB_LWRD); +} +#endif /* CONFIG_SFC_PTP */ + +#ifdef EFX_NOT_UPSTREAM + +int efx_ef10_update_keys(struct efx_nic *efx, + struct efx_update_license2 *key_stats) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_LICENSING_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_LICENSING_OUT_LEN); + int rc; + + MCDI_SET_DWORD(inbuf, LICENSING_IN_OP, + MC_CMD_LICENSING_IN_OP_UPDATE_LICENSE); + rc = efx_mcdi_rpc(efx, MC_CMD_LICENSING, inbuf, sizeof(inbuf), + NULL, 0, NULL); + if (rc) + return rc; + + MCDI_SET_DWORD(inbuf, LICENSING_IN_OP, + MC_CMD_LICENSING_IN_OP_GET_KEY_STATS); + rc = efx_mcdi_rpc(efx, MC_CMD_LICENSING, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), NULL); + if (rc) + return rc; + + key_stats->valid_keys = + MCDI_DWORD(outbuf, LICENSING_OUT_VALID_APP_KEYS); + key_stats->invalid_keys = + MCDI_DWORD(outbuf, LICENSING_OUT_INVALID_APP_KEYS); + key_stats->blacklisted_keys = + MCDI_DWORD(outbuf, LICENSING_OUT_BLACKLISTED_APP_KEYS); + key_stats->unverifiable_keys = + MCDI_DWORD(outbuf, LICENSING_OUT_UNVERIFIABLE_APP_KEYS); + key_stats->wrong_node_keys = + MCDI_DWORD(outbuf, LICENSING_OUT_WRONG_NODE_APP_KEYS); + return 0; +} + +int efx_ef10_licensed_app_state(struct efx_nic *efx, + struct efx_licensed_app_state *app_state) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_LICENSED_APP_STATE_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN); + int rc; + + MCDI_SET_DWORD(inbuf, LICENSING_IN_OP,MC_CMD_GET_LICENSED_APP_STATE); + MCDI_SET_DWORD(inbuf, GET_LICENSED_APP_STATE_IN_APP_ID, + 
app_state->app_id); + + rc = efx_mcdi_rpc(efx, MC_CMD_GET_LICENSED_APP_STATE, inbuf, + sizeof(inbuf), outbuf, sizeof(outbuf), NULL); + if (!rc) + app_state->state = + MCDI_DWORD(outbuf, GET_LICENSED_APP_STATE_OUT_STATE); + return rc; +} + +#endif /* EFX_NOT_UPSTREAM */ + +#ifdef CONFIG_SFC_PTP +static int efx_ef10_rx_enable_timestamping(struct efx_channel *channel, + bool temp) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN); + int rc; + u32 qid; + + if (channel->sync_events_state == SYNC_EVENTS_REQUESTED || + channel->sync_events_state == SYNC_EVENTS_VALID || + (temp && channel->sync_events_state == SYNC_EVENTS_DISABLED)) + return 0; + channel->sync_events_state = SYNC_EVENTS_REQUESTED; + + /* Try to subscribe with sync status reporting enabled. If this fails, + * fallback to using the old scheme */ + MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE); + MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); + qid = channel->channel | + (1 << MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_LBN); + MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE, qid); + + rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP, + inbuf, sizeof(inbuf), NULL, 0, NULL); + if (rc == -ERANGE) { + MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE, + channel->channel); + + rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP, + inbuf, sizeof(inbuf), NULL, 0, NULL); + } + + if (rc != 0) + channel->sync_events_state = temp ? SYNC_EVENTS_QUIESCENT : + SYNC_EVENTS_DISABLED; + + return rc; +} + +static int efx_ef10_rx_disable_timestamping(struct efx_channel *channel, + bool temp) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN); + int rc; + + if (channel->sync_events_state == SYNC_EVENTS_DISABLED || + (temp && channel->sync_events_state == SYNC_EVENTS_QUIESCENT)) + return 0; + if (channel->sync_events_state == SYNC_EVENTS_QUIESCENT) { + channel->sync_events_state = SYNC_EVENTS_DISABLED; + return 0; + } + channel->sync_events_state = temp ? SYNC_EVENTS_QUIESCENT : + SYNC_EVENTS_DISABLED; + + MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE); + MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); + MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL, + MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE); + MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE, + channel->channel); + + rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP, + inbuf, sizeof(inbuf), NULL, 0, NULL); + + return rc; +} + +static int efx_ef10_ptp_set_ts_sync_events(struct efx_nic *efx, bool en, + bool temp) +{ + int (*set)(struct efx_channel *channel, bool temp); + struct efx_channel *channel; + int rc; + + set = en ? 
+ efx_ef10_rx_enable_timestamping :
+ efx_ef10_rx_disable_timestamping;
+
+ if (efx_ptp_uses_separate_channel(efx)) {
+ channel = efx_ptp_channel(efx);
+ if (channel) {
+ rc = set(channel, temp);
+ if (en && rc != 0) {
+ efx_ef10_ptp_set_ts_sync_events(efx, false, temp);
+ return rc;
+ }
+ }
+ } else {
+ efx_for_each_channel(channel, efx) {
+ rc = set(channel, temp);
+ if (en && rc != 0) {
+ efx_ef10_ptp_set_ts_sync_events(efx, false, temp);
+ return rc;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int efx_ef10_ptp_set_ts_config(struct efx_nic *efx,
+ struct hwtstamp_config *init)
+{
+ int rc;
+
+ switch (init->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ efx_ef10_ptp_set_ts_sync_events(efx, false, false);
+ /* if TX timestamping is still requested then leave PTP on */
+ return efx_ptp_change_mode(efx,
+ init->tx_type != HWTSTAMP_TX_OFF, 0);
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ init->rx_filter = HWTSTAMP_FILTER_ALL;
+ rc = efx_ptp_change_mode(efx, true, MC_CMD_PTP_MODE_V2);
+ if (!rc)
+ rc = efx_ef10_ptp_set_ts_sync_events(efx, true, false);
+ if (rc)
+ efx_ptp_change_mode(efx, false, 0);
+ return rc;
+ default:
+ return -ERANGE;
+ }
+}
+#endif
+
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_NEED_GET_PHYS_PORT_ID)
+static int efx_ef10_get_phys_port_id(struct efx_nic *efx,
+ struct netdev_phys_item_id *ppid)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ if (!is_valid_ether_addr(nic_data->port_id))
+ return -EOPNOTSUPP;
+
+ ppid->id_len = ETH_ALEN;
+ memcpy(ppid->id, nic_data->port_id, ppid->id_len);
+
+ return 0;
+}
+#endif
+
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_UDP_TUNNEL_NIC_INFO)
+/* We rely on the MCDI wiping out our TX rings if it made any changes to the
+ * ports table, ensuring that any TSO descriptors that were made on a now-
+ * removed tunnel port will be blown away and won't break things when we try
+ * to transmit them using the new ports table.
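+ * (A stale TSO option descriptor could otherwise ask the NIC to apply
+ * inner-header offloads to traffic that the new table no longer
+ * recognises as encapsulated.)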
+ */ +static int efx_ef10_set_udp_tnl_ports(struct efx_nic *efx, bool unloading) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMAX); + MCDI_DECLARE_BUF(outbuf, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_LEN); + struct efx_ef10_nic_data *nic_data = efx->nic_data; + efx_dword_t flags_and_num_entries; + size_t inlen, outlen, num_entries; + bool will_reset = false; + size_t i; + int rc; + + WARN_ON(!mutex_is_locked(&nic_data->udp_tunnels_lock)); + + nic_data->udp_tunnels_dirty = false; + + if (!efx_ef10_has_cap(nic_data->datapath_caps, VXLAN_NVGRE)) { + efx_device_attach_if_not_resetting(efx); + return 0; + } + + BUILD_BUG_ON(ARRAY_SIZE(nic_data->udp_tunnels) > + MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM); + + num_entries = 0; + if (!unloading) { + for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i) { + efx_dword_t entry; + + if (nic_data->udp_tunnels[i].type == + TUNNEL_ENCAP_UDP_PORT_ENTRY_INVALID) + continue; + + EFX_POPULATE_DWORD_2(entry, + TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT, + ntohs(nic_data->udp_tunnels[i].port), + TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL, + nic_data->udp_tunnels[i].type); + *_MCDI_ARRAY_DWORD(inbuf, + SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES, + num_entries++) = entry; + } + } + + /* Adding/removing a UDP tunnel can cause an MC reboot. We must + * prevent causing too many reboots in a second. + */ + efx->reset_count = 0; + + BUILD_BUG_ON((MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_OFST - + MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS_OFST) * 8 != + EFX_WORD_1_LBN); + BUILD_BUG_ON(MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_LEN * 8 != + EFX_WORD_1_WIDTH); + EFX_POPULATE_DWORD_2(flags_and_num_entries, + MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING, + !!unloading, + EFX_WORD_1, num_entries); + *_MCDI_DWORD(inbuf, SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS) = + flags_and_num_entries; + + inlen = MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LEN(num_entries); + + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS, + inbuf, inlen, outbuf, sizeof(outbuf), &outlen); + if (rc == -EIO) { + /* Most likely the MC rebooted due to another function also + * setting its tunnel port list. Mark the tunnel port list as + * dirty, so it will be pushed upon coming up from the reboot. + */ + nic_data->udp_tunnels_dirty = true; + /* We detached earlier, expecting an MC reset to trigger a + * re-attach. If we haven't allocated event queues, we won't + * see the notification. In this case we're not using any + * resources, so we don't actually need to do any reset + * handling except to forget some resources and reattach. + */ + if (!unloading && !efx_net_allocated(efx->state)) { + efx_device_attach_if_not_resetting(efx); + efx_ef10_mcdi_reboot_detected(efx); + } + return 0; + } + + if (rc) { + /* expected not available on unprivileged functions */ + if (rc != -EPERM) + netif_warn(efx, drv, efx->net_dev, + "Unable to set UDP tunnel ports; rc=%d.\n", rc); + } else if (MCDI_DWORD(outbuf, SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS) & + (1 << MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_LBN)) { + netif_info(efx, drv, efx->net_dev, + "Rebooting MC due to UDP tunnel port list change\n"); + will_reset = true; + if (unloading) + /* Delay for the MC reset to complete. This will make + * unloading other functions a bit smoother. This is a + * race, but the other unload will work whichever way + * it goes, this just avoids an unnecessary error + * message. + */ + msleep(100); + } + /* We detached earlier, expecting an MC reset to trigger a + * re-attach. 
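+ * (Re-attaching is normally done by our reset handling once the MC
+ * comes back up.)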
+ * But, there are two cases this won't happen: If the MC tells + * us it's not going to reset, just reattach and carry on. + * Alternatively, if the MC tells us it will reset but we haven't + * allocated event queues, we won't see the notification. In this + * case we're not using any resources, so we don't actually + * need to do any reset handling except to forget some + * resources and reattach. + */ + if (!unloading) { + if (will_reset && !efx_net_allocated(efx->state)) + efx_ef10_mcdi_reboot_detected(efx); + if (!will_reset || !efx_net_allocated(efx->state)) + efx_device_attach_if_not_resetting(efx); + } + + return rc; +} + +static int efx_ef10_udp_tnl_push_ports(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + int rc = 0; + + mutex_lock(&nic_data->udp_tunnels_lock); + if (nic_data->udp_tunnels_dirty) { + /* Make sure all TX are stopped while we modify the table, else + * we might race against an efx_features_check(). + * If we fail early in probe, we won't have set up our TXQs yet, + * in which case we don't need to do this (and trying wouldn't + * end well). So check we've called register_netdevice(). + */ + if (efx->net_dev->reg_state == NETREG_REGISTERED) + efx_device_detach_sync(efx); + rc = efx_ef10_set_udp_tnl_ports(efx, false); + } + mutex_unlock(&nic_data->udp_tunnels_lock); + return rc; +} + +static int efx_ef10_udp_tnl_set_port(struct net_device *dev, + unsigned int table, unsigned int entry, + struct udp_tunnel_info *ti) +{ + struct efx_nic *efx = efx_netdev_priv(dev); + struct efx_ef10_nic_data *nic_data; + int efx_tunnel_type, rc; + + if (ti->type == UDP_TUNNEL_TYPE_VXLAN) + efx_tunnel_type = TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN; + else + efx_tunnel_type = TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE; + + nic_data = efx->nic_data; + if (!efx_ef10_has_cap(nic_data->datapath_caps, VXLAN_NVGRE)) + return -EOPNOTSUPP; + + mutex_lock(&nic_data->udp_tunnels_lock); + /* Make sure all TX are stopped while we modify the table, else + * we might race against an efx_features_check(). + * If we fail early in probe, we won't have set up our TXQs yet, + * in which case we don't need to do this (and trying wouldn't + * end well). So check we've called register_netdevice(). + */ + if (efx_dev_registered(efx)) + efx_device_detach_sync(efx); + nic_data->udp_tunnels[entry].type = efx_tunnel_type; + nic_data->udp_tunnels[entry].port = ti->port; + rc = efx_ef10_set_udp_tnl_ports(efx, false); + mutex_unlock(&nic_data->udp_tunnels_lock); + + return rc; +} + +/* Called under the TX lock with the TX queue running, hence no-one can be + * in the middle of updating the UDP tunnels table. However, they could + * have tried and failed the MCDI, in which case they'll have set the dirty + * flag before dropping their locks. + */ +static bool efx_ef10_udp_tnl_has_port(struct efx_nic *efx, __be16 port) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + size_t i; + + if (!efx_ef10_has_cap(nic_data->datapath_caps, VXLAN_NVGRE)) + return false; + + if (nic_data->udp_tunnels_dirty) + /* SW table may not match HW state, so just assume we can't + * use any UDP tunnel offloads. 
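+ * Encapsulated traffic should then fall back to software
+ * checksumming and GSO, which is safe, just slower.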
+ */ + return false; + + for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i) + if (nic_data->udp_tunnels[i].type != + TUNNEL_ENCAP_UDP_PORT_ENTRY_INVALID && + nic_data->udp_tunnels[i].port == port) + return true; + + return false; +} + +static int efx_ef10_udp_tnl_unset_port(struct net_device *dev, + unsigned int table __always_unused, + unsigned int entry, + struct udp_tunnel_info *ti __always_unused) +{ + struct efx_nic *efx = efx_netdev_priv(dev); + struct efx_ef10_nic_data *nic_data; + int rc; + + nic_data = efx->nic_data; + + mutex_lock(&nic_data->udp_tunnels_lock); + /* Make sure all TX are stopped while we remove from the table, else we + * might race against an efx_features_check(). + */ + efx_device_detach_sync(efx); + nic_data->udp_tunnels[entry].type = TUNNEL_ENCAP_UDP_PORT_ENTRY_INVALID; + nic_data->udp_tunnels[entry].port = 0; + rc = efx_ef10_set_udp_tnl_ports(efx, false); + mutex_unlock(&nic_data->udp_tunnels_lock); + + return rc; +} + +static const struct udp_tunnel_nic_info efx_ef10_udp_tunnels = { + .set_port = efx_ef10_udp_tnl_set_port, + .unset_port = efx_ef10_udp_tnl_unset_port, + .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP, + .tables = { + { + .n_entries = 16, + .tunnel_types = UDP_TUNNEL_TYPE_VXLAN | + UDP_TUNNEL_TYPE_GENEVE, + }, + }, +}; +#else +/* We rely on the MCDI wiping out our TX rings if it made any changes to the + * ports table, ensuring that any TSO descriptors that were made on a now- + * removed tunnel port will be blown away and won't break things when we try + * to transmit them using the new ports table. + */ +static int efx_ef10_set_udp_tnl_ports(struct efx_nic *efx, bool unloading) + __releases(nic_data->udp_tunnels_lock) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMAX); + MCDI_DECLARE_BUF(outbuf, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_LEN); + struct efx_ef10_nic_data *nic_data = efx->nic_data; + bool adding[ARRAY_SIZE(nic_data->udp_tunnels)] = {0}; + size_t num_entries, inlen, outlen; + struct efx_udp_tunnel *tnl; + bool will_reset = false; + bool done = false; + size_t i; + int rc; + efx_dword_t flags_and_num_entries; + + if (!efx_ef10_has_cap(nic_data->datapath_caps, VXLAN_NVGRE)) { + spin_unlock_bh(&nic_data->udp_tunnels_lock); + return 0; + } + + BUILD_BUG_ON(ARRAY_SIZE(nic_data->udp_tunnels) > + MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM); + + if (!unloading) { + if (nic_data->udp_tunnels_busy) { + /* someone else is doing it for us */ + spin_unlock_bh(&nic_data->udp_tunnels_lock); + return 0; + } + } +again: + num_entries = 0; + for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i) { + tnl = nic_data->udp_tunnels + i; + if (!unloading && tnl->count && !tnl->removing && tnl->port) { + efx_dword_t entry; + + adding[i] = true; + EFX_POPULATE_DWORD_2(entry, + TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT, + ntohs(tnl->port), + TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL, tnl->type); + *_MCDI_ARRAY_DWORD(inbuf, + SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES, + num_entries++) = entry; + } else { + adding[i] = false; + } + } + nic_data->udp_tunnels_busy = true; + spin_unlock_bh(&nic_data->udp_tunnels_lock); + + /* Adding/removing a UDP tunnel can cause an MC reboot. We must + * prevent causing too many reboots in a second. 
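+ * Zeroing efx->reset_count below stops the reboot we may be about
+ * to trigger from counting towards the reset-storm limit that would
+ * otherwise disable the device.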
+ */ + efx->reset_count = 0; + + BUILD_BUG_ON((MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_OFST - + MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS_OFST) * 8 != + EFX_WORD_1_LBN); + BUILD_BUG_ON(MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_LEN * 8 != + EFX_WORD_1_WIDTH); + EFX_POPULATE_DWORD_2(flags_and_num_entries, + MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING, + !!unloading, + EFX_WORD_1, num_entries); + *_MCDI_DWORD(inbuf, SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS) = + flags_and_num_entries; + + WARN_ON(unloading && num_entries); + + inlen = MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LEN(num_entries); + + /* Make sure all TX are stopped while we modify the table, else + * we might race against an efx_features_check(). + * If we fail early in probe, we won't have set up our TXQs yet, + * in which case we don't need to do this (and trying wouldn't + * end well). So check we've called register_netdevice(). + */ + if (efx->net_dev->reg_state == NETREG_REGISTERED) + efx_device_detach_sync(efx); + + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS, + inbuf, inlen, outbuf, sizeof(outbuf), &outlen); + if (rc == -EIO) { + /* Most likely the MC rebooted due to another function also + * setting its tunnel port list. Mark all tunnel ports as not + * present; they will be added upon coming up from the reboot. + */ + spin_lock_bh(&nic_data->udp_tunnels_lock); + for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); i++) { + tnl = nic_data->udp_tunnels + i; + if (tnl->count) + tnl->adding = true; + tnl->removing = false; + } + nic_data->udp_tunnels_busy = false; + spin_unlock_bh(&nic_data->udp_tunnels_lock); + /* We detached earlier, expecting an MC reset to trigger a + * re-attach. If we haven't allocated event queues, we won't + * see the notification. In this case we're not using any + * resources, so we don't actually need to do any reset + * handling except to forget some resources and reattach. + */ + if (!unloading && !efx_net_allocated(efx->state)) { + efx_device_attach_if_not_resetting(efx); + efx_ef10_mcdi_reboot_detected(efx); + } + return 0; + } + + if (rc) { + /* expected not available on unprivileged functions */ + if (rc != -EPERM) + netif_warn(efx, drv, efx->net_dev, + "Unable to set UDP tunnel ports; rc=%d.\n", rc); + } else if (MCDI_DWORD(outbuf, SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS) & + (1 << MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_LBN)) { + netif_info(efx, drv, efx->net_dev, + "Rebooting MC due to UDP tunnel port list change\n"); + will_reset = true; + } + /* We detached earlier, expecting an MC reset to trigger a + * re-attach. + * But, there are two cases this won't happen: If the MC tells + * us it's not going to reset, just reattach and carry on. + * Alternatively, if the MC tells us it will reset but we haven't + * allocated event queues, we won't see the notification. In this + * case we're not using any resources, so we don't actually + * need to do any reset handling except to forget some + * resources and reattach. 
+ */ + if (!unloading) { + if (will_reset && !efx_net_allocated(efx->state)) + efx_ef10_mcdi_reboot_detected(efx); + if (!will_reset || !efx_net_allocated(efx->state)) + efx_device_attach_if_not_resetting(efx); + } + spin_lock_bh(&nic_data->udp_tunnels_lock); + if (!rc) { + done = true; + /* Mark the adds/removes as done */ + for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); i++) { + tnl = nic_data->udp_tunnels + i; + if (adding[i]) + /* it's added */ + tnl->adding = false; + else + /* it's not added, so if you wanted a remove + * you got it + */ + tnl->removing = false; + if (tnl->adding || tnl->removing) + done = false; + } + if (!done) + /* Someone requested more changes, let's go and do them */ + goto again; + } + nic_data->udp_tunnels_busy = false; + spin_unlock_bh(&nic_data->udp_tunnels_lock); + return rc; +} + +static void efx_ef10_udp_tnl_push_ports_sync(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + + spin_lock_bh(&nic_data->udp_tunnels_lock); + efx_ef10_set_udp_tnl_ports(nic_data->efx, false); +} + +static void efx_ef10__udp_tnl_push_ports(struct work_struct *data) +{ + struct efx_ef10_nic_data *nic_data; + + nic_data = container_of(data, struct efx_ef10_nic_data, + udp_tunnel_work); + + efx_ef10_udp_tnl_push_ports_sync(nic_data->efx); +} + +static void efx_ef10_udp_tnl_push_ports_async(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + + schedule_work(&nic_data->udp_tunnel_work); +} + +static struct efx_udp_tunnel *_efx_ef10_udp_tnl_lookup_port(struct efx_nic *efx, + __be16 port) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + size_t i; + + for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i) { + if (!nic_data->udp_tunnels[i].count) + continue; + if (nic_data->udp_tunnels[i].port == port) + return &nic_data->udp_tunnels[i]; + } + return NULL; +} + +static void efx_ef10_udp_tnl_add_port(struct efx_nic *efx, + struct efx_udp_tunnel tnl) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + struct efx_udp_tunnel *match; + char typebuf[8]; + size_t i; + bool possible_reboot = false; + + if (!efx_ef10_has_cap(nic_data->datapath_caps, VXLAN_NVGRE)) + return; + + efx_get_udp_tunnel_type_name(tnl.type, typebuf, sizeof(typebuf)); + netif_dbg(efx, drv, efx->net_dev, "Adding UDP tunnel (%s) port %d\n", + typebuf, ntohs(tnl.port)); + + spin_lock_bh(&nic_data->udp_tunnels_lock); + + match = _efx_ef10_udp_tnl_lookup_port(efx, tnl.port); + if (match) { + if (match->type == tnl.type) { + netif_dbg(efx, drv, efx->net_dev, + "Referencing existing tunnel entry\n"); + /* Saturate at max value to prevent overflow. */ + if (match->count < EFX_UDP_TUNNEL_COUNT_MAX) + match->count++; + /* Yell if our refcounts are getting huge */ + WARN_ON_ONCE(match->count & EFX_UDP_TUNNEL_COUNT_WARN); + if (match->removing) { + /* This was due to be removed as its count had + * fallen to 0. We don't know how far the + * removal has got; mark it for re-add. + */ + match->removing = false; + match->adding = true; + /* The thread currently doing the update will + * see this in its post-MCDI checking and so + * will re-do the MCDI. Thus we don't need to + * schedule one ourselves. + */ + } else { + /* Nothing to do. This can only happen in the + * case of OVS tunnels, because regular kernel + * tunnels refcount their sockets and thus only + * ever ask for a port once. 
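+ * Such users may ask for the same port many times,
+ * which is why the refcount above saturates rather
+ * than wrapping.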
+ */ + } + goto out_unlock; + } + efx_get_udp_tunnel_type_name(match->type, + typebuf, sizeof(typebuf)); + netif_dbg(efx, drv, efx->net_dev, + "UDP port %d is already in use by %s\n", + ntohs(tnl.port), typebuf); + goto out_unlock; + } + + /* find an empty slot and use it */ + for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i) { + match = nic_data->udp_tunnels + i; + /* Unused slot? */ + if (!match->count && !match->adding && !match->removing) { + *match = tnl; + match->adding = true; + match->removing = false; + match->count = 1; + /* schedule an update */ + efx_ef10_udp_tnl_push_ports_async(efx); + possible_reboot = true; + goto out_unlock; + } + } + + netif_dbg(efx, drv, efx->net_dev, + "Unable to add UDP tunnel (%s) port %d; insufficient resources.\n", + typebuf, ntohs(tnl.port)); + +out_unlock: + spin_unlock_bh(&nic_data->udp_tunnels_lock); + if (possible_reboot) /* Wait for a reboot to complete */ + msleep(200); +} + +/* Called under the TX lock with the TX queue running, hence no-one can be + * in the middle of updating the UDP tunnels table. However, they could + * have tried and failed the MCDI, in which case they'll have set appropriate + * adding/removing flags before dropping their locks. + */ +static bool efx_ef10_udp_tnl_has_port(struct efx_nic *efx, __be16 port) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + struct efx_udp_tunnel *tnl; + + if (!efx_ef10_has_cap(nic_data->datapath_caps, VXLAN_NVGRE)) + return false; + + /* We're not under the spinlock, but that's OK because we're only + * reading. See above comment. + */ + tnl = _efx_ef10_udp_tnl_lookup_port(efx, port); + return tnl && !tnl->adding && !tnl->removing; +} + +static void efx_ef10_udp_tnl_del_port(struct efx_nic *efx, + struct efx_udp_tunnel tnl) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + struct efx_udp_tunnel *match; + char typebuf[8]; + bool possible_reboot = false; + + if (!efx_ef10_has_cap(nic_data->datapath_caps, VXLAN_NVGRE)) + return; + + efx_get_udp_tunnel_type_name(tnl.type, typebuf, sizeof(typebuf)); + netif_dbg(efx, drv, efx->net_dev, "Removing UDP tunnel (%s) port %d\n", + typebuf, ntohs(tnl.port)); + + spin_lock_bh(&nic_data->udp_tunnels_lock); + + match = _efx_ef10_udp_tnl_lookup_port(efx, tnl.port); + if (match) { + if (match->type == tnl.type) { + /* if we hit the max value, we stopped counting, so + * saturate and keep this port forever + */ + if (match->count != EFX_UDP_TUNNEL_COUNT_MAX) + match->count--; + if (match->count) { + netif_dbg(efx, drv, efx->net_dev, + "Keeping UDP tunnel (%s) port %d, refcount %u\n", + typebuf, ntohs(tnl.port), + match->count); + /* No MCDI to do */ + goto out_unlock; + } + if (match->adding) + /* This was still waiting to be added, and we + * don't want it any more. We don't know how + * far the add has got; unmark it. 
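+ * Marking it as removing too means the next table
+ * push takes the port out whichever state the
+ * hardware is in.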
+ */ + match->adding = false; + match->removing = true; + /* schedule an update */ + efx_ef10_udp_tnl_push_ports_async(efx); + possible_reboot = true; + goto out_unlock; + } + efx_get_udp_tunnel_type_name(match->type, + typebuf, sizeof(typebuf)); + netif_warn(efx, drv, efx->net_dev, + "UDP port %d is actually in use by %s, not removing\n", + ntohs(tnl.port), typebuf); + } else { + /* nothing to do */ + netif_dbg(efx, drv, efx->net_dev, + "UDP port %d was not previously offloaded\n", + ntohs(tnl.port)); + } + +out_unlock: + spin_unlock_bh(&nic_data->udp_tunnels_lock); + if (possible_reboot) /* Wait for a reboot to complete */ + msleep(200); +} +#endif + +#ifdef EFX_NOT_UPSTREAM +static ssize_t forward_fcs_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); + +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_ETHTOOL_FCS) + return scnprintf(buf, PAGE_SIZE, "%d\n", efx->forward_fcs); +#else + if (!(efx->net_dev->features & NETIF_F_RXFCS) != + !(efx->net_dev->features & NETIF_F_RXALL)) + return scnprintf(buf, PAGE_SIZE, "mixed\n"); + + return scnprintf(buf, PAGE_SIZE, "%d\n", + !!(efx->net_dev->features & NETIF_F_RXFCS)); +#endif +} + +static ssize_t forward_fcs_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_ETHTOOL_FCS) + bool old_forward_fcs = efx->forward_fcs; +#endif + + if (sysfs_streq(buf, "1")) +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_ETHTOOL_FCS) + efx->forward_fcs = true; +#else + efx->net_dev->wanted_features |= NETIF_F_RXFCS | NETIF_F_RXALL; +#endif + else if (sysfs_streq(buf, "0")) +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_ETHTOOL_FCS) + efx->forward_fcs = false; +#else + efx->net_dev->wanted_features &= ~(NETIF_F_RXFCS | + NETIF_F_RXALL); +#endif + else + return -EINVAL; + +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_ETHTOOL_FCS) + if (efx->forward_fcs != old_forward_fcs) { + mutex_lock(&efx->mac_lock); + (void)efx_mac_reconfigure(efx, false); + mutex_unlock(&efx->mac_lock); + } +#else + /* will call our ndo_set_features to actually make the change */ + rtnl_lock(); + netdev_update_features(efx->net_dev); + rtnl_unlock(); +#endif + + return count; +} +static DEVICE_ATTR_RW(forward_fcs); + +static ssize_t physical_port_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); + + return scnprintf(buf, PAGE_SIZE, "%d\n", efx->port_num); +} +static DEVICE_ATTR_RO(physical_port); +#endif + +static ssize_t link_control_flag_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); + + return scnprintf(buf, PAGE_SIZE, "%d\n", + ((efx->mcdi->fn_flags) & + (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL)) + ? 1 : 0); +} +static DEVICE_ATTR_RO(link_control_flag); + +static ssize_t primary_flag_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); + + return scnprintf(buf, PAGE_SIZE, "%d\n", + ((efx->mcdi->fn_flags) & + (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY)) + ? 
1 : 0); +} +static DEVICE_ATTR_RO(primary_flag); + +/* NIC probe and remove + */ +static void efx_ef10_remove_post_io(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + int i; + + if (!nic_data) + return; + + efx_mcdi_filter_table_remove(efx); + + efx_mcdi_mon_remove(efx); + +#ifdef EFX_NOT_UPSTREAM + device_remove_file(&efx->pci_dev->dev, &dev_attr_physical_port); + + if (efx_ef10_has_cap(nic_data->datapath_caps, RX_INCLUDE_FCS)) + device_remove_file(&efx->pci_dev->dev, &dev_attr_forward_fcs); +#endif + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_UDP_TUNNEL_NIC_INFO) + for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i) { + nic_data->udp_tunnels[i].type = + TUNNEL_ENCAP_UDP_PORT_ENTRY_INVALID; + nic_data->udp_tunnels[i].port = 0; + } + mutex_lock(&nic_data->udp_tunnels_lock); + (void)efx_ef10_set_udp_tnl_ports(efx, true); + mutex_unlock(&nic_data->udp_tunnels_lock); +#else + cancel_work_sync(&nic_data->udp_tunnel_work); + + /* mark all UDP tunnel ports for remove... */ + spin_lock_bh(&nic_data->udp_tunnels_lock); + /* Since the udp_tunnel_work has been finished, no-one can still be + * busy. + */ + WARN_ON(nic_data->udp_tunnels_busy); + nic_data->udp_tunnels_busy = true; + for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); i++) + nic_data->udp_tunnels[i].removing = true; + /* ... then remove them */ + efx_ef10_set_udp_tnl_ports(efx, true); /* drops the lock */ +#endif +} + +static int efx_ef10_probe_post_io(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + unsigned int bar_size = efx_ef10_bar_size(efx); + int rc; + + rc = efx_get_fn_info(efx, &nic_data->pf_index, &nic_data->vf_index); + if (rc) + return rc; + + /* License checking on the firmware must finish before we can trust the + * capabilities. Before that some will not be set. + */ + efx_ef10_read_licensed_features(efx); + + rc = efx_ef10_init_datapath_caps(efx); + if (rc < 0) + return rc; + + efx->tx_queues_per_channel = 1; + efx->select_tx_queue = efx_ef10_select_tx_queue; +#ifdef EFX_USE_OVERLAY_TX_CSUM + if (efx_ef10_has_cap(nic_data->datapath_caps, VXLAN_NVGRE) && + !efx_ef10_is_vf(efx)) { + efx->tx_queues_per_channel = 2; + efx->select_tx_queue = efx_ef10_select_tx_queue_overlay; + } +#endif +#ifdef EFX_NOT_UPSTREAM + if (tx_non_csum_queue) { + efx->tx_queues_per_channel = 2; + efx->select_tx_queue = efx_ef10_select_tx_queue_non_csum; +#ifdef EFX_USE_OVERLAY_TX_CSUM + if (efx_ef10_has_cap(nic_data->datapath_caps, VXLAN_NVGRE) && + !efx_ef10_is_vf(efx)) { + efx->tx_queues_per_channel = 3; + efx->select_tx_queue = + efx_ef10_select_tx_queue_non_csum_overlay; + } +#endif + } +#endif + + /* We can have one VI for each vi_stride-byte region. + * Note we have more TX queues than channels, so TX queues are the + * limit on the number of channels we can have with our VIs. 
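+ * As an illustration only: a 16 MiB function BAR with an 8 KiB
+ * vi_stride gives 2048 VIs; with two TX queues per channel the
+ * calculation below then caps us at 1024 channels.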
+ */
+ efx->max_vis = bar_size / efx->vi_stride;
+ if (!efx->max_vis) {
+ netif_err(efx, drv, efx->net_dev, "error determining max VIs\n");
+ return -EIO;
+ }
+ efx->max_channels = min_t(unsigned int, efx->max_channels,
+ (bar_size / (efx->vi_stride *
+ efx->tx_queues_per_channel)));
+ efx->max_tx_channels = efx->max_channels;
+ if (efx->max_channels == 0) {
+ netif_err(efx, drv, efx->net_dev, "error determining max channels\n");
+ return -EIO;
+ }
+
+ efx->rx_packet_len_offset =
+ ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE;
+
+ if (efx_ef10_has_cap(nic_data->datapath_caps, RX_INCLUDE_FCS))
+ efx_add_hw_features(efx, NETIF_F_RXFCS);
+
+ rc = efx_mcdi_port_get_number(efx);
+ if (rc < 0)
+ return rc;
+ efx->port_num = rc;
+
+ rc = efx->type->get_mac_address(efx, efx->net_dev->perm_addr);
+ if (rc)
+ return rc;
+
+ rc = efx_ef10_get_timer_config(efx);
+ if (rc < 0)
+ return rc;
+
+ rc = efx_mcdi_mon_probe(efx);
+ if (rc && rc != -EPERM)
+ return rc;
+
+#ifdef CONFIG_SFC_SRIOV
+ efx_sriov_init_max_vfs(efx, nic_data->pf_index);
+#endif
+
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_NEED_GET_PHYS_PORT_ID)
+#ifdef CONFIG_SFC_SRIOV
+ if (efx->pci_dev->physfn && !efx->pci_dev->is_physfn) {
+ struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
+ struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
+
+ efx_pf->type->get_mac_address(efx_pf, nic_data->port_id);
+ } else
+#endif
+ ether_addr_copy(nic_data->port_id, efx->net_dev->perm_addr);
+#endif
+ /* If tx_coalesce_doorbell=Y, disable tx_push; we do this here to
+ * avoid additional checks on the fast path.
+ */
+ if (tx_coalesce_doorbell) {
+ netif_info(efx, drv, efx->net_dev,
+ "Tx push disabled due to tx_coalesce_doorbell=Y\n");
+ tx_push_max_fill = 0;
+ }
+
+ rc = efx_ef10_filter_table_probe(efx);
+ if (rc)
+ return rc;
+
+ /* Add unspecified VID to support VLAN filtering being disabled */
+ rc = efx_mcdi_filter_add_vlan(efx, EFX_FILTER_VID_UNSPEC);
+ if (rc)
+ return rc;
+
+ /* If VLAN filtering is enabled, we need VID 0 to get untagged
+ * traffic. It is added automatically if the 8021q module is loaded,
+ * but we can't rely on that since the module may not be loaded.
+ */
+ return efx_mcdi_filter_add_vlan(efx, 0);
+}
+
+static int efx_ef10_probe(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data;
+ unsigned int bar_size = efx_ef10_bar_size(efx);
+ int i, rc;
+
+ if (WARN_ON(bar_size == 0))
+ return -EIO;
+
+ nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
+ if (!nic_data)
+ return -ENOMEM;
+ efx->nic_data = nic_data;
+ nic_data->efx = efx;
+
+ /* we assume later that we can copy from this buffer in dwords */
+ BUILD_BUG_ON(MCDI_CTL_SDU_LEN_MAX_V2 % 4);
+
+ /* MCDI buffers must be 256 byte aligned, so pad the first N-1
+ * buffers and add the last on the end.
+ */
+ rc = efx_nic_alloc_buffer(efx, &nic_data->mcdi_buf,
+ ALIGN(MCDI_BUF_LEN, 256) *
+ (EF10_NUM_MCDI_BUFFERS - 1) +
+ MCDI_BUF_LEN,
+ GFP_KERNEL);
+ if (rc)
+ return rc;
+
+ /* Get the MC's warm boot count. In case it's rebooting right
+ * now, be prepared to retry.
+ */
+ i = 0;
+ for (;;) {
+ rc = efx_ef10_get_warm_boot_count(efx);
+ if (rc >= 0)
+ break;
+ if (++i == 5)
+ return rc;
+ ssleep(1);
+ }
+ nic_data->warm_boot_count = rc;
+
+ /* In case we're recovering from a crash (kexec), we want to
+ * cancel any outstanding request by the previous user of this
+ * function. We send a special message using the least
+ * significant bits of the 'high' (doorbell) register.
+ */ + _efx_writed(efx, cpu_to_le32(1), ER_DZ_MC_DB_HWRD); + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_UDP_TUNNEL_NIC_INFO) + mutex_init(&nic_data->udp_tunnels_lock); + for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i) + nic_data->udp_tunnels[i].type = + TUNNEL_ENCAP_UDP_PORT_ENTRY_INVALID; +#else + spin_lock_init(&nic_data->udp_tunnels_lock); + nic_data->udp_tunnels_busy = false; + INIT_WORK(&nic_data->udp_tunnel_work, efx_ef10__udp_tnl_push_ports); +#endif + + /* retry probe 3 times on EF10 due to UDP tunnel ports + * sometimes causing a reset during probe. + */ + rc = efx_pci_probe_post_io(efx, efx_ef10_probe_post_io); + if (rc) { + /* On failure, retry once immediately. + * If we aborted probe due to a scheduled reset, dismiss it. + */ + efx->reset_pending = 0; + efx_pci_remove_post_io(efx, efx_ef10_remove_post_io); + rc = efx_pci_probe_post_io(efx, efx_ef10_probe_post_io); + if (rc) { + /* On another failure, retry once more + * after a 50-305ms delay. + */ + unsigned char r; + + get_random_bytes(&r, 1); + msleep((unsigned int)r + 50); + + efx->reset_pending = 0; + efx_pci_remove_post_io(efx, efx_ef10_remove_post_io); + rc = efx_pci_probe_post_io(efx, efx_ef10_probe_post_io); + } + } + if (rc) { + netif_err(efx, drv, efx->net_dev, + "Failed too many attempts to probe. Aborting.\n"); + return rc; + } + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_UDP_TUNNEL_NIC_INFO) + if (efx_has_cap(efx, VXLAN_NVGRE) && + efx->mcdi->fn_flags & + (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED)) + efx->net_dev->udp_tunnel_nic_info = &efx_ef10_udp_tunnels; +#endif + +#ifdef EFX_NOT_UPSTREAM + if (efx->mcdi->fn_flags & + (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_NO_ACTIVE_PORT)) + return 0; +#endif + +#ifdef EFX_NOT_UPSTREAM + if (efx_rss_use_fixed_key) { + BUILD_BUG_ON(sizeof(efx_rss_fixed_key) < + sizeof(efx->rss_context.rx_hash_key)); + memcpy(&efx->rss_context.rx_hash_key, efx_rss_fixed_key, + sizeof(efx->rss_context.rx_hash_key)); + } else +#endif + netdev_rss_key_fill(efx->rss_context.rx_hash_key, + sizeof(efx->rss_context.rx_hash_key)); + + /* Don't fail init if RSS setup doesn't work. */ + efx_mcdi_push_default_indir_table(efx, efx->n_rss_channels); + +#ifdef EFX_NOT_UPSTREAM + if (efx_ef10_has_cap(nic_data->datapath_caps, RX_INCLUDE_FCS)) + rc = device_create_file(&efx->pci_dev->dev, + &dev_attr_forward_fcs); + if (rc) + return rc; + + rc = device_create_file(&efx->pci_dev->dev, &dev_attr_physical_port); + if (rc) + return rc; +#endif + + rc = device_create_file(&efx->pci_dev->dev, + &dev_attr_link_control_flag); + if (rc) + return rc; + + return device_create_file(&efx->pci_dev->dev, &dev_attr_primary_flag); +} + +static int efx_ef10_probe_pf(struct efx_nic *efx) +{ +#ifdef EFX_NOT_UPSTREAM + unsigned int desired_bandwidth; + + switch (efx->pci_dev->device) { + case 0x0923: + /* Greenport devices can make use of 8 lanes of Gen3. */ + desired_bandwidth = EFX_BW_PCIE_GEN3_X8; + break; + case 0x0a03: + case 0x0b03: + /* Medford and Medford2 devices want 16 lanes, even though + * not all have that. + * We suppress this optimism in efx_nic_check_pcie_link(). 
+ */ + desired_bandwidth = EFX_BW_PCIE_GEN3_X16; + break; + default: + desired_bandwidth = EFX_BW_PCIE_GEN2_X8; + } + + efx_nic_check_pcie_link(efx, desired_bandwidth, NULL, NULL); +#endif + return efx_ef10_probe(efx); +} + +static void efx_ef10_remove(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + + if (!nic_data) + return; + + device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag); + device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag); + + efx_pci_remove_post_io(efx, efx_ef10_remove_post_io); + + efx_nic_free_buffer(efx, &nic_data->mcdi_buf); + + kfree(nic_data); + efx->nic_data = NULL; +} + +#if defined(CONFIG_SFC_SRIOV) +static void efx_ef10_remove_vf(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_NDO_SET_VF_MAC) + struct efx_ef10_nic_data *nic_data_pf; + struct pci_dev *pci_dev_pf; + struct efx_nic *efx_pf; + struct ef10_vf *vf; +#endif + + efx_ef10_remove(efx); + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_NDO_SET_VF_MAC) + /* If PCI probe fails early there is no NIC specific data yet */ + if (!nic_data) + return; + + if (efx->pci_dev->is_virtfn) { + pci_dev_pf = efx->pci_dev->physfn; + if (pci_dev_pf) { + efx_pf = pci_get_drvdata(pci_dev_pf); + nic_data_pf = efx_pf->nic_data; + vf = nic_data_pf->vf + nic_data->vf_index; + vf->efx = NULL; + } + } +#endif +} + +static int efx_ef10_probe_vf(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data; + struct efx_nic *efx_pf; + int rc = efx_vf_parent(efx, &efx_pf); + + /* Fail if the parent is not a Solarflare PF */ + if (rc) + return rc; + + /* If the parent PF has no VF data structure, it doesn't know about this + * VF so fail probe. The VF needs to be re-created. This can happen + * if the PF driver is unloaded while the VF is assigned to a guest. + */ + if (efx_pf) { + nic_data = efx_pf->nic_data; + if (!nic_data->vf) { + netif_info(efx, drv, efx->net_dev, + "The VF cannot link to its parent PF; " + "please destroy and re-create the VF\n"); + return -EBUSY; + } + } + + rc = efx_ef10_probe(efx); + if (rc) + goto fail; + + nic_data = efx->nic_data; + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_NDO_SET_VF_MAC) + if (efx->pci_dev->physfn) { + struct efx_nic *efx_pf = pci_get_drvdata(efx->pci_dev->physfn); + struct efx_ef10_nic_data *nic_data_pf = efx_pf->nic_data; + + nic_data_pf->vf[nic_data->vf_index].efx = efx; + nic_data_pf->vf[nic_data->vf_index].pci_dev = + efx->pci_dev; + } else { + netif_info(efx, drv, efx->net_dev, + "Could not get the PF id from VF\n"); + } +#endif + + INIT_DELAYED_WORK(&nic_data->vf_stats_work, + efx_ef10_vf_update_stats_work); + + return 0; + +fail: + efx_ef10_remove_vf(efx); + return rc; +} +#endif /* CONFIG_SFC_SRIOV */ + +static unsigned int ef10_check_caps(const struct efx_nic *efx, + u8 flag, + u32 offset) +{ + const struct efx_ef10_nic_data *nic_data = efx->nic_data; + + switch (offset) { + case(MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS1_OFST): + return nic_data->datapath_caps & BIT_ULL(flag); + case(MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS2_OFST): + return nic_data->datapath_caps2 & BIT_ULL(flag); + default: + return 0; + } +} + +static unsigned int efx_ef10_recycle_ring_size(const struct efx_nic *efx) +{ + unsigned int ret = EFX_RECYCLE_RING_SIZE_10G; + + /* There is no difference between PFs and VFs. The size is based on + * the maximum link speed of a given NIC. 
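+ * Hence the per-device scaling below: 10G parts keep the base size,
+ * 40G parts use 4x and 100G parts 10x, with a further 4x on PPC64
+ * where page recycling is particularly worthwhile.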
+ */ + switch (efx->pci_dev->device & 0xfff) { + case 0x0903: /* Farmingdale can do up to 10G */ + break; + case 0x0923: /* Greenport can do up to 40G */ + case 0x0a03: /* Medford can do up to 40G */ + ret *= 4; + break; + default: /* Medford2 can do up to 100G */ + ret *= 10; + } + + if (IS_ENABLED(CONFIG_PPC64)) + ret *= 4; + + return ret; +} + +#define EF10_OFFLOAD_FEATURES \ + (NETIF_F_IP_CSUM | \ + NETIF_F_HW_VLAN_CTAG_FILTER | \ + NETIF_F_IPV6_CSUM | \ + NETIF_F_RXHASH | \ + NETIF_F_NTUPLE) + +#ifdef CONFIG_SFC_SRIOV +const struct efx_nic_type efx_hunt_a0_vf_nic_type = { + .is_vf = true, + .mem_bar = efx_ef10_vf_mem_bar, + .mem_map_size = efx_ef10_initial_mem_map_size, + .probe = efx_ef10_probe_vf, + .remove = efx_ef10_remove_vf, + .dimension_resources = efx_ef10_dimension_resources, + .net_alloc = __efx_net_alloc, + .net_dealloc = __efx_net_dealloc, + .init = efx_ef10_init_nic, + .fini = efx_ef10_fini_nic, + .monitor = efx_ef10_monitor, + .hw_unavailable = efx_ef10_hw_unavailable, + .map_reset_reason = efx_ef10_map_reset_reason, + .map_reset_flags = efx_ef10_map_reset_flags, + .reset = efx_ef10_reset, + .probe_port = efx_mcdi_port_probe, + .remove_port = efx_mcdi_port_remove, + .fini_dmaq = efx_ef10_fini_dmaq, + .prepare_flr = efx_ef10_prepare_flr, + .finish_flr = efx_port_dummy_op_void, + .describe_stats = efx_ef10_describe_stats, + .update_stats = efx_ef10_update_stats_vf, + .start_stats = efx_ef10_start_stats_vf, + .pull_stats = efx_ef10_pull_stats_vf, + .stop_stats = efx_ef10_stop_stats_vf, + .update_stats_period = efx_ef10_vf_schedule_stats_work, + .push_irq_moderation = efx_ef10_push_irq_moderation, + .reconfigure_mac = efx_ef10_mac_reconfigure, + .check_mac_fault = efx_mcdi_mac_check_fault, + .reconfigure_port = efx_mcdi_port_reconfigure, + .get_wol = efx_ef10_get_wol_vf, + .set_wol = efx_ef10_set_wol_vf, + .resume_wol = efx_port_dummy_op_void, + .mcdi_request = efx_ef10_mcdi_request, + .mcdi_poll_response = efx_ef10_mcdi_poll_response, + .mcdi_read_response = efx_ef10_mcdi_read_response, + .mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot, + .mcdi_record_bist_event = efx_ef10_mcdi_record_bist_event, + .mcdi_poll_bist_end = efx_ef10_mcdi_poll_bist_end, + .mcdi_reboot_detected = efx_ef10_mcdi_reboot_detected, + .mcdi_get_buf = efx_ef10_mcdi_get_buf, + .mcdi_put_buf = efx_ef10_mcdi_put_buf, + .irq_enable_master = efx_port_dummy_op_void, + .irq_test_generate = efx_ef10_irq_test_generate, + .irq_disable_non_ev = efx_port_dummy_op_void, + .irq_handle_msi = efx_ef10_msi_interrupt, + .tx_probe = efx_ef10_tx_probe, + .tx_init = efx_ef10_tx_init, + .tx_write = efx_ef10_tx_write, + .tx_notify = efx_ef10_notify_tx_desc, + .tx_limit_len = efx_ef10_tx_limit_len, + .tx_enqueue = __efx_enqueue_skb, + .tx_max_skb_descs = efx_ef10_tx_max_skb_descs, + .rx_push_rss_config = efx_ef10_vf_rx_push_rss_config, + .rx_pull_rss_config = efx_mcdi_rx_pull_rss_config, + .rx_probe = efx_mcdi_rx_probe, + .rx_init = efx_ef10_rx_init, + .rx_remove = efx_mcdi_rx_remove, + .rx_write = efx_ef10_rx_write, + .rx_defer_refill = efx_ef10_rx_defer_refill, + .rx_packet = __efx_rx_packet, + .ev_probe = efx_mcdi_ev_probe, + .ev_init = efx_ef10_ev_init, + .ev_fini = efx_mcdi_ev_fini, + .ev_remove = efx_mcdi_ev_remove, + .ev_process = efx_ef10_ev_process, + .ev_mcdi_pending = efx_ef10_ev_mcdi_pending, + .ev_read_ack = efx_ef10_ev_read_ack, + .ev_test_generate = efx_ef10_ev_test_generate, + .filter_table_probe = efx_ef10_filter_table_init, + .filter_table_up = efx_ef10_filter_table_up, + .filter_table_restore = 
efx_mcdi_filter_table_restore, + .filter_table_down = efx_ef10_filter_table_down, + .filter_table_remove = efx_ef10_filter_table_fini, + .filter_match_supported = efx_mcdi_filter_match_supported, + .filter_insert = efx_mcdi_filter_insert, + .filter_remove_safe = efx_mcdi_filter_remove_safe, + .filter_get_safe = efx_mcdi_filter_get_safe, + .filter_clear_rx = efx_mcdi_filter_clear_rx, + .filter_count_rx_used = efx_mcdi_filter_count_rx_used, + .filter_get_rx_id_limit = efx_mcdi_filter_get_rx_id_limit, + .filter_get_rx_ids = efx_mcdi_filter_get_rx_ids, +#ifdef CONFIG_RFS_ACCEL + .filter_rfs_expire_one = efx_mcdi_filter_rfs_expire_one, +#endif +#ifdef EFX_NOT_UPSTREAM + .filter_redirect = efx_mcdi_filter_redirect, +#if IS_MODULE(CONFIG_SFC_DRIVERLINK) + .filter_block_kernel = efx_mcdi_filter_block_kernel, + .filter_unblock_kernel = efx_mcdi_filter_unblock_kernel, +#endif +#endif +#ifdef CONFIG_SFC_MTD + .mtd_probe = efx_port_dummy_op_int, +#endif +#ifdef CONFIG_SFC_PTP + .ptp_write_host_time = efx_ef10_ptp_write_host_time, + .ptp_set_ts_config = efx_ef10_ptp_set_ts_config, +#endif + .vlan_rx_add_vid = efx_mcdi_filter_add_vid, + .vlan_rx_kill_vid = efx_mcdi_filter_del_vid, + .vswitching_probe = efx_ef10_vswitching_probe_vf, + .vswitching_restore = efx_ef10_vswitching_restore_vf, + .vswitching_remove = efx_ef10_vswitching_remove_vf, + .get_mac_address = efx_ef10_get_mac_address_vf, + .set_mac_address = efx_ef10_set_mac_address, +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_NEED_GET_PHYS_PORT_ID) + .get_phys_port_id = efx_ef10_get_phys_port_id, +#endif + .revision = EFX_REV_HUNT_A0, + .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH), + .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE, + .rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST, + .rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST, + .can_rx_scatter = true, + .always_rx_scatter = true, + .option_descriptors = true, + .copy_break = true, + .supported_interrupt_modes = BIT(EFX_INT_MODE_MSIX), + .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH, +#ifdef EFX_NOT_UPSTREAM +#if IS_MODULE(CONFIG_SFC_DRIVERLINK) + .ef10_resources = { + .hdr.next = ((struct efx_dl_device_info *) + &efx_hunt_a0_nic_type.dl_hash_insertion.hdr), + .hdr.type = EFX_DL_EF10_RESOURCES, + .vi_base = 0, + .vi_shift = 0, + .vi_min = 0, + .vi_lim = 0, + .flags = 0 + }, + .dl_hash_insertion = { + .hdr.type = EFX_DL_HASH_INSERTION, + .data_offset = ES_DZ_RX_PREFIX_SIZE, + .hash_offset = ES_DZ_RX_PREFIX_HASH_OFST, + .flags = (EFX_DL_HASH_TOEP_TCPIP4 | EFX_DL_HASH_TOEP_IP4 | + EFX_DL_HASH_TOEP_TCPIP6 | EFX_DL_HASH_TOEP_IP6), + }, +#endif +#endif + .offload_features = EF10_OFFLOAD_FEATURES, + .mcdi_max_ver = 2, + .mcdi_rpc_timeout = efx_ef10_mcdi_rpc_timeout, + .max_rx_ip_filters = EFX_MCDI_FILTER_TBL_ROWS, + .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE | + 1 << HWTSTAMP_FILTER_ALL, + .rx_hash_key_size = 40, + .check_caps = ef10_check_caps, + .rx_recycle_ring_size = efx_ef10_recycle_ring_size, +}; +#endif + +const struct efx_nic_type efx_hunt_a0_nic_type = { + .is_vf = false, + .mem_bar = efx_ef10_pf_mem_bar, + .mem_map_size = efx_ef10_initial_mem_map_size, + .probe = efx_ef10_probe_pf, + .remove = efx_ef10_remove, + .dimension_resources = efx_ef10_dimension_resources, + .free_resources = efx_ef10_free_resources, + .net_alloc = __efx_net_alloc, + .net_dealloc = __efx_net_dealloc, + .init = efx_ef10_init_nic, + .fini = efx_ef10_fini_nic, + .monitor = efx_ef10_monitor, + .hw_unavailable = efx_ef10_hw_unavailable, + .map_reset_reason = efx_ef10_map_reset_reason, + .map_reset_flags = 
efx_ef10_map_reset_flags, + .reset = efx_ef10_reset, + .probe_port = efx_mcdi_port_probe, + .remove_port = efx_mcdi_port_remove, + .fini_dmaq = efx_ef10_fini_dmaq, + .prepare_flr = efx_ef10_prepare_flr, + .finish_flr = efx_port_dummy_op_void, + .describe_stats = efx_ef10_describe_stats, + .update_stats = efx_ef10_update_stats_pf, + .pull_stats = efx_ef10_pull_stats_pf, + .push_irq_moderation = efx_ef10_push_irq_moderation, + .reconfigure_mac = efx_ef10_mac_reconfigure, + .check_mac_fault = efx_mcdi_mac_check_fault, + .reconfigure_port = efx_mcdi_port_reconfigure, + .get_wol = efx_ef10_get_wol, + .set_wol = efx_ef10_set_wol, + .resume_wol = efx_port_dummy_op_void, +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_ETHTOOL_FECSTATS) + .get_fec_stats = efx_ef10_get_fec_stats, +#endif + .test_chip = efx_ef10_test_chip, + .test_nvram = efx_mcdi_nvram_test_all, + .mcdi_request = efx_ef10_mcdi_request, + .mcdi_poll_response = efx_ef10_mcdi_poll_response, + .mcdi_read_response = efx_ef10_mcdi_read_response, + .mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot, + .mcdi_record_bist_event = efx_ef10_mcdi_record_bist_event, + .mcdi_poll_bist_end = efx_ef10_mcdi_poll_bist_end, + .mcdi_reboot_detected = efx_ef10_mcdi_reboot_detected, + .mcdi_get_buf = efx_ef10_mcdi_get_buf, + .mcdi_put_buf = efx_ef10_mcdi_put_buf, + .irq_enable_master = efx_port_dummy_op_void, + .irq_test_generate = efx_ef10_irq_test_generate, + .irq_disable_non_ev = efx_port_dummy_op_void, + .irq_handle_msi = efx_ef10_msi_interrupt, + .tx_probe = efx_ef10_tx_probe, + .tx_init = efx_ef10_tx_init, + .tx_write = efx_ef10_tx_write, + .tx_notify = efx_ef10_notify_tx_desc, + .tx_limit_len = efx_ef10_tx_limit_len, + .tx_max_skb_descs = efx_ef10_tx_max_skb_descs, + .tx_enqueue = __efx_enqueue_skb, + .rx_push_rss_config = efx_ef10_pf_rx_push_rss_config, + .rx_pull_rss_config = efx_mcdi_rx_pull_rss_config, + .rx_push_rss_context_config = efx_mcdi_rx_push_rss_context_config, + .rx_pull_rss_context_config = efx_mcdi_rx_pull_rss_context_config, + .rx_restore_rss_contexts = efx_mcdi_rx_restore_rss_contexts, + .rx_get_default_rss_flags = efx_mcdi_get_default_rss_flags, + .rx_set_rss_flags = efx_mcdi_set_rss_context_flags, + .rx_get_rss_flags = efx_mcdi_get_rss_context_flags, + .rx_probe = efx_mcdi_rx_probe, + .rx_init = efx_ef10_rx_init, + .rx_remove = efx_mcdi_rx_remove, + .rx_write = efx_ef10_rx_write, + .rx_defer_refill = efx_ef10_rx_defer_refill, + .rx_packet = __efx_rx_packet, + .ev_probe = efx_mcdi_ev_probe, + .ev_init = efx_ef10_ev_init, + .ev_fini = efx_mcdi_ev_fini, + .ev_remove = efx_mcdi_ev_remove, + .ev_process = efx_ef10_ev_process, + .ev_mcdi_pending = efx_ef10_ev_mcdi_pending, + .ev_read_ack = efx_ef10_ev_read_ack, + .ev_test_generate = efx_ef10_ev_test_generate, + .filter_table_probe = efx_ef10_filter_table_init, + .filter_table_up = efx_ef10_filter_table_up, + .filter_table_restore = efx_mcdi_filter_table_restore, + .filter_table_down = efx_ef10_filter_table_down, + .filter_table_remove = efx_ef10_filter_table_fini, + .filter_match_supported = efx_mcdi_filter_match_supported, + .filter_insert = efx_mcdi_filter_insert, + .filter_remove_safe = efx_mcdi_filter_remove_safe, + .filter_get_safe = efx_mcdi_filter_get_safe, + .filter_clear_rx = efx_mcdi_filter_clear_rx, + .filter_count_rx_used = efx_mcdi_filter_count_rx_used, + .filter_get_rx_id_limit = efx_mcdi_filter_get_rx_id_limit, + .filter_get_rx_ids = efx_mcdi_filter_get_rx_ids, +#ifdef CONFIG_RFS_ACCEL + .filter_rfs_expire_one = efx_mcdi_filter_rfs_expire_one, +#endif +#ifdef 
EFX_NOT_UPSTREAM + .filter_redirect = efx_mcdi_filter_redirect, +#if IS_MODULE(CONFIG_SFC_DRIVERLINK) + .filter_block_kernel = efx_mcdi_filter_block_kernel, + .filter_unblock_kernel = efx_mcdi_filter_unblock_kernel, +#endif +#endif +#ifdef CONFIG_SFC_MTD + .mtd_probe = efx_ef10_mtd_probe, + .mtd_rename = efx_mcdi_mtd_rename, + .mtd_read = efx_mcdi_mtd_read, + .mtd_erase = efx_mcdi_mtd_erase, + .mtd_write = efx_mcdi_mtd_write, + .mtd_sync = efx_mcdi_mtd_sync, +#endif +#ifdef CONFIG_SFC_PTP + .ptp_write_host_time = efx_ef10_ptp_write_host_time, + .ptp_set_ts_sync_events = efx_ef10_ptp_set_ts_sync_events, + .ptp_set_ts_config = efx_ef10_ptp_set_ts_config, +#ifdef EFX_NOT_UPSTREAM + .pps_reset = efx_ptp_pps_reset, +#endif +#endif + .vlan_rx_add_vid = efx_mcdi_filter_add_vid, + .vlan_rx_kill_vid = efx_mcdi_filter_del_vid, + .udp_tnl_has_port = efx_ef10_udp_tnl_has_port, +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_UDP_TUNNEL_NIC_INFO) + .udp_tnl_push_ports = efx_ef10_udp_tnl_push_ports, +#else + .udp_tnl_push_ports = efx_ef10_udp_tnl_push_ports_sync, + .udp_tnl_add_port = efx_ef10_udp_tnl_add_port, + .udp_tnl_del_port = efx_ef10_udp_tnl_del_port, +#endif + .vport_add = efx_ef10_vport_alloc, + .vport_del = efx_ef10_vport_free, +#ifdef CONFIG_SFC_SRIOV + .sriov_configure = efx_ef10_sriov_configure, + .sriov_init = efx_ef10_sriov_init, + .sriov_fini = efx_ef10_sriov_fini, + .sriov_wanted = efx_ef10_sriov_wanted, + .sriov_set_vf_mac = efx_ef10_sriov_set_vf_mac, + .sriov_set_vf_vlan = efx_ef10_sriov_set_vf_vlan, + .sriov_set_vf_spoofchk = efx_ef10_sriov_set_vf_spoofchk, +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_NDO_SET_VF_MAC) + .sriov_get_vf_config = efx_ef10_sriov_get_vf_config, +#endif +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_VF_LINK_STATE) + .sriov_set_vf_link_state = efx_ef10_sriov_set_vf_link_state, +#endif +#endif + .vswitching_probe = efx_ef10_vswitching_probe_pf, + .vswitching_restore = efx_ef10_vswitching_restore_pf, + .vswitching_remove = efx_ef10_vswitching_remove_pf, + .get_mac_address = efx_ef10_get_mac_address_pf, + .set_mac_address = efx_ef10_set_mac_address, +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_NEED_GET_PHYS_PORT_ID) + .get_phys_port_id = efx_ef10_get_phys_port_id, +#endif + .revision = EFX_REV_HUNT_A0, + .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH), + .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE, + .rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST, + .rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST, + .can_rx_scatter = true, + .always_rx_scatter = true, + .option_descriptors = true, + .copy_break = true, + .supported_interrupt_modes = BIT(EFX_INT_MODE_MSIX) | + BIT(EFX_INT_MODE_MSI), + .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH, +#ifdef EFX_NOT_UPSTREAM +#if IS_MODULE(CONFIG_SFC_DRIVERLINK) + .ef10_resources = { + .hdr.next = ((struct efx_dl_device_info *) + &efx_hunt_a0_nic_type.dl_hash_insertion.hdr), + .hdr.type = EFX_DL_EF10_RESOURCES, + .vi_base = 0, + .vi_shift = 0, + .vi_min = 0, + .vi_lim = 0, + .flags = 0 + }, + .dl_hash_insertion = { + .hdr.type = EFX_DL_HASH_INSERTION, + .data_offset = ES_DZ_RX_PREFIX_SIZE, + .hash_offset = ES_DZ_RX_PREFIX_HASH_OFST, + .flags = (EFX_DL_HASH_TOEP_TCPIP4 | EFX_DL_HASH_TOEP_IP4 | + EFX_DL_HASH_TOEP_TCPIP6 | EFX_DL_HASH_TOEP_IP6), + }, +#endif +#endif + .offload_features = EF10_OFFLOAD_FEATURES, + .mcdi_max_ver = 2, + .mcdi_rpc_timeout = efx_ef10_mcdi_rpc_timeout, + .max_rx_ip_filters = EFX_MCDI_FILTER_TBL_ROWS, + .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE | + 1 << HWTSTAMP_FILTER_ALL, + 
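/* RSS hash key size in bytes, as reported through the ethtool rxfh interface. */ +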
.rx_hash_key_size = 40, + .check_caps = ef10_check_caps, + .rx_recycle_ring_size = efx_ef10_recycle_ring_size, +}; diff --git a/src/drivers/net/ethernet/sfc/ef100.c b/src/drivers/net/ethernet/sfc/ef100.c new file mode 100644 index 0000000..7a33da5 --- /dev/null +++ b/src/drivers/net/ethernet/sfc/ef100.c @@ -0,0 +1,640 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2018 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#include "net_driver.h" +#include <linux/module.h> +#if defined(EFX_USE_KCOMPAT) && defined(EFX_HAVE_PCI_ENABLE_PCIE_ERROR_REPORTING) +#include <linux/aer.h> +#endif +#include "efx_common.h" +#include "efx_channels.h" +#include "efx_auxbus.h" +#include "debugfs.h" +#include "io.h" +#include "ef100_nic.h" +#include "ef100_netdev.h" +#include "ef100_regs.h" +#include "ef100_bsp.h" +#include "ef100.h" +#ifdef CONFIG_SFC_TRACING +#include <trace/events/sfc.h> +#endif +#include "mcdi_pcol.h" + +#define EFX_EF100_PCI_DEFAULT_BAR 0 + +/* Number of bytes at start of vendor specified extended capability that indicate + * that the capability is vendor specified. i.e. offset from value returned by + * pci_find_next_ext_capability() to beginning of vendor specified capability + * header. +*/ +#define PCI_EXT_CAP_HDR_LENGTH 4 + +/* Expected size of a Xilinx continuation address table entry. */ +#define ESE_GZ_CFGBAR_CONT_CAP_MIN_LENGTH 16 + +struct ef100_func_ctl_window +{ + bool valid; + unsigned int bar; + u64 offset; +}; + +static int ef100_pci_walk_xilinx_table(struct efx_nic *efx, u64 offset, + struct ef100_func_ctl_window *result); + +/* Number of bytes to offset when reading bit position x with dword accessors.
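+ * As a worked example (values chosen purely for illustration): a field with lbn = 45 and width = 4 sits in the dword at byte offset ROUND_DOWN_TO_DWORD(45) = (45 & ~31) >> 3 = 4, and EXTRACT_BITS(x, 45, 4) = ((x) >> 13) & 0xf, since 45 & 31 = 13 and (1ull << 4) - 1 = 0xf.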
*/ +#define ROUND_DOWN_TO_DWORD(x) (((x) & (~31)) >> 3) + +#define EXTRACT_BITS(x, lbn, width) \ + ((x) >> ((lbn) & 31)) & ((1ull << (width)) - 1) + +static u32 _ef100_pci_get_bar_bits_with_width(struct efx_nic *efx, + int structure_start, + int lbn, int width) +{ + efx_dword_t dword; + + efx_readd(efx, &dword, structure_start + ROUND_DOWN_TO_DWORD(lbn)); + + return EXTRACT_BITS(le32_to_cpu(dword.u32[0]), lbn, width); +} + +#define ef100_pci_get_bar_bits(efx, entry_location, bitdef) \ + _ef100_pci_get_bar_bits_with_width(efx, entry_location, \ + bitdef ## _LBN, bitdef ## _WIDTH) + +static int ef100_pci_parse_ef100_entry(struct efx_nic *efx, int entry_location, + struct ef100_func_ctl_window *result) +{ + u32 bar = ef100_pci_get_bar_bits(efx, entry_location, + ESF_GZ_CFGBAR_EF100_BAR); + u64 offset = ef100_pci_get_bar_bits(efx, entry_location, + ESF_GZ_CFGBAR_EF100_FUNC_CTL_WIN_OFF) << ESE_GZ_EF100_FUNC_CTL_WIN_OFF_SHIFT; + + pci_dbg(efx->pci_dev, + "Found EF100 function control window bar=%d offset=0x%llx\n", + bar, offset); + + if (result->valid) { + pci_err(efx->pci_dev, "Duplicated EF100 table entry.\n"); + return -EINVAL; + } + + if ((bar == ESE_GZ_CFGBAR_EF100_BAR_NUM_EXPANSION_ROM) || + (bar == ESE_GZ_CFGBAR_EF100_BAR_NUM_INVALID)) { + pci_err(efx->pci_dev, + "Bad BAR value of %d in Xilinx capabilities EF100 entry.\n", + bar); + return -EINVAL; + } + + result->bar = bar; + result->offset = offset; + result->valid = true; + return 0; +} + +static bool ef100_pci_does_bar_overflow(struct efx_nic *efx, int bar, + u64 next_entry) +{ + return next_entry + ESE_GZ_CFGBAR_ENTRY_HEADER_SIZE > + pci_resource_len(efx->pci_dev, bar); +} + +/* Parse a Xilinx capabilities table entry describing a continuation to a new + * sub-table. + */ +static int ef100_pci_parse_continue_entry(struct efx_nic *efx, int entry_location, + struct ef100_func_ctl_window *result) +{ + unsigned int previous_bar; + efx_oword_t entry; + u64 offset; + int rc = 0; + u32 bar; + + efx_reado(efx, &entry, entry_location); + + bar = EFX_OWORD_FIELD32(entry, ESF_GZ_CFGBAR_CONT_CAP_BAR); + + offset = EFX_OWORD_FIELD64(entry, ESF_GZ_CFGBAR_CONT_CAP_OFFSET) << + ESE_GZ_CONT_CAP_OFFSET_BYTES_SHIFT; + + previous_bar = efx->mem_bar; + + if ((bar == ESE_GZ_VSEC_BAR_NUM_EXPANSION_ROM) || + (bar == ESE_GZ_VSEC_BAR_NUM_INVALID)) { + pci_err(efx->pci_dev, + "Bad BAR value of %d in Xilinx capabilities sub-table.\n", + bar); + return -EINVAL; + } + + if (bar != previous_bar) { + efx_fini_io(efx); + + if (ef100_pci_does_bar_overflow(efx, bar, offset)) { + pci_err(efx->pci_dev, + "Xilinx table will overrun BAR[%d] offset=0x%llx\n", + bar, offset); + return -EINVAL; + } + + /* Temporarily map new BAR. */ + rc = efx_init_io(efx, bar, efx->type->max_dma_mask, + pci_resource_len(efx->pci_dev, bar)); + if (rc) { + pci_err(efx->pci_dev, + "Mapping new BAR for Xilinx table failed, rc=%d\n", rc); + return rc; + } + } + + rc = ef100_pci_walk_xilinx_table(efx, offset, result); + if (rc) + return rc; + + if (bar != previous_bar) { + efx_fini_io(efx); + + /* Put old BAR back. */ + rc = efx_init_io(efx, previous_bar, efx->type->max_dma_mask, + pci_resource_len(efx->pci_dev, previous_bar)); + if (rc) { + pci_err(efx->pci_dev, + "Putting old BAR back failed, rc=%d\n", rc); + return rc; + } + } + + return 0; +} + +/* Iterate over the Xilinx capabilities table in the currently mapped BAR and + * call ef100_pci_parse_ef100_entry() on any EF100 entries and + * ef100_pci_parse_continue_entry() on any table continuations. 
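+ * Each entry begins with a header carrying ENTRY_FORMAT, ENTRY_LAST, ENTRY_REV and ENTRY_SIZE fields; the loop reads those, dispatches on the format id, then advances current_entry by ENTRY_SIZE bytes, stopping on the LAST flag, on an ESE_GZ_CFGBAR_ENTRY_LAST format id, or when the next header would overrun the mapped BAR.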
+ */ +static int ef100_pci_walk_xilinx_table(struct efx_nic *efx, u64 offset, + struct ef100_func_ctl_window *result) +{ + u64 current_entry = offset; + int rc = 0; + + while (true) { + u32 id = ef100_pci_get_bar_bits(efx, current_entry, + ESF_GZ_CFGBAR_ENTRY_FORMAT); + u32 last = ef100_pci_get_bar_bits(efx, current_entry, + ESF_GZ_CFGBAR_ENTRY_LAST); + u32 rev = ef100_pci_get_bar_bits(efx, current_entry, + ESF_GZ_CFGBAR_ENTRY_REV); + u32 entry_size; + + if (id == ESE_GZ_CFGBAR_ENTRY_LAST) + return 0; + + entry_size = ef100_pci_get_bar_bits(efx, current_entry, + ESF_GZ_CFGBAR_ENTRY_SIZE); + + pci_dbg(efx->pci_dev, + "Seen Xilinx table entry 0x%x size 0x%x at 0x%llx in BAR[%d]\n", + id, entry_size, current_entry, efx->mem_bar); + + if (entry_size < sizeof(uint32_t)*2) { + pci_err(efx->pci_dev, + "Xilinx table entry too short len=0x%x\n", entry_size); + return -EINVAL; + } + + switch(id) { + case ESE_GZ_CFGBAR_ENTRY_EF100: + if ((rev != ESE_GZ_CFGBAR_ENTRY_REV_EF100) || + (entry_size < ESE_GZ_CFGBAR_ENTRY_SIZE_EF100)) { + pci_err(efx->pci_dev, + "Bad length or rev for EF100 entry in Xilinx capabilities table. entry_size=%d rev=%d.\n", + entry_size, rev); + return -EINVAL; + } + + rc = ef100_pci_parse_ef100_entry(efx, current_entry, + result); + if (rc) + return rc; + break; + case ESE_GZ_CFGBAR_ENTRY_CONT_CAP_ADDR: + if ((rev != 0) || (entry_size < ESE_GZ_CFGBAR_CONT_CAP_MIN_LENGTH)) { + pci_err(efx->pci_dev, + "Bad length or rev for continue entry in Xilinx capabilities table. entry_size=%d rev=%d.\n", + entry_size, rev); + return -EINVAL; + } + + rc = ef100_pci_parse_continue_entry(efx, current_entry, result); + if (rc) + return rc; + break; + default: + /* Ignore unknown table entries. */ + break; + } + + if (last) + return 0; + + current_entry += entry_size; + + if (ef100_pci_does_bar_overflow(efx, efx->mem_bar, current_entry)) { + pci_err(efx->pci_dev, + "Xilinx table overrun at position=0x%llx.\n", + current_entry); + return -EINVAL; + } + } +} + +static int _ef100_pci_get_config_bits_with_width(struct efx_nic *efx, + int structure_start, int lbn, + int width, u32 *result) +{ + int pos = structure_start + ROUND_DOWN_TO_DWORD(lbn); + u32 temp; + + int rc = pci_read_config_dword(efx->pci_dev, pos, &temp); + if (rc) { + pci_err(efx->pci_dev, + "Failed to read PCI config dword at %d\n", + pos); + return rc; + } + + *result = EXTRACT_BITS(temp, lbn, width); + + return 0; +} + +#define ef100_pci_get_config_bits(efx, entry_location, bitdef, result) \ + _ef100_pci_get_config_bits_with_width(efx, entry_location, \ + bitdef ## _LBN, bitdef ## _WIDTH, result) + +/* Call ef100_pci_walk_xilinx_table() for the Xilinx capabilities table pointed + * to by this PCI_EXT_CAP_ID_VNDR. 
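+ * The capability names the BAR that holds the table and a 64-bit table offset split across the TBL_OFF_LO and TBL_OFF_HI config words (the high word is only read when vsec_len >= ESE_GZ_VSEC_LEN_HIGH_OFFT); both words are shifted into byte units and OR'd together before the BAR is temporarily mapped and walked.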
+ */ +static int ef100_pci_parse_xilinx_cap(struct efx_nic *efx, int vndr_cap, + bool has_offset_hi, + struct ef100_func_ctl_window *result) +{ + u32 offset_high = 0; + u32 offset_lo = 0; + u64 offset = 0; + u32 bar = 0; + int rc = 0; + + rc = ef100_pci_get_config_bits(efx, vndr_cap, ESF_GZ_VSEC_TBL_BAR, &bar); + if (rc) { + pci_err(efx->pci_dev, + "Failed to read ESF_GZ_VSEC_TBL_BAR, rc=%d\n", + rc); + return rc; + } + + if ((bar == ESE_GZ_CFGBAR_CONT_CAP_BAR_NUM_EXPANSION_ROM) || + (bar == ESE_GZ_CFGBAR_CONT_CAP_BAR_NUM_INVALID)) { + pci_err(efx->pci_dev, + "Bad BAR value of %d in Xilinx capabilities sub-table.\n", + bar); + return -EINVAL; + } + + rc = ef100_pci_get_config_bits(efx, vndr_cap, ESF_GZ_VSEC_TBL_OFF_LO, &offset_lo); + if (rc) { + pci_err(efx->pci_dev, + "Failed to read ESF_GZ_VSEC_TBL_OFF_LO, rc=%d\n", + rc); + return rc; + } + + /* Get optional extension to 64bit offset. */ + if (has_offset_hi) { + rc = ef100_pci_get_config_bits(efx, vndr_cap, ESF_GZ_VSEC_TBL_OFF_HI, &offset_high); + if (rc) { + pci_err(efx->pci_dev, + "Failed to read ESF_GZ_VSEC_TBL_OFF_HI, rc=%d\n", + rc); + return rc; + } + } + + offset = (((u64)offset_lo) << ESE_GZ_VSEC_TBL_OFF_LO_BYTES_SHIFT) | + (((u64)offset_high) << ESE_GZ_VSEC_TBL_OFF_HI_BYTES_SHIFT); + + if (offset > pci_resource_len(efx->pci_dev, bar) - sizeof(uint32_t)*2) { + pci_err(efx->pci_dev, + "Xilinx table will overrun BAR[%d] offset=0x%llx\n", + bar, offset); + return -EINVAL; + } + + /* Temporarily map BAR. */ + rc = efx_init_io(efx, bar, efx->type->max_dma_mask, + pci_resource_len(efx->pci_dev, bar)); + if (rc) { + pci_err(efx->pci_dev, "efx_init_io failed, rc=%d\n", rc); + return rc; + } + + rc = ef100_pci_walk_xilinx_table(efx, offset, result); + + /* Unmap temporarily mapped BAR. */ + efx_fini_io(efx); + return rc; +} + +/* Call ef100_pci_parse_xilinx_cap() for each Xilinx PCI_EXT_CAP_ID_VNDR + * capability. + */ +static int ef100_pci_find_func_ctrl_window(struct efx_nic *efx, + struct ef100_func_ctl_window *result) +{ + int num_xilinx_caps = 0; + int cap = 0; + + result->valid = false; + + while ((cap = pci_find_next_ext_capability(efx->pci_dev, cap, + PCI_EXT_CAP_ID_VNDR)) != 0) { + int vndr_cap = cap + PCI_EXT_CAP_HDR_LENGTH; + u32 vsec_ver = 0; + u32 vsec_len = 0; + u32 vsec_id = 0; + int rc = 0; + + num_xilinx_caps++; + + rc = ef100_pci_get_config_bits(efx, vndr_cap, ESF_GZ_VSEC_ID, + &vsec_id); + if (rc) { + pci_err(efx->pci_dev, + "Failed to read ESF_GZ_VSEC_ID, rc=%d\n", + rc); + return rc; + } + + rc = ef100_pci_get_config_bits(efx, vndr_cap, ESF_GZ_VSEC_VER, + &vsec_ver); + if (rc) { + pci_err(efx->pci_dev, + "Failed to read ESF_GZ_VSEC_VER, rc=%d\n", + rc); + return rc; + } + + /* Get length of whole capability - i.e.
starting at cap */ + rc = ef100_pci_get_config_bits(efx, vndr_cap, ESF_GZ_VSEC_LEN, + &vsec_len); + if (rc) { + pci_err(efx->pci_dev, + "Failed to read ESF_GZ_VSEC_LEN, rc=%d\n", + rc); + return rc; + } + + if ((vsec_id == ESE_GZ_XILINX_VSEC_ID) && + (vsec_ver == ESE_GZ_VSEC_VER_XIL_CFGBAR) && + (vsec_len >= ESE_GZ_VSEC_LEN_MIN)) { + bool has_offset_hi = (vsec_len >= ESE_GZ_VSEC_LEN_HIGH_OFFT); + + rc = ef100_pci_parse_xilinx_cap(efx, vndr_cap, + has_offset_hi, result); + if (rc) + return rc; + } + } + + if (num_xilinx_caps && !result->valid) { + pci_err(efx->pci_dev, + "Seen %d Xilinx tables, but no EF100 entry.\n", + num_xilinx_caps); + return -EINVAL; + } + + return 0; +} + +/* Support for address regions */ +static bool ef100_addr_in(dma_addr_t addr, dma_addr_t base, ssize_t size) +{ + return (addr >= base && addr < (base + size)); +} + +int ef100_regionmap_buffer(struct efx_nic *efx, dma_addr_t *addr) +{ + struct ef100_nic_data *nic_data = efx->nic_data; + struct ef100_addr_region *region; + int i; + + if (likely(nic_data->addr_mapping_type == + MC_CMD_GET_DESC_ADDR_INFO_OUT_MAPPING_FLAT)) + return 0; + + for (i = 0; i < EF100_QDMA_ADDR_REGIONS_MAX; i++) { + region = &nic_data->addr_region[i]; + if (!ef100_region_addr_is_populated(region)) + break; + + if (ef100_addr_in(*addr, region->trgt_addr, + 1UL << region->size_log2)) { + *addr |= region->qdma_addr; + return 0; + } + } + if (net_ratelimit()) + netif_err(efx, tx_err, efx->net_dev, + "Cannot map %pad to QDMA\n", addr); + return -ENOMEM; +} + +/* Final NIC shutdown + * This is called only at module unload (or hotplug removal). A PF can call + * this on its VFs to ensure they are unbound first. + */ +static void ef100_pci_remove(struct pci_dev *pci_dev) +{ + struct efx_probe_data *probe_data; + struct efx_nic *efx = pci_get_drvdata(pci_dev); + + if (!efx) + return; + + efx_ef100_set_bar_config(efx, EF100_BAR_CONFIG_NONE); + efx_auxbus_unregister(efx); + efx_fini_struct_tc(efx); + ef100_remove(efx); + efx_fini_io(efx); + pci_dbg(pci_dev, "shutdown successful\n"); + +#if defined(EFX_USE_KCOMPAT) && defined(EFX_HAVE_PCI_ENABLE_PCIE_ERROR_REPORTING) + pci_disable_pcie_error_reporting(pci_dev); + +#endif + pci_set_drvdata(pci_dev, NULL); + probe_data = container_of(efx, struct efx_probe_data, efx); + efx_fini_struct(efx); + kfree(probe_data); +}; + +static int efx_check_func_ctl_magic(struct efx_nic *efx) +{ + efx_dword_t reg; + + efx_readd(efx, ®, efx_reg(efx, ER_GZ_NIC_MAGIC)); + if (EFX_DWORD_FIELD(reg, EFX_DWORD_0) != EFE_GZ_NIC_MAGIC_EXPECTED) + return -EIO; + + return 0; +} + +static int ef100_pci_probe(struct pci_dev *pci_dev, + const struct pci_device_id *entry) +{ + struct ef100_func_ctl_window fcw = { 0 }; + struct efx_probe_data *probe_data; + struct efx_nic *efx; + int rc; + + /* Allocate probe data and struct efx_nic */ + probe_data = kzalloc(sizeof(*probe_data), GFP_KERNEL); + if (!probe_data) + return -ENOMEM; + probe_data->pci_dev = pci_dev; + efx = &probe_data->efx; + + efx->type = (const struct efx_nic_type *)entry->driver_data; + efx->pci_dev = pci_dev; + efx->client_id = MC_CMD_CLIENT_ID_SELF; + pci_set_drvdata(pci_dev, efx); + rc = efx_init_struct(efx, pci_dev); + if (rc) + goto fail; + + efx->vi_stride = EF100_DEFAULT_VI_STRIDE; + pci_info(pci_dev, "Solarflare EF100 NIC detected\n"); + + rc = ef100_pci_find_func_ctrl_window(efx, &fcw); + if (rc) { + pci_err(pci_dev, + "Error looking for ef100 function control window, rc=%d\n", + rc); + goto fail; + } + + if (!fcw.valid) { + /* Extended capability not found - use 
defaults. */ + fcw.bar = EFX_EF100_PCI_DEFAULT_BAR; + fcw.offset = 0; + fcw.valid = true; + } + + if (fcw.offset > pci_resource_len(efx->pci_dev, fcw.bar) - ESE_GZ_FCW_LEN) { + pci_err(pci_dev, "Func control window overruns BAR\n"); + rc = -EIO; + goto fail; + } + + /* Set up basic I/O (BAR mappings etc) */ + rc = efx_init_io(efx, fcw.bar, efx->type->max_dma_mask, + pci_resource_len(efx->pci_dev, fcw.bar)); + if (rc) + goto fail; + + efx->reg_base = fcw.offset; + + rc = efx_check_func_ctl_magic(efx); + if (rc) { + pci_err(pci_dev, "Func control window magic is wrong\n"); + goto fail; + } + + rc = efx->type->probe(efx); + if (rc) + goto fail; + + rc = ef100_bsp_init(efx); + if (rc) + goto fail; + rc = efx_auxbus_register(efx); + if (rc) + pci_warn(pci_dev, + "Unable to register auxiliary bus driver (%d)\n", rc); + +#if defined(EFX_USE_KCOMPAT) && defined(EFX_HAVE_PCI_ENABLE_PCIE_ERROR_REPORTING) + (void)pci_enable_pcie_error_reporting(pci_dev); + +#endif + efx->state = STATE_PROBED; + rc = efx_ef100_set_bar_config(efx, EF100_BAR_CONFIG_EF100); + if (rc) + goto fail; + + pci_dbg(pci_dev, "initialisation successful\n"); + return 0; + +fail: + ef100_pci_remove(pci_dev); + return rc; +} + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_SRIOV_CONFIGURE) || (defined(CONFIG_SFC_SRIOV) && defined(EFX_HAVE_PCI_DRIVER_RH)) +static int ef100_pci_sriov_configure(struct pci_dev *dev, int num_vfs) +{ + struct efx_nic *efx = pci_get_drvdata(dev); + int rc; + + if (efx->type->sriov_configure) { + rc = efx->type->sriov_configure(efx, num_vfs); + if (rc) + return rc; + else + return num_vfs; + } + else + return -ENOSYS; +} +#endif + +/* PCI device ID table. + * On changes make sure to update sfc_pci_table in efx.c + */ +static const struct pci_device_id ef100_pci_table[] = { + {PCI_DEVICE(PCI_VENDOR_ID_XILINX, 0x0100), /* Riverhead PF */ + .driver_data = (unsigned long) &ef100_pf_nic_type }, + {PCI_DEVICE(PCI_VENDOR_ID_XILINX, 0x1100), /* Riverhead VF */ + .class = PCI_CLASS_NETWORK_ETHERNET << 8, + .class_mask = 0xffff00, + .driver_data = (unsigned long) &ef100_vf_nic_type }, + {0} /* end of list */ +}; + +#if defined(CONFIG_SFC_SRIOV) && defined(EFX_HAVE_PCI_DRIVER_RH) && !defined(EFX_HAVE_SRIOV_CONFIGURE) +static struct pci_driver_rh ef100_pci_driver_rh = { + .sriov_configure = ef100_pci_sriov_configure, +}; +#endif + +struct pci_driver ef100_pci_driver = { + .name = "sfc_ef100", + .id_table = ef100_pci_table, + .probe = ef100_pci_probe, + .remove = ef100_pci_remove, + .err_handler = &efx_err_handlers, +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_SRIOV_CONFIGURE) + .sriov_configure = ef100_pci_sriov_configure, +#elif defined(EFX_HAVE_PCI_DRIVER_RH) + .rh_reserved = &ef100_pci_driver_rh, +#endif +}; diff --git a/src/drivers/net/ethernet/sfc/ef100.h b/src/drivers/net/ethernet/sfc/ef100.h new file mode 100644 index 0000000..b218f9b --- /dev/null +++ b/src/drivers/net/ethernet/sfc/ef100.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2018 Solarflare Communications Inc. + * Copyright 2019-2020 Xilinx Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
+ */ + +extern struct pci_driver ef100_pci_driver; + +int ef100_regionmap_buffer(struct efx_nic *efx, dma_addr_t *addr); diff --git a/src/drivers/net/ethernet/sfc/ef100_bsp.c b/src/drivers/net/ethernet/sfc/ef100_bsp.c new file mode 100644 index 0000000..7f250e7 --- /dev/null +++ b/src/drivers/net/ethernet/sfc/ef100_bsp.c @@ -0,0 +1,241 @@ +// SPDX-License-Identifier: GPL-2.0 +/**************************************************************************** + * Driver for Xilinx network controllers and boards + * Copyright 2021 Xilinx Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ +#include "net_driver.h" +#include "ef100_nic.h" +#include "ef100_bsp.h" +#ifdef CONFIG_OF +#include <linux/of.h> +#endif + +#define MAX_MCDI_REGIONS MC_CMD_GET_DESC_ADDR_REGIONS_OUT_REGIONS_MAXNUM +#define MAX_BANKS MAX_MCDI_REGIONS +#define DESC_ADDR_REGION_TRGT_ADDR_INVALID ((uint64_t)-1) + +struct bank_info { + phys_addr_t addr; + phys_addr_t size; +}; + +static int get_mem_banks(struct bank_info *trgt_addr) +{ +#ifdef CONFIG_OF + struct device_node *memnode; + int rc; + + if (!of_have_populated_dt()) + return -ENODEV; + + memnode = of_find_node_by_name(NULL, "memory"); + if (!memnode) + return -ENOMEM; + + rc = of_property_read_variable_u64_array(memnode, "reg", + (u64 *)trgt_addr, + 1, MAX_BANKS); + of_node_put(memnode); + /* For each bank there is a base address and a size register */ + if (rc > 0) + rc /= 2; + return rc; +#else + return -ENOTSUPP; +#endif +} + +static uint64_t region_start_for_bank(const struct bank_info *bank, + const struct ef100_addr_region *region) +{ + return bank->addr & ~DMA_BIT_MASK(region->trgt_alignment_log2); +} + +static bool bank_fits_in_region(const struct bank_info *bank, + const struct ef100_addr_region *region) +{ + dma_addr_t region_start, region_end, bank_end; + + if (region->trgt_addr == DESC_ADDR_REGION_TRGT_ADDR_INVALID) + region_start = region_start_for_bank(bank, region); + else + region_start = region->trgt_addr; + + region_end = region_start + (1ULL << region->size_log2); + bank_end = bank->addr + bank->size; + + if ((bank->addr < region_start) || (bank_end > region_end)) + return false; + + return true; +} + +static bool try_bank_region(const struct bank_info *bank, + struct ef100_addr_region *region) +{ + if (bank_fits_in_region(bank, region)) { + if (region->trgt_addr == DESC_ADDR_REGION_TRGT_ADDR_INVALID) { + region->trgt_addr = region_start_for_bank(bank, + region); + } + return true; + } + return false; +} + +static int ef100_set_address_mapping(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_DESC_ADDR_REGIONS_IN_LENMAX); + struct ef100_nic_data *nic_data = efx->nic_data; + struct bank_info banks[MAX_BANKS] = { { 0, 0 } }; + struct ef100_addr_region *region; + int i, k, n_banks, n_regions = 0; + u32 mask = 0; + u64 *trgt; + + if (nic_data->addr_mapping_type == + MC_CMD_GET_DESC_ADDR_INFO_OUT_MAPPING_FLAT) + return 0; + + n_banks = get_mem_banks(banks); + if (n_banks < 0) { + pci_err(efx->pci_dev, "Error %d getting memory banks\n", + -n_banks); + return n_banks; + } + + /* Reset target addresses */ + for (i = 0; i < MAX_MCDI_REGIONS; i++) { + region = &nic_data->addr_region[i]; + region->trgt_addr = DESC_ADDR_REGION_TRGT_ADDR_INVALID; + + if (ef100_region_addr_is_populated(&nic_data->addr_region[i])) + n_regions = i + 1; + } + + /* Try to assign target addresses for all
memory banks */ + for (i = 0; i < n_banks; i++) { + for (k = 0; k < n_regions; k++) { + region = &nic_data->addr_region[k]; + + if (try_bank_region(&banks[i], region)) + break; + } + if (k == n_regions) { + pci_err(efx->pci_dev, + "Memory bank %pa(%pa) is not addressable by QDMA\n", + &banks[i].addr, &banks[i].size); + return -ENOSPC; + } + } + + trgt = (uint64_t *)MCDI_PTR(inbuf, + SET_DESC_ADDR_REGIONS_IN_TRGT_ADDR_BASE); + for (i = 0; i < n_regions; i++) { + region = &nic_data->addr_region[i]; + if (region->trgt_addr == DESC_ADDR_REGION_TRGT_ADDR_INVALID) + continue; + + trgt[i] = region->trgt_addr; + mask |= (1ULL << i); + } + MCDI_SET_DWORD(inbuf, SET_DESC_ADDR_REGIONS_IN_SET_REGION_MASK, mask); + + if (mask != (1 << n_regions) - 1) { + pci_err(efx->pci_dev, + "Not all memory is addressable by QDMA: mask %x with %d QDMA regions\n", + mask, n_regions); + return -ENOSPC; + } + + return (efx_mcdi_rpc(efx, MC_CMD_SET_DESC_ADDR_REGIONS, inbuf, + MC_CMD_SET_DESC_ADDR_REGIONS_IN_LEN(i), + NULL, 0, NULL)); +} + +/* Regioned address mappings are only supported on 64-bit architectures. + * We only support the 32-bit driver on X86, which uses the flat address + * mapping. + */ +static void ef100_get_address_region(struct ef100_addr_region *region, + efx_qword_t *qbuf) +{ +#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT) + region->qdma_addr = le64_to_cpu(qbuf[0].u64[0]); + region->trgt_addr = le64_to_cpu(qbuf[1].u64[0]); + region->size_log2 = le32_to_cpu(qbuf[2].u32[0]); + region->trgt_alignment_log2 = le32_to_cpu(qbuf[2].u32[1]); +#endif +} + +#define MC_CMD_GET_QDMA_ADDR_OUT_LEN \ + max(MC_CMD_GET_DESC_ADDR_INFO_OUT_LEN, \ + MC_CMD_GET_DESC_ADDR_REGIONS_OUT_LEN(MC_CMD_GET_DESC_ADDR_REGIONS_OUT_REGIONS_MAXNUM)) + +static int ef100_get_address_mapping(struct efx_nic *efx) +{ + struct ef100_nic_data *nic_data = efx->nic_data; + efx_dword_t *outbuf; + efx_qword_t *qbuf; + size_t outlen = 0; + int rc, i; + + BUILD_BUG_ON(MC_CMD_GET_DESC_ADDR_INFO_IN_LEN != 0); + BUILD_BUG_ON(MC_CMD_GET_DESC_ADDR_REGIONS_IN_LEN != 0); + + outbuf = kzalloc(MC_CMD_GET_QDMA_ADDR_OUT_LEN, GFP_KERNEL); + if (!outbuf) + return -ENOMEM; + + nic_data->addr_mapping_type = + MC_CMD_GET_DESC_ADDR_INFO_OUT_MAPPING_FLAT; + rc = efx_mcdi_rpc(efx, MC_CMD_GET_DESC_ADDR_INFO, NULL, 0, + outbuf, MC_CMD_GET_QDMA_ADDR_OUT_LEN, &outlen); + if (rc) + goto fail; + + nic_data->addr_mapping_type = + MCDI_DWORD(outbuf, GET_DESC_ADDR_INFO_OUT_MAPPING_TYPE); + if (nic_data->addr_mapping_type == + MC_CMD_GET_DESC_ADDR_INFO_OUT_MAPPING_FLAT) + goto fail; + + rc = efx_mcdi_rpc(efx, MC_CMD_GET_DESC_ADDR_REGIONS, NULL, 0, + outbuf, MC_CMD_GET_QDMA_ADDR_OUT_LEN, &outlen); + if (rc) + goto fail; + if (outlen > MC_CMD_GET_QDMA_ADDR_OUT_LEN) { + rc = -ENOSPC; + goto fail; + } + + for (i = 0; + i < MC_CMD_GET_DESC_ADDR_REGIONS_OUT_REGIONS_NUM(outlen); + i++) { + qbuf = (efx_qword_t *)MCDI_ARRAY_STRUCT_PTR(outbuf, + GET_DESC_ADDR_REGIONS_OUT_REGIONS, i); + ef100_get_address_region(&nic_data->addr_region[i], qbuf); + } + +fail: + kfree(outbuf); + return rc; +} + +int ef100_bsp_init(struct efx_nic *efx) +{ + int rc; + + rc = ef100_get_address_mapping(efx); + if (rc) + return rc; + rc = ef100_set_address_mapping(efx); + if (rc) + return rc; + + return ef100_get_address_mapping(efx); +} diff --git a/src/drivers/net/ethernet/sfc/ef100_bsp.h b/src/drivers/net/ethernet/sfc/ef100_bsp.h new file mode 100644 index 0000000..de57306 --- /dev/null +++ b/src/drivers/net/ethernet/sfc/ef100_bsp.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ 
+/**************************************************************************** + * Driver for Xilinx network controllers and boards + * Copyright 2021 Xilinx Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +int ef100_bsp_init(struct efx_nic *efx); diff --git a/src/drivers/net/ethernet/sfc/ef100_dump.c b/src/drivers/net/ethernet/sfc/ef100_dump.c new file mode 100644 index 0000000..05515bd --- /dev/null +++ b/src/drivers/net/ethernet/sfc/ef100_dump.c @@ -0,0 +1,446 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2021 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ +#include "ef100_dump.h" +#include "mcdi_pcol.h" +#include "mcdi.h" + +struct efx_mc_reg_table { + const char *name; + u32 addr; + u32 size; +}; + +/* Tables of addresses to dump. These are for SN1022 p7g images, and + * may not be correct for other EF100 NICs. TODO replace all this with + * something more robust before such other NICs arrive. + */ + +static struct efx_mc_reg_table efx_dump_misc[] = { + {"FPGA hardware version", 0xFD200004, 1}, +}; + +static struct efx_mc_reg_table efx_dump_sss_host_tx[] = { + {"DMAC-S2IC(Towards SSS) PKT CNT", 0x88001008, 1}, + {"Host Hub A PKTS IN", 0x8c010000, 32}, + {"Host Hub A plugin PKTS IN", 0x8c010200, 32}, + {"Host Hub A PKTS_OUT", 0x8c010400, 32}, + {"VNTX SP control dbg sticky reg1", 0x8f1ff104, 1}, + {"VNTX SP control dbg sticky reg2", 0x8f1ff108, 1}, + {"VNTX SP control chker csum error frm cnt", 0x8f1ff10c, 1}, + {"VNTX SP control chker csum error", 0x8f1ff110, 1}, + {"VNTX SP control chker protocol err reg0", 0x8f1ff114, 1}, + {"VNTX SP control chker protocol err reg1", 0x8f1ff118, 1}, + {"VNTX SP control ic2s ingress pkt count", 0x8f1ff014, 1}, + {"VNTX SP control s2ic egress pkt count", 0x8f1ff054, 1}, + {"Host Hub B PKTS IN", 0x8c410000, 40}, + {"Host Hub B plugin PKTS IN", 0x8c410400, 40}, + {"Host Hub B PKTS_OUT (from host and net)", 0x8c410800, 40}, + {"ME SP control dbg sticky reg1", 0x8f3ff104, 1}, + {"ME SP control dbg sticky reg2", 0x8f3ff108, 1}, + {"ME SP control chker csum error frm cnt", 0x8f3ff10c, 1}, + {"ME SP control chker csum error", 0x8f3ff110, 1}, + {"ME SP control chker protocol err reg0", 0x8f3ff114, 1}, + {"ME SP control chker protocol err reg1", 0x8f3ff118, 1}, + {"ME SP control ic2s ingress pkt count", 0x8f3ff014, 1}, + {"ME SP control s2ic egress pkt count", 0x8f3ff054, 1}, + {"Host Hub RP PKTS IN", 0x8b800000, 32}, + {"Host Hub RP PKTS_OUT", 0x8b800100, 32}, + {"Replay hub sticky", 0x8b801010, 1}, + {"AE SP control dbg sticky reg1", 0x8f5ff104, 1}, + {"AE SP control dbg sticky reg2", 0x8f5ff108, 1}, + {"AE SP control chker csum error frm cnt", 0x8f5ff10c, 1}, + {"AE SP control chker csum error", 0x8f5ff110, 1}, + {"AE SP control chker protocol err reg0", 0x8f5ff114, 1}, + {"AE SP control chker protocol err reg1", 0x8f5ff118, 1}, + {"AE SP control ic2s ingress pkt count", 0x8f5ff014, 1}, + {"AE SP control s2ic egress pkt count", 0x8f5ff054, 1}, + {"Net Hub C N0 net PKTS IN", 0x8cc15800, 40}, + {"Net Hub C N0 net PKTS_OUT", 0x8cc15c00, 40}, + {"Net Hub 
C N1 net PKTS IN", 0x8cc15000, 40}, + {"Net Hub C N1 net PKTS_OUT", 0x8cc15400, 40}, +}; + +static struct efx_mc_reg_table efx_dump_sss_rx_path[] = { + {"Net Hub A missing EOP sticky", 0x8d410e40, 32}, + {"Net Hub A PKTS IN0", 0x8d410000, 32}, + {"Net Hub A PKTS IN1", 0x8d411000, 32}, + {"Net Hub A PKTS_OUT", 0x8d410600, 32}, + {"Net Hub A PKTS DROP0", 0x8d410200, 32}, + {"Net Hub A PKTS DROP1", 0x8d411200, 32}, + {"Net Hub B PKTS IN", 0x8c410200, 40}, + {"Net Hub B plugin PKTS IN", 0x8c410600, 40}, + {"Host Hub C PKTS IN", 0x8cc11000, 40}, + {"Host Hub C PKTS_OUT", 0x8cc11100, 40}, + {"VNRX SP control dbg sticky reg1", 0x8f7ff104, 1}, + {"VNRX SP control dbg sticky reg2", 0x8f7ff108, 1}, + {"VNRX SP control chker csum error frm cnt", 0x8f7ff10c, 1}, + {"VNRX SP control chker csum error", 0x8f7ff110, 1}, + {"VNRX SP control chker protocol err reg0", 0x8f7ff114, 1}, + {"VNRX SP control chker protocol err reg1", 0x8f7ff118, 1}, + {"VNRX SP control ic2s ingress pkt count", 0x8f7ff014, 1}, + {"VNRX SP control s2ic egress pkt count", 0x8f7ff054, 1}, + {"Host Hub D PKTS IN host", 0x8d010000, 32}, + {"Host Hub D PKTS IN plugin", 0x8d010100, 32}, + {"Host Hub D PKTS IN OVS counter", 0x8d010200, 32}, + {"Host Hub D PKTS_OUT", 0x8d010300, 32}, +}; + +static struct efx_mc_reg_table efx_dump_sss_host_rx[] = { + {"DMAC-IC2S(Frm SSS) PKT CNT", 0x88001108, 1}, + {"DMAC_ALERT", 0x8800007c, 1}, + {"DMAC_ERR_STICKY_REG", 0x880000c0, 1}, + {"DMAC_C2H_DROP_CTR_REG", 0x88000108, 1}, + {"EVC ERR STICKY", 0x880000cc, 1}, + {"EVC Total events", 0x88001f10, 1}, + {"EVC RX event packt count (unmoderated)", 0x88001fac, 1}, + {"EVC TX event DSC (unmoderated)", 0x88001fb0, 1}, + {"EVC RX event stimulus (EF100)", 0x88001fb4, 1}, + {"EVC TX event stimulus (per packet)", 0x88001fb8, 1}, + {"QDMA C2H stat debug", 0xfe068b1c, 1}, + {"QDMA C2H ERR STAT", 0xfe068af0, 1}, + {"QDMA STAT WRB IN", 0xfe068b34, 1}, + {"QDMA STAT WRB OUT", 0xfe068b38, 1}, + {"QDMA STAT WRB DRP", 0xfe068b3c, 1}, + {"HAH cnt_nw_tx_dbl", 0xfd154800, 1}, + {"HAH cnt_nw_rx_dbl", 0xfd154804, 1}, + {"HAH cnt_nw_virtio_dbl", 0xfd154808, 1}, +}; + +/* indir_table read sequence: + * write %base+8 %addr + * if (%read_base) read %base + * val = read %base+4 + */ +struct efx_mc_reg_indir_table { + const char *name; + u32 base; + u32 addr; + bool read_base; +}; + +static struct efx_mc_reg_indir_table efx_dump_sched_dest_creds[] = { + {"Hub-H2C scheduler destination credit ID 0", 0x8b020060, 0x620e0004, true}, + {"Hub-H2C scheduler destination credit ID 1", 0x8b020060, 0x620e000c, true}, + {"Hub-HA TX to hub-b scheduler destination credit ID 0", 0x8c000060, 0x620e0004, true}, + {"Hub-HB TX/PL to hub-R scheduler destination credit ID 0", 0x8c400060, 0x620e0004, true}, + {"Hub-HB RX Net to hub-R scheduler destination credit ID 1", 0x8c400060, 0x620e000c, true}, + {"Hub-R host to net to hub-C scheduler destination credit ID 0", 0x8c800060, 0x620e0004, true}, + {"Hub-R net to host to hub-C scheduler destination credit ID 0", 0x8c800060, 0x620e000c, true}, + {"Hub-NetTX net to MAC scheduler destination credit ID 0", 0x8cc40060, 0x620e0004, true}, + {"Hub-NA from MAC to hub-B scheduler destination credit ID 0", 0x8d400060, 0x620e0004, true}, + {"Hub-HC hub-C to hub D scheduler destination credit ID 0", 0x8cc00060, 0x620e0004, true}, + {"Hub-D scheduler destination credit ID 0", 0x8d000060, 0x620e0004, true}, + {"Hub-D scheduler destination credit ID 1", 0x8d000060, 0x620e000c, true}, +}; + +static struct efx_mc_reg_table efx_dump_xon_xoff[] = { + {"XOFF Count", 
0x8a1006bc, 1}, + {"XON Count", 0x8a1006b8, 1}, +}; +static struct efx_mc_reg_indir_table efx_dump_xon_state[] = { + {"XON", 0x8cc40060, 0x600E0004, false}, +}; + +static int efx_mcdi_dump_reg(struct efx_nic *efx, struct efx_mc_reg_table *reg) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_READ32_IN_LEN); + efx_dword_t *outbuf; + unsigned int i; + size_t outlen; + int rc; + + outbuf = kzalloc(MC_CMD_READ32_OUT_LEN(reg->size), GFP_KERNEL); + if (!outbuf) + return -ENOMEM; + MCDI_SET_DWORD(inbuf, READ32_IN_ADDR, reg->addr); + MCDI_SET_DWORD(inbuf, READ32_IN_NUMWORDS, reg->size); + rc = efx_mcdi_rpc(efx, MC_CMD_READ32, inbuf, sizeof(inbuf), + outbuf, MC_CMD_READ32_OUT_LEN(reg->size), &outlen); + if (rc < 0) + goto out_free; + if (reg->size == 1 && outlen >= MC_CMD_READ32_OUT_LEN(1)) + netif_err(efx, tx_err, efx->net_dev, + "reg %s: %#010x\n", reg->name, + MCDI_ARRAY_DWORD(outbuf, READ32_OUT_BUFFER, 0)); + else + for (i = 0; i < outlen && i < reg->size; i++) { + u32 val = MCDI_ARRAY_DWORD(outbuf, READ32_OUT_BUFFER, i); + + if (val || !i) + netif_err(efx, tx_err, efx->net_dev, + "reg %s [%#06x]: %#010x\n", reg->name, i, + val); + } +out_free: + kfree(outbuf); + return rc; +} + +static int efx_mcdi_dump_reg_table(struct efx_nic *efx, + struct efx_mc_reg_table *regs, + unsigned int nregs) +{ + int rc, fails = 0; + unsigned int i; + + for (i = 0; i < nregs; i++) { + rc = efx_mcdi_dump_reg(efx, regs + i); + /* If too many failures, give up (and return + * the last failure's rc) + */ + if (rc < 0 && ++fails >= 3) + return rc; + } + return 0; +} + +static int efx_mcdi_write32(struct efx_nic *efx, u32 addr, u32 value) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_WRITE32_IN_LEN(1)); + + BUILD_BUG_ON(MC_CMD_WRITE32_OUT_LEN); + MCDI_SET_DWORD(inbuf, WRITE32_IN_ADDR, addr); + MCDI_SET_DWORD(inbuf, WRITE32_IN_BUFFER, value); + return efx_mcdi_rpc(efx, MC_CMD_WRITE32, inbuf, sizeof(inbuf), + NULL, 0, NULL); +} + +static int efx_mcdi_dump_reg_indir(struct efx_nic *efx, struct efx_mc_reg_indir_table *reg) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_READ32_OUT_LEN(1)); + MCDI_DECLARE_BUF(inbuf, MC_CMD_READ32_IN_LEN); + size_t outlen; + int rc; + + rc = efx_mcdi_write32(efx, reg->base + 8, reg->addr); + if (rc < 0) + return rc; + if (reg->read_base) { + MCDI_SET_DWORD(inbuf, READ32_IN_ADDR, reg->base); + MCDI_SET_DWORD(inbuf, READ32_IN_NUMWORDS, 1); + rc = efx_mcdi_rpc(efx, MC_CMD_READ32, inbuf, sizeof(inbuf), + NULL, 0, NULL); + if (rc < 0) + return rc; + } + MCDI_SET_DWORD(inbuf, READ32_IN_ADDR, reg->base + 4); + MCDI_SET_DWORD(inbuf, READ32_IN_NUMWORDS, 1); + rc = efx_mcdi_rpc(efx, MC_CMD_READ32, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc < 0) + return rc; + if (outlen < sizeof(outbuf)) + return -EIO; + netif_err(efx, tx_err, efx->net_dev, + "reg %s: %#010x\n", reg->name, + MCDI_ARRAY_DWORD(outbuf, READ32_OUT_BUFFER, 0)); + return 0; +} + +static int efx_mcdi_dump_reg_indir_table(struct efx_nic *efx, + struct efx_mc_reg_indir_table *regs, + unsigned int nregs) +{ + int rc, fails = 0; + unsigned int i; + + for (i = 0; i < nregs; i++) { + rc = efx_mcdi_dump_reg_indir(efx, regs + i); + /* If too many failures, give up (and return + * the last failure's rc) + */ + if (rc < 0 && ++fails >= 3) + return rc; + } + return 0; +} + +static const char *efx_mcdi_dump_sched_names[] = { + [SCHED_CREDIT_CHECK_RESULT_HUB_HOST_A] = "HUB_HOST_A", + [SCHED_CREDIT_CHECK_RESULT_HUB_NET_A] = "HUB_NET_A", + [SCHED_CREDIT_CHECK_RESULT_HUB_B] = "HUB_B", + [SCHED_CREDIT_CHECK_RESULT_HUB_HOST_C] = "HUB_HOST_C", + 
[SCHED_CREDIT_CHECK_RESULT_HUB_NET_TX] = "HUB_NET_TX", + [SCHED_CREDIT_CHECK_RESULT_HUB_HOST_D] = "HUB_HOST_D", + [SCHED_CREDIT_CHECK_RESULT_HUB_REPLAY] = "HUB_REPLAY", + [SCHED_CREDIT_CHECK_RESULT_DMAC_H2C] = "DMAC_H2C", +}; + +/* Returns number of pages, or negative error */ +static int efx_mcdi_dump_sched_cred_page(struct efx_nic *efx, u32 page, + u32 flags, u32 *generation_count) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_CHECK_SCHEDULER_CREDITS_IN_LEN); + u32 gen_count_actual, num_results, i; + efx_dword_t *outbuf; + size_t outlen; + int rc; + + outbuf = kzalloc(MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_LENMAX_MCDI2, GFP_KERNEL); + if (!outbuf) + return -ENOMEM; + + MCDI_SET_DWORD(inbuf, CHECK_SCHEDULER_CREDITS_IN_FLAGS, flags); + MCDI_SET_DWORD(inbuf, CHECK_SCHEDULER_CREDITS_IN_PAGE, page); + + rc = efx_mcdi_rpc(efx, MC_CMD_CHECK_SCHEDULER_CREDITS, + inbuf, sizeof(inbuf), outbuf, + MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_LENMAX_MCDI2, &outlen); + if (rc) + goto out_free; + if (outlen < MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_RESULTS_OFST) + goto out_free; + gen_count_actual = MCDI_DWORD(outbuf, CHECK_SCHEDULER_CREDITS_OUT_GENERATION); + if (!page) + *generation_count = gen_count_actual; + if (gen_count_actual != *generation_count) { + rc = -EAGAIN; + goto out_free; + } + num_results = MCDI_DWORD(outbuf, CHECK_SCHEDULER_CREDITS_OUT_RESULTS_THIS_PAGE); + for (i = 0; i < num_results; i++) { + MCDI_DECLARE_STRUCT_PTR(result) = MCDI_ARRAY_STRUCT_PTR(outbuf, + CHECK_SCHEDULER_CREDITS_OUT_RESULTS, i); + u8 sched_idx = MCDI_STRUCT_BYTE(result, SCHED_CREDIT_CHECK_RESULT_SCHED_INSTANCE); + u8 node_type = MCDI_STRUCT_BYTE(result, SCHED_CREDIT_CHECK_RESULT_NODE_TYPE); + + netif_err(efx, tx_err, efx->net_dev, + "%10s: %s id %d level %d: Exp %#x got %#x\n", + sched_idx < ARRAY_SIZE(efx_mcdi_dump_sched_names) ? + efx_mcdi_dump_sched_names[sched_idx] : "???", + node_type == SCHED_CREDIT_CHECK_RESULT_DEST ? " Dest" : "Source", + MCDI_STRUCT_DWORD(result, SCHED_CREDIT_CHECK_RESULT_NODE_INDEX), + MCDI_STRUCT_WORD(result, SCHED_CREDIT_CHECK_RESULT_NODE_LEVEL), + MCDI_STRUCT_DWORD(result, SCHED_CREDIT_CHECK_RESULT_EXPECTED_CREDITS), + MCDI_STRUCT_DWORD(result, SCHED_CREDIT_CHECK_RESULT_ACTUAL_CREDITS)); + } + rc = MCDI_DWORD(outbuf, CHECK_SCHEDULER_CREDITS_OUT_NUM_PAGES); +out_free: + kfree(outbuf); + return rc; +} + +static int efx_mcdi_dump_sched_cred(struct efx_nic *efx) +{ + u32 generation_count = 0, num_pages = 1, page; + int rc; + + for (page = 0; page < num_pages; page++) { + rc = efx_mcdi_dump_sched_cred_page(efx, page, 0, + &generation_count); + if (rc == -EAGAIN) { + /* Someone else started a dump in the middle of ours, + * causing generation counts to mismatch. Start over. 
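+ * (page is a u32, so page = -1 wraps and the loop's page++ restarts the scan at page 0, which re-latches generation_count.)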
+ */ + netif_err(efx, tx_err, efx->net_dev, "***RETRY***\n"); + page = -1; + continue; + } + if (rc < 0) + return rc; + if (!page) + num_pages = rc; + } + return 0; +} + +int efx_ef100_dump_sss_regs(struct efx_nic *efx) +{ + efx_mcdi_dump_reg_table(efx, efx_dump_misc, + ARRAY_SIZE(efx_dump_misc)); + efx_mcdi_dump_reg_table(efx, efx_dump_sss_host_tx, + ARRAY_SIZE(efx_dump_sss_host_tx)); + efx_mcdi_dump_reg_table(efx, efx_dump_sss_rx_path, + ARRAY_SIZE(efx_dump_sss_rx_path)); + efx_mcdi_dump_reg_table(efx, efx_dump_sss_host_rx, + ARRAY_SIZE(efx_dump_sss_host_rx)); + efx_mcdi_dump_reg_indir_table(efx, efx_dump_sched_dest_creds, + ARRAY_SIZE(efx_dump_sched_dest_creds)); + efx_mcdi_dump_sched_cred(efx); + efx_mcdi_dump_reg_table(efx, efx_dump_xon_xoff, + ARRAY_SIZE(efx_dump_xon_xoff)); + efx_mcdi_dump_reg_indir_table(efx, efx_dump_xon_state, + ARRAY_SIZE(efx_dump_xon_state)); + return 0; +} + +#ifdef SFC_NAPI_DEBUG +#include "ef100_nic.h" +#include "ef100_regs.h" + +#define MAX_EVENTS_TO_DUMP 0xffff + +static unsigned int ef100_dump_pending_events(struct efx_channel *channel, + bool print) +{ + unsigned int spent = 0; + struct efx_nic *efx = channel->efx; + struct ef100_nic_data *nic_data; + bool evq_phase, old_evq_phase; + unsigned int read_ptr; + efx_qword_t *p_event; + bool ev_phase; + + nic_data = efx->nic_data; + evq_phase = test_bit (channel->channel, nic_data->evq_phases); + old_evq_phase = evq_phase; + read_ptr = channel->eventq_read_ptr; + + for (;;) { + p_event = efx_event(channel, read_ptr); + + ev_phase = !!EFX_QWORD_FIELD(*p_event, ESF_GZ_EV_RXPKTS_PHASE); + if (ev_phase != evq_phase) + break; + + if (print && spent < MAX_EVENTS_TO_DUMP) { + netif_dbg(efx, drv, efx->net_dev, + "unprocessed event on %d " EFX_QWORD_FMT "\n", + channel->channel, EFX_QWORD_VAL(*p_event)); + } + ++spent; + + ++read_ptr; + if ((read_ptr & channel->eventq_mask) == 0) + evq_phase = !evq_phase; + } + + return spent; +} + +/* dump time since last irq, + * time since last napi poll, + * and unprocessed events + */ +void efx_ef100_dump_napi_debug(struct efx_nic *efx) +{ + struct efx_channel *channel; + + int now = jiffies; + + efx_for_each_channel(channel, efx) { + unsigned int remaining_events = ef100_dump_pending_events(channel, false); + + netif_dbg(efx, drv, efx->net_dev, + "channel %d irq %u ms ago, poll start %u ms ago, poll end %u ms ago, spent/budget/done = %d/%d/%d, reprime %u ms ago, %u events pending\n", + channel->channel, + jiffies_to_msecs(now - channel->last_irq_jiffies), + jiffies_to_msecs(now - channel->last_napi_poll_jiffies), + jiffies_to_msecs(now - channel->last_napi_poll_end_jiffies), + channel->last_spent, channel->last_budget, channel->last_complete_done, + jiffies_to_msecs(now - channel->last_irq_reprime_jiffies), + remaining_events); + } + efx_for_each_channel(channel, efx) + ef100_dump_pending_events(channel, true); +} +#else +void efx_ef100_dump_napi_debug(struct efx_nic *efx) {} +#endif + diff --git a/src/drivers/net/ethernet/sfc/ef100_dump.h b/src/drivers/net/ethernet/sfc/ef100_dump.h new file mode 100644 index 0000000..74302d2 --- /dev/null +++ b/src/drivers/net/ethernet/sfc/ef100_dump.h @@ -0,0 +1,21 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2021 Solarflare Communications Inc. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#ifndef EFX_EF100_DUMP_H +#define EFX_EF100_DUMP_H + +#include "net_driver.h" + +/* Dump state of Streaming Sub-System to dmesg */ +int efx_ef100_dump_sss_regs(struct efx_nic *efx); + +/* Dump napi debug */ +void efx_ef100_dump_napi_debug(struct efx_nic *efx); + +#endif /* EFX_EF100_DUMP_H */ diff --git a/src/drivers/net/ethernet/sfc/ef100_ethtool.c b/src/drivers/net/ethernet/sfc/ef100_ethtool.c new file mode 100644 index 0000000..da599d9 --- /dev/null +++ b/src/drivers/net/ethernet/sfc/ef100_ethtool.c @@ -0,0 +1,198 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2018 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ +#include <linux/module.h> +#include <linux/netdevice.h> +#include "net_driver.h" +#include "efx.h" +#include "mcdi_port_common.h" +#include "ethtool_common.h" +#include "ef100_ethtool.h" +#include "mcdi_functions.h" + +/* This is the maximum number of descriptor rings supported by the QDMA */ +#define EFX_EF100_MAX_DMAQ_SIZE 16384UL + +static void ef100_ethtool_get_ringparam(struct net_device *net_dev, +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_ETHTOOL_GET_RINGPARAM_EXTACK) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kring, + struct netlink_ext_ack *ext_ack) +#else + struct ethtool_ringparam *ring) +#endif +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + unsigned long driver_bitmap; + unsigned long max_size = 0; + + driver_bitmap = EFX_EF100_MAX_DMAQ_SIZE | (EFX_EF100_MAX_DMAQ_SIZE - 1); + driver_bitmap &= efx->guaranteed_bitmap; + if (driver_bitmap) + max_size = rounddown_pow_of_two(driver_bitmap); + + ring->rx_max_pending = max_size; + ring->tx_max_pending = max_size; + ring->rx_pending = efx->rxq_entries; + ring->tx_pending = efx->txq_entries; +} + +static int ef100_ethtool_set_ringparam(struct net_device *net_dev, +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_ETHTOOL_SET_RINGPARAM_EXTACK) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kring, + struct netlink_ext_ack *ext_ack) +#else + struct ethtool_ringparam *ring) +#endif +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + int rc = 0; + + if (ring->rx_mini_pending || ring->rx_jumbo_pending) + return -EINVAL; + + if (!is_power_of_2(ring->rx_pending) || + !is_power_of_2(ring->tx_pending)) { + netif_err(efx, drv, efx->net_dev, + "ring sizes that are not a power of 2 are not supported\n"); + return -EINVAL; + } + if (ring->rx_pending == efx->rxq_entries && + ring->tx_pending == efx->txq_entries) + /* Nothing to do */ + return 0; + + if (!efx->supported_bitmap) { + netif_err(efx, drv, efx->net_dev, + "ring size changes not supported\n"); + return -EOPNOTSUPP; + } + if (ring->rx_pending && + !(efx->guaranteed_bitmap & ring->rx_pending)) { + netif_err(efx, drv, efx->net_dev, + "unsupported ring size for RX\n"); + return -ERANGE; + } + if (ring->tx_pending && + !(efx->guaranteed_bitmap & ring->tx_pending)) { + netif_err(efx, drv, efx->net_dev, + "unsupported ring size for TX\n"); + return -ERANGE; + } + + /* Apply the new settings */ + efx->rxq_entries =
ring->rx_pending; + efx->txq_entries = ring->tx_pending; + + /* Update the datapath with the new settings if the interface is up */ + if (!efx_check_disabled(efx) && netif_running(efx->net_dev)) { + dev_close(net_dev); + rc = dev_open(net_dev, NULL); + } + + return rc; +} + +static void ef100_ethtool_get_drvinfo(struct net_device *net_dev, + struct ethtool_drvinfo *info) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + + efx_ethtool_get_common_drvinfo(efx, info); + if (!in_interrupt()) + efx_mcdi_print_fw_bundle_ver(efx, info->fw_version, + sizeof(info->fw_version)); +} + +/* Ethtool options available + */ +const struct ethtool_ops ef100_ethtool_ops = { + .get_drvinfo = ef100_ethtool_get_drvinfo, + .get_msglevel = efx_ethtool_get_msglevel, + .set_msglevel = efx_ethtool_set_msglevel, + .nway_reset = efx_ethtool_nway_reset, + .get_pauseparam = efx_ethtool_get_pauseparam, + .set_pauseparam = efx_ethtool_set_pauseparam, + .get_sset_count = efx_ethtool_get_sset_count, + .get_priv_flags = efx_ethtool_get_priv_flags, + .set_priv_flags = efx_ethtool_set_priv_flags, + .self_test = efx_ethtool_self_test, + .get_strings = efx_ethtool_get_strings, +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_ETHTOOL_LINKSETTINGS) + .get_link_ksettings = efx_ethtool_get_link_ksettings, + .set_link_ksettings = efx_ethtool_set_link_ksettings, +#else + .get_settings = efx_ethtool_get_settings, + .set_settings = efx_ethtool_set_settings, +#endif + .get_link = ethtool_op_get_link, + .get_ringparam = ef100_ethtool_get_ringparam, + .set_ringparam = ef100_ethtool_set_ringparam, +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_ETHTOOL_FECPARAM) + .get_fecparam = efx_ethtool_get_fecparam, + .set_fecparam = efx_ethtool_set_fecparam, +#endif +#if !defined(EFX_USE_KCOMPAT) || (defined(EFX_HAVE_ETHTOOL_SET_PHYS_ID) && !defined(EFX_USE_ETHTOOL_OPS_EXT)) + .set_phys_id = efx_ethtool_phys_id, +#elif !defined(EFX_USE_ETHTOOL_OPS_EXT) + .phys_id = efx_ethtool_phys_id_loop, +#endif + .get_ethtool_stats = efx_ethtool_get_stats, +#if !defined(EFX_USE_KCOMPAT) + .get_rxnfc = efx_ethtool_get_rxnfc, + .set_rxnfc = efx_ethtool_set_rxnfc, +#else + .get_rxnfc = efx_ethtool_get_rxnfc_wrapper, + .set_rxnfc = efx_ethtool_set_rxnfc_wrapper, +#endif +#if defined(EFX_USE_KCOMPAT) && (!defined(EFX_USE_DEVLINK) || defined(EFX_NEED_ETHTOOL_FLASH_DEVICE)) + .flash_device = efx_ethtool_flash_device, +#endif +#if !defined(EFX_USE_KCOMPAT) || (defined(EFX_HAVE_ETHTOOL_RESET) && !defined(EFX_USE_ETHTOOL_OPS_EXT)) + .reset = efx_ethtool_reset, +#endif +#if !defined(EFX_USE_KCOMPAT) || !defined(EFX_USE_ETHTOOL_OPS_EXT) +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_ETHTOOL_GET_RXFH_INDIR_SIZE) + .get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size, +#endif +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_ETHTOOL_GET_RXFH_KEY_SIZE) + .get_rxfh_key_size = efx_ethtool_get_rxfh_key_size, +#endif +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_CONFIGURABLE_RSS_HASH) + .get_rxfh = efx_ethtool_get_rxfh, + .set_rxfh = efx_ethtool_set_rxfh, +#elif defined(EFX_HAVE_ETHTOOL_GET_RXFH) + .get_rxfh = efx_ethtool_get_rxfh_no_hfunc, + .set_rxfh = efx_ethtool_set_rxfh_no_hfunc, +#elif defined(EFX_HAVE_ETHTOOL_GET_RXFH_INDIR) +# if defined(EFX_HAVE_OLD_ETHTOOL_RXFH_INDIR) + .get_rxfh_indir = efx_ethtool_old_get_rxfh_indir, + .set_rxfh_indir = efx_ethtool_old_set_rxfh_indir, +# else + .get_rxfh_indir = efx_ethtool_get_rxfh_indir, + .set_rxfh_indir = efx_ethtool_set_rxfh_indir, +# endif +#endif +#if !defined(EFX_USE_KCOMPAT) || 
defined(EFX_HAVE_ETHTOOL_RXFH_CONTEXT) + .get_rxfh_context = efx_ethtool_get_rxfh_context, + .set_rxfh_context = efx_ethtool_set_rxfh_context, +#endif + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_ETHTOOL_GMODULEEEPROM) + .get_module_info = efx_ethtool_get_module_info, + .get_module_eeprom = efx_ethtool_get_module_eeprom, +#endif + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_ETHTOOL_CHANNELS) || defined(EFX_HAVE_ETHTOOL_EXT_CHANNELS) + .get_channels = efx_ethtool_get_channels, + .set_channels = efx_ethtool_set_channels, +#endif +#endif /* EFX_USE_ETHTOOL_OPS_EXT */ +}; + diff --git a/src/drivers/net/ethernet/sfc/ef100_ethtool.h b/src/drivers/net/ethernet/sfc/ef100_ethtool.h new file mode 100644 index 0000000..71dba61 --- /dev/null +++ b/src/drivers/net/ethernet/sfc/ef100_ethtool.h @@ -0,0 +1,10 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2018 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +extern const struct ethtool_ops ef100_ethtool_ops; diff --git a/src/drivers/net/ethernet/sfc/ef100_netdev.c b/src/drivers/net/ethernet/sfc/ef100_netdev.c new file mode 100644 index 0000000..58d41a0 --- /dev/null +++ b/src/drivers/net/ethernet/sfc/ef100_netdev.c @@ -0,0 +1,1002 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2018 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
+ */ +#include <linux/module.h> +#include "net_driver.h" +#include "mcdi_port_common.h" +#include "mcdi_functions.h" +#include "efx_common.h" +#include "efx_channels.h" +#include "tx_common.h" +#include "debugfs.h" +#include "ef100_netdev.h" +#include "ef100_ethtool.h" +#include "efx_common.h" +#include "nic.h" +#include "ef100_nic.h" +#include "ef100_tx.h" +#include "ef100_regs.h" +#include "mcdi_filters.h" +#include "rx_common.h" +#include "efx_ioctl.h" +#include "ioctl_common.h" +#ifdef CONFIG_SFC_TRACING +#include <trace/events/sfc.h> +#endif +#include "mae.h" +#include "tc.h" +#include "tc_bindings.h" +#include "tc_encap_actions.h" +#include "efx_devlink.h" +#include "ef100_sriov.h" +#include "ef100_rep.h" +#include "xdp.h" + +#ifdef EFX_NOT_UPSTREAM +#if IS_MODULE(CONFIG_SFC_DRIVERLINK) +#include "io.h" +#endif +#endif + +static void ef100_update_name(struct efx_nic *efx) +{ + strcpy(efx->name, efx->net_dev->name); + efx_update_debugfs_netdev(efx); +} + +static int ef100_alloc_vis(struct efx_nic *efx, unsigned int *allocated_vis) +{ + unsigned int rx_vis = efx_rx_channels(efx); + unsigned int tx_vis = efx_tx_channels(efx) * efx->tx_queues_per_channel; + unsigned int min_vis, max_vis; + int rc; + + tx_vis += efx_xdp_channels(efx) * efx->xdp_tx_per_channel; + + max_vis = max(rx_vis, tx_vis); + min_vis = efx->tx_queues_per_channel; +#ifdef EFX_NOT_UPSTREAM +#if IS_MODULE(CONFIG_SFC_DRIVERLINK) + /* We will consume all VIs with IDs less than the current value of + * max_vis, so report that as the ID of the first VI available to the + * driverlink client, and then bump the requested number of VIs by the + * number that the client would like. */ + efx->ef10_resources.vi_min = max_vis; + max_vis += EF100_ONLOAD_VIS; +#endif +#endif + rc = efx_mcdi_alloc_vis(efx, min_vis, max_vis, +#if defined(EFX_NOT_UPSTREAM) && IS_MODULE(CONFIG_SFC_DRIVERLINK) + &efx->ef10_resources.vi_base, + &efx->ef10_resources.vi_shift, +#else + NULL, NULL, +#endif + allocated_vis); + if (rc) + return rc; + if ((*allocated_vis >= min_vis) && (*allocated_vis < max_vis)) + rc = -EAGAIN; + + return rc; +} + +static int ef100_remap_bar(struct efx_nic *efx, int max_vis) +{ + unsigned int uc_mem_map_size; + void __iomem *membase; + + efx->max_vis = max_vis; + uc_mem_map_size = PAGE_ALIGN(max_vis * efx->vi_stride); + + /* Extend the original UC mapping of the memory BAR */ +#if defined(EFX_USE_KCOMPAT) + membase = efx_ioremap(efx->membase_phys, uc_mem_map_size); +#else + membase = ioremap(efx->membase_phys, uc_mem_map_size); +#endif + if (!membase) { + netif_err(efx, probe, efx->net_dev, + "could not extend memory BAR to %x\n", + uc_mem_map_size); + return -ENOMEM; + } + iounmap(efx->membase); + efx->membase = membase; + return 0; +} + +/* Context: process, rtnl_lock() held. + * Note that the kernel will ignore our return code; this method + * should really return void.
+ */ +static int ef100_net_stop(struct net_device *net_dev) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + + netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n", + raw_smp_processor_id()); + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_TC_OFFLOAD) + if (efx->type->detach_reps) + efx->type->detach_reps(efx); +#endif + netif_stop_queue(net_dev); + efx_stop_all(efx); + + efx->state = STATE_NET_ALLOCATED; + + efx_net_dealloc(efx); + + return 0; +} + +void ef100_net_dealloc(struct efx_nic *efx) +{ + struct ef100_nic_data *nic_data; + +#ifdef EFX_NOT_UPSTREAM +#if IS_MODULE(CONFIG_SFC_DRIVERLINK) + if (--efx->open_count) { + netif_dbg(efx, drv, efx->net_dev, "still open\n"); + return; + } +#endif +#endif + + efx_disable_interrupts(efx); +#if defined(EFX_NOT_UPSTREAM) && defined(CONFIG_SFC_BUSYPOLL) + if (efx->interrupt_mode != EFX_INT_MODE_POLLED) { + efx_clear_interrupt_affinity(efx); + efx_nic_fini_interrupt(efx); + } +#else + efx_clear_interrupt_affinity(efx); + efx_nic_fini_interrupt(efx); +#endif + efx_fini_filters(efx); + efx_fini_napi(efx); + + nic_data = efx->nic_data; + if (nic_data->evq_phases) + bitmap_free(nic_data->evq_phases); + nic_data->evq_phases = NULL; + + efx_remove_channels(efx); + efx_mcdi_free_vis(efx); + efx_unset_channels(efx); + efx_remove_interrupts(efx); + efx_fini_channels(efx); + + efx->state = STATE_NET_DOWN; +} + +/* Context: process, rtnl_lock() held. */ +static int ef100_net_open(struct net_device *net_dev) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + struct ef100_nic_data *nic_data; + int rc; + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_TC_OFFLOAD) + /* There is a small time gap at which this net_dev has been + * registered but the representors have not yet been + * registered. Delay open until the probe has completely finished. + */ + if (efx->state != STATE_NET_DOWN) + return -EAGAIN; +#endif + ef100_update_name(efx); + netif_dbg(efx, ifup, net_dev, "opening device on CPU %d\n", + raw_smp_processor_id()); + + rc = ef100_net_alloc(efx); + if (rc) + goto fail; + + rc = efx_start_all(efx); + if (rc) + goto fail; + + /* Link state detection is normally event-driven; we have + * to poll now because we could have missed a change + */ + mutex_lock(&efx->mac_lock); + if (efx_mcdi_phy_poll(efx)) + efx_link_status_changed(efx); + mutex_unlock(&efx->mac_lock); + + efx->state = STATE_NET_UP; + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_TC_OFFLOAD) + if (netif_running(efx->net_dev) && efx->type->attach_reps) + efx->type->attach_reps(efx); +#endif + /* Mport changes are normally event-driven. Check now because + * we could have missed a change. + */ + nic_data = efx->nic_data; + if (nic_data->grp_mae) + schedule_work(&efx->mae->mport_work); + return 0; + +fail: + ef100_net_stop(net_dev); + return rc; +} + +/* This function tries to distribute allocated VIs based on the channel + * priority from the total pool of available VIs + */ +static void ef100_adjust_channels(struct efx_nic *efx, unsigned int avail_vis) +{ + unsigned int vi_share; + + /* + * - vi_share is initially initialized to the total VIs required for + * the channel type. 
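+ * - channel types are served in priority order: combined, extra,
+ *   rx_only, tx_only and finally xdp, so the lowest-priority types
+ *   are starved first when VIs are scarce. For example, 8 combined
+ *   channels with 2 TX queues each and only 10 available VIs leave
+ *   min(16, 10) - (10 % 2) = 10 VIs, i.e. 5 combined channels, and
+ *   nothing for the remaining types.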
+ * - the below logic tries to compute the actual vi_share each channel + * type gets + */ + vi_share = min(efx->n_combined_channels * efx->tx_queues_per_channel, + avail_vis); + vi_share -= (vi_share % efx->tx_queues_per_channel); + avail_vis -= vi_share; + /* rx_only and tx_only channels are not present in ef100, + * the below logic does not have any effect + */ + efx->n_combined_channels = (vi_share / efx->tx_queues_per_channel); + vi_share = min(efx->n_extra_channels * efx->tx_queues_per_channel, + avail_vis); + vi_share -= (vi_share % efx->tx_queues_per_channel); + avail_vis -= vi_share; + efx->n_extra_channels = (vi_share / efx->tx_queues_per_channel); + vi_share = min(efx->n_rx_only_channels, avail_vis); + avail_vis -= vi_share; + efx->n_rx_only_channels = vi_share; + vi_share = min(efx->n_tx_only_channels * efx->tx_queues_per_channel, + avail_vis); + vi_share -= (vi_share % efx->tx_queues_per_channel); + avail_vis -= vi_share; + efx->n_tx_only_channels = (vi_share / efx->tx_queues_per_channel); + vi_share = (efx->n_xdp_channels * efx->xdp_tx_per_channel); + if (avail_vis < vi_share) + efx->n_xdp_channels = 0; + else + avail_vis -= vi_share; + /* Recalculate RSS spread based on available RX channels */ + efx->n_rss_channels = efx_rx_channels(efx) - efx->n_extra_channels; +} + +int ef100_net_alloc(struct efx_nic *efx) +{ + struct ef100_nic_data *nic_data; + unsigned int allocated_vis; + int rc; + +#ifdef EFX_NOT_UPSTREAM +#if IS_MODULE(CONFIG_SFC_DRIVERLINK) + if (efx->open_count++) { + netif_dbg(efx, drv, efx->net_dev, "already open\n"); + /* inform the kernel about link state again */ + efx_link_status_changed(efx); + return 0; + } +#endif +#endif + + rc = efx_check_disabled(efx); + if (rc) + return rc; + + efx->stats_initialised = false; + allocated_vis = 0; + do { + rc = efx_init_channels(efx); + if (rc) + return rc; + + rc = efx_probe_interrupts(efx); + if (rc) + return rc; + + rc = efx_set_channels(efx); + if (rc) + return rc; + + if (!allocated_vis) { + rc = efx_mcdi_free_vis(efx); + if (rc) + return rc; + + rc = ef100_alloc_vis(efx, &allocated_vis); + if (rc && rc != -EAGAIN) + return rc; + if (rc == -EAGAIN) { + efx_unset_channels(efx); + efx_remove_interrupts(efx); + efx_fini_channels(efx); + + ef100_adjust_channels(efx, allocated_vis); + efx->max_vis = allocated_vis; + } + } + } while (rc == -EAGAIN); + + rc = efx_mcdi_push_default_indir_table(efx, efx->n_rss_channels); + if (rc) + return rc; + + rc = efx_probe_channels(efx); + if (rc) + return rc; + + rc = ef100_remap_bar(efx, allocated_vis); + if (rc) + return rc; + + nic_data = efx->nic_data; + nic_data->evq_phases = bitmap_zalloc(efx_channels(efx), GFP_KERNEL); + if (!nic_data->evq_phases) + return -ENOMEM; + + rc = efx_init_napi(efx); + if (rc) + return rc; + + rc = efx_init_filters(efx); + if (rc) + return rc; + +#if defined(EFX_NOT_UPSTREAM) && defined(CONFIG_SFC_BUSYPOLL) + if (efx->interrupt_mode != EFX_INT_MODE_POLLED) { + rc = efx_nic_init_interrupt(efx); + if (rc) + return rc; + efx_set_interrupt_affinity(efx); + } +#else + rc = efx_nic_init_interrupt(efx); + if (rc) + return rc; + efx_set_interrupt_affinity(efx); +#endif + + rc = efx_enable_interrupts(efx); + if (rc) + return rc; + +#ifdef EFX_NOT_UPSTREAM +#if IS_MODULE(CONFIG_SFC_DRIVERLINK) + /* Register with driverlink layer */ + efx->ef10_resources.vi_lim = allocated_vis; + efx->ef10_resources.timer_quantum_ns = efx->timer_quantum_ns; + efx->ef10_resources.rss_channel_count = efx->rss_spread; + efx->ef10_resources.rx_channel_count = efx_rx_channels(efx); 
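+ /* These fields let driverlink clients (e.g. Onload) create their
+  * own VIs: vi_base/vi_stride/mem_bar describe the BAR layout,
+  * while vi_min..vi_lim bound the VI IDs that ef100_alloc_vis()
+  * reserved for client use.
+  */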
+ efx->ef10_resources.flags = EFX_DL_EF10_USE_MSI; + efx->ef10_resources.vi_stride = efx->vi_stride; + efx->ef10_resources.mem_bar = efx->mem_bar; + + if (efx->irq_resources) + efx->irq_resources->int_prime = + efx_mem(efx, efx_reg(efx, ER_GZ_EVQ_INT_PRIME)); + efx->ef10_resources.hdr.next = efx->irq_resources ? + &efx->irq_resources->hdr : NULL; + + efx->dl_nic.dl_info = &efx->ef10_resources.hdr; +#endif +#endif + + /* in case the MC rebooted while we were stopped, consume the change + * to the warm reboot count + */ + (void) efx_mcdi_poll_reboot(efx); + + return 0; +} + +/* Initiate a packet transmission. We use one channel per CPU + * (sharing when we have more CPUs than channels). + * + * Context: non-blocking. + * Note that returning anything other than NETDEV_TX_OK will cause the + * OS to free the skb. + */ +static netdev_tx_t ef100_hard_start_xmit(struct sk_buff *skb, + struct net_device *net_dev) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + + return __ef100_hard_start_xmit(skb, efx, net_dev, NULL); +} + +netdev_tx_t __ef100_hard_start_xmit(struct sk_buff *skb, + struct efx_nic *efx, + struct net_device *net_dev, + struct efx_rep *efv) +{ + struct efx_tx_queue *tx_queue; + struct efx_channel *channel; + int rc; + +#ifdef CONFIG_SFC_TRACING + trace_sfc_transmit(skb, net_dev); +#endif + channel = efx_get_tx_channel(efx, skb_get_queue_mapping(skb)); + netif_vdbg(efx, tx_queued, net_dev, + "%s len %d data %d channel %d\n", __FUNCTION__, + skb->len, skb->data_len, channel ? channel->channel : -1); + if (!efx_channels(efx) || !efx_tx_channels(efx) || !channel || + !channel->tx_queue_count) { + netif_err(efx, tx_err, net_dev, + "Bad TX channel (%u; %u; %d), stopping queue\n", + efx_channels(efx), efx_tx_channels(efx), + channel ? channel->tx_queue_count : -1); + netif_stop_queue(net_dev); + goto err; + } + + tx_queue = &channel->tx_queues[0]; + rc = __ef100_enqueue_skb(tx_queue, skb, efv); + if (rc == 0) + return NETDEV_TX_OK; + +err: + net_dev->stats.tx_dropped++; + return NETDEV_TX_OK; +} + +#if defined(EFX_NOT_UPSTREAM) +static int ef100_do_siocefx(struct net_device *net_dev, struct ifreq *ifr, + void __user *data) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + struct efx_sock_ioctl __user *user_data = data; + u16 efx_cmd; + int rc; + + if (copy_from_user(&efx_cmd, &user_data->cmd, sizeof(efx_cmd))) + return -EFAULT; + rc = efx_private_ioctl_common(efx, efx_cmd, &user_data->u); + if (rc == -EOPNOTSUPP) + netif_err(efx, drv, efx->net_dev, + "unknown private ioctl cmd %x\n", efx_cmd); + return rc; +} + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_NDO_SIOCDEVPRIVATE) +static int ef100_siocdevprivate(struct net_device *net_dev, struct ifreq *ifr, + void __user *data, int cmd) +{ + if (cmd == SIOCEFX) + return ef100_do_siocefx(net_dev, ifr, data); + return -EOPNOTSUPP; +} +#else +static int ef100_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd) +{ + if (cmd == SIOCEFX) + return ef100_do_siocefx(net_dev, ifr, ifr->ifr_data); + return -EOPNOTSUPP; +} +#endif +#endif + +#if defined(EFX_USE_KCOMPAT) && defined(EFX_TC_OFFLOAD) && !defined(EFX_HAVE_FLOW_INDR_BLOCK_CB_REGISTER) && !defined(EFX_HAVE_FLOW_INDR_DEV_REGISTER) +#ifdef EFX_HAVE_NDO_UDP_TUNNEL_ADD +#include "net/udp_tunnel.h" +static int ef100_udp_tunnel_type_map(enum udp_parsable_tunnel_type in) +{ + switch (in) { + case UDP_TUNNEL_TYPE_VXLAN: + return EFX_ENCAP_TYPE_VXLAN; + case UDP_TUNNEL_TYPE_GENEVE: + return EFX_ENCAP_TYPE_GENEVE; + default: + return EFX_ENCAP_TYPE_NONE; + } +} + +static void 
ef100_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti) +{ + struct efx_nic *efx = efx_netdev_priv(dev); + struct ef100_udp_tunnel tnl; + + tnl.type = ef100_udp_tunnel_type_map(ti->type); + if (tnl.type == EFX_ENCAP_TYPE_NONE) + return; + + tnl.port = ti->port; + + if (efx->type->udp_tnl_add_port2) + efx->type->udp_tnl_add_port2(efx, tnl); +} + +static void ef100_udp_tunnel_del(struct net_device *dev, struct udp_tunnel_info *ti) +{ + struct efx_nic *efx = efx_netdev_priv(dev); + struct ef100_udp_tunnel tnl; + + tnl.type = ef100_udp_tunnel_type_map(ti->type); + if (tnl.type == EFX_ENCAP_TYPE_NONE) + return; + + tnl.port = ti->port; + + if (efx->type->udp_tnl_del_port2) + efx->type->udp_tnl_del_port2(efx, tnl); +} +#else +#ifdef EFX_HAVE_NDO_ADD_VXLAN_PORT +void ef100_vxlan_add_port(struct net_device *dev, sa_family_t sa_family, + __be16 port) +{ + struct ef100_udp_tunnel tnl = {.port = port, + .type = EFX_ENCAP_TYPE_VXLAN}; + struct efx_nic *efx = efx_netdev_priv(dev); + + if (efx->type->udp_tnl_add_port2) + efx->type->udp_tnl_add_port2(efx, tnl); +} + +void ef100_vxlan_del_port(struct net_device *dev, sa_family_t sa_family, + __be16 port) +{ + struct ef100_udp_tunnel tnl = {.port = port, + .type = EFX_ENCAP_TYPE_VXLAN}; + struct efx_nic *efx = efx_netdev_priv(dev); + + if (efx->type->udp_tnl_del_port2) + efx->type->udp_tnl_del_port2(efx, tnl); +} +#endif +#ifdef EFX_HAVE_NDO_ADD_GENEVE_PORT +void ef100_geneve_add_port(struct net_device *dev, sa_family_t sa_family, + __be16 port) +{ + struct ef100_udp_tunnel tnl = {.port = port, + .type = EFX_ENCAP_TYPE_GENEVE}; + struct efx_nic *efx = efx_netdev_priv(dev); + + if (efx->type->udp_tnl_add_port2) + efx->type->udp_tnl_add_port2(efx, tnl); +} + +void ef100_geneve_del_port(struct net_device *dev, sa_family_t sa_family, + __be16 port) +{ + struct ef100_udp_tunnel tnl = {.port = port, + .type = EFX_ENCAP_TYPE_GENEVE}; + struct efx_nic *efx = efx_netdev_priv(dev); + + if (efx->type->udp_tnl_del_port2) + efx->type->udp_tnl_del_port2(efx, tnl); +} +#endif +#endif +#endif + +static const struct net_device_ops ef100_netdev_ops = { + .ndo_open = ef100_net_open, + .ndo_stop = ef100_net_stop, + .ndo_start_xmit = ef100_hard_start_xmit, + .ndo_tx_timeout = efx_watchdog, +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_USE_NETDEV_STATS64) + .ndo_get_stats64 = efx_net_stats, +#else + .ndo_get_stats = efx_net_stats, +#endif +#if defined(EFX_USE_KCOMPAT) && defined(EFX_HAVE_NDO_EXT_CHANGE_MTU) + .extended.ndo_change_mtu = efx_change_mtu, +#else + .ndo_change_mtu = efx_change_mtu, +#endif + .ndo_validate_addr = eth_validate_addr, +#if defined(EFX_NOT_UPSTREAM) +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_NDO_SIOCDEVPRIVATE) + .ndo_siocdevprivate = ef100_siocdevprivate, +#else + .ndo_do_ioctl = ef100_ioctl, +#endif +#endif + .ndo_set_mac_address = efx_set_mac_address, +#if !defined(EFX_USE_KCOMPAT) || !defined(EFX_HAVE_NDO_SET_MULTICAST_LIST) + .ndo_set_rx_mode = efx_set_rx_mode, /* Lookout */ +#else + /* On older kernel versions, set_rx_mode is expected to + * support multiple unicast addresses and set_multicast_list + * is expected to support only one. On newer versions the + * IFF_UNICAST_FLT flag distinguishes these. 
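+ * In this driver both entry points map to the same efx_set_rx_mode()
+ * handler, so the distinction is harmless here.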
+ */ + .ndo_set_multicast_list = efx_set_rx_mode, +#endif +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_NDO_SET_FEATURES) +#if defined(EFX_NOT_UPSTREAM) && defined(EFX_USE_SFC_LRO) + .ndo_fix_features = efx_fix_features, +#endif + .ndo_set_features = efx_set_features, +#endif + .ndo_vlan_rx_add_vid = efx_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = efx_vlan_rx_kill_vid, +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_NDO_GET_PHYS_PORT_ID) + .ndo_get_phys_port_id = efx_get_phys_port_id, +#endif +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_NDO_GET_PHYS_PORT_NAME) +#ifndef CONFIG_NET_DEVLINK + .ndo_get_phys_port_name = efx_get_phys_port_name, +#endif +#endif +#if defined(EFX_USE_KCOMPAT) && defined(EFX_WANT_NDO_POLL_CONTROLLER) +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = efx_netpoll, +#endif +#endif +#if defined(EFX_USE_KCOMPAT) && defined(EFX_HAVE_NDO_BUSY_POLL) +#ifdef CONFIG_NET_RX_BUSY_POLL + .ndo_busy_poll = efx_busy_poll, +#endif +#endif +#if !defined(EFX_USE_KCOMPAT) || !defined(EFX_HAVE_NETDEV_RFS_INFO) +#ifdef CONFIG_RFS_ACCEL + .ndo_rx_flow_steer = efx_filter_rfs, +#endif +#endif + +#if defined(EFX_USE_KCOMPAT) && defined(EFX_TC_OFFLOAD) && !defined(EFX_HAVE_FLOW_INDR_BLOCK_CB_REGISTER) && !defined(EFX_HAVE_FLOW_INDR_DEV_REGISTER) +#ifdef EFX_HAVE_NDO_UDP_TUNNEL_ADD + .ndo_udp_tunnel_add = ef100_udp_tunnel_add, + .ndo_udp_tunnel_del = ef100_udp_tunnel_del, +#else +#ifdef EFX_HAVE_NDO_ADD_VXLAN_PORT + .ndo_add_vxlan_port = ef100_vxlan_add_port, + .ndo_del_vxlan_port = ef100_vxlan_del_port, +#endif +#ifdef EFX_HAVE_NDO_ADD_GENEVE_PORT + .ndo_add_geneve_port = ef100_geneve_add_port, + .ndo_del_geneve_port = ef100_geneve_del_port, +#endif +#endif +#endif +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP) + .ndo_bpf = efx_xdp, +#endif +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_REDIR) + .ndo_xdp_xmit = efx_xdp_xmit, +#if defined(EFX_USE_KCOMPAT) && defined(EFX_NEED_XDP_FLUSH) + .ndo_xdp_flush = efx_xdp_flush, +#endif +#endif +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_TC_OFFLOAD) + .ndo_setup_tc = efx_setup_tc, +#endif +#if defined(EFX_USE_KCOMPAT) && defined(EFX_HAVE_NDO_GET_DEVLINK_PORT) +#ifdef CONFIG_NET_DEVLINK + .ndo_get_devlink_port = efx_get_devlink_port, +#endif +#endif + +#if defined(EFX_USE_KCOMPAT) && defined(EFX_HAVE_NDO_SIZE) + .ndo_size = sizeof(struct net_device_ops), +#endif +#if defined(EFX_USE_KCOMPAT) && defined(EFX_HAVE_NDO_SIZE_RH) + .ndo_size_rh = sizeof(struct net_device_ops), +#endif +}; + +/* Netdev registration + */ +int ef100_netdev_event(struct notifier_block *this, + unsigned long event, void *ptr) +{ + struct efx_nic *efx = container_of(this, struct efx_nic, netdev_notifier); + struct net_device *net_dev = netdev_notifier_info_to_dev(ptr); + struct ef100_nic_data *nic_data = efx->nic_data; + int err; + + if (efx->net_dev == net_dev && + (event == NETDEV_CHANGENAME || event == NETDEV_REGISTER)) + ef100_update_name(efx); + + if (!nic_data->grp_mae) + return NOTIFY_DONE; + err = efx_tc_netdev_event(efx, event, net_dev); + if (err & NOTIFY_STOP_MASK) + return err; + + return NOTIFY_DONE; +} + +int ef100_netevent_event(struct notifier_block *this, unsigned long event, + void *ptr) +{ + struct efx_nic *efx = container_of(this, struct efx_nic, netevent_notifier); + struct ef100_nic_data *nic_data = efx->nic_data; + int err; + + if (!nic_data->grp_mae) + return NOTIFY_DONE; + err = efx_tc_netevent_event(efx, event, ptr); + if (err & NOTIFY_STOP_MASK) + return err; + + return NOTIFY_DONE; +}; + +static int 
ef100_register_netdev(struct efx_nic *efx) +{ + struct net_device *net_dev = efx->net_dev; + int rc; + + net_dev->watchdog_timeo = 5 * HZ; + net_dev->irq = efx->pci_dev->irq; + net_dev->netdev_ops = &ef100_netdev_ops; +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_NETDEV_MTU_LIMITS) + net_dev->min_mtu = EFX_MIN_MTU; + net_dev->max_mtu = EFX_100_MAX_MTU; +#elif defined(EFX_HAVE_NETDEV_EXT_MTU_LIMITS) + net_dev->extended->min_mtu = EFX_MIN_MTU; + net_dev->extended->max_mtu = EFX_100_MAX_MTU; +#endif + net_dev->ethtool_ops = &ef100_ethtool_ops; + + rtnl_lock(); + + rc = dev_alloc_name(net_dev, net_dev->name); + if (rc < 0) + goto fail_locked; + ef100_update_name(efx); + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_SET_NETDEV_DEVLINK_PORT) +#ifdef CONFIG_NET_DEVLINK + SET_NETDEV_DEVLINK_PORT(net_dev, efx->devlink_port); +#endif +#endif + rc = register_netdevice(net_dev); + if (rc) + goto fail_locked; + + /* Always start with carrier off; PHY events will detect the link */ + netif_carrier_off(net_dev); + + rtnl_unlock(); + efx_init_mcdi_logging(efx); + efx_probe_devlink(efx); + + return 0; + +fail_locked: + rtnl_unlock(); + netif_err(efx, drv, efx->net_dev, "could not register net dev\n"); + return rc; +} + +static void ef100_unregister_netdev(struct efx_nic *efx) +{ + if (efx_dev_registered(efx)) { + efx_fini_devlink(efx); + efx_fini_mcdi_logging(efx); + efx->state = STATE_PROBED; + unregister_netdev(efx->net_dev); + } +} + +void ef100_remove_netdev(struct efx_probe_data *probe_data) +{ + struct efx_nic *efx = &probe_data->efx; + + if (!efx->net_dev) + return; + + rtnl_lock(); +#ifdef EFX_NOT_UPSTREAM +#if IS_MODULE(CONFIG_SFC_DRIVERLINK) + if (efx_dl_supported(efx)) + efx_dl_unregister_nic(&efx->dl_nic); +#endif +#endif + dev_close(efx->net_dev); + rtnl_unlock(); + + /* Unregistering our netdev notifier triggers unbinding of TC indirect + * blocks, so we have to do it before PCI removal. 
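+ * The netevent notifier is removed at the same point, since it also
+ * feeds TC state (see ef100_netevent_event()).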
+ */ + unregister_netdevice_notifier(&efx->netdev_notifier); + unregister_netevent_notifier(&efx->netevent_notifier); + + ef100_unregister_netdev(efx); + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP) + rtnl_lock(); + efx_xdp_setup_prog(efx, NULL); + rtnl_unlock(); +#endif + + if (!efx->type->is_vf) { +#if defined(CONFIG_SFC_SRIOV) + efx_ef100_pci_sriov_disable(efx, true); +#endif + efx_fini_mae(efx); + efx_ef100_fini_reps(efx); + efx_fini_tc(efx); + } + +#ifdef CONFIG_DEBUG_FS + mutex_lock(&efx->debugfs_symlink_mutex); + efx_fini_debugfs_netdev(efx->net_dev); + mutex_unlock(&efx->debugfs_symlink_mutex); +#endif + + efx_mcdi_filter_table_remove(efx); + efx_fini_interrupts(efx); + efx_mcdi_mac_fini_stats(efx); + kfree(efx->phy_data); + efx->phy_data = NULL; + + efx_mcdi_mon_remove(efx); + + free_netdev(efx->net_dev); + efx->net_dev = NULL; + efx->state = STATE_PROBED; +} + +int ef100_probe_netdev(struct efx_probe_data *probe_data) +{ + struct efx_nic *efx = &probe_data->efx; + struct ef100_nic_data *nic_data; + struct efx_probe_data **probe_ptr; + struct net_device *net_dev; + int rc; + +#if !defined(EFX_USE_KCOMPAT) || !defined(EFX_TC_OFFLOAD) + if (efx->mcdi->fn_flags & + (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_NO_ACTIVE_PORT)) { + return 0; + } +#endif + + /* Allocate and initialise a struct net_device */ + net_dev = alloc_etherdev_mq(sizeof(probe_data), EFX_MAX_CORE_TX_QUEUES); + if (!net_dev) + return -ENOMEM; + probe_ptr = netdev_priv(net_dev); + *probe_ptr = probe_data; + efx->net_dev = net_dev; + SET_NETDEV_DEV(net_dev, &efx->pci_dev->dev); + + /* enable all supported features except rx-fcs and rx-all */ + net_dev->features |= efx->type->offload_features & + ~(NETIF_F_RXFCS | NETIF_F_RXALL); + efx_add_hw_features(efx, efx->type->offload_features); + net_dev->vlan_features |= (NETIF_F_HW_CSUM | NETIF_F_SG | + NETIF_F_HIGHDMA | NETIF_F_ALL_TSO); +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_HW_ENC_FEATURES) + net_dev->hw_enc_features |= efx->type->offload_features; +#endif +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_GSO_MAX_SEGS) +#if !defined(EFX_HAVE_GSO_MAX_SEGS) + if (!READ_ONCE(net_dev->tso_max_segs)) +#else + if (!READ_ONCE(net_dev->gso_max_segs)) +#endif + netif_set_tso_max_segs(net_dev, + ESE_EF100_DP_GZ_TSO_MAX_HDR_NUM_SEGS_DEFAULT); +#endif +#ifdef EFX_NOT_UPSTREAM +#if IS_MODULE(CONFIG_SFC_DRIVERLINK) + efx_dl_probe(efx); + efx->ef10_resources = efx->type->ef10_resources; + efx->n_dl_irqs = EF100_ONLOAD_IRQS; +#endif + efx_nic_check_pcie_link(efx, EFX_BW_PCIE_GEN3_X16, NULL, NULL); +#endif + + rc = efx_ef100_init_datapath_caps(efx); + if (rc < 0) + goto fail; + + rc = efx_mcdi_mon_probe(efx); + if (rc && rc != -EPERM) + netif_warn(efx, drv, efx->net_dev, "could not init sensors\n"); + + rc = ef100_phy_probe(efx); + if (rc) + goto fail; + + rc = efx_mcdi_mac_init_stats(efx); + if (rc) + goto fail; + + nic_data = efx->nic_data; + if (nic_data->grp_mae) { + rc = efx_init_struct_tc(efx); + if (rc) + goto fail; + } + + rc = efx_init_interrupts(efx); + if (rc < 0) + goto fail; + + /* Update maximum channel count for ethtool */ + efx->max_channels = min_t(u16, efx->max_channels, efx->max_irqs); + efx->max_tx_channels = efx->max_channels; + + rc = ef100_filter_table_probe(efx); + if (rc) + goto fail; + + /* Add unspecified VID to support VLAN filtering being disabled */ + rc = efx_mcdi_filter_add_vlan(efx, EFX_FILTER_VID_UNSPEC); + if (rc) + goto fail; + + /* If VLAN filtering is enabled, we need VID 0 to get untagged + * traffic. 
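+ * (Priority-tagged 802.1p frames also carry VID 0 in their tag.)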
It is added automatically if 8021q module is loaded, + * but we can't rely on it since module may be not loaded. + */ + rc = efx_mcdi_filter_add_vlan(efx, 0); + if (rc) + goto fail; + + netdev_rss_key_fill(efx->rss_context.rx_hash_key, + sizeof(efx->rss_context.rx_hash_key)); + + /* Don't fail init if RSS setup doesn't work. */ + efx_mcdi_push_default_indir_table(efx, efx->n_rss_channels); + + rc = ef100_get_mac_address(efx, net_dev->perm_addr, CLIENT_HANDLE_SELF, + efx->type->is_vf); + if (rc) + return rc; + /* Assign MAC address */ + eth_hw_addr_set(net_dev, net_dev->perm_addr); + ether_addr_copy(nic_data->port_id, net_dev->perm_addr); + + if (!efx->type->is_vf) { + rc = ef100_probe_netdev_pf(efx); + if (rc) + goto fail; + } + + rc = ef100_register_netdev(efx); + if (rc) + goto fail; + + efx->netdev_notifier.notifier_call = ef100_netdev_event; + rc = register_netdevice_notifier(&efx->netdev_notifier); + if (rc) { + netif_err(efx, probe, efx->net_dev, + "Failed to register netdevice notifier, rc=%d\n", rc); + goto fail; + } + + efx->netevent_notifier.notifier_call = ef100_netevent_event; + rc = register_netevent_notifier(&efx->netevent_notifier); + if (rc) { + netif_err(efx, probe, efx->net_dev, + "Failed to register netevent notifier, rc=%d\n", rc); + goto fail; + } + + if (!efx->type->is_vf) + efx_ef100_init_reps(efx); + +#ifdef EFX_NOT_UPSTREAM +#if IS_MODULE(CONFIG_SFC_DRIVERLINK) + if (efx_dl_supported(efx)) { + rtnl_lock(); + efx_dl_register_nic(&efx->dl_nic); + rtnl_unlock(); + } +#endif +#endif + efx->state = STATE_NET_DOWN; + +fail: + return rc; +} diff --git a/src/drivers/net/ethernet/sfc/ef100_netdev.h b/src/drivers/net/ethernet/sfc/ef100_netdev.h new file mode 100644 index 0000000..acd3a45 --- /dev/null +++ b/src/drivers/net/ethernet/sfc/ef100_netdev.h @@ -0,0 +1,29 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2018 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ +#ifndef EFX_EF100_NETDEV_H +#define EFX_EF100_NETDEV_H + +#include +#include "ef100_nic.h" +#include "ef100_rep.h" + +int ef100_netdev_event(struct notifier_block *this, + unsigned long event, void *ptr); +int ef100_netevent_event(struct notifier_block *this, + unsigned long event, void *ptr); +netdev_tx_t __ef100_hard_start_xmit(struct sk_buff *skb, + struct efx_nic *efx, + struct net_device *net_dev, + struct efx_rep *efv); +int ef100_probe_netdev(struct efx_probe_data *probe_data); +void ef100_remove_netdev(struct efx_probe_data *probe_data); +int ef100_net_alloc(struct efx_nic *efx); +void ef100_net_dealloc(struct efx_nic *efx); + +#endif /* EFX_EF100_NETDEV_H */ diff --git a/src/drivers/net/ethernet/sfc/ef100_nic.c b/src/drivers/net/ethernet/sfc/ef100_nic.c new file mode 100644 index 0000000..002ec86 --- /dev/null +++ b/src/drivers/net/ethernet/sfc/ef100_nic.c @@ -0,0 +1,2080 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2018 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
+ */ + +#include "ef100_nic.h" +#include +//#include +#include "efx_common.h" +#include "efx_channels.h" +#include "io.h" +#include "debugfs.h" +#include "selftest.h" +#include "ef100_regs.h" +#include "mcdi.h" +#include "mcdi_pcol.h" +#include "mcdi_port_common.h" +#include "mcdi_functions.h" +#include "mcdi_filters.h" +#include "ef100.h" +#include "ef100_rx.h" +#include "ef100_tx.h" +#include "ef100_sriov.h" +#include "ef100_netdev.h" +#include "ef100_rep.h" +#include "tc.h" +#include "mae.h" +#include "xdp.h" +#include "rx_common.h" +#ifdef CONFIG_SFC_VDPA +#include "ef100_vdpa.h" +#endif + +#define EF100_MAX_VIS 4096 +#define EF100_NUM_MCDI_BUFFERS 1 + +#ifndef EF100_RESET_PORT +#define EF100_RESET_PORT ((ETH_RESET_MAC | ETH_RESET_PHY) << ETH_RESET_SHARED_SHIFT) +#endif + +static bool ef100_has_dynamic_sensors(struct efx_nic *efx) +{ + struct ef100_nic_data *nic_data = efx->nic_data; + + return efx_ef100_has_cap(nic_data->datapath_caps2, DYNAMIC_SENSORS); +} + +/* MCDI + */ +static u8 *ef100_mcdi_buf(struct efx_nic *efx, u8 bufid, + dma_addr_t *dma_addr) +{ + struct ef100_nic_data *nic_data = efx->nic_data; + + if (dma_addr) + *dma_addr = nic_data->mcdi_buf.dma_addr + + bufid * ALIGN(MCDI_BUF_LEN, 256); + return nic_data->mcdi_buf.addr + bufid * ALIGN(MCDI_BUF_LEN, 256); +} + +static int ef100_get_warm_boot_count(struct efx_nic *efx) +{ + efx_dword_t reg; + + efx_readd(efx, ®, efx_reg(efx, ER_GZ_MC_SFT_STATUS)); + + if (EFX_DWORD_FIELD(reg, EFX_DWORD_0) == 0xffffffff) { + netif_err(efx, hw, efx->net_dev, "Hardware unavailable\n"); + efx->state = STATE_DISABLED; + return -ENETDOWN; + } else { + return EFX_DWORD_FIELD(reg, EFX_WORD_1) == 0xb007 ? + EFX_DWORD_FIELD(reg, EFX_WORD_0) : -EIO; + } +} + +static void ef100_mcdi_request(struct efx_nic *efx, u8 bufid, + const efx_dword_t *hdr, size_t hdr_len, + const efx_dword_t *sdu, size_t sdu_len) +{ + dma_addr_t dma_addr; + u8 *pdu = ef100_mcdi_buf(efx, bufid, &dma_addr); + + memcpy(pdu, hdr, hdr_len); + memcpy(pdu + hdr_len, sdu, sdu_len); + wmb(); + + /* The hardware provides 'low' and 'high' (doorbell) registers + * for passing the 64-bit address of an MCDI request to + * firmware. However the dwords are swapped by firmware. The + * least significant bits of the doorbell are then 0 for all + * MCDI requests due to alignment. + */ + _efx_writed(efx, cpu_to_le32((u64)dma_addr >> 32), efx_reg(efx, ER_GZ_MC_DB_LWRD)); + _efx_writed(efx, cpu_to_le32((u32)dma_addr), efx_reg(efx, ER_GZ_MC_DB_HWRD)); +} + +static bool ef100_mcdi_poll_response(struct efx_nic *efx, u8 bufid) +{ + const efx_dword_t hdr = + *(const efx_dword_t *)(ef100_mcdi_buf(efx, bufid, NULL)); + + rmb(); + return EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE); +} + +static void +ef100_mcdi_read_response(struct efx_nic *efx, u8 bufid, + efx_dword_t *outbuf, size_t offset, size_t outlen) +{ + const u8 *pdu = ef100_mcdi_buf(efx, bufid, NULL); + + memcpy(outbuf, pdu + offset, outlen); +} + +static int ef100_mcdi_poll_reboot(struct efx_nic *efx) +{ + struct ef100_nic_data *nic_data = efx->nic_data; + int rc; + + rc = ef100_get_warm_boot_count(efx); + if (rc < 0) { + /* The firmware is presumably in the process of + * rebooting. However, we are supposed to report each + * reboot just once, so we must only do that once we + * can read and store the updated warm boot count. 
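+ * Returning 0 here therefore defers reporting the reboot until the
+ * register read succeeds on a later poll.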
+ */ + return 0; + } + + if (rc == nic_data->warm_boot_count) + return 0; + + nic_data->warm_boot_count = rc; + + return -EIO; +} + +static void ef100_mcdi_reboot_detected(struct efx_nic *efx) +{ + efx->last_reset = jiffies; +} + +/* Get an MCDI buffer + * + * The caller is responsible for preventing racing by holding the + * MCDI iface_lock. + */ +static bool ef100_mcdi_get_buf(struct efx_nic *efx, u8 *bufid) +{ + struct ef100_nic_data *nic_data = efx->nic_data; + + *bufid = ffz(nic_data->mcdi_buf_use); + if (*bufid < EF100_NUM_MCDI_BUFFERS) { + set_bit(*bufid, &nic_data->mcdi_buf_use); + return true; + } + + return false; +} + +/* Return an MCDI buffer */ +static void ef100_mcdi_put_buf(struct efx_nic *efx, u8 bufid) +{ + struct ef100_nic_data *nic_data = efx->nic_data; + + EFX_WARN_ON_PARANOID(bufid >= EF100_NUM_MCDI_BUFFERS); + EFX_WARN_ON_PARANOID(!test_bit(bufid, &nic_data->mcdi_buf_use)); + + clear_bit(bufid, &nic_data->mcdi_buf_use); +} + +/* MCDI calls + */ +int ef100_get_mac_address(struct efx_nic *efx, u8 *mac_address, + int client_handle, bool empty_ok) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLIENT_MAC_ADDRESSES_OUT_LEN(1)); + MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_CLIENT_MAC_ADDRESSES_IN_LEN); + size_t outlen; + int rc; + + MCDI_SET_DWORD(inbuf, GET_CLIENT_MAC_ADDRESSES_IN_CLIENT_HANDLE, + client_handle); + + rc = efx_mcdi_rpc(efx, MC_CMD_GET_CLIENT_MAC_ADDRESSES, inbuf, + sizeof(inbuf), outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + + if (outlen >= MC_CMD_GET_CLIENT_MAC_ADDRESSES_OUT_LEN(1)) { + ether_addr_copy(mac_address, MCDI_PTR(outbuf, + GET_CLIENT_MAC_ADDRESSES_OUT_MAC_ADDRS)); + } else if (empty_ok) { + pci_warn(efx->pci_dev, + "No MAC address provisioned for client ID %#x.\n", + client_handle); + eth_zero_addr(mac_address); + } else { + return -ENOENT; + } + return 0; +} + +int efx_ef100_init_datapath_caps(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_V10_OUT_LEN); + struct ef100_nic_data *nic_data = efx->nic_data; + u8 vi_window_mode; + size_t outlen; + int rc; + + BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0); + + rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0, + outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + if (outlen < MC_CMD_GET_CAPABILITIES_V4_OUT_LEN) { + pci_err(efx->pci_dev, + "unable to read datapath firmware capabilities\n"); + return -EIO; + } + + nic_data->datapath_caps = MCDI_DWORD(outbuf, + GET_CAPABILITIES_OUT_FLAGS1); + nic_data->datapath_caps2 = MCDI_DWORD(outbuf, + GET_CAPABILITIES_V2_OUT_FLAGS2); + nic_data->datapath_caps3 = MCDI_DWORD(outbuf, + GET_CAPABILITIES_V10_OUT_FLAGS3); + + vi_window_mode = MCDI_BYTE(outbuf, + GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE); + rc = efx_mcdi_window_mode_to_stride(efx, vi_window_mode); + if (rc) + return rc; + + if (efx_ef100_has_cap(nic_data->datapath_caps2, TX_TSO_V3)) { + struct net_device *net_dev = efx->net_dev; +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_HW_ENC_FEATURES) +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_GSO_PARTIAL) + netdev_features_t tso = NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_PARTIAL | + NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM | + NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM; + + net_dev->features |= tso; + efx_add_hw_features(efx, tso); + net_dev->hw_enc_features |= tso; + /* EF100 HW can only offload outer checksums if they are UDP, + * so for GRE_CSUM we have to use GSO_PARTIAL. 
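+ * With GSO_PARTIAL the stack pre-resolves whatever part of the
+ * offload the device cannot do before handing over the super-packet.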
+ */ + net_dev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; +#else + netdev_features_t tso = NETIF_F_TSO | NETIF_F_TSO6 | + NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM | + NETIF_F_GSO_GRE; + + net_dev->features |= tso; + efx_add_hw_features(efx, tso); + net_dev->hw_enc_features |= tso; +#endif +#else + netdev_features_t tso = NETIF_F_TSO | NETIF_F_TSO6; + + net_dev->features |= tso; + efx_add_hw_features(efx, tso); +#endif + } + efx->num_mac_stats = MCDI_WORD(outbuf, + GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS); + if (outlen >= MC_CMD_GET_CAPABILITIES_V10_OUT_LEN) { + efx->supported_bitmap = + MCDI_DWORD(outbuf, + GET_CAPABILITIES_V10_OUT_SUPPORTED_QUEUE_SIZES); + efx->guaranteed_bitmap = + MCDI_DWORD(outbuf, + GET_CAPABILITIES_V10_OUT_GUARANTEED_QUEUE_SIZES); + } + pci_dbg(efx->pci_dev, + "firmware reports num_mac_stats = %u\n", + efx->num_mac_stats); +#ifdef CONFIG_SFC_VDPA + nic_data->vdpa_supported = efx_ef100_has_cap(nic_data->datapath_caps3, + CLIENT_CMD_VF_PROXY) && + efx->type->is_vf; +#endif + return 0; +} + +/* Event handling + */ +static int ef100_ev_probe(struct efx_channel *channel) +{ + /* Allocate an extra descriptor for the QMDA status completion entry */ + return efx_nic_alloc_buffer(channel->efx, &channel->eventq, + (channel->eventq_mask + 2) * + sizeof(efx_qword_t), GFP_KERNEL); +} + +static int ef100_ev_init(struct efx_channel *channel) +{ + struct ef100_nic_data *nic_data = channel->efx->nic_data; + + /* initial phase is 0 */ + clear_bit(channel->channel, nic_data->evq_phases); + + return efx_mcdi_ev_init(channel, false, false); +} + +static void ef100_ev_read_ack(struct efx_channel *channel) +{ + efx_dword_t evq_prime; + + EFX_POPULATE_DWORD_2(evq_prime, + ERF_GZ_EVQ_ID, channel->channel, + ERF_GZ_IDX, channel->eventq_read_ptr & + channel->eventq_mask); + + efx_writed(channel->efx, &evq_prime, + efx_reg(channel->efx, ER_GZ_EVQ_INT_PRIME)); + +#ifdef EFX_NOT_UPSTREAM +#ifdef SFC_NAPI_DEBUG + channel->last_irq_reprime_jiffies = jiffies; +#endif +#endif +} + +static bool ef100_ev_mcdi_pending(struct efx_channel *channel) +{ + struct efx_nic *efx = channel->efx; + struct ef100_nic_data *nic_data; + unsigned int read_ptr; + efx_qword_t *p_event; + bool evq_phase; + bool ev_phase; + int ev_type; + + if (unlikely(!channel->enabled)) + return false; + + nic_data = efx->nic_data; + evq_phase = test_bit(channel->channel, nic_data->evq_phases); + read_ptr = channel->eventq_read_ptr; + + for (;;) { + p_event = efx_event(channel, read_ptr++); + ev_phase = !!EFX_QWORD_FIELD(*p_event, ESF_GZ_EV_RXPKTS_PHASE); + if (ev_phase != evq_phase) + return false; + + ev_type = EFX_QWORD_FIELD(*p_event, ESF_GZ_E_TYPE); + if (ev_type == ESE_GZ_EF100_EV_MCDI) + return true; + } +} + +static void efx_mcdi_mport_journal_event(struct efx_nic *efx) +{ + /* Only process the event after the PF is fully up */ + if (efx->mae && (efx->state == STATE_NET_UP)) + schedule_work(&efx->mae->mport_work); +} + +static int ef100_ev_mcdi(struct efx_channel *channel, + efx_qword_t *p_event, int quota) +{ + int rc = 0, spent = 0; + + if (!efx_mcdi_process_event(channel, p_event) && + !efx_mcdi_port_process_event_common(channel, p_event, + &rc, quota)) { + int code = EFX_QWORD_FIELD(*p_event, MCDI_EVENT_CODE); + struct efx_nic *efx = channel->efx; + + switch (code) { + case MCDI_EVENT_CODE_DYNAMIC_SENSORS_STATE_CHANGE: + case MCDI_EVENT_CODE_DYNAMIC_SENSORS_CHANGE: + efx_mcdi_dynamic_sensor_event(efx, p_event); + break; + case MCDI_EVENT_CODE_MPORT_JOURNAL_CHANGE: + efx_mcdi_mport_journal_event(efx); + 
break; + default: + netif_info(efx, drv, efx->net_dev, + "Unhandled MCDI event " EFX_QWORD_FMT " code %d\n", + EFX_QWORD_VAL(*p_event), code); + } + } + if (rc > 0) + spent += rc; + else if (rc < 0) + spent++; + return spent; +} + +static void efx_ef100_handle_driver_generated_event(struct efx_channel *channel, + efx_qword_t *event) +{ + struct efx_nic *efx = channel->efx; + struct efx_rx_queue *rx_queue; + u32 subcode; + + subcode = EFX_QWORD_FIELD(*event, EFX_DWORD_0); + + switch (EFX_EF100_DRVGEN_CODE(subcode)) { + case EFX_EF100_TEST: + netif_info(efx, drv, efx->net_dev, + "Driver initiated event " EFX_QWORD_FMT "\n", + EFX_QWORD_VAL(*event)); + break; + case EFX_EF100_REFILL: + /* The queue must be empty, so we won't receive any rx + * events, so efx_process_channel() won't refill the + * queue. Refill it here + */ + efx_for_each_channel_rx_queue(rx_queue, channel) + if (EFX_EF100_DRVGEN_DATA(subcode) == + efx_rx_queue_index(rx_queue)) + efx_fast_push_rx_descriptors(rx_queue, true); + break; + default: + netif_err(efx, hw, efx->net_dev, + "channel %d unknown driver event type %u" + " (data " EFX_QWORD_FMT ")\n", + channel->channel, (unsigned int) subcode, + EFX_QWORD_VAL(*event)); + } +} + +static int ef100_ev_process(struct efx_channel *channel, int quota) +{ + struct efx_nic *efx = channel->efx; + struct ef100_nic_data *nic_data; + bool evq_phase, old_evq_phase; + unsigned int read_ptr; + efx_qword_t *p_event; + int spent = 0; + bool ev_phase; + int ev_type; + + if (unlikely(!channel->enabled)) + return 0; + + nic_data = efx->nic_data; + evq_phase = test_bit(channel->channel, nic_data->evq_phases); + old_evq_phase = evq_phase; + read_ptr = channel->eventq_read_ptr; + BUILD_BUG_ON(ESF_GZ_EV_RXPKTS_PHASE_LBN != ESF_GZ_EV_TXCMPL_PHASE_LBN); + + while (spent < quota) { + p_event = efx_event(channel, read_ptr); + + ev_phase = !!EFX_QWORD_FIELD(*p_event, ESF_GZ_EV_RXPKTS_PHASE); + if (ev_phase != evq_phase) + break; + + netif_vdbg(efx, drv, efx->net_dev, + "processing event on %d " EFX_QWORD_FMT "\n", + channel->channel, EFX_QWORD_VAL(*p_event)); + + ev_type = EFX_QWORD_FIELD(*p_event, ESF_GZ_E_TYPE); + + switch (ev_type) { + case ESE_GZ_EF100_EV_RX_PKTS: + spent += efx_ef100_ev_rx(channel, p_event); + break; + case ESE_GZ_EF100_EV_MCDI: + spent += ef100_ev_mcdi(channel, p_event, + quota - spent); + break; + case ESE_GZ_EF100_EV_TX_COMPLETION: + ef100_ev_tx(channel, p_event); + break; + case ESE_GZ_EF100_EV_DRIVER: + efx_ef100_handle_driver_generated_event(channel, + p_event); + break; + default: + netif_info(efx, drv, efx->net_dev, + "Unhandled event " EFX_QWORD_FMT "\n", + EFX_QWORD_VAL(*p_event)); + } + + ++read_ptr; + if ((read_ptr & channel->eventq_mask) == 0) + evq_phase = !evq_phase; + +#if defined(EFX_NOT_UPSTREAM) && defined(CONFIG_SFC_BUSYPOLL) + if (efx->interrupt_mode == EFX_INT_MODE_POLLED) + if ((read_ptr % 512) == 0) { + /* Poke EVQ_INT_PRIME once in a while */ + channel->eventq_read_ptr = read_ptr; + ef100_ev_read_ack(channel); + } +#endif + } + + channel->eventq_read_ptr = read_ptr; + if (evq_phase != old_evq_phase) + change_bit(channel->channel, nic_data->evq_phases); + +#if defined(EFX_NOT_UPSTREAM) && defined(CONFIG_SFC_BUSYPOLL) + if (efx->interrupt_mode == EFX_INT_MODE_POLLED) + /* always return quota so we're immediately rescheduled. 
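+ * (there is no interrupt to re-prime in polled mode, so the channel
+ * must never be allowed to drop out of the NAPI schedule).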
*/ + spent = quota; +#endif + + if (spent > quota) + return quota; + + return spent; +} + +static irqreturn_t ef100_msi_interrupt(int irq, void *dev_id) +{ + struct efx_msi_context *context = dev_id; + struct efx_nic *efx = context->efx; + struct efx_channel *channel; + + channel = efx_get_channel(efx, context->index); + netif_vdbg(efx, intr, efx->net_dev, + "IRQ %d on CPU %d\n", irq, raw_smp_processor_id()); + +#ifdef EFX_NOT_UPSTREAM +#ifdef SFC_NAPI_DEBUG + channel->last_irq_jiffies = jiffies; +#endif +#endif + + if (likely(READ_ONCE(efx->irq_soft_enabled))) { + /* Note test interrupts */ + if (context->index == efx->irq_level) + efx->last_irq_cpu = raw_smp_processor_id(); + + /* Schedule processing of the channel */ + efx_schedule_channel_irq(channel); + } + + return IRQ_HANDLED; +} + +int ef100_phy_probe(struct efx_nic *efx) +{ + struct efx_mcdi_phy_data *phy_data; + int rc; + + /* Probe for the PHY */ + efx->phy_data = kzalloc(sizeof(struct efx_mcdi_phy_data), GFP_KERNEL); + if (efx->phy_data == NULL) + return -ENOMEM; + + rc = efx_mcdi_get_phy_cfg(efx, efx->phy_data); + if (rc) + return rc; + + /* Populate driver and ethtool settings */ + phy_data = efx->phy_data; + mcdi_to_ethtool_linkset(efx, phy_data->media, phy_data->supported_cap, + efx->link_advertising); + efx->fec_config = mcdi_fec_caps_to_ethtool(phy_data->supported_cap, + false); + + /* Default to Autonegotiated flow control if the PHY supports it */ + efx->wanted_fc = EFX_FC_RX | EFX_FC_TX; + if (phy_data->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN)) + efx->wanted_fc |= EFX_FC_AUTO; + efx_link_set_wanted_fc(efx, efx->wanted_fc); + + /* Push settings to the PHY. Failure is not fatal, the user can try to + * fix it using ethtool. + */ + rc = efx_mcdi_port_reconfigure(efx); + if (rc && rc != -EPERM) + netif_warn(efx, drv, efx->net_dev, + "could not initialise PHY settings\n"); + + return 0; +} + +int ef100_filter_table_probe(struct efx_nic *efx) +{ + struct ef100_nic_data *nic_data = efx->nic_data; + bool additional_rss; + + additional_rss = efx_ef100_has_cap(nic_data->datapath_caps, + ADDITIONAL_RSS_MODES); + + return efx_mcdi_filter_table_probe(efx, additional_rss); +} + +static int ef100_filter_table_init(struct efx_nic *efx) +{ + struct ef100_nic_data *nic_data = efx->nic_data; + bool encap = efx_ef100_has_cap(nic_data->datapath_caps, + VXLAN_NVGRE); + + return efx_mcdi_filter_table_init(efx, true, encap); +} + +static int ef100_filter_table_up(struct efx_nic *efx) +{ + struct ef100_nic_data *nic_data = efx->nic_data; + int rc = 0, rc2; + + down_write(&efx->filter_sem); + if (nic_data->filters_up) + goto out; + + rc = efx_mcdi_filter_table_up(efx); +out: + nic_data->filters_up = !rc; + up_write(&efx->filter_sem); + + if (!rc) { + rc2 = efx_tc_insert_rep_filters(efx); + if (rc2) + netif_warn(efx, drv, efx->net_dev, + "Failed to insert representor filters, rc %d\n", + rc2); + } + return rc; +} + +static void ef100_filter_table_down(struct efx_nic *efx) +{ + struct ef100_nic_data *nic_data = efx->nic_data; + + efx_tc_remove_rep_filters(efx); + + down_write(&efx->filter_sem); + if (!nic_data->filters_up) + goto out; + + efx_mcdi_filter_table_down(efx); + + nic_data->filters_up = false; +out: + up_write(&efx->filter_sem); +} + +static int efx_ef100_rx_push_rss_config(struct efx_nic *efx, bool user, + const u32 *rx_indir_table, + const u8 *key) +{ + struct ef100_nic_data *nic_data = efx->nic_data; + + if (efx_ef100_has_cap(nic_data->datapath_caps, RX_RSS_LIMITED)) + return -EOPNOTSUPP; + /* on EF100 we have many available RSS 
contexts, so use the PF version + * of push_rss_config for both PFs and VFs rather than the "VF" + * version that's conservative about allocating RSS contexts. + */ + return efx_mcdi_rx_push_rss_config(efx, user, rx_indir_table, key); +} + +/* Other + */ +static int ef100_reconfigure_mac(struct efx_nic *efx, bool mtu_only) +{ + struct ef100_nic_data *nic_data = efx->nic_data; + int rc; + + WARN_ON(!mutex_is_locked(&efx->mac_lock)); + + efx_mcdi_filter_sync_rx_mode(efx); + + rc = efx_mcdi_set_mac(efx); + if (rc == -EPERM && mtu_only && + efx_ef100_has_cap(nic_data->datapath_caps, SET_MAC_ENHANCED)) + rc = efx_mcdi_set_mtu(efx); +#ifdef EFX_NOT_UPSTREAM + /* XXX temporary hack to allow VFs to set a software MTU + * See SWNETLINUX-4196, FWRIVERHD-1507. + */ + if (rc == -EPERM && efx->type->is_vf) + return 0; +#endif + return rc; +} + + +static enum reset_type ef100_map_reset_reason(enum reset_type reason) +{ + if (reason == RESET_TYPE_TX_WATCHDOG) + return reason; + return RESET_TYPE_DISABLE; +} + +static int ef100_map_reset_flags(u32 *flags) +{ + /* Only perform a RESET_TYPE_ALL because we don't support MC_REBOOTs */ + if ((*flags & EF100_RESET_PORT)) { + *flags &= ~EF100_RESET_PORT; + return RESET_TYPE_ALL; + } + if (*flags & ETH_RESET_MGMT) { + *flags &= ~ETH_RESET_MGMT; + return RESET_TYPE_DISABLE; + } + + return -EINVAL; +} + +static int ef100_reset(struct efx_nic *efx, enum reset_type reset_type) +{ + int rc; + + if (efx->net_dev) + dev_close(efx->net_dev); + + if (reset_type == RESET_TYPE_TX_WATCHDOG) { + netif_device_attach(efx->net_dev); + __clear_bit(reset_type, &efx->reset_pending); + efx->state = STATE_NET_DOWN; + rc = dev_open(efx->net_dev, NULL); + } else if (reset_type == RESET_TYPE_ALL) { + rc = efx_mcdi_reset(efx, reset_type); + if (rc) + return rc; + + efx->last_reset = jiffies; + netif_device_attach(efx->net_dev); + + rc = dev_open(efx->net_dev, NULL); + } else { + rc = 1; /* Leave the device closed */ + } + return rc; +} + +static void ef100_common_stat_mask(unsigned long *mask) +{ + __set_bit(EF100_STAT_port_rx_packets, mask); + __set_bit(EF100_STAT_port_tx_packets, mask); + __set_bit(EF100_STAT_port_rx_bytes, mask); + __set_bit(EF100_STAT_port_tx_bytes, mask); + __set_bit(EF100_STAT_port_rx_multicast, mask); + __set_bit(EF100_STAT_port_rx_bad, mask); + __set_bit(EF100_STAT_port_rx_align_error, mask); + __set_bit(EF100_STAT_port_rx_overflow, mask); +} + +static void ef100_ethtool_stat_mask(unsigned long *mask) +{ + __set_bit(EF100_STAT_port_tx_pause, mask); + __set_bit(EF100_STAT_port_tx_unicast, mask); + __set_bit(EF100_STAT_port_tx_multicast, mask); + __set_bit(EF100_STAT_port_tx_broadcast, mask); + __set_bit(EF100_STAT_port_tx_lt64, mask); + __set_bit(EF100_STAT_port_tx_64, mask); + __set_bit(EF100_STAT_port_tx_65_to_127, mask); + __set_bit(EF100_STAT_port_tx_128_to_255, mask); + __set_bit(EF100_STAT_port_tx_256_to_511, mask); + __set_bit(EF100_STAT_port_tx_512_to_1023, mask); + __set_bit(EF100_STAT_port_tx_1024_to_15xx, mask); + __set_bit(EF100_STAT_port_tx_15xx_to_jumbo, mask); + __set_bit(EF100_STAT_port_rx_good, mask); + __set_bit(EF100_STAT_port_rx_bad_bytes, mask); + __set_bit(EF100_STAT_port_rx_pause, mask); + __set_bit(EF100_STAT_port_rx_unicast, mask); + __set_bit(EF100_STAT_port_rx_broadcast, mask); + __set_bit(EF100_STAT_port_rx_lt64, mask); + __set_bit(EF100_STAT_port_rx_64, mask); + __set_bit(EF100_STAT_port_rx_65_to_127, mask); + __set_bit(EF100_STAT_port_rx_128_to_255, mask); + __set_bit(EF100_STAT_port_rx_256_to_511, mask); + 
__set_bit(EF100_STAT_port_rx_512_to_1023, mask); + __set_bit(EF100_STAT_port_rx_1024_to_15xx, mask); + __set_bit(EF100_STAT_port_rx_15xx_to_jumbo, mask); + __set_bit(EF100_STAT_port_rx_gtjumbo, mask); + __set_bit(EF100_STAT_port_rx_bad_gtjumbo, mask); + __set_bit(EF100_STAT_port_rx_length_error, mask); + __set_bit(EF100_STAT_port_rx_nodesc_drops, mask); + __set_bit(GENERIC_STAT_rx_nodesc_trunc, mask); + __set_bit(GENERIC_STAT_rx_noskb_drops, mask); +} + +#define EF100_DMA_STAT(ext_name, mcdi_name) \ + [EF100_STAT_ ## ext_name] = \ + { #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name } + +static const struct efx_hw_stat_desc ef100_stat_desc[EF100_STAT_COUNT] = { + EF100_DMA_STAT(port_tx_bytes, TX_BYTES), + EF100_DMA_STAT(port_tx_packets, TX_PKTS), + EF100_DMA_STAT(port_tx_pause, TX_PAUSE_PKTS), + EF100_DMA_STAT(port_tx_unicast, TX_UNICAST_PKTS), + EF100_DMA_STAT(port_tx_multicast, TX_MULTICAST_PKTS), + EF100_DMA_STAT(port_tx_broadcast, TX_BROADCAST_PKTS), + EF100_DMA_STAT(port_tx_lt64, TX_LT64_PKTS), + EF100_DMA_STAT(port_tx_64, TX_64_PKTS), + EF100_DMA_STAT(port_tx_65_to_127, TX_65_TO_127_PKTS), + EF100_DMA_STAT(port_tx_128_to_255, TX_128_TO_255_PKTS), + EF100_DMA_STAT(port_tx_256_to_511, TX_256_TO_511_PKTS), + EF100_DMA_STAT(port_tx_512_to_1023, TX_512_TO_1023_PKTS), + EF100_DMA_STAT(port_tx_1024_to_15xx, TX_1024_TO_15XX_PKTS), + EF100_DMA_STAT(port_tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS), + EF100_DMA_STAT(port_rx_bytes, RX_BYTES), + EF100_DMA_STAT(port_rx_packets, RX_PKTS), + EF100_DMA_STAT(port_rx_good, RX_GOOD_PKTS), + EF100_DMA_STAT(port_rx_bad, RX_BAD_FCS_PKTS), + EF100_DMA_STAT(port_rx_bad_bytes, RX_BAD_BYTES), + EF100_DMA_STAT(port_rx_pause, RX_PAUSE_PKTS), + EF100_DMA_STAT(port_rx_unicast, RX_UNICAST_PKTS), + EF100_DMA_STAT(port_rx_multicast, RX_MULTICAST_PKTS), + EF100_DMA_STAT(port_rx_broadcast, RX_BROADCAST_PKTS), + EF100_DMA_STAT(port_rx_lt64, RX_UNDERSIZE_PKTS), + EF100_DMA_STAT(port_rx_64, RX_64_PKTS), + EF100_DMA_STAT(port_rx_65_to_127, RX_65_TO_127_PKTS), + EF100_DMA_STAT(port_rx_128_to_255, RX_128_TO_255_PKTS), + EF100_DMA_STAT(port_rx_256_to_511, RX_256_TO_511_PKTS), + EF100_DMA_STAT(port_rx_512_to_1023, RX_512_TO_1023_PKTS), + EF100_DMA_STAT(port_rx_1024_to_15xx, RX_1024_TO_15XX_PKTS), + EF100_DMA_STAT(port_rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS), + EF100_DMA_STAT(port_rx_gtjumbo, RX_GTJUMBO_PKTS), + EF100_DMA_STAT(port_rx_bad_gtjumbo, RX_JABBER_PKTS), + EF100_DMA_STAT(port_rx_align_error, RX_ALIGN_ERROR_PKTS), + EF100_DMA_STAT(port_rx_length_error, RX_LENGTH_ERROR_PKTS), + EF100_DMA_STAT(port_rx_overflow, RX_OVERFLOW_PKTS), + EF100_DMA_STAT(port_rx_nodesc_drops, RX_NODESC_DROPS), + EFX_GENERIC_SW_STAT(rx_nodesc_trunc), + EFX_GENERIC_SW_STAT(rx_noskb_drops), +}; + +static size_t ef100_describe_stats(struct efx_nic *efx, u8 *names) +{ + DECLARE_BITMAP(mask, EF100_STAT_COUNT) = {}; + + ef100_ethtool_stat_mask(mask); + return efx_nic_describe_stats(ef100_stat_desc, EF100_STAT_COUNT, + mask, names); +} + +static size_t ef100_update_stats_common(struct efx_nic *efx, u64 *full_stats, + struct rtnl_link_stats64 *core_stats) +{ + struct ef100_nic_data *nic_data = efx->nic_data; + DECLARE_BITMAP(mask, EF100_STAT_COUNT) = {}; + size_t stats_count = 0, index; + u64 *stats = nic_data->stats; + + ef100_ethtool_stat_mask(mask); + + if (full_stats) { + for_each_set_bit(index, mask, EF100_STAT_COUNT) { + if (ef100_stat_desc[index].name) { + *full_stats++ = stats[index]; + ++stats_count; + } + } + } + + if (!core_stats) + return stats_count; + + core_stats->rx_packets = 
stats[EF100_STAT_port_rx_packets]; + core_stats->tx_packets = stats[EF100_STAT_port_tx_packets]; + core_stats->rx_bytes = stats[EF100_STAT_port_rx_bytes]; + core_stats->tx_bytes = stats[EF100_STAT_port_tx_bytes]; + core_stats->rx_dropped = stats[EF100_STAT_port_rx_nodesc_drops] + + stats[GENERIC_STAT_rx_nodesc_trunc] + + stats[GENERIC_STAT_rx_noskb_drops]; + core_stats->multicast = stats[EF100_STAT_port_rx_multicast]; + core_stats->rx_length_errors = + stats[EF100_STAT_port_rx_gtjumbo] + + stats[EF100_STAT_port_rx_length_error]; + core_stats->rx_crc_errors = stats[EF100_STAT_port_rx_bad]; + core_stats->rx_frame_errors = + stats[EF100_STAT_port_rx_align_error]; + core_stats->rx_fifo_errors = stats[EF100_STAT_port_rx_overflow]; + core_stats->rx_errors = (core_stats->rx_length_errors + + core_stats->rx_crc_errors + + core_stats->rx_frame_errors); + + return stats_count; +} + +static size_t ef100_update_stats(struct efx_nic *efx, + u64 *full_stats, + struct rtnl_link_stats64 *core_stats) + __acquires(efx->stats_lock) +{ + __le64 *mc_stats = kmalloc_array(efx->num_mac_stats, sizeof(__le64), + GFP_ATOMIC); + struct ef100_nic_data *nic_data = efx->nic_data; + DECLARE_BITMAP(mask, EF100_STAT_COUNT) = {}; + u64 *stats = nic_data->stats; + + spin_lock_bh(&efx->stats_lock); + + ef100_common_stat_mask(mask); + ef100_ethtool_stat_mask(mask); + + efx_nic_copy_stats(efx, mc_stats); + efx_nic_update_stats(ef100_stat_desc, EF100_STAT_COUNT, mask, + stats, + efx->mc_initial_stats, mc_stats); + + kfree(mc_stats); + + return ef100_update_stats_common(efx, full_stats, core_stats); +} + +static void ef100_pull_stats(struct efx_nic *efx) +{ + efx_mcdi_mac_pull_stats(efx); + if (!efx->stats_initialised) { + efx_reset_sw_stats(efx); + efx_ptp_reset_stats(efx); + efx_nic_reset_stats(efx); + efx->stats_initialised = true; + } +} + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_NEED_GET_PHYS_PORT_ID) +static int efx_ef100_get_phys_port_id(struct efx_nic *efx, + struct netdev_phys_item_id *ppid) +{ + struct ef100_nic_data *nic_data = efx->nic_data; + + if (!is_valid_ether_addr(nic_data->port_id)) + return -EOPNOTSUPP; + + ppid->id_len = ETH_ALEN; + memcpy(ppid->id, nic_data->port_id, ppid->id_len); + + return 0; +} +#endif + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_TC_OFFLOAD) +static struct net_device *ef100_get_vf_rep(struct efx_nic *efx, unsigned int vf) +{ +#if defined(CONFIG_SFC_SRIOV) + struct ef100_nic_data *nic_data = efx->nic_data; + + if (vf < nic_data->vf_rep_count) + return nic_data->vf_rep[vf]; +#endif + return NULL; +} + +static void ef100_detach_remote_reps(struct efx_nic *efx) +{ + struct ef100_nic_data *nic_data = efx->nic_data; + struct net_device *rep_dev; + struct efx_rep *efv; + + ASSERT_RTNL(); + netif_dbg(efx, drv, efx->net_dev, "Detaching remote reps\n"); + list_for_each_entry(efv, &nic_data->rem_reps, list) { + rep_dev = efv->net_dev; + if (!rep_dev) + continue; + netif_carrier_off(rep_dev); + /* See efx_device_detach_sync() */ + netif_tx_lock_bh(rep_dev); + netif_tx_stop_all_queues(rep_dev); + netif_tx_unlock_bh(rep_dev); + } +} + +static void ef100_attach_remote_reps(struct efx_nic *efx) +{ + struct ef100_nic_data *nic_data = efx->nic_data; + struct net_device *rep_dev; + struct efx_rep *efv; + + ASSERT_RTNL(); + netif_dbg(efx, drv, efx->net_dev, "Attaching remote reps\n"); + list_for_each_entry(efv, &nic_data->rem_reps, list) { + rep_dev = efv->net_dev; + if (!rep_dev) + continue; + netif_tx_wake_all_queues(rep_dev); + netif_carrier_on(rep_dev); + } +} + +void __ef100_detach_reps(struct 
efx_nic *efx) +{ +#if defined(CONFIG_SFC_SRIOV) + struct ef100_nic_data *nic_data = efx->nic_data; + struct net_device *rep_dev; + unsigned int vf; + + netif_dbg(efx, drv, efx->net_dev, "Detaching %d vfreps\n", + nic_data->vf_rep_count); + for (vf = 0; vf < nic_data->vf_rep_count; vf++) { + rep_dev = nic_data->vf_rep[vf]; + if (!rep_dev) + continue; + netif_carrier_off(rep_dev); + /* See efx_device_detach_sync() */ + netif_tx_lock_bh(rep_dev); + netif_tx_stop_all_queues(rep_dev); + netif_tx_unlock_bh(rep_dev); + } +#endif +} + +static void ef100_detach_reps(struct efx_nic *efx) +{ + struct ef100_nic_data *nic_data = efx->nic_data; + + spin_lock_bh(&nic_data->vf_reps_lock); + __ef100_detach_reps(efx); + spin_unlock_bh(&nic_data->vf_reps_lock); + ef100_detach_remote_reps(efx); +} + +void __ef100_attach_reps(struct efx_nic *efx) +{ +#if defined(CONFIG_SFC_SRIOV) + struct ef100_nic_data *nic_data = efx->nic_data; + struct net_device *rep_dev; + unsigned int vf; + + netif_dbg(efx, drv, efx->net_dev, "Attaching %d vfreps\n", + nic_data->vf_rep_count); + for (vf = 0; vf < nic_data->vf_rep_count; vf++) { + rep_dev = nic_data->vf_rep[vf]; + if (!rep_dev) + continue; + netif_tx_wake_all_queues(rep_dev); + netif_carrier_on(rep_dev); + } +#endif +} + +static void ef100_attach_reps(struct efx_nic *efx) +{ + struct ef100_nic_data *nic_data = efx->nic_data; + + spin_lock_bh(&nic_data->vf_reps_lock); + __ef100_attach_reps(efx); + spin_unlock_bh(&nic_data->vf_reps_lock); + ef100_attach_remote_reps(efx); +} +#else /* EFX_TC_OFFLOAD */ +void __ef100_detach_reps(struct efx_nic *efx) +{ +} + +void __ef100_attach_reps(struct efx_nic *efx) +{ +} +#endif + +#if defined(EFX_USE_KCOMPAT) && defined(EFX_TC_OFFLOAD) && !defined(EFX_HAVE_FLOW_INDR_BLOCK_CB_REGISTER) && !defined(EFX_HAVE_FLOW_INDR_DEV_REGISTER) +static struct ef100_udp_tunnel *__efx_ef100_udp_tnl_find_port( + struct ef100_nic_data *nic_data, __be16 port) + __must_hold(nic_data->udp_tunnels_lock) +{ + struct ef100_udp_tunnel *tnl; + + list_for_each_entry(tnl, &nic_data->udp_tunnels, list) + if (port == tnl->port) + return tnl; + return NULL; +} + +static void efx_ef100_udp_tnl_add_port(struct efx_nic *efx, + struct ef100_udp_tunnel tnl) +{ + struct ef100_nic_data *nic_data = efx->nic_data; + struct ef100_udp_tunnel *entry; + + spin_lock(&nic_data->udp_tunnels_lock); + entry = __efx_ef100_udp_tnl_find_port(nic_data, tnl.port); + if (entry) /* EEXIST */ + goto out; + entry = kmalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) /* ENOMEM */ + goto out; + *entry = tnl; + list_add_tail(&entry->list, &nic_data->udp_tunnels); +out: + spin_unlock(&nic_data->udp_tunnels_lock); +} + +enum efx_encap_type efx_ef100_udp_tnl_lookup_port(struct efx_nic *efx, + __be16 port) +{ + struct ef100_nic_data *nic_data = efx->nic_data; + struct ef100_udp_tunnel *entry; + enum efx_encap_type rc; + + spin_lock(&nic_data->udp_tunnels_lock); + entry = __efx_ef100_udp_tnl_find_port(nic_data, port); + if (entry) + rc = entry->type; + else + rc = EFX_ENCAP_TYPE_NONE; + spin_unlock(&nic_data->udp_tunnels_lock); + return rc; +} + +static void efx_ef100_udp_tnl_del_port(struct efx_nic *efx, + struct ef100_udp_tunnel tnl) +{ + struct ef100_nic_data *nic_data = efx->nic_data; + struct ef100_udp_tunnel *entry; + + spin_lock(&nic_data->udp_tunnels_lock); + entry = __efx_ef100_udp_tnl_find_port(nic_data, tnl.port); + if (entry && entry->type == tnl.type) { + list_del(&entry->list); + kfree(entry); + } + spin_unlock(&nic_data->udp_tunnels_lock); +} +#endif + +static int 
efx_ef100_irq_test_generate(struct efx_nic *efx)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_TRIGGER_INTERRUPT_IN_LEN);
+
+	BUILD_BUG_ON(MC_CMD_TRIGGER_INTERRUPT_OUT_LEN != 0);
+
+	MCDI_SET_DWORD(inbuf, TRIGGER_INTERRUPT_IN_INTR_LEVEL, efx->irq_level);
+	return efx_mcdi_rpc_quiet(efx, MC_CMD_TRIGGER_INTERRUPT,
+				  inbuf, sizeof(inbuf), NULL, 0, NULL);
+}
+
+static void efx_ef100_ev_test_generate(struct efx_channel *channel)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN);
+	struct efx_nic *efx = channel->efx;
+	efx_qword_t event;
+	int rc;
+
+	EFX_POPULATE_QWORD_2(event,
+			     ESF_GZ_E_TYPE, ESE_GZ_EF100_EV_DRIVER,
+			     ESF_GZ_DRIVER_DATA, EFX_EF100_TEST);
+
+	MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel);
+
+	/* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has
+	 * already swapped the data to little-endian order.
+	 */
+	memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0],
+	       sizeof(efx_qword_t));
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_DRIVER_EVENT, inbuf, sizeof(inbuf),
+			  NULL, 0, NULL);
+	if (rc && (rc != -ENETDOWN))
+		goto fail;
+
+	return;
+
+fail:
+	WARN_ON(true);
+	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+}
+
+static unsigned int efx_ef100_mcdi_rpc_timeout(struct efx_nic *efx,
+					       unsigned int cmd)
+{
+	switch (cmd) {
+	case MC_CMD_NVRAM_ERASE:
+	case MC_CMD_NVRAM_UPDATE_FINISH:
+#ifdef EFX_C_MODEL
+	case MC_CMD_ENTITY_RESET:
+#endif
+		return MCDI_RPC_LONG_TIMEOUT;
+	default:
+		return MCDI_RPC_TIMEOUT;
+	}
+}
+
+static unsigned int ef100_check_caps(const struct efx_nic *efx,
+				     u8 flag,
+				     u32 offset)
+{
+	const struct ef100_nic_data *nic_data = efx->nic_data;
+
+	switch (offset) {
+	case MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS1_OFST:
+		return nic_data->datapath_caps & BIT_ULL(flag);
+	case MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS2_OFST:
+		return nic_data->datapath_caps2 & BIT_ULL(flag);
+	default:
+		return 0;
+	}
+}
+
+static unsigned int efx_ef100_recycle_ring_size(const struct efx_nic *efx)
+{
+	/* Maximum link speed for Riverhead is 100G */
+	return 10 * EFX_RECYCLE_RING_SIZE_10G;
+}
+
+static int efx_ef100_get_base_mport(struct efx_nic *efx)
+{
+	struct ef100_nic_data *nic_data = efx->nic_data;
+	u32 selector, id;
+	int rc;
+
+	/* Construct mport selector for "physical network port" */
+	efx_mae_mport_wire(efx, &selector);
+	/* Look up actual mport ID */
+	rc = efx_mae_lookup_mport(efx, selector, &id);
+	if (rc)
+		return rc;
+	/* The ID should always fit in 16 bits, because that's how wide the
+	 * corresponding fields in the RX prefix & TX override descriptor are.
+	 */
+	if (id >> 16)
+		netif_warn(efx, probe, efx->net_dev, "Bad base m-port id %#x\n",
+			   id);
+	nic_data->base_mport = id;
+	nic_data->have_mport = true;
+
+	/* Construct mport selector for "calling PF" */
+	efx_mae_mport_uplink(efx, &selector);
+	/* Look up actual mport ID */
+	rc = efx_mae_lookup_mport(efx, selector, &id);
+	if (rc)
+		return rc;
+	if (id >> 16)
+		netif_warn(efx, probe, efx->net_dev, "Bad own m-port id %#x\n",
+			   id);
+	nic_data->own_mport = id;
+	nic_data->have_own_mport = true;
+	return 0;
+}
+
+/* BAR configuration.
+ * To change BAR configuration we tear down the current configuration (which
+ * leaves the hardware in the PROBED state), and then initialise the new
+ * BAR state.
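+ *
+ * An illustrative use of this mechanism, via the bar_config sysfs
+ * attribute defined below (the PCI address here is only an example):
+ *
+ *   echo vdpa > /sys/bus/pci/devices/0000:06:00.1/bar_config
+ *
+ * which finalises the current netdev state and then initialises the
+ * function in its vDPA configuration.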
+ */
+static struct {
+	int (*init)(struct efx_probe_data *probe_data);
+	void (*fini)(struct efx_probe_data *probe_data);
+} bar_config_std[] = {
+	[EF100_BAR_CONFIG_EF100] = {
+		.init = ef100_probe_netdev,
+		.fini = ef100_remove_netdev
+	},
+#ifdef CONFIG_SFC_VDPA
+	[EF100_BAR_CONFIG_VDPA] = {
+		.init = ef100_vdpa_init,
+		.fini = ef100_vdpa_fini
+	},
+#endif
+	[EF100_BAR_CONFIG_NONE] = {
+		.init = NULL,
+		.fini = NULL
+	},
+};
+
+/* Keep this in sync with the definition of enum ef100_bar_config. */
+static char *bar_config_name[] = {
+	[EF100_BAR_CONFIG_NONE] = "None",
+	[EF100_BAR_CONFIG_EF100] = "EF100",
+	[EF100_BAR_CONFIG_VDPA] = "vDPA",
+};
+
+int efx_ef100_set_bar_config(struct efx_nic *efx,
+			     enum ef100_bar_config new_config)
+{
+	struct ef100_nic_data *nic_data = efx->nic_data;
+	struct efx_probe_data *probe_data;
+	enum ef100_bar_config old_config;
+	int rc;
+
+	if (WARN_ON_ONCE(nic_data->bar_config > EF100_BAR_CONFIG_VDPA))
+		return -EINVAL;
+
+#ifdef CONFIG_SFC_VDPA
+	/* Current EF100 hardware supports vDPA on VFs only
+	 * (see SF-122427-SW)
+	 */
+	if (new_config == EF100_BAR_CONFIG_VDPA && !nic_data->vdpa_supported) {
+		pci_err(efx->pci_dev, "vdpa over PF not supported: %s\n",
+			efx->name);
+		return -EOPNOTSUPP;
+	}
+#endif
+	mutex_lock(&nic_data->bar_config_lock);
+	old_config = nic_data->bar_config;
+	if (new_config == old_config) {
+		mutex_unlock(&nic_data->bar_config_lock);
+		return 0;
+	}
+
+	probe_data = container_of(efx, struct efx_probe_data, efx);
+	if (bar_config_std[old_config].fini)
+		bar_config_std[old_config].fini(probe_data);
+	nic_data->bar_config = EF100_BAR_CONFIG_NONE;
+
+	if (bar_config_std[new_config].init) {
+		rc = bar_config_std[new_config].init(probe_data);
+		if (rc) {
+			mutex_unlock(&nic_data->bar_config_lock);
+			return rc;
+		}
+	}
+
+	nic_data->bar_config = new_config;
+	mutex_unlock(&nic_data->bar_config_lock);
+
+	pci_info(efx->pci_dev, "BAR configuration changed to %s\n",
+		 bar_config_name[new_config]);
+	return 0;
+}
+
+#ifdef EFX_NOT_UPSTREAM
+static ssize_t bar_config_show(struct device *dev,
+			       struct device_attribute *attr, char *buf_out)
+{
+	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+	struct ef100_nic_data *nic_data = efx->nic_data;
+
+	if (WARN_ON_ONCE(nic_data->bar_config > EF100_BAR_CONFIG_VDPA))
+		return 0; /* this should not happen */
+	else
+		return scnprintf(buf_out, PAGE_SIZE, "%s\n",
+				 bar_config_name[nic_data->bar_config]);
+}
+
+static ssize_t bar_config_store(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t count)
+{
+	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+#ifdef CONFIG_SFC_VDPA
+	struct ef100_nic_data *nic_data = efx->nic_data;
+#endif
+	enum ef100_bar_config new_config;
+	int rc;
+
+	if (sysfs_streq(buf, "ef100"))
+		new_config = EF100_BAR_CONFIG_EF100;
+#ifdef CONFIG_SFC_VDPA
+	else if (sysfs_streq(buf, "vdpa"))
+		new_config = EF100_BAR_CONFIG_VDPA;
+#endif
+	else if (sysfs_streq(buf, "none"))
+		new_config = EF100_BAR_CONFIG_NONE;
+	else
+		return -EIO;
+
+#ifdef CONFIG_SFC_VDPA
+	/* Restrict bar_config writes when vdpa device has been created
+	 * using management interface
+	 */
+#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_VDPA_MGMT_INTERFACE)
+	/* When the vdpa management interface is not supported, a bar_config
+	 * change that would result in vdpa device deletion must be denied
+	 * if that device is in use.
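+	 * (With the management interface present, any change away from the
+	 * vDPA configuration is refused below and the vdpa device lifecycle
+	 * is driven through that interface instead.)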
+	 */
+	if (nic_data->bar_config == EF100_BAR_CONFIG_VDPA &&
+	    ef100_vdpa_dev_in_use(efx)) {
+#else
+	if (nic_data->bar_config == EF100_BAR_CONFIG_VDPA) {
+#endif
+		pci_warn(efx->pci_dev,
+			 "Device in use. Cannot change bar config\n");
+		return -EBUSY;
+	}
+#endif
+
+	rc = efx_ef100_set_bar_config(efx, new_config);
+	return rc ?: count;
+}
+
+static DEVICE_ATTR_RW(bar_config);
+#endif
+
+enum ef100_tlv_state_machine {
+	EF100_TLV_TYPE,
+	EF100_TLV_TYPE_CONT,
+	EF100_TLV_LENGTH,
+	EF100_TLV_VALUE
+};
+
+struct ef100_tlv_state {
+	enum ef100_tlv_state_machine state;
+	u64 value;
+	u32 value_offset;
+	u16 type;
+	u8 len;
+};
+
+static int ef100_tlv_feed(struct ef100_tlv_state *state, u8 byte)
+{
+	switch (state->state) {
+	case EF100_TLV_TYPE:
+		state->type = byte & 0x7f;
+		state->state = (byte & 0x80) ? EF100_TLV_TYPE_CONT
+					     : EF100_TLV_LENGTH;
+		/* Clear ready to read in a new entry */
+		state->value = 0;
+		state->value_offset = 0;
+		return 0;
+	case EF100_TLV_TYPE_CONT:
+		state->type |= byte << 7;
+		state->state = EF100_TLV_LENGTH;
+		return 0;
+	case EF100_TLV_LENGTH:
+		state->len = byte;
+		/* We only handle TLVs that fit in a u64 */
+		if (state->len > sizeof(state->value))
+			return -EOPNOTSUPP;
+		/* len may be zero, implying a value of zero */
+		state->state = state->len ? EF100_TLV_VALUE : EF100_TLV_TYPE;
+		return 0;
+	case EF100_TLV_VALUE:
+		state->value |= ((u64)byte) << (state->value_offset * 8);
+		state->value_offset++;
+		if (state->value_offset >= state->len)
+			state->state = EF100_TLV_TYPE;
+		return 0;
+	default: /* state machine error, can't happen */
+		WARN_ON_ONCE(1);
+		return -EIO;
+	}
+}
+
+static int ef100_process_design_param(struct efx_nic *efx,
+				      const struct ef100_tlv_state *reader)
+{
+	struct ef100_nic_data *nic_data = efx->nic_data;
+
+	switch (reader->type) {
+	case ESE_EF100_DP_GZ_PAD: /* padding, skip it */
+		return 0;
+	case ESE_EF100_DP_GZ_PARTIAL_TSTAMP_SUB_NANO_BITS:
+		/* Driver doesn't support timestamping yet, so we don't care */
+		return 0;
+	case ESE_EF100_DP_GZ_EVQ_UNSOL_CREDIT_SEQ_BITS:
+		/* Driver doesn't support unsolicited-event credits yet, so
+		 * we don't care
+		 */
+		return 0;
+	case ESE_EF100_DP_GZ_NMMU_GROUP_SIZE:
+		/* Driver doesn't manage the NMMU (so we don't care) */
+		return 0;
+	case ESE_EF100_DP_GZ_RX_L4_CSUM_PROTOCOLS:
+		/* Driver uses CHECKSUM_COMPLETE, so we don't care about
+		 * protocol checksum validation
+		 */
+		return 0;
+	case ESE_EF100_DP_GZ_TSO_MAX_HDR_LEN:
+		nic_data->tso_max_hdr_len = min_t(u64, reader->value, 0xffff);
+		return 0;
+	case ESE_EF100_DP_GZ_TSO_MAX_HDR_NUM_SEGS:
+		/* We always put HDR_NUM_SEGS=1 in our TSO descriptors */
+		if (!reader->value) {
+			netif_err(efx, probe, efx->net_dev,
+				  "TSO_MAX_HDR_NUM_SEGS < 1\n");
+			return -EOPNOTSUPP;
+		}
+		return 0;
+	case ESE_EF100_DP_GZ_RXQ_SIZE_GRANULARITY:
+	case ESE_EF100_DP_GZ_TXQ_SIZE_GRANULARITY:
+		/* Our TXQ and RXQ sizes are always power-of-two and thus divisible by
+		 * EFX_MIN_DMAQ_SIZE, so we just need to check that
+		 * EFX_MIN_DMAQ_SIZE is divisible by GRANULARITY.
+		 * This is very unlikely to fail.
+		 */
+		if (!reader->value || reader->value > efx_min_dmaq_size(efx) ||
+		    efx_min_dmaq_size(efx) % (u32)reader->value) {
+			netif_err(efx, probe, efx->net_dev,
+				  "%s size granularity is %llu, can't guarantee safety\n",
+				  reader->type == ESE_EF100_DP_GZ_RXQ_SIZE_GRANULARITY ?
"RXQ" : "TXQ", + reader->value); + return -EOPNOTSUPP; + } + return 0; + case ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_LEN: + nic_data->tso_max_payload_len = min_t(u64, reader->value, + GSO_LEGACY_MAX_SIZE); + netif_set_tso_max_size(efx->net_dev, + nic_data->tso_max_payload_len); + return 0; + case ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_NUM_SEGS: + nic_data->tso_max_payload_num_segs = min_t(u64, reader->value, 0xffff); + netif_set_tso_max_segs(efx->net_dev, nic_data->tso_max_payload_num_segs); + return 0; + case ESE_EF100_DP_GZ_TSO_MAX_NUM_FRAMES: + nic_data->tso_max_frames = min_t(u64, reader->value, 0xffff); + return 0; + case ESE_EF100_DP_GZ_COMPAT: + if (reader->value) { + netif_err(efx, probe, efx->net_dev, + "DP_COMPAT has unknown bits %#llx, driver not compatible with this hw\n", + reader->value); + return -EOPNOTSUPP; + } + return 0; + case ESE_EF100_DP_GZ_MEM2MEM_MAX_LEN: + /* Driver doesn't use mem2mem transfers */ + return 0; + case ESE_EF100_DP_GZ_EVQ_TIMER_TICK_NANOS: + /* Driver doesn't currently use EVQ_TIMER */ + return 0; + case ESE_EF100_DP_GZ_NMMU_PAGE_SIZES: + /* Driver doesn't manage the NMMU (so we don't care) */ + return 0; + case ESE_EF100_DP_GZ_VI_STRIDES: + /* We never try to set the VI stride, and we don't rely on + * being able to find VIs past VI 0 until after we've learned + * the current stride from MC_CMD_GET_CAPABILITIES. + * So the value of this shouldn't matter. + */ + if (reader->value != ESE_EF100_DP_GZ_VI_STRIDES_DEFAULT) + netif_dbg(efx, probe, efx->net_dev, + "NIC has other than default VI_STRIDES (mask " + "%#llx), early probing might use wrong one\n", + reader->value); + return 0; + case ESE_EF100_DP_GZ_RX_MAX_RUNT: + /* Driver doesn't look at L2_STATUS:LEN_ERR bit, so we don't + * care whether it indicates runt or overlength for any given + * packet, so we don't care about this parameter. + */ + return 0; + default: + /* Host interface says "Drivers should ignore design parameters + * that they do not recognise." + */ + netif_info(efx, probe, efx->net_dev, + "Ignoring unrecognised design parameter %u\n", + reader->type); + return 0; + } +} + +static int ef100_check_design_params(struct efx_nic *efx) +{ + struct ef100_tlv_state reader = {}; + u32 total_len, offset = 0; + efx_dword_t reg; + int rc = 0, i; + u32 data; + + efx_readd(efx, ®, ER_GZ_PARAMS_TLV_LEN); + total_len = EFX_DWORD_FIELD(reg, EFX_DWORD_0); + pci_dbg(efx->pci_dev, "%u bytes of design parameters\n", total_len); + while (offset < total_len) { + efx_readd(efx, ®, ER_GZ_PARAMS_TLV + offset); + data = EFX_DWORD_FIELD(reg, EFX_DWORD_0); + for (i = 0; i < sizeof(data); i++) { + rc = ef100_tlv_feed(&reader, data); + /* Got a complete value? 
+			 */
+			if (!rc && reader.state == EF100_TLV_TYPE)
+				rc = ef100_process_design_param(efx, &reader);
+			if (rc)
+				goto out;
+			data >>= 8;
+			offset++;
+		}
+	}
+out:
+	return rc;
+}
+
+static int efx_ef100_update_client_id(struct efx_nic *efx)
+{
+	struct ef100_nic_data *nic_data = efx->nic_data;
+	unsigned int pf_index = PCIE_FUNCTION_PF_NULL;
+	unsigned int vf_index = PCIE_FUNCTION_VF_NULL;
+	efx_qword_t pciefn;
+	int rc;
+
+	if (efx->pci_dev->is_virtfn)
+		vf_index = nic_data->vf_index;
+	else
+		pf_index = nic_data->pf_index;
+
+	/* Construct PCIE_FUNCTION structure */
+	EFX_POPULATE_QWORD_3(pciefn,
+			     PCIE_FUNCTION_PF, pf_index,
+			     PCIE_FUNCTION_VF, vf_index,
+			     PCIE_FUNCTION_INTF, PCIE_INTERFACE_CALLER);
+	/* look up self client ID */
+	rc = efx_ef100_lookup_client_id(efx, pciefn, &efx->client_id);
+	if (rc) {
+		pci_warn(efx->pci_dev,
+			 "%s: Failed to get client ID, rc %d\n",
+			 __func__, rc);
+	}
+
+	return rc;
+}
+
+/* NIC probe and remove
+ */
+static int ef100_probe_main(struct efx_nic *efx)
+{
+	struct ef100_nic_data *nic_data;
+	unsigned int bar_size =
+		resource_size(&efx->pci_dev->resource[efx->mem_bar]);
+	u32 privilege_mask = 0;
+	int i, rc;
+
+	if (WARN_ON(bar_size == 0))
+		return -EIO;
+
+	nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
+	if (!nic_data)
+		return -ENOMEM;
+	efx->nic_data = nic_data;
+	nic_data->efx = efx;
+	spin_lock_init(&nic_data->vf_reps_lock);
+	INIT_LIST_HEAD(&nic_data->rem_reps);
+#if defined(EFX_USE_KCOMPAT) && defined(EFX_TC_OFFLOAD) && !defined(EFX_HAVE_FLOW_INDR_BLOCK_CB_REGISTER) && !defined(EFX_HAVE_FLOW_INDR_DEV_REGISTER)
+	spin_lock_init(&nic_data->udp_tunnels_lock);
+	INIT_LIST_HEAD(&nic_data->udp_tunnels);
+#endif
+	mutex_init(&nic_data->bar_config_lock);
+	efx->max_vis = EF100_MAX_VIS;
+
+	/* Populate design-parameter defaults */
+	nic_data->tso_max_hdr_len = ESE_EF100_DP_GZ_TSO_MAX_HDR_LEN_DEFAULT;
+	nic_data->tso_max_frames = ESE_EF100_DP_GZ_TSO_MAX_NUM_FRAMES_DEFAULT;
+	nic_data->tso_max_payload_num_segs = ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_NUM_SEGS_DEFAULT;
+	nic_data->tso_max_payload_len = ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_LEN_DEFAULT;
+	/* Read design parameters */
+	rc = ef100_check_design_params(efx);
+	if (rc) {
+		pci_err(efx->pci_dev, "Unsupported design parameters\n");
+		goto fail;
+	}
+
+	/* we assume later that we can copy from this buffer in dwords */
+	BUILD_BUG_ON(MCDI_CTL_SDU_LEN_MAX_V2 % 4);
+
+	/* MCDI buffers must be 256 byte aligned. */
+	rc = efx_nic_alloc_buffer(efx, &nic_data->mcdi_buf, MCDI_BUF_LEN,
+				  GFP_KERNEL);
+	if (rc)
+		goto fail;
+
+	efx->mcdi_buf_mode = EFX_BUF_MODE_EF100;
+
+	/* Get the MC's warm boot count. In case it's rebooting right
+	 * now, be prepared to retry.
+	 */
+	i = 0;
+	for (;;) {
+		rc = ef100_get_warm_boot_count(efx);
+		if (rc >= 0)
+			break;
+		if (++i == 5)
+			goto fail;
+		ssleep(1);
+	}
+	nic_data->warm_boot_count = rc;
+
+	/* In case we're recovering from a crash (kexec), we want to
+	 * cancel any outstanding request by the previous user of this
+	 * function. We send a special message using the least
+	 * significant bits of the 'high' (doorbell) register.
+	 */
+	_efx_writed(efx, cpu_to_le32(1), efx_reg(efx, ER_GZ_MC_DB_HWRD));
+
+	/* Post-IO section. */
+
+	rc = efx_probe_common(efx);
+	if (rc)
+		goto fail;
+	if (efx->mcdi->fn_flags &
+	    (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_NO_ACTIVE_PORT)) {
+		pci_info(efx->pci_dev, "No network port on this PCI function\n");
+#if !defined(EFX_USE_KCOMPAT) || !defined(EFX_TC_OFFLOAD)
+		return 0;
+#else
+		/* N.B. Interfaces with no network port will have port_num set
+		 * implicitly to zero. This will affect the MAC stats. */
+#endif
+	} else {
+		rc = efx_mcdi_port_get_number(efx);
+		if (rc < 0)
+			goto fail;
+		efx->port_num = rc;
+	}
+
+	rc = efx_get_fn_info(efx, &nic_data->pf_index, &nic_data->vf_index);
+	if (rc)
+		goto fail;
+
+	rc = efx_ef100_update_client_id(efx);
+	if (rc)
+		goto fail;
+
+	efx_mcdi_get_privilege_mask(efx, &privilege_mask);
+	nic_data->grp_mae = !!(privilege_mask &
+			       MC_CMD_PRIVILEGE_MASK_IN_GRP_MAE);
+#ifdef EFX_NOT_UPSTREAM
+	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_bar_config);
+	if (rc)
+		goto fail;
+#endif
+
+	return 0;
+fail:
+	return rc;
+}
+
+int efx_ef100_lookup_client_id(struct efx_nic *efx, efx_qword_t pciefn, u32 *id)
+{
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLIENT_HANDLE_OUT_LEN);
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_CLIENT_HANDLE_IN_LEN);
+	u64 pciefn_flat = le64_to_cpu(pciefn.u64[0]);
+	size_t outlen;
+	int rc;
+
+	MCDI_SET_DWORD(inbuf, GET_CLIENT_HANDLE_IN_TYPE,
+		       MC_CMD_GET_CLIENT_HANDLE_IN_TYPE_FUNC);
+	MCDI_SET_QWORD(inbuf, GET_CLIENT_HANDLE_IN_FUNC,
+		       pciefn_flat);
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_GET_CLIENT_HANDLE, inbuf,
+			  sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
+	if (rc)
+		return rc;
+	if (outlen < sizeof(outbuf))
+		return -EIO;
+	*id = MCDI_DWORD(outbuf, GET_CLIENT_HANDLE_OUT_HANDLE);
+	return 0;
+}
+
+int ef100_probe_netdev_pf(struct efx_nic *efx)
+{
+	struct ef100_nic_data *nic_data = efx->nic_data;
+	struct net_device *net_dev = efx->net_dev;
+	int rc;
+
+	if (!nic_data->grp_mae)
+		return 0;
+
+	rc = efx_ef100_get_base_mport(efx);
+	if (rc) {
+		netif_warn(efx, probe, net_dev,
+			   "Failed to probe base mport rc %d; representors will not function\n",
+			   rc);
+	} else {
+		rc = efx_init_mae(efx);
+		if (rc)
+			pci_warn(efx->pci_dev,
+				 "Failed to init MAE rc %d; representors will not function\n",
+				 rc);
+	}
+
+	rc = efx_init_tc(efx);
+	if (rc) {
+		/* Either we don't have an MAE at all (i.e. legacy v-switching),
+		 * or we do but we failed to probe it. In the latter case, we
+		 * may not have set up default rules, in which case we won't be
+		 * able to pass any traffic. However, we don't fail the probe,
+		 * because the user might need to use the netdevice to apply
+		 * configuration changes to fix whatever's wrong with the MAE.
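+		 * That is why this branch only emits the warning below and
+		 * the function still returns 0.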
+ */ + netif_warn(efx, probe, net_dev, + "Failed to probe MAE rc %d; TC offload unavailable\n", + rc); + } else { +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_TC_OFFLOAD) + net_dev->features |= NETIF_F_HW_TC; + efx->fixed_features |= NETIF_F_HW_TC; +#endif + } + + return 0; +} + +int ef100_probe_vf(struct efx_nic *efx) +{ +#if defined(CONFIG_SFC_VDPA) +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_VDPA_MGMT_INTERFACE) + int err; +#endif +#endif + int rc; + + BUILD_BUG_ON(MAE_MPORT_SELECTOR_NULL); + + rc = ef100_probe_main(efx); +#if defined(CONFIG_SFC_VDPA) +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_VDPA_MGMT_INTERFACE) + err = ef100_vdpa_register_mgmtdev(efx); + if (err) + pci_warn(efx->pci_dev, + "vdpa_register_mgmtdev failed, err: %d\n", err); +#endif +#endif + return rc; +} + +void ef100_remove(struct efx_nic *efx) +{ + struct ef100_nic_data *nic_data = efx->nic_data; + +#ifdef EFX_NOT_UPSTREAM + device_remove_file(&efx->pci_dev->dev, &dev_attr_bar_config); +#endif + efx_remove_common(efx); + if (nic_data) { + if (efx->mcdi_buf_mode == EFX_BUF_MODE_EF100) + efx_nic_free_buffer(efx, &nic_data->mcdi_buf); + mutex_destroy(&nic_data->bar_config_lock); + } + +#if defined(CONFIG_SFC_VDPA) +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_VDPA_MGMT_INTERFACE) + ef100_vdpa_unregister_mgmtdev(efx); +#endif +#endif + + kfree(nic_data); + efx->nic_data = NULL; +} + +/* NIC level access functions + */ +#ifdef EFX_C_MODEL +#define EF100_OFFLOAD_FEATURES (NETIF_F_HW_CSUM | NETIF_F_RXCSUM | \ + NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_NTUPLE | \ + NETIF_F_RXHASH | NETIF_F_RXFCS | NETIF_F_TSO_ECN | NETIF_F_RXALL | \ + NETIF_F_HW_VLAN_CTAG_TX) +#else +#define EF100_OFFLOAD_FEATURES (NETIF_F_HW_CSUM | NETIF_F_RXCSUM | \ + NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_NTUPLE | \ + NETIF_F_RXHASH | NETIF_F_RXFCS | NETIF_F_TSO_ECN | NETIF_F_RXALL | \ + NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_FILTER) +#endif + +const struct efx_nic_type ef100_pf_nic_type = { + .revision = EFX_REV_EF100, + .is_vf = false, + .probe = ef100_probe_main, + .net_alloc = ef100_net_alloc, + .net_dealloc = ef100_net_dealloc, + .offload_features = EF100_OFFLOAD_FEATURES, + .mcdi_max_ver = 2, + .mcdi_rpc_timeout = efx_ef100_mcdi_rpc_timeout, + .mcdi_request = ef100_mcdi_request, + .mcdi_poll_response = ef100_mcdi_poll_response, + .mcdi_read_response = ef100_mcdi_read_response, + .mcdi_poll_reboot = ef100_mcdi_poll_reboot, + .mcdi_get_buf = ef100_mcdi_get_buf, + .mcdi_put_buf = ef100_mcdi_put_buf, + .mcdi_reboot_detected = ef100_mcdi_reboot_detected, + .irq_enable_master = efx_port_dummy_op_void, + .irq_test_generate = efx_ef100_irq_test_generate, + .irq_disable_non_ev = efx_port_dummy_op_void, + .push_irq_moderation = efx_channel_dummy_op_void, +#if defined(EFX_NOT_UPSTREAM) && defined(CONFIG_SFC_BUSYPOLL) + .supported_interrupt_modes = BIT(EFX_INT_MODE_MSIX) | + BIT(EFX_INT_MODE_POLLED), +#else + .supported_interrupt_modes = BIT(EFX_INT_MODE_MSIX), +#endif + .map_reset_reason = ef100_map_reset_reason, + .map_reset_flags = ef100_map_reset_flags, + .reset = ef100_reset, + + .check_caps = ef100_check_caps, + + .ev_probe = ef100_ev_probe, + .ev_init = ef100_ev_init, + .ev_fini = efx_mcdi_ev_fini, + .ev_remove = efx_mcdi_ev_remove, + .irq_handle_msi = ef100_msi_interrupt, + .ev_process = ef100_ev_process, + .ev_mcdi_pending = ef100_ev_mcdi_pending, + .ev_read_ack = ef100_ev_read_ack, + .ev_test_generate = efx_ef100_ev_test_generate, + .tx_probe = ef100_tx_probe, + .tx_init = ef100_tx_init, 
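+	/* The datapath hooks here are EF100-specific: EF100 descriptors and
+	 * packet prefixes (see ef100_regs.h) differ from earlier EF10
+	 * designs, so these implementations live in ef100_tx.c/ef100_rx.c.
+	 */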
+ .tx_write = ef100_tx_write, + .tx_notify = ef100_notify_tx_desc, + .tx_enqueue = ef100_enqueue_skb, + .tx_max_skb_descs = ef100_tx_max_skb_descs, + .rx_set_rss_flags = efx_mcdi_set_rss_context_flags, + .rx_get_rss_flags = efx_mcdi_get_rss_context_flags, + .rx_probe = ef100_rx_probe, + .rx_init = ef100_rx_init, + .rx_remove = efx_mcdi_rx_remove, + .rx_write = ef100_rx_write, + .rx_defer_refill = efx_ef100_rx_defer_refill, + .rx_packet = __ef100_rx_packet, + .rx_buf_hash_valid = ef100_rx_buf_hash_valid, + .max_rx_ip_filters = EFX_MCDI_FILTER_TBL_ROWS, + .filter_table_probe = ef100_filter_table_init, + .filter_table_up = ef100_filter_table_up, + .filter_table_restore = efx_mcdi_filter_table_restore, + .filter_table_down = ef100_filter_table_down, + .filter_table_remove = efx_mcdi_filter_table_fini, + .filter_insert = efx_mcdi_filter_insert, + .filter_remove_safe = efx_mcdi_filter_remove_safe, + .filter_get_safe = efx_mcdi_filter_get_safe, + .filter_clear_rx = efx_mcdi_filter_clear_rx, + .filter_count_rx_used = efx_mcdi_filter_count_rx_used, + .filter_get_rx_id_limit = efx_mcdi_filter_get_rx_id_limit, + .filter_get_rx_ids = efx_mcdi_filter_get_rx_ids, +#ifdef EFX_NOT_UPSTREAM + .filter_redirect = efx_mcdi_filter_redirect, +#if IS_MODULE(CONFIG_SFC_DRIVERLINK) + .filter_block_kernel = efx_mcdi_filter_block_kernel, + .filter_unblock_kernel = efx_mcdi_filter_unblock_kernel, + .regionmap_buffer = ef100_regionmap_buffer, +#endif +#endif +#ifdef CONFIG_RFS_ACCEL + .filter_rfs_expire_one = efx_mcdi_filter_rfs_expire_one, +#endif + .vlan_rx_add_vid = efx_mcdi_filter_add_vid, + .vlan_rx_kill_vid = efx_mcdi_filter_del_vid, + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_NEED_GET_PHYS_PORT_ID) + .get_phys_port_id = efx_ef100_get_phys_port_id, +#endif + + .rx_prefix_size = ESE_GZ_RX_PKT_PREFIX_LEN, + .rx_hash_offset = ESF_GZ_RX_PREFIX_RSS_HASH_LBN / 8, + .rx_ts_offset = ESF_GZ_RX_PREFIX_PARTIAL_TSTAMP_LBN / 8, + .rx_hash_key_size = 40, + .rx_pull_rss_config = efx_mcdi_rx_pull_rss_config, + .rx_push_rss_config = efx_ef100_rx_push_rss_config, + .rx_push_rss_context_config = efx_mcdi_rx_push_rss_context_config, + .rx_pull_rss_context_config = efx_mcdi_rx_pull_rss_context_config, + .rx_restore_rss_contexts = efx_mcdi_rx_restore_rss_contexts, + .rx_recycle_ring_size = efx_ef100_recycle_ring_size, + + .reconfigure_mac = ef100_reconfigure_mac, + .reconfigure_port = efx_mcdi_port_reconfigure, + .test_nvram = efx_mcdi_nvram_test_all, + .describe_stats = ef100_describe_stats, + .update_stats = ef100_update_stats, + .pull_stats = ef100_pull_stats, + .has_dynamic_sensors = ef100_has_dynamic_sensors, + + /* Per-type bar/size configuration not used on ef100. Location of + * registers is defined by extended capabilities. 
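+	 * (The driver locates the function control window at probe time by
+	 * walking the Xilinx capabilities table in PCI config space; see the
+	 * XIL_CFGBAR_* definitions in ef100_regs.h.)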
+ */ + .mem_bar = NULL, + .mem_map_size = NULL, + .max_dma_mask = DMA_BIT_MASK(ESF_GZ_TX_SEND_ADDR_WIDTH), + +#if defined(EFX_USE_KCOMPAT) && defined(EFX_TC_OFFLOAD) && !defined(EFX_HAVE_FLOW_INDR_BLOCK_CB_REGISTER) && !defined(EFX_HAVE_FLOW_INDR_DEV_REGISTER) + .udp_tnl_add_port2 = efx_ef100_udp_tnl_add_port, + .udp_tnl_lookup_port2 = efx_ef100_udp_tnl_lookup_port, + .udp_tnl_del_port2 = efx_ef100_udp_tnl_del_port, +#endif + +#if defined(CONFIG_SFC_SRIOV) + .sriov_configure = efx_ef100_sriov_configure, +#endif +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_TC_OFFLOAD) + .get_vf_rep = ef100_get_vf_rep, + .detach_reps = ef100_detach_reps, + .attach_reps = ef100_attach_reps, +#endif + .add_mport = efx_ef100_add_mport, + .remove_mport = efx_ef100_remove_mport, + +#ifdef EFX_NOT_UPSTREAM +#if IS_MODULE(CONFIG_SFC_DRIVERLINK) + .ef10_resources = { + .hdr.type = EFX_DL_EF10_RESOURCES, + }, +#endif +#endif +}; + +const struct efx_nic_type ef100_vf_nic_type = { + .revision = EFX_REV_EF100, + .is_vf = true, + .probe = ef100_probe_vf, + .net_alloc = ef100_net_alloc, + .net_dealloc = ef100_net_dealloc, + .offload_features = EF100_OFFLOAD_FEATURES, + .mcdi_max_ver = 2, + .mcdi_rpc_timeout = efx_ef100_mcdi_rpc_timeout, + .mcdi_request = ef100_mcdi_request, + .mcdi_poll_response = ef100_mcdi_poll_response, + .mcdi_read_response = ef100_mcdi_read_response, + .mcdi_poll_reboot = ef100_mcdi_poll_reboot, + .mcdi_get_buf = ef100_mcdi_get_buf, + .mcdi_put_buf = ef100_mcdi_put_buf, + .mcdi_reboot_detected = ef100_mcdi_reboot_detected, + .irq_enable_master = efx_port_dummy_op_void, + .irq_test_generate = efx_ef100_irq_test_generate, + .irq_disable_non_ev = efx_port_dummy_op_void, + .push_irq_moderation = efx_channel_dummy_op_void, +#if defined(EFX_NOT_UPSTREAM) && defined(CONFIG_SFC_BUSYPOLL) + .supported_interrupt_modes = BIT(EFX_INT_MODE_MSIX) | + BIT(EFX_INT_MODE_POLLED), +#else + .supported_interrupt_modes = BIT(EFX_INT_MODE_MSIX), +#endif + .map_reset_reason = ef100_map_reset_reason, + .map_reset_flags = ef100_map_reset_flags, + .reset = ef100_reset, + .check_caps = ef100_check_caps, + .ev_probe = ef100_ev_probe, + .ev_init = ef100_ev_init, + .ev_fini = efx_mcdi_ev_fini, + .ev_remove = efx_mcdi_ev_remove, + .irq_handle_msi = ef100_msi_interrupt, + .ev_process = ef100_ev_process, + .ev_mcdi_pending = ef100_ev_mcdi_pending, + .ev_read_ack = ef100_ev_read_ack, + .ev_test_generate = efx_ef100_ev_test_generate, + .tx_probe = ef100_tx_probe, + .tx_init = ef100_tx_init, + .tx_write = ef100_tx_write, + .tx_notify = ef100_notify_tx_desc, + .tx_enqueue = ef100_enqueue_skb, + .tx_max_skb_descs = ef100_tx_max_skb_descs, + .rx_set_rss_flags = efx_mcdi_set_rss_context_flags, + .rx_get_rss_flags = efx_mcdi_get_rss_context_flags, + .rx_probe = ef100_rx_probe, + .rx_init = ef100_rx_init, + .rx_remove = efx_mcdi_rx_remove, + .rx_write = ef100_rx_write, + .rx_defer_refill = efx_ef100_rx_defer_refill, + .rx_packet = __ef100_rx_packet, + .rx_buf_hash_valid = ef100_rx_buf_hash_valid, + .max_rx_ip_filters = EFX_MCDI_FILTER_TBL_ROWS, + .filter_table_probe = ef100_filter_table_init, + .filter_table_up = ef100_filter_table_up, + .filter_table_restore = efx_mcdi_filter_table_restore, + .filter_table_down = ef100_filter_table_down, + .filter_table_remove = efx_mcdi_filter_table_fini, + .filter_insert = efx_mcdi_filter_insert, + .filter_remove_safe = efx_mcdi_filter_remove_safe, + .filter_get_safe = efx_mcdi_filter_get_safe, + .filter_clear_rx = efx_mcdi_filter_clear_rx, + .filter_count_rx_used = efx_mcdi_filter_count_rx_used, + 
.filter_get_rx_id_limit = efx_mcdi_filter_get_rx_id_limit, + .filter_get_rx_ids = efx_mcdi_filter_get_rx_ids, +#ifdef EFX_NOT_UPSTREAM + .filter_redirect = efx_mcdi_filter_redirect, +#if IS_MODULE(CONFIG_SFC_DRIVERLINK) + .filter_block_kernel = efx_mcdi_filter_block_kernel, + .filter_unblock_kernel = efx_mcdi_filter_unblock_kernel, + .regionmap_buffer = ef100_regionmap_buffer, +#endif +#endif +#ifdef CONFIG_RFS_ACCEL + .filter_rfs_expire_one = efx_mcdi_filter_rfs_expire_one, +#endif + .vlan_rx_add_vid = efx_mcdi_filter_add_vid, + .vlan_rx_kill_vid = efx_mcdi_filter_del_vid, + + .rx_prefix_size = ESE_GZ_RX_PKT_PREFIX_LEN, + .rx_hash_offset = ESF_GZ_RX_PREFIX_RSS_HASH_LBN / 8, + .rx_ts_offset = ESF_GZ_RX_PREFIX_PARTIAL_TSTAMP_LBN / 8, + .rx_hash_key_size = 40, + .rx_pull_rss_config = efx_mcdi_rx_pull_rss_config, + .rx_push_rss_config = efx_ef100_rx_push_rss_config, + .rx_restore_rss_contexts = efx_mcdi_rx_restore_rss_contexts, + .rx_recycle_ring_size = efx_ef100_recycle_ring_size, + + .reconfigure_mac = ef100_reconfigure_mac, + .test_nvram = efx_mcdi_nvram_test_all, + .describe_stats = ef100_describe_stats, + .update_stats = ef100_update_stats, + .pull_stats = ef100_pull_stats, + .has_dynamic_sensors = ef100_has_dynamic_sensors, + + .mem_bar = NULL, + .mem_map_size = NULL, + .max_dma_mask = DMA_BIT_MASK(ESF_GZ_TX_SEND_ADDR_WIDTH), +#ifdef EFX_NOT_UPSTREAM +#if IS_MODULE(CONFIG_SFC_DRIVERLINK) + .ef10_resources = { + .hdr.type = EFX_DL_EF10_RESOURCES, + }, +#endif +#endif +}; diff --git a/src/drivers/net/ethernet/sfc/ef100_nic.h b/src/drivers/net/ethernet/sfc/ef100_nic.h new file mode 100644 index 0000000..b78f398 --- /dev/null +++ b/src/drivers/net/ethernet/sfc/ef100_nic.h @@ -0,0 +1,193 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2018 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
+ */ +#ifndef EFX_EF100_NIC_H +#define EFX_EF100_NIC_H + +#include "net_driver.h" +#include "nic.h" +#include "mcdi_pcol.h" + +#ifdef EFX_NOT_UPSTREAM +#if IS_MODULE(CONFIG_SFC_DRIVERLINK) + +#ifdef EFX_C_MODEL +#define EF100_ONLOAD_VIS 4 +#else +#define EF100_ONLOAD_VIS 64 +#endif + +#define EF100_ONLOAD_IRQS 8 +#endif + +#define EF100_BRIDGE_DOWNSTREAM_PCI_DEVICE 0x9434 +#define EF100_BRIDGE_UPSTREAM_PCI_DEVICE 0x913f +#endif +#define EF100_QDMA_ADDR_REGIONS_MAX MC_CMD_GET_DESC_ADDR_REGIONS_OUT_REGIONS_MAXNUM + +extern const struct efx_nic_type ef100_pf_nic_type; +extern const struct efx_nic_type ef100_vf_nic_type; + +int ef100_get_mac_address(struct efx_nic *efx, u8 *mac_address, + int client_handle, bool empty_ok); +int ef100_probe_netdev_pf(struct efx_nic *efx); +int ef100_probe_vf(struct efx_nic *efx); +void ef100_remove(struct efx_nic *efx); + +enum { + EF100_STAT_port_tx_bytes = GENERIC_STAT_COUNT, + EF100_STAT_port_tx_packets, + EF100_STAT_port_tx_pause, + EF100_STAT_port_tx_unicast, + EF100_STAT_port_tx_multicast, + EF100_STAT_port_tx_broadcast, + EF100_STAT_port_tx_lt64, + EF100_STAT_port_tx_64, + EF100_STAT_port_tx_65_to_127, + EF100_STAT_port_tx_128_to_255, + EF100_STAT_port_tx_256_to_511, + EF100_STAT_port_tx_512_to_1023, + EF100_STAT_port_tx_1024_to_15xx, + EF100_STAT_port_tx_15xx_to_jumbo, + EF100_STAT_port_rx_bytes, + EF100_STAT_port_rx_packets, + EF100_STAT_port_rx_good, + EF100_STAT_port_rx_bad, + EF100_STAT_port_rx_bad_bytes, + EF100_STAT_port_rx_pause, + EF100_STAT_port_rx_unicast, + EF100_STAT_port_rx_multicast, + EF100_STAT_port_rx_broadcast, + EF100_STAT_port_rx_lt64, + EF100_STAT_port_rx_64, + EF100_STAT_port_rx_65_to_127, + EF100_STAT_port_rx_128_to_255, + EF100_STAT_port_rx_256_to_511, + EF100_STAT_port_rx_512_to_1023, + EF100_STAT_port_rx_1024_to_15xx, + EF100_STAT_port_rx_15xx_to_jumbo, + EF100_STAT_port_rx_gtjumbo, + EF100_STAT_port_rx_bad_gtjumbo, + EF100_STAT_port_rx_align_error, + EF100_STAT_port_rx_length_error, + EF100_STAT_port_rx_overflow, + EF100_STAT_port_rx_nodesc_drops, + EF100_STAT_COUNT +}; + +/* Keep this in sync with the contents of bar_config_name. */ +enum ef100_bar_config { + EF100_BAR_CONFIG_NONE, + EF100_BAR_CONFIG_EF100, + EF100_BAR_CONFIG_VDPA, +}; + +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_VDPA_MGMT_INTERFACE) +#ifdef CONFIG_SFC_VDPA +enum ef100_vdpa_class { + EF100_VDPA_CLASS_NONE, + EF100_VDPA_CLASS_NET, +}; +#endif +#endif + +enum { + EFX_EF100_TEST = 1, + EFX_EF100_REFILL, +}; +#define EFX_EF100_DRVGEN_MAGIC(_code, _data) ((_code) | ((_data) << 8)) +#define EFX_EF100_DRVGEN_CODE(_magic) ((_magic) & 0xff) +#define EFX_EF100_DRVGEN_DATA(_magic) ((_magic) >> 8) + +/** QDMA address region + * This is the driver equivalent of DEVEL_NIC_ADDR_REGION structuredef + * in YAML. 
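+ * Going by the field names: qdma_addr and trgt_addr are the window bases
+ * in QDMA and target address space respectively, and the window size and
+ * required target alignment are stored as log2 values.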
+ */ +struct ef100_addr_region { + dma_addr_t qdma_addr; + dma_addr_t trgt_addr; + u32 size_log2; + u32 trgt_alignment_log2; +}; + +struct ef100_nic_data { + struct efx_nic *efx; + struct efx_buffer mcdi_buf; + unsigned long mcdi_buf_use; + u32 datapath_caps; + u32 datapath_caps2; + u32 datapath_caps3; + unsigned int pf_index; + unsigned int vf_index; + u16 warm_boot_count; + u8 port_id[ETH_ALEN]; + enum ef100_bar_config bar_config; +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_VDPA_MGMT_INTERFACE) +#ifdef CONFIG_SFC_VDPA + enum ef100_vdpa_class vdpa_class; +#endif +#endif + struct mutex bar_config_lock; /* lock to control access to bar config */ + u64 licensed_features; + unsigned long *evq_phases; + u64 stats[EF100_STAT_COUNT]; + spinlock_t vf_reps_lock; /* Synchronises 'all-VFreps' operations */ + unsigned int vf_rep_count; /* usually but not always efx->vf_count */ + struct net_device **vf_rep; /* local VF reps */ + struct list_head rem_reps; /* remote reps */ + u32 base_mport; + u32 own_mport; + u32 local_mae_intf; /* interface_idx that corresponds to us, in mport enumerate */ + bool have_mport; /* base_mport was populated successfully */ + bool have_own_mport; /* own_mport was populated successfully */ + bool have_local_intf; /* local_mae_intf was populated successfully */ + bool filters_up; /* filter table has been upped */ + bool grp_mae; /* MAE Privilege */ + bool vdpa_supported; /* true if vdpa is supported on this PCIe FN */ +#if defined(EFX_USE_KCOMPAT) && defined(EFX_TC_OFFLOAD) && \ + !defined(EFX_HAVE_FLOW_INDR_BLOCK_CB_REGISTER) + spinlock_t udp_tunnels_lock; + struct list_head udp_tunnels; +#endif + u16 tso_max_hdr_len; + u16 tso_max_payload_num_segs; + u16 tso_max_frames; + unsigned int tso_max_payload_len; + unsigned int addr_mapping_type; + struct ef100_addr_region addr_region[EF100_QDMA_ADDR_REGIONS_MAX]; +}; + +void __ef100_detach_reps(struct efx_nic *efx); +void __ef100_attach_reps(struct efx_nic *efx); + +#define efx_ef100_has_cap(caps, flag) \ + (!!((caps) & BIT_ULL(MC_CMD_GET_CAPABILITIES_V10_OUT_ ## flag ## _LBN))) + +int efx_ef100_init_datapath_caps(struct efx_nic *efx); +int ef100_phy_probe(struct efx_nic *efx); +int ef100_filter_table_probe(struct efx_nic *efx); +int efx_ef100_lookup_client_id(struct efx_nic *efx, efx_qword_t pciefn, + u32 *id); + +static inline bool +ef100_region_addr_is_populated(struct ef100_addr_region *region) +{ + return region->size_log2 != 0; +} + +static inline bool efx_have_mport_journal_event(struct efx_nic *efx) +{ + struct ef100_nic_data *nic_data = efx->nic_data; + + return efx_ef100_has_cap(nic_data->datapath_caps3, + DYNAMIC_MPORT_JOURNAL); +} + +int efx_ef100_set_bar_config(struct efx_nic *efx, + enum ef100_bar_config new_config); +#endif /* EFX_EF100_NIC_H */ diff --git a/src/drivers/net/ethernet/sfc/ef100_regs.h b/src/drivers/net/ethernet/sfc/ef100_regs.h new file mode 100644 index 0000000..8198c31 --- /dev/null +++ b/src/drivers/net/ethernet/sfc/ef100_regs.h @@ -0,0 +1,728 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2018 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
+ */
+
+#ifndef EFX_EF100_REGS_H
+#define EFX_EF100_REGS_H
+
+/* EF100 hardware architecture definitions have a name prefix following
+ * the format:
+ *
+ *     E<type>_<min-rev><max-rev>_
+ *
+ * The following <type> strings are used:
+ *
+ *             MMIO register  Host memory structure
+ * -------------------------------------------------------------
+ * Address     R
+ * Bitfield    RF             SF
+ * Enumerator  FE             SE
+ *
+ * <min-rev> is the first revision to which the definition applies:
+ *
+ *     G: Riverhead
+ *
+ * If the definition has been changed or removed in later revisions
+ * then <max-rev> is the last revision to which the definition applies;
+ * otherwise it is "Z".
+ */
+
+/**************************************************************************
+ *
+ * EF100 registers and descriptors
+ *
+ **************************************************************************
+ */
+
+/* HW_REV_ID_REG: Hardware revision info register */
+#define ER_GZ_HW_REV_ID 0x00000000
+
+/* NIC_REV_ID: SoftNIC revision info register */
+#define ER_GZ_NIC_REV_ID 0x00000004
+
+/* NIC_MAGIC: Signature register that should contain a well-known value */
+#define ER_GZ_NIC_MAGIC 0x00000008
+#define ERF_GZ_NIC_MAGIC_LBN 0
+#define ERF_GZ_NIC_MAGIC_WIDTH 32
+#define EFE_GZ_NIC_MAGIC_EXPECTED 0xEF100FCB
+
+/* MC_SFT_STATUS: MC soft status */
+#define ER_GZ_MC_SFT_STATUS 0x00000010
+#define ER_GZ_MC_SFT_STATUS_STEP 4
+#define ER_GZ_MC_SFT_STATUS_ROWS 2
+
+/* MC_DB_LWRD_REG: MC doorbell register, low word */
+#define ER_GZ_MC_DB_LWRD 0x00000020
+
+/* MC_DB_HWRD_REG: MC doorbell register, high word */
+#define ER_GZ_MC_DB_HWRD 0x00000024
+
+/* EVQ_INT_PRIME: Prime EVQ */
+#define ER_GZ_EVQ_INT_PRIME 0x00000040
+#define ERF_GZ_IDX_LBN 16
+#define ERF_GZ_IDX_WIDTH 16
+#define ERF_GZ_EVQ_ID_LBN 0
+#define ERF_GZ_EVQ_ID_WIDTH 16
+
+/* INT_AGG_RING_PRIME: Prime interrupt aggregation ring. */
+#define ER_GZ_INT_AGG_RING_PRIME 0x00000048
+/* defined as ERF_GZ_IDX_LBN 16; access=WO reset=0x0 */
+/* defined as ERF_GZ_IDX_WIDTH 16 */
+#define ERF_GZ_RING_ID_LBN 0
+#define ERF_GZ_RING_ID_WIDTH 16
+
+/* EVQ_TMR: EVQ timer control */
+#define ER_GZ_EVQ_TMR 0x00000104
+#define ER_GZ_EVQ_TMR_STEP 65536
+#define ER_GZ_EVQ_TMR_ROWS 1024
+
+/* EVQ_UNSOL_CREDIT_GRANT_SEQ: Grant credits for unsolicited events. */
+#define ER_GZ_EVQ_UNSOL_CREDIT_GRANT_SEQ 0x00000108
+#define ER_GZ_EVQ_UNSOL_CREDIT_GRANT_SEQ_STEP 65536
+#define ER_GZ_EVQ_UNSOL_CREDIT_GRANT_SEQ_ROWS 1024
+
+/* EVQ_DESC_CREDIT_GRANT_SEQ: Grant credits for descriptor proxy events. */
+#define ER_GZ_EVQ_DESC_CREDIT_GRANT_SEQ 0x00000110
+#define ER_GZ_EVQ_DESC_CREDIT_GRANT_SEQ_STEP 65536
+#define ER_GZ_EVQ_DESC_CREDIT_GRANT_SEQ_ROWS 1024
+
+/* RX_RING_DOORBELL: Ring Rx doorbell. */
+#define ER_GZ_RX_RING_DOORBELL 0x00000180
+#define ER_GZ_RX_RING_DOORBELL_STEP 65536
+#define ER_GZ_RX_RING_DOORBELL_ROWS 1024
+#define ERF_GZ_RX_RING_PIDX_LBN 16
+#define ERF_GZ_RX_RING_PIDX_WIDTH 16
+
+/* TX_RING_DOORBELL: Ring Tx doorbell. */
+#define ER_GZ_TX_RING_DOORBELL 0x00000200
+#define ER_GZ_TX_RING_DOORBELL_STEP 65536
+#define ER_GZ_TX_RING_DOORBELL_ROWS 1024
+#define ERF_GZ_TX_RING_PIDX_LBN 16
+#define ERF_GZ_TX_RING_PIDX_WIDTH 16
+
+/* TX_DESC_PUSH: Tx ring descriptor push. Reserved for future use.
*/ +#define ER_GZ_TX_DESC_PUSH 0x00000210 +#define ER_GZ_TX_DESC_PUSH_STEP 65536 +#define ER_GZ_TX_DESC_PUSH_ROWS 1024 + +/* THE_TIME: NIC hardware time */ +#define ER_GZ_THE_TIME 0x00000280 +#define ER_GZ_THE_TIME_STEP 65536 +#define ER_GZ_THE_TIME_ROWS 1024 +#define ERF_GZ_THE_TIME_SECS_LBN 32 +#define ERF_GZ_THE_TIME_SECS_WIDTH 32 +#define ERF_GZ_THE_TIME_NANOS_LBN 2 +#define ERF_GZ_THE_TIME_NANOS_WIDTH 30 +#define ERF_GZ_THE_TIME_CLOCK_IN_SYNC_LBN 1 +#define ERF_GZ_THE_TIME_CLOCK_IN_SYNC_WIDTH 1 +#define ERF_GZ_THE_TIME_CLOCK_IS_SET_LBN 0 +#define ERF_GZ_THE_TIME_CLOCK_IS_SET_WIDTH 1 + +/* PARAMS_TLV_LEN: Size of design parameters area in bytes */ +#define ER_GZ_PARAMS_TLV_LEN 0x00000c00 +#define ER_GZ_PARAMS_TLV_LEN_STEP 65536 +#define ER_GZ_PARAMS_TLV_LEN_ROWS 1024 + +/* PARAMS_TLV: Design parameters */ +#define ER_GZ_PARAMS_TLV 0x00000c04 +#define ER_GZ_PARAMS_TLV_STEP 65536 +#define ER_GZ_PARAMS_TLV_ROWS 1024 + +/* EW_EMBEDDED_EVENT */ +#define ESF_GZ_EV_256_EVENT_LBN 0 +#define ESF_GZ_EV_256_EVENT_WIDTH 64 +#define ESE_GZ_EW_EMBEDDED_EVENT_STRUCT_SIZE 64 + +/* NMMU_PAGESZ_2M_ADDR */ +#define ESF_GZ_NMMU_2M_PAGE_SIZE_ID_LBN 59 +#define ESF_GZ_NMMU_2M_PAGE_SIZE_ID_WIDTH 5 +#define ESE_GZ_NMMU_PAGE_SIZE_2M 9 +#define ESF_GZ_NMMU_2M_PAGE_ID_LBN 21 +#define ESF_GZ_NMMU_2M_PAGE_ID_WIDTH 38 +#define ESF_GZ_NMMU_2M_PAGE_OFFSET_LBN 0 +#define ESF_GZ_NMMU_2M_PAGE_OFFSET_WIDTH 21 +#define ESE_GZ_NMMU_PAGESZ_2M_ADDR_STRUCT_SIZE 64 + +/* PARAM_TLV */ +#define ESF_GZ_TLV_VALUE_LBN 16 +#define ESF_GZ_TLV_VALUE_WIDTH 8 +#define ESE_GZ_TLV_VALUE_LENMIN 8 +#define ESE_GZ_TLV_VALUE_LENMAX 2040 +#define ESF_GZ_TLV_LEN_LBN 8 +#define ESF_GZ_TLV_LEN_WIDTH 8 +#define ESF_GZ_TLV_TYPE_LBN 0 +#define ESF_GZ_TLV_TYPE_WIDTH 8 +#define ESE_GZ_DP_NMMU_GROUP_SIZE 5 +#define ESE_GZ_DP_EVQ_UNSOL_CREDIT_SEQ_BITS 4 +#define ESE_GZ_DP_TX_EV_NUM_DESCS_BITS 3 +#define ESE_GZ_DP_RX_EV_NUM_PACKETS_BITS 2 +#define ESE_GZ_DP_PARTIAL_TSTAMP_SUB_NANO_BITS 1 +#define ESE_GZ_DP_PAD 0 +#define ESE_GZ_PARAM_TLV_STRUCT_SIZE 24 + +/* PCI_EXPRESS_XCAP_HDR */ +#define ESF_GZ_PCI_EXPRESS_XCAP_NEXT_LBN 20 +#define ESF_GZ_PCI_EXPRESS_XCAP_NEXT_WIDTH 12 +#define ESF_GZ_PCI_EXPRESS_XCAP_VER_LBN 16 +#define ESF_GZ_PCI_EXPRESS_XCAP_VER_WIDTH 4 +#define ESE_GZ_PCI_EXPRESS_XCAP_VER_VSEC 1 +#define ESF_GZ_PCI_EXPRESS_XCAP_ID_LBN 0 +#define ESF_GZ_PCI_EXPRESS_XCAP_ID_WIDTH 16 +#define ESE_GZ_PCI_EXPRESS_XCAP_ID_VNDR 0xb +#define ESE_GZ_PCI_EXPRESS_XCAP_HDR_STRUCT_SIZE 32 + +/* RHEAD_BASE_EVENT */ +#define ESF_GZ_E_TYPE_LBN 60 +#define ESF_GZ_E_TYPE_WIDTH 4 +#define ESF_GZ_EV_EVQ_PHASE_LBN 59 +#define ESF_GZ_EV_EVQ_PHASE_WIDTH 1 +#define ESE_GZ_RHEAD_BASE_EVENT_STRUCT_SIZE 64 + +/* RHEAD_EW_EVENT */ +#define ESF_GZ_EV_256_EV32_PHASE_LBN 255 +#define ESF_GZ_EV_256_EV32_PHASE_WIDTH 1 +#define ESF_GZ_EV_256_EV32_TYPE_LBN 251 +#define ESF_GZ_EV_256_EV32_TYPE_WIDTH 4 +#define ESE_GZ_EF100_EVEW_VIRTQ_DESC 2 +#define ESE_GZ_EF100_EVEW_TXQ_DESC 1 +#define ESE_GZ_EF100_EVEW_64BIT 0 +#define ESE_GZ_RHEAD_EW_EVENT_STRUCT_SIZE 256 + +/* RX_DESC */ +#define ESF_GZ_RX_BUF_ADDR_LBN 0 +#define ESF_GZ_RX_BUF_ADDR_WIDTH 64 +#define ESE_GZ_RX_DESC_STRUCT_SIZE 64 + +/* TXQ_DESC_PROXY_EVENT */ +#define ESF_GZ_EV_TXQ_DP_VI_ID_LBN 128 +#define ESF_GZ_EV_TXQ_DP_VI_ID_WIDTH 16 +#define ESF_GZ_EV_TXQ_DP_TXQ_DESC_LBN 0 +#define ESF_GZ_EV_TXQ_DP_TXQ_DESC_WIDTH 128 +#define ESE_GZ_TXQ_DESC_PROXY_EVENT_STRUCT_SIZE 144 + +/* TX_DESC_TYPE */ +#define ESF_GZ_TX_DESC_TYPE_LBN 124 +#define ESF_GZ_TX_DESC_TYPE_WIDTH 4 +#define ESE_GZ_TX_DESC_TYPE_DESC2CMPT 7 +#define 
ESE_GZ_TX_DESC_TYPE_MEM2MEM 4 +#define ESE_GZ_TX_DESC_TYPE_SEG 3 +#define ESE_GZ_TX_DESC_TYPE_TSO 2 +#define ESE_GZ_TX_DESC_TYPE_PREFIX 1 +#define ESE_GZ_TX_DESC_TYPE_SEND 0 +#define ESE_GZ_TX_DESC_TYPE_STRUCT_SIZE 128 + +/* VIRTQ_DESC_PROXY_EVENT */ +#define ESF_GZ_EV_VQ_DP_AVAIL_ENTRY_LBN 144 +#define ESF_GZ_EV_VQ_DP_AVAIL_ENTRY_WIDTH 16 +#define ESF_GZ_EV_VQ_DP_VI_ID_LBN 128 +#define ESF_GZ_EV_VQ_DP_VI_ID_WIDTH 16 +#define ESF_GZ_EV_VQ_DP_VIRTQ_DESC_LBN 0 +#define ESF_GZ_EV_VQ_DP_VIRTQ_DESC_WIDTH 128 +#define ESE_GZ_VIRTQ_DESC_PROXY_EVENT_STRUCT_SIZE 160 + +/* XIL_CFGBAR_TBL_ENTRY */ +#define ESF_GZ_CFGBAR_CONT_CAP_OFF_HI_LBN 96 +#define ESF_GZ_CFGBAR_CONT_CAP_OFF_HI_WIDTH 32 +#define ESF_GZ_CFGBAR_CONT_CAP_OFFSET_LBN 68 +#define ESF_GZ_CFGBAR_CONT_CAP_OFFSET_WIDTH 60 +#define ESE_GZ_CONT_CAP_OFFSET_BYTES_SHIFT 4 +#define ESF_GZ_CFGBAR_EF100_FUNC_CTL_WIN_OFF_LBN 67 +#define ESF_GZ_CFGBAR_EF100_FUNC_CTL_WIN_OFF_WIDTH 29 +#define ESE_GZ_EF100_FUNC_CTL_WIN_OFF_SHIFT 4 +#define ESF_GZ_CFGBAR_CONT_CAP_OFF_LO_LBN 68 +#define ESF_GZ_CFGBAR_CONT_CAP_OFF_LO_WIDTH 28 +#define ESF_GZ_CFGBAR_CONT_CAP_RSV_LBN 67 +#define ESF_GZ_CFGBAR_CONT_CAP_RSV_WIDTH 1 +#define ESF_GZ_CFGBAR_EF100_BAR_LBN 64 +#define ESF_GZ_CFGBAR_EF100_BAR_WIDTH 3 +#define ESE_GZ_CFGBAR_EF100_BAR_NUM_INVALID 7 +#define ESE_GZ_CFGBAR_EF100_BAR_NUM_EXPANSION_ROM 6 +#define ESF_GZ_CFGBAR_CONT_CAP_BAR_LBN 64 +#define ESF_GZ_CFGBAR_CONT_CAP_BAR_WIDTH 3 +#define ESE_GZ_CFGBAR_CONT_CAP_BAR_NUM_INVALID 7 +#define ESE_GZ_CFGBAR_CONT_CAP_BAR_NUM_EXPANSION_ROM 6 +#define ESF_GZ_CFGBAR_ENTRY_SIZE_LBN 32 +#define ESF_GZ_CFGBAR_ENTRY_SIZE_WIDTH 32 +#define ESE_GZ_CFGBAR_ENTRY_SIZE_EF100 12 +#define ESE_GZ_CFGBAR_ENTRY_HEADER_SIZE 8 +#define ESF_GZ_CFGBAR_ENTRY_LAST_LBN 28 +#define ESF_GZ_CFGBAR_ENTRY_LAST_WIDTH 1 +#define ESF_GZ_CFGBAR_ENTRY_REV_LBN 20 +#define ESF_GZ_CFGBAR_ENTRY_REV_WIDTH 8 +#define ESE_GZ_CFGBAR_ENTRY_REV_EF100 0 +#define ESF_GZ_CFGBAR_ENTRY_FORMAT_LBN 0 +#define ESF_GZ_CFGBAR_ENTRY_FORMAT_WIDTH 20 +#define ESE_GZ_CFGBAR_ENTRY_LAST 0xfffff +#define ESE_GZ_CFGBAR_ENTRY_CONT_CAP_ADDR 0xffffe +#define ESE_GZ_CFGBAR_ENTRY_EF100 0xef100 +#define ESE_GZ_XIL_CFGBAR_TBL_ENTRY_STRUCT_SIZE 128 + +/* XIL_CFGBAR_VSEC */ +#define ESF_GZ_VSEC_TBL_OFF_HI_LBN 64 +#define ESF_GZ_VSEC_TBL_OFF_HI_WIDTH 32 +#define ESE_GZ_VSEC_TBL_OFF_HI_BYTES_SHIFT 32 +#define ESF_GZ_VSEC_TBL_OFF_LO_LBN 36 +#define ESF_GZ_VSEC_TBL_OFF_LO_WIDTH 28 +#define ESE_GZ_VSEC_TBL_OFF_LO_BYTES_SHIFT 4 +#define ESF_GZ_VSEC_TBL_BAR_LBN 32 +#define ESF_GZ_VSEC_TBL_BAR_WIDTH 4 +#define ESE_GZ_VSEC_BAR_NUM_INVALID 7 +#define ESE_GZ_VSEC_BAR_NUM_EXPANSION_ROM 6 +#define ESF_GZ_VSEC_LEN_LBN 20 +#define ESF_GZ_VSEC_LEN_WIDTH 12 +#define ESE_GZ_VSEC_LEN_HIGH_OFFT 16 +#define ESE_GZ_VSEC_LEN_MIN 12 +#define ESF_GZ_VSEC_VER_LBN 16 +#define ESF_GZ_VSEC_VER_WIDTH 4 +#define ESE_GZ_VSEC_VER_XIL_CFGBAR 0 +#define ESF_GZ_VSEC_ID_LBN 0 +#define ESF_GZ_VSEC_ID_WIDTH 16 +#define ESE_GZ_XILINX_VSEC_ID 0x20 +#define ESE_GZ_XIL_CFGBAR_VSEC_STRUCT_SIZE 96 + +/* rh_egres_hclass */ +#define ESF_GZ_RX_PREFIX_HCLASS_TUN_OUTER_L4_CSUM_LBN 15 +#define ESF_GZ_RX_PREFIX_HCLASS_TUN_OUTER_L4_CSUM_WIDTH 1 +#define ESF_GZ_RX_PREFIX_HCLASS_TUN_OUTER_L3_CLASS_LBN 13 +#define ESF_GZ_RX_PREFIX_HCLASS_TUN_OUTER_L3_CLASS_WIDTH 2 +#define ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L4_CSUM_LBN 12 +#define ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L4_CSUM_WIDTH 1 +#define ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L4_CLASS_LBN 10 +#define ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L4_CLASS_WIDTH 2 +#define 
ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L3_CLASS_LBN 8 +#define ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L3_CLASS_WIDTH 2 +#define ESF_GZ_RX_PREFIX_HCLASS_TUNNEL_CLASS_LBN 5 +#define ESF_GZ_RX_PREFIX_HCLASS_TUNNEL_CLASS_WIDTH 3 +#define ESF_GZ_RX_PREFIX_HCLASS_L2_N_VLAN_LBN 3 +#define ESF_GZ_RX_PREFIX_HCLASS_L2_N_VLAN_WIDTH 2 +#define ESF_GZ_RX_PREFIX_HCLASS_L2_CLASS_LBN 2 +#define ESF_GZ_RX_PREFIX_HCLASS_L2_CLASS_WIDTH 1 +#define ESF_GZ_RX_PREFIX_HCLASS_L2_STATUS_LBN 0 +#define ESF_GZ_RX_PREFIX_HCLASS_L2_STATUS_WIDTH 2 +#define ESE_GZ_RH_EGRES_HCLASS_STRUCT_SIZE 16 + +/* sf_driver */ +#define ESF_GZ_DRIVER_E_TYPE_LBN 60 +#define ESF_GZ_DRIVER_E_TYPE_WIDTH 4 +#define ESF_GZ_DRIVER_PHASE_LBN 59 +#define ESF_GZ_DRIVER_PHASE_WIDTH 1 +#define ESF_GZ_DRIVER_DATA_LBN 0 +#define ESF_GZ_DRIVER_DATA_WIDTH 59 +#define ESE_GZ_SF_DRIVER_STRUCT_SIZE 64 + +/* sf_ev_rsvd */ +#define ESF_GZ_EV_RSVD_TBD_NEXT_LBN 34 +#define ESF_GZ_EV_RSVD_TBD_NEXT_WIDTH 3 +#define ESF_GZ_EV_RSVD_EVENT_GEN_FLAGS_LBN 30 +#define ESF_GZ_EV_RSVD_EVENT_GEN_FLAGS_WIDTH 4 +#define ESF_GZ_EV_RSVD_SRC_QID_LBN 18 +#define ESF_GZ_EV_RSVD_SRC_QID_WIDTH 12 +#define ESF_GZ_EV_RSVD_SEQ_NUM_LBN 2 +#define ESF_GZ_EV_RSVD_SEQ_NUM_WIDTH 16 +#define ESF_GZ_EV_RSVD_TBD_LBN 0 +#define ESF_GZ_EV_RSVD_TBD_WIDTH 2 +#define ESE_GZ_SF_EV_RSVD_STRUCT_SIZE 37 + +/* sf_flush_evnt */ +#define ESF_GZ_EV_FLSH_E_TYPE_LBN 60 +#define ESF_GZ_EV_FLSH_E_TYPE_WIDTH 4 +#define ESF_GZ_EV_FLSH_PHASE_LBN 59 +#define ESF_GZ_EV_FLSH_PHASE_WIDTH 1 +#define ESF_GZ_EV_FLSH_SUB_TYPE_LBN 53 +#define ESF_GZ_EV_FLSH_SUB_TYPE_WIDTH 6 +#define ESF_GZ_EV_FLSH_RSVD_LBN 10 +#define ESF_GZ_EV_FLSH_RSVD_WIDTH 43 +#define ESF_GZ_EV_FLSH_LABEL_LBN 4 +#define ESF_GZ_EV_FLSH_LABEL_WIDTH 6 +#define ESF_GZ_EV_FLSH_FLUSH_TYPE_LBN 0 +#define ESF_GZ_EV_FLSH_FLUSH_TYPE_WIDTH 4 +#define ESE_GZ_SF_FLUSH_EVNT_STRUCT_SIZE 64 + +/* sf_rx_pkts */ +#define ESF_GZ_EV_RXPKTS_E_TYPE_LBN 60 +#define ESF_GZ_EV_RXPKTS_E_TYPE_WIDTH 4 +#define ESF_GZ_EV_RXPKTS_PHASE_LBN 59 +#define ESF_GZ_EV_RXPKTS_PHASE_WIDTH 1 +#define ESF_GZ_EV_RXPKTS_RSVD_LBN 22 +#define ESF_GZ_EV_RXPKTS_RSVD_WIDTH 37 +#define ESF_GZ_EV_RXPKTS_Q_LABEL_LBN 16 +#define ESF_GZ_EV_RXPKTS_Q_LABEL_WIDTH 6 +#define ESF_GZ_EV_RXPKTS_NUM_PKT_LBN 0 +#define ESF_GZ_EV_RXPKTS_NUM_PKT_WIDTH 16 +#define ESE_GZ_SF_RX_PKTS_STRUCT_SIZE 64 + +/* sf_rx_prefix */ +#define ESF_GZ_RX_PREFIX_VLAN_STRIP_TCI_LBN 160 +#define ESF_GZ_RX_PREFIX_VLAN_STRIP_TCI_WIDTH 16 +#define ESF_GZ_RX_PREFIX_CSUM_FRAME_LBN 144 +#define ESF_GZ_RX_PREFIX_CSUM_FRAME_WIDTH 16 +#define ESF_GZ_RX_PREFIX_INGRESS_MPORT_LBN 128 +#define ESF_GZ_RX_PREFIX_INGRESS_MPORT_WIDTH 16 +#define ESF_GZ_RX_PREFIX_USER_MARK_LBN 96 +#define ESF_GZ_RX_PREFIX_USER_MARK_WIDTH 32 +#define ESF_GZ_RX_PREFIX_RSS_HASH_LBN 64 +#define ESF_GZ_RX_PREFIX_RSS_HASH_WIDTH 32 +#define ESF_GZ_RX_PREFIX_PARTIAL_TSTAMP_LBN 34 +#define ESF_GZ_RX_PREFIX_PARTIAL_TSTAMP_WIDTH 30 +#define ESF_GZ_RX_PREFIX_VSWITCH_STATUS_LBN 33 +#define ESF_GZ_RX_PREFIX_VSWITCH_STATUS_WIDTH 1 +#define ESF_GZ_RX_PREFIX_VLAN_STRIPPED_LBN 32 +#define ESF_GZ_RX_PREFIX_VLAN_STRIPPED_WIDTH 1 +#define ESF_GZ_RX_PREFIX_CLASS_LBN 16 +#define ESF_GZ_RX_PREFIX_CLASS_WIDTH 16 +#define ESF_GZ_RX_PREFIX_USER_FLAG_LBN 15 +#define ESF_GZ_RX_PREFIX_USER_FLAG_WIDTH 1 +#define ESF_GZ_RX_PREFIX_RSS_HASH_VALID_LBN 14 +#define ESF_GZ_RX_PREFIX_RSS_HASH_VALID_WIDTH 1 +#define ESF_GZ_RX_PREFIX_LENGTH_LBN 0 +#define ESF_GZ_RX_PREFIX_LENGTH_WIDTH 14 +#define ESE_GZ_SF_RX_PREFIX_STRUCT_SIZE 176 + +/* sf_rxtx_generic */ +#define ESF_GZ_EV_BARRIER_LBN 167 +#define 
ESF_GZ_EV_BARRIER_WIDTH 1 +#define ESF_GZ_EV_RSVD_LBN 130 +#define ESF_GZ_EV_RSVD_WIDTH 37 +#define ESF_GZ_EV_DPRXY_LBN 129 +#define ESF_GZ_EV_DPRXY_WIDTH 1 +#define ESF_GZ_EV_VIRTIO_LBN 128 +#define ESF_GZ_EV_VIRTIO_WIDTH 1 +#define ESF_GZ_EV_COUNT_LBN 0 +#define ESF_GZ_EV_COUNT_WIDTH 128 +#define ESE_GZ_SF_RXTX_GENERIC_STRUCT_SIZE 168 + +/* sf_ts_stamp */ +#define ESF_GZ_EV_TS_E_TYPE_LBN 60 +#define ESF_GZ_EV_TS_E_TYPE_WIDTH 4 +#define ESF_GZ_EV_TS_PHASE_LBN 59 +#define ESF_GZ_EV_TS_PHASE_WIDTH 1 +#define ESF_GZ_EV_TS_RSVD_LBN 56 +#define ESF_GZ_EV_TS_RSVD_WIDTH 3 +#define ESF_GZ_EV_TS_STATUS_LBN 54 +#define ESF_GZ_EV_TS_STATUS_WIDTH 2 +#define ESF_GZ_EV_TS_Q_LABEL_LBN 48 +#define ESF_GZ_EV_TS_Q_LABEL_WIDTH 6 +#define ESF_GZ_EV_TS_DESC_ID_LBN 32 +#define ESF_GZ_EV_TS_DESC_ID_WIDTH 16 +#define ESF_GZ_EV_TS_PARTIAL_STAMP_LBN 0 +#define ESF_GZ_EV_TS_PARTIAL_STAMP_WIDTH 32 +#define ESE_GZ_SF_TS_STAMP_STRUCT_SIZE 64 + +/* sf_tx_cmplt */ +#define ESF_GZ_EV_TXCMPL_E_TYPE_LBN 60 +#define ESF_GZ_EV_TXCMPL_E_TYPE_WIDTH 4 +#define ESF_GZ_EV_TXCMPL_PHASE_LBN 59 +#define ESF_GZ_EV_TXCMPL_PHASE_WIDTH 1 +#define ESF_GZ_EV_TXCMPL_RSVD_LBN 22 +#define ESF_GZ_EV_TXCMPL_RSVD_WIDTH 37 +#define ESF_GZ_EV_TXCMPL_Q_LABEL_LBN 16 +#define ESF_GZ_EV_TXCMPL_Q_LABEL_WIDTH 6 +#define ESF_GZ_EV_TXCMPL_NUM_DESC_LBN 0 +#define ESF_GZ_EV_TXCMPL_NUM_DESC_WIDTH 16 +#define ESE_GZ_SF_TX_CMPLT_STRUCT_SIZE 64 + +/* sf_tx_desc2cmpt_dsc_fmt */ +#define ESF_GZ_D2C_TGT_VI_ID_LBN 108 +#define ESF_GZ_D2C_TGT_VI_ID_WIDTH 16 +#define ESF_GZ_D2C_CMPT2_LBN 107 +#define ESF_GZ_D2C_CMPT2_WIDTH 1 +#define ESF_GZ_D2C_ABS_VI_ID_LBN 106 +#define ESF_GZ_D2C_ABS_VI_ID_WIDTH 1 +#define ESF_GZ_D2C_ORDERED_LBN 105 +#define ESF_GZ_D2C_ORDERED_WIDTH 1 +#define ESF_GZ_D2C_SKIP_N_LBN 97 +#define ESF_GZ_D2C_SKIP_N_WIDTH 8 +#define ESF_GZ_D2C_RSVD_LBN 64 +#define ESF_GZ_D2C_RSVD_WIDTH 33 +#define ESF_GZ_D2C_COMPLETION_LBN 0 +#define ESF_GZ_D2C_COMPLETION_WIDTH 64 +#define ESE_GZ_SF_TX_DESC2CMPT_DSC_FMT_STRUCT_SIZE 124 + +/* sf_tx_mem2mem_dsc_fmt */ +#define ESF_GZ_M2M_ADDR_SPC_EN_LBN 123 +#define ESF_GZ_M2M_ADDR_SPC_EN_WIDTH 1 +#define ESF_GZ_M2M_TRANSLATE_ADDR_LBN 122 +#define ESF_GZ_M2M_TRANSLATE_ADDR_WIDTH 1 +#define ESF_GZ_M2M_RSVD_LBN 120 +#define ESF_GZ_M2M_RSVD_WIDTH 2 +#define ESF_GZ_M2M_ADDR_SPC_ID_LBN 84 +#define ESF_GZ_M2M_ADDR_SPC_ID_WIDTH 36 +#define ESF_GZ_M2M_LEN_MINUS_1_LBN 64 +#define ESF_GZ_M2M_LEN_MINUS_1_WIDTH 20 +#define ESF_GZ_M2M_ADDR_LBN 0 +#define ESF_GZ_M2M_ADDR_WIDTH 64 +#define ESE_GZ_SF_TX_MEM2MEM_DSC_FMT_STRUCT_SIZE 124 + +/* sf_tx_ovr_dsc_fmt */ +#define ESF_GZ_TX_PREFIX_MARK_EN_LBN 123 +#define ESF_GZ_TX_PREFIX_MARK_EN_WIDTH 1 +#define ESF_GZ_TX_PREFIX_INGRESS_MPORT_EN_LBN 122 +#define ESF_GZ_TX_PREFIX_INGRESS_MPORT_EN_WIDTH 1 +#define ESF_GZ_TX_PREFIX_INLINE_CAPSULE_META_LBN 121 +#define ESF_GZ_TX_PREFIX_INLINE_CAPSULE_META_WIDTH 1 +#define ESF_GZ_TX_PREFIX_EGRESS_MPORT_EN_LBN 120 +#define ESF_GZ_TX_PREFIX_EGRESS_MPORT_EN_WIDTH 1 +#define ESF_GZ_TX_PREFIX_RSRVD_LBN 64 +#define ESF_GZ_TX_PREFIX_RSRVD_WIDTH 56 +#define ESF_GZ_TX_PREFIX_EGRESS_MPORT_LBN 48 +#define ESF_GZ_TX_PREFIX_EGRESS_MPORT_WIDTH 16 +#define ESF_GZ_TX_PREFIX_INGRESS_MPORT_LBN 32 +#define ESF_GZ_TX_PREFIX_INGRESS_MPORT_WIDTH 16 +#define ESF_GZ_TX_PREFIX_MARK_LBN 0 +#define ESF_GZ_TX_PREFIX_MARK_WIDTH 32 +#define ESE_GZ_SF_TX_OVR_DSC_FMT_STRUCT_SIZE 124 + +/* sf_tx_seg_dsc_fmt */ +#define ESF_GZ_TX_SEG_ADDR_SPC_EN_LBN 123 +#define ESF_GZ_TX_SEG_ADDR_SPC_EN_WIDTH 1 +#define ESF_GZ_TX_SEG_TRANSLATE_ADDR_LBN 122 +#define ESF_GZ_TX_SEG_TRANSLATE_ADDR_WIDTH 
1 +#define ESF_GZ_TX_SEG_RSVD2_LBN 120 +#define ESF_GZ_TX_SEG_RSVD2_WIDTH 2 +#define ESF_GZ_TX_SEG_ADDR_SPC_ID_LBN 84 +#define ESF_GZ_TX_SEG_ADDR_SPC_ID_WIDTH 36 +#define ESF_GZ_TX_SEG_RSVD_LBN 80 +#define ESF_GZ_TX_SEG_RSVD_WIDTH 4 +#define ESF_GZ_TX_SEG_LEN_LBN 64 +#define ESF_GZ_TX_SEG_LEN_WIDTH 16 +#define ESF_GZ_TX_SEG_ADDR_LBN 0 +#define ESF_GZ_TX_SEG_ADDR_WIDTH 64 +#define ESE_GZ_SF_TX_SEG_DSC_FMT_STRUCT_SIZE 124 + +/* sf_tx_std_dsc_fmt */ +#define ESF_GZ_TX_SEND_VLAN_INSERT_TCI_LBN 108 +#define ESF_GZ_TX_SEND_VLAN_INSERT_TCI_WIDTH 16 +#define ESF_GZ_TX_SEND_VLAN_INSERT_EN_LBN 107 +#define ESF_GZ_TX_SEND_VLAN_INSERT_EN_WIDTH 1 +#define ESF_GZ_TX_SEND_TSTAMP_REQ_LBN 106 +#define ESF_GZ_TX_SEND_TSTAMP_REQ_WIDTH 1 +#define ESF_GZ_TX_SEND_CSO_OUTER_L4_LBN 105 +#define ESF_GZ_TX_SEND_CSO_OUTER_L4_WIDTH 1 +#define ESF_GZ_TX_SEND_CSO_OUTER_L3_LBN 104 +#define ESF_GZ_TX_SEND_CSO_OUTER_L3_WIDTH 1 +#define ESF_GZ_TX_SEND_CSO_INNER_L3_LBN 101 +#define ESF_GZ_TX_SEND_CSO_INNER_L3_WIDTH 3 +#define ESF_GZ_TX_SEND_RSVD_LBN 99 +#define ESF_GZ_TX_SEND_RSVD_WIDTH 2 +#define ESF_GZ_TX_SEND_CSO_PARTIAL_EN_LBN 97 +#define ESF_GZ_TX_SEND_CSO_PARTIAL_EN_WIDTH 2 +#define ESF_GZ_TX_SEND_CSO_PARTIAL_CSUM_W_LBN 92 +#define ESF_GZ_TX_SEND_CSO_PARTIAL_CSUM_W_WIDTH 5 +#define ESF_GZ_TX_SEND_CSO_PARTIAL_START_W_LBN 83 +#define ESF_GZ_TX_SEND_CSO_PARTIAL_START_W_WIDTH 9 +#define ESF_GZ_TX_SEND_NUM_SEGS_LBN 78 +#define ESF_GZ_TX_SEND_NUM_SEGS_WIDTH 5 +#define ESF_GZ_TX_SEND_LEN_LBN 64 +#define ESF_GZ_TX_SEND_LEN_WIDTH 14 +#define ESF_GZ_TX_SEND_ADDR_LBN 0 +#define ESF_GZ_TX_SEND_ADDR_WIDTH 64 +#define ESE_GZ_SF_TX_STD_DSC_FMT_STRUCT_SIZE 124 + +/* sf_tx_tso_dsc_fmt */ +#define ESF_GZ_TX_TSO_VLAN_INSERT_TCI_LBN 108 +#define ESF_GZ_TX_TSO_VLAN_INSERT_TCI_WIDTH 16 +#define ESF_GZ_TX_TSO_VLAN_INSERT_EN_LBN 107 +#define ESF_GZ_TX_TSO_VLAN_INSERT_EN_WIDTH 1 +#define ESF_GZ_TX_TSO_TSTAMP_REQ_LBN 106 +#define ESF_GZ_TX_TSO_TSTAMP_REQ_WIDTH 1 +#define ESF_GZ_TX_TSO_CSO_OUTER_L4_LBN 105 +#define ESF_GZ_TX_TSO_CSO_OUTER_L4_WIDTH 1 +#define ESF_GZ_TX_TSO_CSO_OUTER_L3_LBN 104 +#define ESF_GZ_TX_TSO_CSO_OUTER_L3_WIDTH 1 +#define ESF_GZ_TX_TSO_CSO_INNER_L3_LBN 101 +#define ESF_GZ_TX_TSO_CSO_INNER_L3_WIDTH 3 +#define ESF_GZ_TX_TSO_RSVD_LBN 94 +#define ESF_GZ_TX_TSO_RSVD_WIDTH 7 +#define ESF_GZ_TX_TSO_CSO_INNER_L4_LBN 93 +#define ESF_GZ_TX_TSO_CSO_INNER_L4_WIDTH 1 +#define ESF_GZ_TX_TSO_INNER_L4_OFF_W_LBN 85 +#define ESF_GZ_TX_TSO_INNER_L4_OFF_W_WIDTH 8 +#define ESF_GZ_TX_TSO_INNER_L3_OFF_W_LBN 77 +#define ESF_GZ_TX_TSO_INNER_L3_OFF_W_WIDTH 8 +#define ESF_GZ_TX_TSO_OUTER_L4_OFF_W_LBN 69 +#define ESF_GZ_TX_TSO_OUTER_L4_OFF_W_WIDTH 8 +#define ESF_GZ_TX_TSO_OUTER_L3_OFF_W_LBN 64 +#define ESF_GZ_TX_TSO_OUTER_L3_OFF_W_WIDTH 5 +#define ESF_GZ_TX_TSO_PAYLOAD_LEN_LBN 42 +#define ESF_GZ_TX_TSO_PAYLOAD_LEN_WIDTH 22 +#define ESF_GZ_TX_TSO_HDR_LEN_W_LBN 34 +#define ESF_GZ_TX_TSO_HDR_LEN_W_WIDTH 8 +#define ESF_GZ_TX_TSO_ED_OUTER_UDP_LEN_LBN 33 +#define ESF_GZ_TX_TSO_ED_OUTER_UDP_LEN_WIDTH 1 +#define ESF_GZ_TX_TSO_ED_INNER_IP_LEN_LBN 32 +#define ESF_GZ_TX_TSO_ED_INNER_IP_LEN_WIDTH 1 +#define ESF_GZ_TX_TSO_ED_OUTER_IP_LEN_LBN 31 +#define ESF_GZ_TX_TSO_ED_OUTER_IP_LEN_WIDTH 1 +#define ESF_GZ_TX_TSO_ED_INNER_IP4_ID_LBN 29 +#define ESF_GZ_TX_TSO_ED_INNER_IP4_ID_WIDTH 2 +#define ESF_GZ_TX_TSO_ED_OUTER_IP4_ID_LBN 27 +#define ESF_GZ_TX_TSO_ED_OUTER_IP4_ID_WIDTH 2 +#define ESF_GZ_TX_TSO_PAYLOAD_NUM_SEGS_LBN 17 +#define ESF_GZ_TX_TSO_PAYLOAD_NUM_SEGS_WIDTH 10 +#define ESF_GZ_TX_TSO_HDR_NUM_SEGS_LBN 14 +#define ESF_GZ_TX_TSO_HDR_NUM_SEGS_WIDTH 3 +#define 
ESF_GZ_TX_TSO_MSS_LBN 0 +#define ESF_GZ_TX_TSO_MSS_WIDTH 14 +#define ESE_GZ_SF_TX_TSO_DSC_FMT_STRUCT_SIZE 124 + + +/* Enum D2VIO_MSG_OP */ +#define ESE_GZ_QUE_JBDNE 3 +#define ESE_GZ_QUE_EVICT 2 +#define ESE_GZ_QUE_EMPTY 1 +#define ESE_GZ_NOP 0 + +/* Enum DESIGN_PARAMS */ +#define ESE_EF100_DP_GZ_RX_MAX_RUNT 17 +#define ESE_EF100_DP_GZ_VI_STRIDES 16 +#define ESE_EF100_DP_GZ_NMMU_PAGE_SIZES 15 +#define ESE_EF100_DP_GZ_EVQ_TIMER_TICK_NANOS 14 +#define ESE_EF100_DP_GZ_MEM2MEM_MAX_LEN 13 +#define ESE_EF100_DP_GZ_COMPAT 12 +#define ESE_EF100_DP_GZ_TSO_MAX_NUM_FRAMES 11 +#define ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_NUM_SEGS 10 +#define ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_LEN 9 +#define ESE_EF100_DP_GZ_TXQ_SIZE_GRANULARITY 8 +#define ESE_EF100_DP_GZ_RXQ_SIZE_GRANULARITY 7 +#define ESE_EF100_DP_GZ_TSO_MAX_HDR_NUM_SEGS 6 +#define ESE_EF100_DP_GZ_TSO_MAX_HDR_LEN 5 +#define ESE_EF100_DP_GZ_RX_L4_CSUM_PROTOCOLS 4 +#define ESE_EF100_DP_GZ_NMMU_GROUP_SIZE 3 +#define ESE_EF100_DP_GZ_EVQ_UNSOL_CREDIT_SEQ_BITS 2 +#define ESE_EF100_DP_GZ_PARTIAL_TSTAMP_SUB_NANO_BITS 1 +#define ESE_EF100_DP_GZ_PAD 0 + +/* Enum DESIGN_PARAM_DEFAULTS */ +#define ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_LEN_DEFAULT 0x3fffff +#define ESE_EF100_DP_GZ_TSO_MAX_NUM_FRAMES_DEFAULT 8192 +#define ESE_EF100_DP_GZ_MEM2MEM_MAX_LEN_DEFAULT 8192 +#define ESE_EF100_DP_GZ_RX_L4_CSUM_PROTOCOLS_DEFAULT 0x1106 +#define ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_NUM_SEGS_DEFAULT 0x3ff +#define ESE_EF100_DP_GZ_RX_MAX_RUNT_DEFAULT 640 +#define ESE_EF100_DP_GZ_EVQ_TIMER_TICK_NANOS_DEFAULT 512 +#define ESE_EF100_DP_GZ_NMMU_PAGE_SIZES_DEFAULT 512 +#define ESE_EF100_DP_GZ_TSO_MAX_HDR_LEN_DEFAULT 192 +#define ESE_EF100_DP_GZ_RXQ_SIZE_GRANULARITY_DEFAULT 64 +#define ESE_EF100_DP_GZ_TXQ_SIZE_GRANULARITY_DEFAULT 64 +#define ESE_EF100_DP_GZ_NMMU_GROUP_SIZE_DEFAULT 32 +#define ESE_EF100_DP_GZ_VI_STRIDES_DEFAULT 16 +#define ESE_EF100_DP_GZ_EVQ_UNSOL_CREDIT_SEQ_BITS_DEFAULT 7 +#define ESE_EF100_DP_GZ_TSO_MAX_HDR_NUM_SEGS_DEFAULT 4 +#define ESE_EF100_DP_GZ_PARTIAL_TSTAMP_SUB_NANO_BITS_DEFAULT 2 +#define ESE_EF100_DP_GZ_COMPAT_DEFAULT 0 + +/* Enum HOST_IF_CONSTANTS */ +#define ESE_GZ_FCW_LEN 0x4C +#define ESE_GZ_RX_PKT_PREFIX_LEN 22 + +/* Enum PCI_CONSTANTS */ +#define ESE_GZ_PCI_BASE_CONFIG_SPACE_SIZE 256 +#define ESE_GZ_PCI_EXPRESS_XCAP_HDR_SIZE 4 + +/* Enum RH_DSC_TYPE */ +#define ESE_GZ_TX_TOMB 0xF +#define ESE_GZ_TX_VIO 0xE +#define ESE_GZ_TX_TSO_OVRRD 0x8 +#define ESE_GZ_TX_D2CMP 0x7 +#define ESE_GZ_TX_DATA 0x6 +#define ESE_GZ_TX_D2M 0x5 +#define ESE_GZ_TX_M2M 0x4 +#define ESE_GZ_TX_SEG 0x3 +#define ESE_GZ_TX_TSO 0x2 +#define ESE_GZ_TX_OVRRD 0x1 +#define ESE_GZ_TX_SEND 0x0 + +/* Enum RH_HCLASS_L2_CLASS */ +#define ESE_GZ_RH_HCLASS_L2_CLASS_E2_0123VLAN 1 +#define ESE_GZ_RH_HCLASS_L2_CLASS_OTHER 0 + +/* Enum RH_HCLASS_L2_STATUS */ +#define ESE_GZ_RH_HCLASS_L2_STATUS_RESERVED 3 +#define ESE_GZ_RH_HCLASS_L2_STATUS_FCS_ERR 2 +#define ESE_GZ_RH_HCLASS_L2_STATUS_LEN_ERR 1 +#define ESE_GZ_RH_HCLASS_L2_STATUS_OK 0 + +/* Enum RH_HCLASS_L3_CLASS */ +#define ESE_GZ_RH_HCLASS_L3_CLASS_OTHER 3 +#define ESE_GZ_RH_HCLASS_L3_CLASS_IP6 2 +#define ESE_GZ_RH_HCLASS_L3_CLASS_IP4BAD 1 +#define ESE_GZ_RH_HCLASS_L3_CLASS_IP4GOOD 0 + +/* Enum RH_HCLASS_L4_CLASS */ +#define ESE_GZ_RH_HCLASS_L4_CLASS_OTHER 3 +#define ESE_GZ_RH_HCLASS_L4_CLASS_FRAG 2 +#define ESE_GZ_RH_HCLASS_L4_CLASS_UDP 1 +#define ESE_GZ_RH_HCLASS_L4_CLASS_TCP 0 + +/* Enum RH_HCLASS_L4_CSUM */ +#define ESE_GZ_RH_HCLASS_L4_CSUM_GOOD 1 +#define ESE_GZ_RH_HCLASS_L4_CSUM_BAD_OR_UNKNOWN 0 + +/* Enum RH_HCLASS_TUNNEL_CLASS */ +#define 
ESE_GZ_RH_HCLASS_TUNNEL_CLASS_RESERVED_7 7 +#define ESE_GZ_RH_HCLASS_TUNNEL_CLASS_RESERVED_6 6 +#define ESE_GZ_RH_HCLASS_TUNNEL_CLASS_RESERVED_5 5 +#define ESE_GZ_RH_HCLASS_TUNNEL_CLASS_RESERVED_4 4 +#define ESE_GZ_RH_HCLASS_TUNNEL_CLASS_GENEVE 3 +#define ESE_GZ_RH_HCLASS_TUNNEL_CLASS_NVGRE 2 +#define ESE_GZ_RH_HCLASS_TUNNEL_CLASS_VXLAN 1 +#define ESE_GZ_RH_HCLASS_TUNNEL_CLASS_NONE 0 + +/* Enum SF_CTL_EVENT_SUBTYPE */ +#define ESE_GZ_EF100_CTL_EV_EVQ_TIMEOUT 0x3 +#define ESE_GZ_EF100_CTL_EV_FLUSH 0x2 +#define ESE_GZ_EF100_CTL_EV_TIME_SYNC 0x1 +#define ESE_GZ_EF100_CTL_EV_UNSOL_OVERFLOW 0x0 + +/* Enum SF_EVENT_TYPE */ +#define ESE_GZ_EF100_EV_DRIVER 0x5 +#define ESE_GZ_EF100_EV_MCDI 0x4 +#define ESE_GZ_EF100_EV_CONTROL 0x3 +#define ESE_GZ_EF100_EV_TX_TIMESTAMP 0x2 +#define ESE_GZ_EF100_EV_TX_COMPLETION 0x1 +#define ESE_GZ_EF100_EV_RX_PKTS 0x0 + +/* Enum SF_EW_EVENT_TYPE */ +#define ESE_GZ_EF100_EWEV_VIRTQ_DESC 0x2 +#define ESE_GZ_EF100_EWEV_TXQ_DESC 0x1 +#define ESE_GZ_EF100_EWEV_64BIT 0x0 + +/* Enum TX_DESC_CSO_PARTIAL_EN */ +#define ESE_GZ_TX_DESC_CSO_PARTIAL_EN_TCP 2 +#define ESE_GZ_TX_DESC_CSO_PARTIAL_EN_UDP 1 +#define ESE_GZ_TX_DESC_CSO_PARTIAL_EN_OFF 0 + +/* Enum TX_DESC_CS_INNER_L3 */ +#define ESE_GZ_TX_DESC_CS_INNER_L3_GENEVE 3 +#define ESE_GZ_TX_DESC_CS_INNER_L3_NVGRE 2 +#define ESE_GZ_TX_DESC_CS_INNER_L3_VXLAN 1 +#define ESE_GZ_TX_DESC_CS_INNER_L3_OFF 0 + +/* Enum TX_DESC_IP4_ID */ +#define ESE_GZ_TX_DESC_IP4_ID_INC_MOD16 2 +#define ESE_GZ_TX_DESC_IP4_ID_INC_MOD15 1 +#define ESE_GZ_TX_DESC_IP4_ID_NO_OP 0 + +/* Enum VIRTIO_NET_HDR_F */ +#define ESE_GZ_NEEDS_CSUM 0x1 + +/* Enum VIRTIO_NET_HDR_GSO */ +#define ESE_GZ_TCPV6 0x4 +#define ESE_GZ_UDP 0x3 +#define ESE_GZ_TCPV4 0x1 +#define ESE_GZ_NONE 0x0 +/**************************************************************************/ + +#define ESF_GZ_EV_DEBUG_EVENT_GEN_FLAGS_LBN 44 +#define ESF_GZ_EV_DEBUG_EVENT_GEN_FLAGS_WIDTH 4 +#define ESF_GZ_EV_DEBUG_SRC_QID_LBN 32 +#define ESF_GZ_EV_DEBUG_SRC_QID_WIDTH 12 +#define ESF_GZ_EV_DEBUG_SEQ_NUM_LBN 16 +#define ESF_GZ_EV_DEBUG_SEQ_NUM_WIDTH 16 + +#endif /* EFX_EF100_REGS_H */ diff --git a/src/drivers/net/ethernet/sfc/ef100_rep.c b/src/drivers/net/ethernet/sfc/ef100_rep.c new file mode 100644 index 0000000..8aaf367 --- /dev/null +++ b/src/drivers/net/ethernet/sfc/ef100_rep.c @@ -0,0 +1,880 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2019 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#include "ef100_rep.h" +#include "ef100_netdev.h" +#include "ef100_nic.h" +#include "mae.h" +#include "tc_bindings.h" +#include "rx_common.h" + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_TC_OFFLOAD) + +#define EFX_EF100_REP_DRIVER "efx_ef100_rep" +#define EFX_EF100_REP_VERSION "0.0.1" + +#define EFX_REP_DEFAULT_PSEUDO_RING_SIZE 64 + +static int efx_ef100_rep_poll(struct napi_struct *napi, int weight); + +static int efx_ef100_rep_init_struct(struct efx_nic *efx, struct efx_rep *efv, + bool remote, unsigned int i) +{ + efv->parent = efx; + BUILD_BUG_ON(MAE_MPORT_SELECTOR_NULL); + efv->idx = i; + efv->remote = remote; + INIT_LIST_HEAD(&efv->list); + efv->dflt.cookie = (remote ? 
0x10000 : 0x100) + i; + efv->dflt.fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL; + INIT_LIST_HEAD(&efv->dflt.acts.list); +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_SKB__LIST) + INIT_LIST_HEAD(&efv->rx_list); +#else + __skb_queue_head_init(&efv->rx_list); +#endif + spin_lock_init(&efv->rx_lock); + efv->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE | + NETIF_MSG_LINK | NETIF_MSG_IFDOWN | + NETIF_MSG_IFUP | NETIF_MSG_RX_ERR | + NETIF_MSG_TX_ERR | NETIF_MSG_HW; + return 0; +} + +static int efx_ef100_rep_open(struct net_device *net_dev) +{ + struct efx_rep *efv = netdev_priv(net_dev); + +#if defined(EFX_USE_KCOMPAT) && defined(EFX_HAVE_OLD_NETIF_NAPI_ADD) + netif_napi_add(net_dev, &efv->napi, efx_ef100_rep_poll, + NAPI_POLL_WEIGHT); +#else + netif_napi_add(net_dev, &efv->napi, efx_ef100_rep_poll); +#endif + napi_enable(&efv->napi); + return 0; +} + +static int efx_ef100_rep_close(struct net_device *net_dev) +{ + struct efx_rep *efv = netdev_priv(net_dev); + + napi_disable(&efv->napi); + netif_napi_del(&efv->napi); + return 0; +} + +static netdev_tx_t efx_ef100_rep_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct efx_rep *efv = netdev_priv(dev); + struct efx_nic *efx = efv->parent; + netdev_tx_t rc; + + /* __ef100_hard_start_xmit() will always return success even in the + * case of TX drops, where it will increment efx's tx_dropped. The + * efv stats really only count attempted TX, not success/failure. + */ + atomic64_inc(&efv->stats.tx_packets); + atomic64_add(skb->len, &efv->stats.tx_bytes); + netif_tx_lock(efx->net_dev); + rc = __ef100_hard_start_xmit(skb, efx, dev, efv); + netif_tx_unlock(efx->net_dev); + return rc; +} + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_NDO_GET_PORT_PARENT_ID) +static int efx_ef100_rep_get_port_parent_id(struct net_device *dev, + struct netdev_phys_item_id *ppid) +{ + struct efx_rep *efv = netdev_priv(dev); + struct efx_nic *efx = efv->parent; + struct ef100_nic_data *nic_data; + netdevice_tracker dev_tracker; + + /* Block removal of the parent network device */ + netdev_hold(efx->net_dev, &dev_tracker, GFP_KERNEL); + if (!netif_device_present(efx->net_dev)) { + netdev_put(efx->net_dev, &dev_tracker); + return -EOPNOTSUPP; + } + + nic_data = efx->nic_data; + /* nic_data->port_id is a u8[] */ + ppid->id_len = sizeof(nic_data->port_id); + memcpy(ppid->id, nic_data->port_id, sizeof(nic_data->port_id)); + netdev_put(efx->net_dev, &dev_tracker); + return 0; +} +#endif + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_NDO_GET_PHYS_PORT_NAME) +static int efx_ef100_rep_get_phys_port_name(struct net_device *dev, + char *buf, size_t len) +{ + struct efx_rep *efv = netdev_priv(dev); + struct efx_nic *efx = efv->parent; + struct ef100_nic_data *nic_data; + netdevice_tracker dev_tracker; + int ret; + + /* Block removal of the parent network device */ + netdev_hold(efx->net_dev, &dev_tracker, GFP_KERNEL); + if (!netif_device_present(efx->net_dev)) { + netdev_put(efx->net_dev, &dev_tracker); + return -EOPNOTSUPP; + } + + if (efv->remote) { + struct mae_mport_desc *mport_desc; + + mport_desc = efx_mae_get_mport(efx, efv->mport); + if (IS_ERR_OR_NULL(mport_desc)) { + netdev_put(efx->net_dev, &dev_tracker); + return PTR_ERR_OR_ZERO(mport_desc) ?: -EOPNOTSUPP; + } + + ret = snprintf(buf, len, "p%uif%upf%u", efx->port_num, + mport_desc->interface_idx, mport_desc->pf_idx); + if (ret > 0 && mport_desc->vf_idx != 0xFFFF) + ret = snprintf(buf + ret, len - ret, + "vf%u", mport_desc->vf_idx); + efx_mae_put_mport(efx, mport_desc); + } else { + 
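+ /* Local VFrep: the name combines port number, PF index and VF index */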
nic_data = efx->nic_data; + if (!nic_data) { + netdev_put(efx->net_dev, &dev_tracker); + return -EOPNOTSUPP; + } + ret = snprintf(buf, len, "p%upf%uvf%u", efx->port_num, + nic_data->pf_index, efv->idx); + } + netdev_put(efx->net_dev, &dev_tracker); + if (ret >= len) + return -EOPNOTSUPP; + + return 0; +} +#endif + +static int efx_ef100_rep_set_mac_address(struct net_device *net_dev, void *data) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_CLIENT_MAC_ADDRESSES_IN_LEN(1)); + struct efx_rep *efv = netdev_priv(net_dev); + struct efx_nic *efx = efv->parent; + struct sockaddr *addr = data; + const u8 *new_addr = addr->sa_data; + int rc; + + if (efv->clid == CLIENT_HANDLE_NULL) { + netif_info(efx, drv, net_dev, "Unable to set representee MAC address (client ID is null)\n"); + } else { + BUILD_BUG_ON(MC_CMD_SET_CLIENT_MAC_ADDRESSES_OUT_LEN); + MCDI_SET_DWORD(inbuf, SET_CLIENT_MAC_ADDRESSES_IN_CLIENT_HANDLE, + efv->clid); + ether_addr_copy(MCDI_PTR(inbuf, SET_CLIENT_MAC_ADDRESSES_IN_MAC_ADDRS), + new_addr); + rc = efx_mcdi_rpc(efx, MC_CMD_SET_CLIENT_MAC_ADDRESSES, inbuf, + sizeof(inbuf), NULL, 0, NULL); + if (rc) + return rc; + } + + eth_hw_addr_set(net_dev, new_addr); + return 0; +} + +static int efx_ef100_rep_setup_tc(struct net_device *net_dev, + enum tc_setup_type type, void *type_data) +{ + struct efx_rep *efv = netdev_priv(net_dev); + struct efx_nic *efx = efv->parent; + + if (type == TC_SETUP_CLSFLOWER) + return efx_tc_flower(efx, net_dev, type_data, efv); + if (type == TC_SETUP_BLOCK) + return efx_tc_setup_block(net_dev, efx, type_data, efv); + + return -EOPNOTSUPP; +} + +static void efx_ef100_rep_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) +{ + struct efx_rep *efv = netdev_priv(dev); + + stats->rx_packets = atomic64_read(&efv->stats.rx_packets); + stats->tx_packets = atomic64_read(&efv->stats.tx_packets); + stats->rx_bytes = atomic64_read(&efv->stats.rx_bytes); + stats->tx_bytes = atomic64_read(&efv->stats.tx_bytes); + stats->rx_dropped = atomic64_read(&efv->stats.rx_dropped); + stats->tx_errors = atomic64_read(&efv->stats.tx_errors); +} + +const struct net_device_ops efx_ef100_rep_netdev_ops = { + .ndo_open = efx_ef100_rep_open, + .ndo_stop = efx_ef100_rep_close, + .ndo_start_xmit = efx_ef100_rep_xmit, +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_NDO_GET_PORT_PARENT_ID) + .ndo_get_port_parent_id = efx_ef100_rep_get_port_parent_id, +#endif +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_NDO_GET_PHYS_PORT_NAME) + .ndo_get_phys_port_name = efx_ef100_rep_get_phys_port_name, +#endif + .ndo_set_mac_address = efx_ef100_rep_set_mac_address, + .ndo_get_stats64 = efx_ef100_rep_get_stats64, + .ndo_setup_tc = efx_ef100_rep_setup_tc, +}; + +static void efx_ef100_rep_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *drvinfo) +{ + strscpy(drvinfo->driver, EFX_EF100_REP_DRIVER, sizeof(drvinfo->driver)); + strscpy(drvinfo->version, EFX_EF100_REP_VERSION, sizeof(drvinfo->version)); +} + +static u32 efx_ef100_rep_ethtool_get_msglevel(struct net_device *net_dev) +{ + struct efx_rep *efv = netdev_priv(net_dev); + return efv->msg_enable; +} + +static void efx_ef100_rep_ethtool_set_msglevel(struct net_device *net_dev, + u32 msg_enable) +{ + struct efx_rep *efv = netdev_priv(net_dev); + efv->msg_enable = msg_enable; +} + +static void efx_ef100_rep_ethtool_get_ringparam(struct net_device *net_dev, +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_ETHTOOL_GET_RINGPARAM_EXTACK) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kring, + struct 
netlink_ext_ack *ext_ack) +#else + struct ethtool_ringparam *ring) +#endif +{ + struct efx_rep *efv = netdev_priv(net_dev); + + ring->rx_max_pending = U32_MAX; + ring->rx_pending = efv->rx_pring_size; +} + +static int efx_ef100_rep_ethtool_set_ringparam(struct net_device *net_dev, +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_ETHTOOL_SET_RINGPARAM_EXTACK) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kring, + struct netlink_ext_ack *ext_ack) +#else + struct ethtool_ringparam *ring) +#endif +{ + struct efx_rep *efv = netdev_priv(net_dev); + + if (ring->rx_mini_pending || ring->rx_jumbo_pending || ring->tx_pending) + return -EINVAL; + + efv->rx_pring_size = ring->rx_pending; + return 0; +} + +static const struct ethtool_ops efx_ef100_rep_ethtool_ops = { + .get_drvinfo = efx_ef100_rep_get_drvinfo, + .get_msglevel = efx_ef100_rep_ethtool_get_msglevel, + .set_msglevel = efx_ef100_rep_ethtool_set_msglevel, + .get_ringparam = efx_ef100_rep_ethtool_get_ringparam, + .set_ringparam = efx_ef100_rep_ethtool_set_ringparam, +}; + +static struct efx_rep *efx_ef100_rep_create_netdev(struct efx_nic *efx, + unsigned int i, bool remote) +{ + struct ef100_nic_data *nic_data = efx->nic_data; + struct net_device *net_dev; + struct efx_rep *efv; + int rc; + + net_dev = alloc_etherdev_mq(sizeof(*efv), 1); + if (!net_dev) + return ERR_PTR(-ENOMEM); + + efv = netdev_priv(net_dev); + rc = efx_ef100_rep_init_struct(efx, efv, remote, i); + if (rc) + goto fail1; + efv->net_dev = net_dev; + rtnl_lock(); + if (remote) + list_add_tail(&efv->list, &nic_data->rem_reps); + if (netif_running(efx->net_dev) && efx->state == STATE_NET_UP) { + netif_device_attach(net_dev); + netif_carrier_on(net_dev); + } else { + netif_tx_stop_all_queues(net_dev); + netif_carrier_off(net_dev); + } + rtnl_unlock(); + + net_dev->netdev_ops = &efx_ef100_rep_netdev_ops; + net_dev->ethtool_ops = &efx_ef100_rep_ethtool_ops; +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_NETDEV_MTU_LIMITS) + net_dev->min_mtu = EFX_MIN_MTU; + net_dev->max_mtu = EFX_100_MAX_MTU; +#elif defined(EFX_HAVE_NETDEV_EXT_MTU_LIMITS) + net_dev->extended->min_mtu = EFX_MIN_MTU; + net_dev->extended->max_mtu = EFX_100_MAX_MTU; +#endif + net_dev->features |= NETIF_F_HW_TC | NETIF_F_LLTX; + net_dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LLTX; + return efv; +fail1: + free_netdev(net_dev); + return ERR_PTR(rc); +} + +static void efx_ef100_rep_destroy_netdev(struct efx_rep *efv) +{ + free_netdev(efv->net_dev); +} + +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_FLOW_INDR_DEV_REGISTER) && !defined(EFX_HAVE_FLOW_INDR_BLOCK_CB_REGISTER) +static int efx_ef100_rep_tc_egdev_cb(enum tc_setup_type type, void *type_data, + void *cb_priv) +{ + struct efx_rep *efv = cb_priv; + + switch (type) { + case TC_SETUP_CLSFLOWER: + return efx_tc_flower(efv->parent, NULL, type_data, efv); + default: + return -EOPNOTSUPP; + } +} +#endif + +static int efx_ef100_configure_rep(struct efx_rep *efv, + struct mae_mport_desc *mport_desc) +{ + struct net_device *net_dev = efv->net_dev; + struct efx_nic *efx = efv->parent; + efx_qword_t pciefn; + u32 selector; + int rc; + + efv->rx_pring_size = EFX_REP_DEFAULT_PSEUDO_RING_SIZE; + if (mport_desc) { + /* Remote rep; use the passed m-port */ + efv->mport = mport_desc->mport_id; + } else { + /* Local VFrep; determine m-port from the VF index */ + /* Construct mport selector for corresponding VF */ + efx_mae_mport_vf(efx, efv->idx, &selector); + /* Look up actual mport ID */ + rc = efx_mae_lookup_mport(efx, selector, &efv->mport); + if 
(rc) + return rc; + } + pci_dbg(efx->pci_dev, "%s mport ID %#x\n", + mport_desc ? "Remote representee" : "Representee", + efv->mport); + /* mport label should fit in 16 bits */ + WARN_ON(efv->mport >> 16); + + /* Construct PCIE_FUNCTION structure for the representee */ + if (mport_desc) + EFX_POPULATE_QWORD_3(pciefn, + PCIE_FUNCTION_PF, mport_desc->pf_idx, + PCIE_FUNCTION_VF, mport_desc->vf_idx, + PCIE_FUNCTION_INTF, mport_desc->interface_idx); + else + EFX_POPULATE_QWORD_3(pciefn, + PCIE_FUNCTION_PF, PCIE_FUNCTION_PF_NULL, + PCIE_FUNCTION_VF, efv->idx, + PCIE_FUNCTION_INTF, PCIE_INTERFACE_CALLER); + /* look up representee's client ID */ + rc = efx_ef100_lookup_client_id(efx, pciefn, &efv->clid); + if (rc) { + /* We won't be able to set the representee's MAC address */ + efv->clid = CLIENT_HANDLE_NULL; + pci_dbg(efx->pci_dev, "Failed to get %s client ID, rc %d\n", + mport_desc ? "remote representee" : "representee", + rc); + } else { + pci_dbg(efx->pci_dev, "%s client ID %#x\n", + mport_desc ? "Remote representee" : "Representee", + efv->clid); + + /* Get the assigned MAC address */ + (void)ef100_get_mac_address(efx, net_dev->perm_addr, efv->clid, + true); + eth_hw_addr_set(net_dev, net_dev->perm_addr); + } + + mutex_lock(&efx->tc->mutex); + rc = efx_tc_configure_default_rule_rep(efv); +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_FLOW_INDR_DEV_REGISTER) && !defined(EFX_HAVE_FLOW_INDR_BLOCK_CB_REGISTER) + if (!rc) { + rc = tc_setup_cb_egdev_register(net_dev, + efx_ef100_rep_tc_egdev_cb, efv); + if (rc) + efx_tc_deconfigure_default_rule(efx, &efv->dflt); + } +#endif + mutex_unlock(&efx->tc->mutex); + return rc; +} + +static void efx_ef100_deconfigure_rep(struct efx_rep *efv) +{ + struct efx_nic *efx = efv->parent; + + mutex_lock(&efx->tc->mutex); +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_FLOW_INDR_DEV_REGISTER) && !defined(EFX_HAVE_FLOW_INDR_BLOCK_CB_REGISTER) + tc_setup_cb_egdev_unregister(efv->net_dev, efx_ef100_rep_tc_egdev_cb, + efv); +#endif + efx_tc_deconfigure_default_rule(efx, &efv->dflt); + mutex_unlock(&efx->tc->mutex); +} + +int efx_ef100_vfrep_create(struct efx_nic *efx, unsigned int i) +{ + struct ef100_nic_data *nic_data = efx->nic_data; + struct efx_rep *efv; + int rc; + + efv = efx_ef100_rep_create_netdev(efx, i, false); + if (IS_ERR(efv)) { + rc = PTR_ERR(efv); + pci_err(efx->pci_dev, + "Failed to create representor for VF %d, rc %d\n", i, + rc); + return rc; + } + rc = efx_ef100_configure_rep(efv, NULL); + if (rc) { + pci_err(efx->pci_dev, + "Failed to configure representor for VF %d, rc %d\n", + i, rc); + goto fail1; + } + rc = register_netdev(efv->net_dev); + if (rc) { + pci_err(efx->pci_dev, + "Failed to register representor for VF %d, rc %d\n", + i, rc); + goto fail2; + } + nic_data->vf_rep[i] = efv->net_dev; + pci_dbg(efx->pci_dev, "Representor for VF %d is %s\n", i, + efv->net_dev->name); + return 0; +fail2: + efx_ef100_deconfigure_rep(efv); +fail1: + nic_data->vf_rep[i] = NULL; + efx_ef100_rep_destroy_netdev(efv); + return rc; +} + +static int efx_ef100_remote_rep_create(struct efx_nic *efx, + struct mae_mport_desc *mport_desc) +{ + struct efx_rep *efv; + int rc; + + efv = efx_ef100_rep_create_netdev(efx, mport_desc->mport_id, true); + if (IS_ERR(efv)) { + rc = PTR_ERR(efv); + pci_err(efx->pci_dev, + "Failed to create representor for IF %u PF %u VF %u, rc %d\n", + mport_desc->interface_idx, mport_desc->pf_idx, + mport_desc->vf_idx, rc); + return rc; + } + rc = efx_ef100_configure_rep(efv, mport_desc); + if (rc) { + pci_err(efx->pci_dev, + "Failed to 
configure representor for IF %u PF %u VF %u, rc %d\n", + mport_desc->interface_idx, mport_desc->pf_idx, + mport_desc->vf_idx, rc); + goto fail1; + } + rc = register_netdev(efv->net_dev); + if (rc) { + pci_err(efx->pci_dev, + "Failed to register representor for IF %u PF %u VF %u, rc %d\n", + mport_desc->interface_idx, mport_desc->pf_idx, + mport_desc->vf_idx, rc); + goto fail2; + } + mport_desc->efv = efv; + pci_dbg(efx->pci_dev, "Representor for IF %u PF %u VF %u is %s\n", + mport_desc->interface_idx, mport_desc->pf_idx, + mport_desc->vf_idx, efv->net_dev->name); + return 0; +fail2: + efx_ef100_deconfigure_rep(efv); +fail1: + efx_ef100_rep_destroy_netdev(efv); + return rc; +} + +void efx_ef100_vfrep_destroy(struct efx_nic *efx, unsigned int i) +{ + struct ef100_nic_data *nic_data = efx->nic_data; + struct net_device *rep_dev; + struct efx_rep *efv; + + rep_dev = nic_data->vf_rep[i]; + if (!rep_dev) + return; + netif_dbg(efx, drv, rep_dev, "Removing VF representor\n"); + efv = netdev_priv(rep_dev); + unregister_netdev(rep_dev); + efx_ef100_deconfigure_rep(efv); + nic_data->vf_rep[i] = NULL; + efx_ef100_rep_destroy_netdev(efv); +} + +static void efx_ef100_remote_rep_destroy(struct efx_nic *efx, + struct efx_rep *efv, + struct mae_mport_desc *mport) +{ + netif_dbg(efx, drv, efv->net_dev, "Removing remote representor\n"); + if (mport) + mport->efv = NULL; + list_del(&efv->list); + unregister_netdev(efv->net_dev); + efx_ef100_deconfigure_rep(efv); + efx_ef100_rep_destroy_netdev(efv); +} + +static int efx_ef100_rep_poll(struct napi_struct *napi, int weight) +{ + struct efx_rep *efv = container_of(napi, struct efx_rep, napi); + unsigned int read_index; +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_SKB__LIST) + struct list_head head; +#else + struct sk_buff_head head; +#endif + struct sk_buff *skb; + bool need_resched; + int spent = 0; + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_SKB__LIST) + INIT_LIST_HEAD(&head); +#else + __skb_queue_head_init(&head); +#endif + /* Grab up to 'weight' pending SKBs */ + spin_lock_bh(&efv->rx_lock); + read_index = efv->write_index; +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_SKB__LIST) + while (spent < weight && !list_empty(&efv->rx_list)) { + skb = list_first_entry(&efv->rx_list, struct sk_buff, list); + list_del(&skb->list); + list_add_tail(&skb->list, &head); +#else + while (spent < weight) { + skb = __skb_dequeue(&efv->rx_list); + if (!skb) + break; + __skb_queue_tail(&head, skb); +#endif + spent++; + } + spin_unlock_bh(&efv->rx_lock); + /* Receive them */ + netif_receive_skb_list(&head); + if (spent < weight) + if (napi_complete_done(napi, spent)) { + spin_lock_bh(&efv->rx_lock); + efv->read_index = read_index; + /* If write_index advanced while we were doing the + * RX, then storing our read_index won't re-prime the + * fake-interrupt. In that case, we need to schedule + * NAPI again to consume the additional packet(s). + */ + need_resched = efv->write_index != read_index; + spin_unlock_bh(&efv->rx_lock); + if (need_resched) + napi_schedule(&efv->napi); + } + return spent; +} + +void efx_ef100_rep_rx_packet(struct efx_rep *efv, struct efx_rx_buffer *rx_buf) +{ + u8 *eh = efx_rx_buf_va(rx_buf); + struct sk_buff *skb; + bool primed; + + /* Don't allow too many queued SKBs to build up, as they consume + * GFP_ATOMIC memory. If we overrun, just start dropping. 
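+ * The cap is efv->rx_pring_size, which defaults to + * EFX_REP_DEFAULT_PSEUDO_RING_SIZE and can be tuned at runtime via + * efx_ef100_rep_ethtool_set_ringparam() (ethtool -G).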
+ */ + if (efv->write_index - READ_ONCE(efv->read_index) > efv->rx_pring_size) { + atomic64_inc(&efv->stats.rx_dropped); + if (net_ratelimit()) + netif_dbg(efv->parent, rx_err, efv->net_dev, + "nodesc-dropped packet of length %u\n", + rx_buf->len); + return; + } + + skb = netdev_alloc_skb(efv->net_dev, rx_buf->len); + if (!skb) { + atomic64_inc(&efv->stats.rx_dropped); + if (net_ratelimit()) + netif_dbg(efv->parent, rx_err, efv->net_dev, + "noskb-dropped packet of length %u\n", + rx_buf->len); + return; + } + memcpy(skb->data, eh, rx_buf->len); + __skb_put(skb, rx_buf->len); + + skb_record_rx_queue(skb, 0); /* rep is single-queue */ + + /* Move past the ethernet header */ + skb->protocol = eth_type_trans(skb, efv->net_dev); + + skb_checksum_none_assert(skb); + + atomic64_inc(&efv->stats.rx_packets); + atomic64_add(rx_buf->len, &efv->stats.rx_bytes); + + /* Add it to the rx list */ + spin_lock_bh(&efv->rx_lock); + primed = efv->read_index == efv->write_index; +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_SKB__LIST) + list_add_tail(&skb->list, &efv->rx_list); +#else + __skb_queue_tail(&efv->rx_list, skb); +#endif + efv->write_index++; + spin_unlock_bh(&efv->rx_lock); + /* Trigger rx work */ + if (primed) + napi_schedule(&efv->napi); +} + +struct net_device *efx_ef100_find_rep_by_mport(struct efx_nic *efx, u16 mport) +{ + struct ef100_nic_data *nic_data = efx->nic_data; + struct mae_mport_desc *mport_desc; + struct efx_rep *efv = NULL; +#if defined(CONFIG_SFC_SRIOV) + unsigned int i; +#endif + + if (likely(efx_have_mport_journal_event(efx))) { + mport_desc = efx_mae_get_mport(efx, mport); + if (!IS_ERR_OR_NULL(mport_desc)) { + efv = mport_desc->efv; + /* Caller should have taken the RCU read lock to ensure + * that efv doesn't go away while we're using it. + */ + efx_mae_put_mport(efx, mport_desc); + if (efv) + return efv->net_dev; + } + } + /* Backward compatibility with old firmwares */ + list_for_each_entry(efv, &nic_data->rem_reps, list) + if (efv->mport == mport) + return efv->net_dev; +#if defined(CONFIG_SFC_SRIOV) + if (nic_data->vf_rep) + for (i = 0; i < nic_data->vf_rep_count; i++) { + if (!nic_data->vf_rep[i]) + continue; + efv = netdev_priv(nic_data->vf_rep[i]); + if (efv->mport == mport) + return nic_data->vf_rep[i]; + } +#endif + return NULL; +} + +static bool ef100_mport_needs_rep(struct efx_nic *efx, + struct mae_mport_desc *mport_desc) +{ + bool vnic, pcie_func, local_intf, local_pf, self, vf; + struct ef100_nic_data *nic_data = efx->nic_data; + + vnic = mport_desc->mport_type == MAE_MPORT_DESC_MPORT_TYPE_VNIC; + self = vnic && nic_data->have_own_mport && + mport_desc->mport_id == nic_data->own_mport; + pcie_func = vnic && + mport_desc->vnic_client_type == MAE_MPORT_DESC_VNIC_CLIENT_TYPE_FUNCTION; + local_intf = nic_data->have_local_intf && pcie_func && + mport_desc->interface_idx == nic_data->local_mae_intf; + local_pf = pcie_func && mport_desc->pf_idx == nic_data->pf_index; + WARN_ON(self && !local_pf); + vf = pcie_func && mport_desc->vf_idx != MAE_MPORT_DESC_VF_IDX_NULL; + + /* All VNICs, even VNIC_PLUGIN require rep. 
+ * But no reps for ourself, or for our VFs (if we can identify them) + * since those get local VFreps at sriov_enable time + */ + return vnic && !self && !(local_intf && local_pf && vf); +} + +void efx_ef100_remove_mport(struct efx_nic *efx, struct mae_mport_desc *mport) +{ + struct efx_rep *efv = mport->efv; + + if (!efv) + return; + + efx_ef100_remote_rep_destroy(efx, efv, mport); +} + +int efx_ef100_add_mport(struct efx_nic *efx, struct mae_mport_desc *mport) +{ + struct ef100_nic_data *nic_data = efx->nic_data; + int rc; + + if (nic_data->have_own_mport && + mport->mport_id == nic_data->own_mport) { + WARN_ON(mport->mport_type != MAE_MPORT_DESC_MPORT_TYPE_VNIC); + WARN_ON(mport->vnic_client_type != + MAE_MPORT_DESC_VNIC_CLIENT_TYPE_FUNCTION); + nic_data->local_mae_intf = mport->interface_idx; + nic_data->have_local_intf = true; + pci_dbg(efx->pci_dev, "MAE interface_idx is %u\n", + nic_data->local_mae_intf); + } + + if (!ef100_mport_needs_rep(efx, mport)) + return 0; + + rc = efx_ef100_remote_rep_create(efx, mport); + if (rc) + pci_warn(efx->pci_dev, "Failed to create a remote_rep, rc %d\n", + rc); + return rc; +} + +void efx_ef100_init_reps(struct efx_nic *efx) +{ + struct ef100_nic_data *nic_data = efx->nic_data; + int rc; + + if (!efx->mae) + return; + + nic_data->have_local_intf = false; + /* Enumeration failure is not fatal, but means we cannot + * create PF representors for other interfaces. + */ + rc = efx_mae_enumerate_mports(efx); + if (rc) + pci_warn(efx->pci_dev, + "Could not enumerate mports (rc=%d), are we admin?", + rc); + + if (!efx_have_mport_journal_event(efx) && !nic_data->have_local_intf) + pci_warn(efx->pci_dev, + "Own m-port desc not found; using remote_reps for local VFs\n"); +} + +void efx_ef100_fini_vfreps(struct efx_nic *efx) +{ + struct ef100_nic_data *nic_data = efx->nic_data; + unsigned int vf_rep_count; + int i; + + if (!nic_data->grp_mae) + return; + + /* We take the lock as a barrier to ensure no-one holding the + * lock still sees nonzero rep_count when we start destroying + * representors. + */ + spin_lock_bh(&nic_data->vf_reps_lock); + vf_rep_count = nic_data->vf_rep_count; + nic_data->vf_rep_count = 0; + spin_unlock_bh(&nic_data->vf_reps_lock); + + for (i = 0; i < vf_rep_count; i++) + efx_ef100_vfrep_destroy(efx, i); + + kfree(nic_data->vf_rep); + nic_data->vf_rep = NULL; +} + +void efx_ef100_fini_reps(struct efx_nic *efx) +{ + struct ef100_nic_data *nic_data = efx->nic_data; + struct efx_rep *efv, *next; + + efx_ef100_fini_vfreps(efx); + list_for_each_entry_safe(efv, next, &nic_data->rem_reps, list) + /* mae should already have been fini()ed, there are no mports */ + efx_ef100_remote_rep_destroy(efx, efv, NULL); +} + +#else /* EFX_TC_OFFLOAD */ + +int efx_ef100_vfrep_create(struct efx_nic *efx, unsigned int i) +{ + /* Without all the various bits we need to make TC flower offload work, + * there's not much use in VFs or their representors, even if we + * technically could create them - they'd never be connected to the + * outside world. 
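+ * (This stub is built when EFX_USE_KCOMPAT is set but EFX_TC_OFFLOAD + * is not, i.e. the running kernel lacks the TC offload support the + * full representor implementation depends on.)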
+ */ + if (net_ratelimit()) + pci_info(efx->pci_dev, + "VF representors not supported on this kernel version\n"); + return -EOPNOTSUPP; +} + +void efx_ef100_vfrep_destroy(struct efx_nic *efx, unsigned int i) +{ +} + +const struct net_device_ops efx_ef100_rep_netdev_ops = {}; + +void efx_ef100_rep_rx_packet(struct efx_rep *efv, struct efx_rx_buffer *rx_buf) +{ + WARN_ON_ONCE(1); +} + +struct net_device *efx_ef100_find_rep_by_mport(struct efx_nic *efx, u16 mport) +{ + return NULL; +} + +void efx_ef100_remove_mport(struct efx_nic *efx, struct mae_mport_desc *mport) +{ +} + +int efx_ef100_add_mport(struct efx_nic *efx, struct mae_mport_desc *mport) +{ + return 0; +} + +void efx_ef100_init_reps(struct efx_nic *efx) +{ + pci_info(efx->pci_dev, + "Representors not supported on this kernel version\n"); +} + +void efx_ef100_fini_vfreps(struct efx_nic *efx) +{ +} + +void efx_ef100_fini_reps(struct efx_nic *efx) +{ +} +#endif /* EFX_TC_OFFLOAD */ diff --git a/src/drivers/net/ethernet/sfc/ef100_rep.h b/src/drivers/net/ethernet/sfc/ef100_rep.h new file mode 100644 index 0000000..c56ec0c --- /dev/null +++ b/src/drivers/net/ethernet/sfc/ef100_rep.h @@ -0,0 +1,63 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2019 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +/* Handling for ef100 representor netdevs */ +#ifndef EF100_REP_H +#define EF100_REP_H + +#include "net_driver.h" +#include "mae.h" +#include "tc.h" + +void efx_ef100_init_reps(struct efx_nic *efx); +void efx_ef100_fini_reps(struct efx_nic *efx); +void efx_ef100_remove_mport(struct efx_nic *efx, struct mae_mport_desc *mport); +int efx_ef100_add_mport(struct efx_nic *efx, struct mae_mport_desc *mport); + +void efx_ef100_fini_vfreps(struct efx_nic *efx); +int efx_ef100_vfrep_create(struct efx_nic *efx, unsigned int i); +void efx_ef100_vfrep_destroy(struct efx_nic *efx, unsigned int i); + +/* Returns the representor netdevice corresponding to a VF m-port, or NULL + * @mport is an m-port label, *not* an m-port ID! 
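+ * (Labels are the 16-bit values carried in the RX prefix + * INGRESS_MPORT field, hence the u16 parameter here.)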
+ */ +struct net_device *efx_ef100_find_rep_by_mport(struct efx_nic *efx, u16 mport); + +struct efx_rep_sw_stats { + atomic64_t rx_packets, tx_packets; + atomic64_t rx_bytes, tx_bytes; + atomic64_t rx_dropped, tx_errors; +}; + +/* Private data for an Efx representor */ +struct efx_rep { + struct efx_nic *parent; + struct net_device *net_dev; + bool remote; /* flag to indicate remote rep */ + struct list_head list; /* entry on nic_data->rem_reps */ + u32 msg_enable; + u32 mport; /* m-port ID of corresponding PF/VF */ + u32 clid; /* client ID of corresponding PF/VF */ + unsigned int idx; /* rep index */ + unsigned int write_index, read_index; + unsigned int rx_pring_size; /* max length of RX list */ + struct efx_tc_flow_rule dflt; /* default-rule for switching */ +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_SKB__LIST) + struct list_head rx_list; +#else + struct sk_buff_head rx_list; +#endif + spinlock_t rx_lock; + struct napi_struct napi; + struct efx_rep_sw_stats stats; +}; + +void efx_ef100_rep_rx_packet(struct efx_rep *efv, struct efx_rx_buffer *rx_buf); +extern const struct net_device_ops efx_ef100_rep_netdev_ops; +#endif /* EF100_REP_H */ diff --git a/src/drivers/net/ethernet/sfc/ef100_rx.c b/src/drivers/net/ethernet/sfc/ef100_rx.c new file mode 100644 index 0000000..810fa9f --- /dev/null +++ b/src/drivers/net/ethernet/sfc/ef100_rx.c @@ -0,0 +1,317 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2019 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#include "net_driver.h" +#include "ef100_rx.h" +#include "efx.h" +#include "nic.h" +#include "rx_common.h" +#include "xdp.h" +#include "mcdi_functions.h" +#include "ef100.h" +#include "ef100_regs.h" +#include "ef100_nic.h" +#include "ef100_rep.h" +#include "io.h" + +/* Do the frame checksum on the whole frame. 
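+ * When BUG85159_CSUM_FRAME is defined, get_csum() subtracts the + * Ethernet header checksum from the hardware's whole-frame checksum; + * it is left undefined here.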
*/ +#undef BUG85159_CSUM_FRAME + +/* Get the value of a field in the RX prefix */ +#define PREFIX_OFFSET_W(_f) (ESF_GZ_RX_PREFIX_ ## _f ## _LBN)/32 +#define PREFIX_OFFSET_B(_f) (ESF_GZ_RX_PREFIX_ ## _f ## _LBN)%32 +#define PREFIX_WIDTH_MASK(_f) ((1UL<<(ESF_GZ_RX_PREFIX_ ## _f ## _WIDTH)) - 1) +#define PREFIX_WORD(_p,_f) le32_to_cpu((__force __le32) (_p)[ PREFIX_OFFSET_W(_f) ]) +#define PREFIX_FIELD(_p,_f) ((PREFIX_WORD(_p,_f) >> \ + PREFIX_OFFSET_B(_f)) & PREFIX_WIDTH_MASK(_f)) + +/* Get the value of a field in the classification and checksum substructure + * of the RX prefix (rh_egres_hclass) + */ +#define PREFIX_HCLASS_OFFSET_W(_f) \ + (ESF_GZ_RX_PREFIX_CLASS_LBN + ESF_GZ_RX_PREFIX_HCLASS_ ## _f ## _LBN)/32 +#define PREFIX_HCLASS_OFFSET_B(_f) \ + (ESF_GZ_RX_PREFIX_CLASS_LBN + ESF_GZ_RX_PREFIX_HCLASS_ ## _f ## _LBN)%32 +#define PREFIX_HCLASS_WIDTH_MASK(_f) \ + ((1UL<<(ESF_GZ_RX_PREFIX_HCLASS_ ## _f ## _WIDTH)) - 1) +#define PREFIX_HCLASS_WORD(_p,_f) \ + le32_to_cpu((__force __le32) (_p)[ PREFIX_HCLASS_OFFSET_W(_f) ]) +#define PREFIX_HCLASS_FIELD(_p,_f) \ + ((PREFIX_HCLASS_WORD(_p,_f) >> PREFIX_HCLASS_OFFSET_B(_f)) & \ + PREFIX_HCLASS_WIDTH_MASK(_f)) + +int ef100_rx_probe(struct efx_rx_queue *rx_queue) +{ + return efx_nic_alloc_buffer(rx_queue->efx, &rx_queue->rxd, + (rx_queue->ptr_mask + 1) * + sizeof(efx_qword_t), GFP_KERNEL); +} + +int ef100_rx_init(struct efx_rx_queue *rx_queue) +{ + return efx_mcdi_rx_init(rx_queue, false); +} + +bool ef100_rx_buf_hash_valid(const u8 *prefix) +{ + return PREFIX_FIELD(prefix, RSS_HASH_VALID); +} + +/* On older FPGA images the slice gives us the ones complement checksum of the + * whole frame in big endian format. + * + * The IP stack wants the ones complement checksum starting after the + * ethernet header in CPU endian-ness. So on older FPGA images subtract the + * ethernet header from the original value. 
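+ * (csum16_sub() below performs the ones-complement subtraction.)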
+ */ +static __wsum get_csum(u8 *va, u32 *prefix, bool csum_frame) +{ + __sum16 ethsum, ret16; + __u16 hw1; + + hw1 = be16_to_cpu((__force __be16) PREFIX_FIELD(prefix, CSUM_FRAME)); + WARN_ON_ONCE(!hw1); + if (!csum_frame) + return (__force __wsum) hw1; + + ethsum = csum_fold(csum_partial(va, sizeof(struct ethhdr), 0)); + ret16 = csum16_sub((__force __sum16) ~hw1, (__force __be16) ethsum); + return (__force __wsum) ~ret16; +} + +static bool ef100_has_fcs_error(struct efx_rx_queue *rx_queue, u32 *prefix) +{ + u16 fcsum; + + fcsum = le16_to_cpu((__force __le16) PREFIX_FIELD(prefix, CLASS)); + fcsum = PREFIX_FIELD(&fcsum, HCLASS_L2_STATUS); + + if (likely(fcsum == ESE_GZ_RH_HCLASS_L2_STATUS_OK)) { + /* Everything is ok */ + return false; + } else if (fcsum == ESE_GZ_RH_HCLASS_L2_STATUS_FCS_ERR) { + rx_queue->n_rx_eth_crc_err++; + } + + return true; +} + +void __ef100_rx_packet(struct efx_rx_queue *rx_queue) +{ + struct efx_rx_buffer *rx_buf = efx_rx_buf_pipe(rx_queue); + struct efx_nic *efx = rx_queue->efx; + struct ef100_nic_data *nic_data; + u8 *eh = efx_rx_buf_va(rx_buf); + __wsum csum = 0; + u16 ing_port; + u32 *prefix; + + prefix = (u32 *)(eh - ESE_GZ_RX_PKT_PREFIX_LEN); + + if (rx_queue->receive_raw) { + u32 mark = PREFIX_FIELD(prefix, USER_MARK); + + if (rx_queue->receive_raw(rx_queue, mark)) + return; /* packet was consumed */ + } + + if (ef100_has_fcs_error(rx_queue, prefix) && + unlikely(!(efx->net_dev->features & NETIF_F_RXALL))) + goto free_rx_buffer; + + rx_buf->len = le16_to_cpu((__force __le16) PREFIX_FIELD(prefix, LENGTH)); + if (rx_buf->len <= sizeof(struct ethhdr)) { + if (net_ratelimit()) + netif_err(efx, rx_err, efx->net_dev, + "RX packet too small (%d)\n", rx_buf->len); + ++rx_queue->n_rx_frm_trunc; + goto out; + } + + nic_data = efx->nic_data; + + ing_port = le16_to_cpu((__force __le16) PREFIX_FIELD(prefix, INGRESS_MPORT)); + + if (nic_data->have_mport && ing_port != nic_data->base_mport) { + struct net_device *rep_dev; + + rcu_read_lock(); + rep_dev = efx_ef100_find_rep_by_mport(efx, ing_port); + if (rep_dev) { + if (rep_dev->flags & IFF_UP) + efx_ef100_rep_rx_packet(netdev_priv(rep_dev), + rx_buf); + rcu_read_unlock(); + /* Representor Rx doesn't care about PF Rx buffer + * ownership, it just makes a copy. So, we are done + * with the Rx buffer from PF point of view and should + * free it. 
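+ * (The copy is made in efx_ef100_rep_rx_packet() with + * netdev_alloc_skb() and memcpy().)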
+ */ + goto free_rx_buffer; + } + rcu_read_unlock(); + if (net_ratelimit()) + netif_warn(efx, drv, efx->net_dev, + "Unrecognised ing_port %04x (base %04x), dropping\n", + ing_port, nic_data->base_mport); + rx_queue->n_rx_mport_bad++; + goto free_rx_buffer; + } + + if (!efx_xdp_rx(efx, rx_queue, rx_buf, &eh)) + goto out; + + if (likely(efx->net_dev->features & NETIF_F_RXCSUM)) { + if (PREFIX_HCLASS_FIELD(prefix, NT_OR_INNER_L3_CLASS) == + ESE_GZ_RH_HCLASS_L3_CLASS_IP4BAD) { + ++rx_queue->n_rx_ip_hdr_chksum_err; + } +#ifdef BUG85159_CSUM_FRAME + csum = get_csum(eh, prefix, true); +#else + csum = get_csum(eh, prefix, false); +#endif + switch (PREFIX_HCLASS_FIELD(prefix, NT_OR_INNER_L4_CLASS)) { + case ESE_GZ_RH_HCLASS_L4_CLASS_TCP: + case ESE_GZ_RH_HCLASS_L4_CLASS_UDP: + if (PREFIX_HCLASS_FIELD(prefix, NT_OR_INNER_L4_CSUM) == + ESE_GZ_RH_HCLASS_L4_CSUM_BAD_OR_UNKNOWN) + ++rx_queue->n_rx_tcp_udp_chksum_err; + break; + } + } + + if (rx_queue->receive_skb) { + /* no support for special channels yet, so just discard */ + WARN_ON_ONCE(1); + goto free_rx_buffer; + } + + efx_rx_packet_gro(rx_queue, rx_buf, rx_queue->rx_pkt_n_frags, eh, csum); + +#if defined(EFX_USE_KCOMPAT) && defined(EFX_USE_NET_DEVICE_LAST_RX) + efx->net_dev->last_rx = jiffies; +#endif + goto out; +free_rx_buffer: + efx_free_rx_buffers(rx_queue, rx_buf, 1); +out: + rx_queue->rx_pkt_n_frags = 0; +} + +static void ef100_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index) +{ + struct efx_rx_buffer *rx_buf = efx_rx_buffer(rx_queue, index); + struct efx_channel *channel = efx_rx_queue_channel(rx_queue); + struct efx_nic *efx = rx_queue->efx; + + ++rx_queue->rx_packets; + + netif_vdbg(efx, rx_status, efx->net_dev, + "RX queue %d received id %x\n", + efx_rx_queue_index(rx_queue), index); + + efx_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len); + + prefetch(efx_rx_buf_va(rx_buf)); + + rx_buf->page_offset += efx->rx_prefix_size; + + efx_recycle_rx_pages(channel, rx_buf, 1); + + efx_rx_flush_packet(rx_queue); + rx_queue->rx_pkt_n_frags = 1; + rx_queue->rx_pkt_index = index; +} + +int efx_ef100_ev_rx(struct efx_channel *channel, const efx_qword_t *p_event) +{ + struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel); + unsigned int n_packets = EFX_QWORD_FIELD(*p_event, + ESF_GZ_EV_RXPKTS_NUM_PKT); + unsigned int label = EFX_QWORD_FIELD(*p_event, + ESF_GZ_EV_RXPKTS_Q_LABEL); + int i; + + WARN_ON_ONCE(label != efx_rx_queue_index(rx_queue)); + WARN_ON_ONCE(!n_packets); + if (n_packets > 1) + ++rx_queue->n_rx_merge_events; + + channel->irq_mod_score += 2 * n_packets; + + for (i = 0; i < n_packets; ++i) { + ef100_rx_packet(rx_queue, + rx_queue->removed_count & rx_queue->ptr_mask); + ++rx_queue->removed_count; + } + + return n_packets; +} + +void ef100_rx_write(struct efx_rx_queue *rx_queue) +{ + unsigned int notified_count = rx_queue->notified_count; + struct efx_nic *efx = rx_queue->efx; + struct efx_rx_buffer *rx_buf; + dma_addr_t dma_addr; + unsigned int idx; + efx_qword_t *rxd; + efx_dword_t rxdb; + int rc; + + while (notified_count != rx_queue->added_count) { + idx = notified_count & rx_queue->ptr_mask; + rx_buf = efx_rx_buffer(rx_queue, idx); + rxd = efx_rx_desc(rx_queue, idx); + dma_addr = rx_buf->dma_addr; + rc = ef100_regionmap_buffer(efx, &dma_addr); + + /* TODO: Deal with failure. 
*/ + if (rc) + break; + + EFX_POPULATE_QWORD_1(*rxd, ESF_GZ_RX_BUF_ADDR, dma_addr); + + ++notified_count; + } + if (notified_count == rx_queue->notified_count) + return; + + wmb(); + EFX_POPULATE_DWORD_1(rxdb, ERF_GZ_RX_RING_PIDX, + rx_queue->added_count & rx_queue->ptr_mask); + efx_writed_page(rx_queue->efx, &rxdb, + ER_GZ_RX_RING_DOORBELL, efx_rx_queue_index(rx_queue)); + if (rx_queue->grant_credits) + wmb(); + rx_queue->notified_count = notified_count; + if (rx_queue->grant_credits) + schedule_work(&rx_queue->grant_work); +} + +int efx_ef100_rx_defer_refill(struct efx_rx_queue *rx_queue) +{ + struct efx_channel *channel = efx_rx_queue_channel(rx_queue); + MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN); + efx_qword_t *event = (efx_qword_t *)MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA); + size_t outlen; + u32 magic; + + magic = EFX_EF100_DRVGEN_MAGIC(EFX_EF100_REFILL, + efx_rx_queue_index(rx_queue)); + EFX_POPULATE_QWORD_2(*event, + ESF_GZ_E_TYPE, ESE_GZ_EF100_EV_DRIVER, + ESF_GZ_DRIVER_DATA, magic); + + MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel); + + return efx_mcdi_rpc(channel->efx, MC_CMD_DRIVER_EVENT, + inbuf, sizeof(inbuf), NULL, 0, &outlen); +} diff --git a/src/drivers/net/ethernet/sfc/ef100_rx.h b/src/drivers/net/ethernet/sfc/ef100_rx.h new file mode 100644 index 0000000..14564d2 --- /dev/null +++ b/src/drivers/net/ethernet/sfc/ef100_rx.h @@ -0,0 +1,23 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2019 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#ifndef EFX_EF100_RX_H +#define EFX_EF100_RX_H + +#include "net_driver.h" + +int ef100_rx_probe(struct efx_rx_queue *rx_queue); +int ef100_rx_init(struct efx_rx_queue *rx_queue); +bool ef100_rx_buf_hash_valid(const u8 *prefix); +int efx_ef100_ev_rx(struct efx_channel *channel, const efx_qword_t *p_event); +void ef100_rx_write(struct efx_rx_queue *rx_queue); +void __ef100_rx_packet(struct efx_rx_queue *rx_queue); +int efx_ef100_rx_defer_refill(struct efx_rx_queue *rx_queue); + +#endif diff --git a/src/drivers/net/ethernet/sfc/ef100_sriov.c b/src/drivers/net/ethernet/sfc/ef100_sriov.c new file mode 100644 index 0000000..93271dd --- /dev/null +++ b/src/drivers/net/ethernet/sfc/ef100_sriov.c @@ -0,0 +1,93 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2019 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ +#include "ef100_sriov.h" +#include "ef100_rep.h" +#include "ef100_nic.h" + +#if defined(CONFIG_SFC_SRIOV) +static int efx_ef100_pci_sriov_enable(struct efx_nic *efx, int num_vfs) +{ + struct ef100_nic_data *nic_data = efx->nic_data; + struct pci_dev *dev = efx->pci_dev; + int rc, i; + + if (WARN_ON(nic_data->vf_rep_count)) + return -EEXIST; + + rc = pci_enable_sriov(dev, num_vfs); + if (rc) + goto fail1; + + if (!nic_data->grp_mae) + return 0; + + if (!nic_data->have_local_intf) + /* We weren't able to identify our local interface, so we will + * have created remote_reps for these VFs. 
Thus, don't create + * local vf_reps for them too. + */ + return 0; + + nic_data->vf_rep = kcalloc(num_vfs, sizeof(struct net_device *), + GFP_KERNEL); + if (!nic_data->vf_rep) { + rc = -ENOMEM; + goto fail1; + } + + for (i = 0; i < num_vfs; i++) { + rc = efx_ef100_vfrep_create(efx, i); + if (rc) + goto fail2; + } + spin_lock_bh(&nic_data->vf_reps_lock); + nic_data->vf_rep_count = num_vfs; + spin_unlock_bh(&nic_data->vf_reps_lock); + return 0; + +fail2: + for (; i--;) + efx_ef100_vfrep_destroy(efx, i); + pci_disable_sriov(dev); + kfree(nic_data->vf_rep); + nic_data->vf_rep = NULL; +fail1: + netif_err(efx, probe, efx->net_dev, "Failed to enable SRIOV VFs\n"); + return rc; +} + +int efx_ef100_pci_sriov_disable(struct efx_nic *efx, bool force) +{ + struct pci_dev *dev = efx->pci_dev; + unsigned int vfs_assigned = 0; + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_PCI_DEV_FLAGS_ASSIGNED) + vfs_assigned = pci_vfs_assigned(dev); + + if (vfs_assigned && !force) { + netif_info(efx, drv, efx->net_dev, "VFs are assigned to guests; " + "please detach them before disabling SR-IOV\n"); + return -EBUSY; + } +#endif + + efx_ef100_fini_vfreps(efx); + if (!vfs_assigned) + pci_disable_sriov(dev); + return 0; +} + +int efx_ef100_sriov_configure(struct efx_nic *efx, int num_vfs) +{ + if (num_vfs == 0) + return efx_ef100_pci_sriov_disable(efx, false); + else + return efx_ef100_pci_sriov_enable(efx, num_vfs); +} +#endif diff --git a/src/drivers/net/ethernet/sfc/ef100_sriov.h b/src/drivers/net/ethernet/sfc/ef100_sriov.h new file mode 100644 index 0000000..bf7f9f0 --- /dev/null +++ b/src/drivers/net/ethernet/sfc/ef100_sriov.h @@ -0,0 +1,12 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2019 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ +#include "net_driver.h" + +int efx_ef100_sriov_configure(struct efx_nic *efx, int num_vfs); +int efx_ef100_pci_sriov_disable(struct efx_nic *efx, bool force); diff --git a/src/drivers/net/ethernet/sfc/ef100_tx.c b/src/drivers/net/ethernet/sfc/ef100_tx.c new file mode 100644 index 0000000..f17a542 --- /dev/null +++ b/src/drivers/net/ethernet/sfc/ef100_tx.c @@ -0,0 +1,580 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2018 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
+ */ + +#include <net/ip6_checksum.h> + +#include "net_driver.h" +#include "efx.h" +#include "nic.h" +#include "mcdi_functions.h" +#include "tx_common.h" +#include "ef100_regs.h" +#include "io.h" +#include "ef100.h" +#include "ef100_tx.h" +#include "ef100_nic.h" + +#ifdef EFX_C_MODEL +#define EF100_MAX_DESC_BATCH 63 /* Bug 85534 */ +#endif + +int ef100_tx_probe(struct efx_tx_queue *tx_queue) +{ + /* Allocate an extra descriptor for the QMDA status completion entry */ + return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd, + (tx_queue->ptr_mask + 2) * + sizeof(efx_oword_t), GFP_KERNEL); +} + +int ef100_tx_init(struct efx_tx_queue *tx_queue) +{ + tx_queue->tso_version = 3; /* Not used but shown in debugfs */ + return efx_mcdi_tx_init(tx_queue, false); +} + +unsigned int ef100_tx_max_skb_descs(struct efx_nic *efx) +{ + /* Option (PREFIX) descriptor, then header (TSO or SEND descriptor), + * potentially one (SEG) descriptor for the rest of the linear area, + * plus one (SEG) descriptor per fragment. + */ + return 3 + MAX_SKB_FRAGS; +} + +static bool ef100_tx_can_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb) +{ + struct efx_nic *efx = tx_queue->efx; + struct ef100_nic_data *nic_data; + struct efx_tx_buffer *buffer; + size_t header_len; + u32 mss; + + nic_data = efx->nic_data; + + if (!skb_is_gso_tcp(skb)) + return false; + if (!(efx->net_dev->features & NETIF_F_TSO)) + return false; + +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_HW_ENC_FEATURES) && defined(EFX_HAVE_SKB_ENCAPSULATION) + if (skb->encapsulation) + return false; +#endif + + mss = skb_shinfo(skb)->gso_size; + if (unlikely(mss < 4)) { + WARN_ONCE(1, "MSS of %u is too small for TSO\n", mss); + return false; + } + + header_len = efx_tx_tso_header_length(skb); + if (header_len > nic_data->tso_max_hdr_len) + return false; + + if (skb_shinfo(skb)->gso_segs > nic_data->tso_max_payload_num_segs) { +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_GSO_MAX_SEGS) + /* net_dev->gso_max_segs should've caught this */ + WARN_ON_ONCE(1); +#endif + return false; + } + + if (skb->data_len / mss > nic_data->tso_max_frames) + return false; + + /* net_dev->gso_max_size should've caught this */ + if (WARN_ON_ONCE(skb->data_len > nic_data->tso_max_payload_len)) + return false; + + /* Reserve an empty buffer for the TSO V3 descriptor. + * Convey the length of the header since we already know it. + */ + buffer = efx_tx_queue_get_insert_buffer(tx_queue); + buffer->flags = EFX_TX_BUF_TSO_V3 | EFX_TX_BUF_CONT; + buffer->len = header_len; + buffer->unmap_len = 0; + buffer->skb = skb; + ++tx_queue->insert_count; + return true; +} + +static inline efx_oword_t *ef100_tx_desc(struct efx_tx_queue *tx_queue, + unsigned int index) +{ + if (likely(tx_queue->txd.addr)) + return ((efx_oword_t *)(tx_queue->txd.addr)) + index; + else + return NULL; +} + +void ef100_notify_tx_desc(struct efx_tx_queue *tx_queue) +{ + unsigned int write_ptr; + efx_dword_t reg; + + tx_queue->xmit_pending = false; + + if (unlikely(tx_queue->notify_count == tx_queue->write_count)) + return; + +#ifdef EF100_MAX_DESC_BATCH + /* We can only ring the doorbell for a limited number of descriptors + * at a time. + * Each notification must be aligned to a TxQ request boundary. 
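+ * (EF100_MAX_DESC_BATCH is only defined for C-model builds; see the + * EFX_C_MODEL block at the top of this file.)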
+ */ + WARN_ON(tx_queue->write_count - tx_queue->notify_count > + EF100_MAX_DESC_BATCH); +#endif + + write_ptr = tx_queue->write_count & tx_queue->ptr_mask; + /* The write pointer goes into the high word */ + EFX_POPULATE_DWORD_1(reg, ERF_GZ_TX_RING_PIDX, write_ptr); + efx_writed_page(tx_queue->efx, ®, + ER_GZ_TX_RING_DOORBELL, tx_queue->queue); + tx_queue->notify_count = tx_queue->write_count; + tx_queue->notify_jiffies = jiffies; + ++tx_queue->doorbell_notify_tx; +} + +static void ef100_tx_push_buffers(struct efx_tx_queue *tx_queue) +{ + /* If the completion path is running and module option + * to enable coalescing is set we let the completion path + * handle the doorbell ping. + */ + if (!tx_queue->channel->holdoff_doorbell) { + /* Completion handler not running so send out */ + ef100_notify_tx_desc(tx_queue); + ++tx_queue->pushes; + } +} + +static void ef100_set_tx_csum_partial(const struct sk_buff *skb, + struct efx_tx_buffer *buffer, efx_oword_t *txd) +{ + efx_oword_t csum; + int csum_start; + + if (!skb || skb->ip_summed != CHECKSUM_PARTIAL) + return; + + /* skb->csum_start has the offset from head, but we need the offset + * from data. + */ + csum_start = skb_checksum_start_offset(skb); + EFX_POPULATE_OWORD_3(csum, + ESF_GZ_TX_SEND_CSO_PARTIAL_EN, 1, + ESF_GZ_TX_SEND_CSO_PARTIAL_START_W, + csum_start >> 1, + ESF_GZ_TX_SEND_CSO_PARTIAL_CSUM_W, + skb->csum_offset >> 1); + EFX_OR_OWORD(*txd, *txd, csum); +} + +static void ef100_set_tx_hw_vlan(const struct sk_buff *skb, efx_oword_t *txd) +{ + u16 vlan_tci = skb_vlan_tag_get(skb); + efx_oword_t vlan; + + EFX_POPULATE_OWORD_2(vlan, + ESF_GZ_TX_SEND_VLAN_INSERT_EN, 1, + ESF_GZ_TX_SEND_VLAN_INSERT_TCI, vlan_tci); + EFX_OR_OWORD(*txd, *txd, vlan); +} + +static void ef100_make_send_desc(struct efx_nic *efx, + const struct sk_buff *skb, + struct efx_tx_buffer *buffer, efx_oword_t *txd, + unsigned int segment_count) +{ + dma_addr_t dma_addr = buffer->dma_addr; + + /* TODO: Deal with failure. 
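+ * For now a region-mapping failure leaves this descriptor unwritten.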
*/ + if (ef100_regionmap_buffer(efx, &dma_addr)) + return; + + /* TX send descriptor */ + EFX_POPULATE_OWORD_3(*txd, + ESF_GZ_TX_SEND_NUM_SEGS, segment_count, + ESF_GZ_TX_SEND_LEN, buffer->len, + ESF_GZ_TX_SEND_ADDR, dma_addr); + + if (likely(efx->net_dev->features & NETIF_F_HW_CSUM)) + ef100_set_tx_csum_partial(skb, buffer, txd); + if (efx->net_dev->features & NETIF_F_HW_VLAN_CTAG_TX && + skb && skb_vlan_tag_present(skb)) + ef100_set_tx_hw_vlan(skb, txd); +} + +static void ef100_make_tso_desc(struct efx_nic *efx, + const struct sk_buff *skb, + struct efx_tx_buffer *buffer, efx_oword_t *txd, + unsigned int segment_count) +{ +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_GSO_PARTIAL) + bool gso_partial = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL; +#else + bool gso_partial = false; +#endif + unsigned int len, ip_offset, tcp_offset, payload_segs; + u32 mangleid = ESE_GZ_TX_DESC_IP4_ID_INC_MOD16; + unsigned int outer_ip_offset, outer_l4_offset; + u16 vlan_tci = skb_vlan_tag_get(skb); + u32 mss = skb_shinfo(skb)->gso_size; +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_SKB_ENCAPSULATION) + bool encap = skb->encapsulation; +#else + bool encap = false; +#endif + bool vlan_enable = false; + bool udp_encap = false; + struct tcphdr *tcp; + bool outer_csum; + u32 paylen; + + if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID) + mangleid = ESE_GZ_TX_DESC_IP4_ID_NO_OP; + if (efx->net_dev->features & NETIF_F_HW_VLAN_CTAG_TX) + vlan_enable = skb_vlan_tag_present(skb); + + len = skb->len - buffer->len; + /* We use 1 for the TSO descriptor and 1 for the header */ + payload_segs = segment_count - 2; + if (encap) { +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_HW_ENC_FEATURES) + outer_ip_offset = skb_network_offset(skb); + outer_l4_offset = skb_transport_offset(skb); + ip_offset = skb_inner_network_offset(skb); + tcp_offset = skb_inner_transport_offset(skb); + if (skb_shinfo(skb)->gso_type & + (SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM)) + udp_encap = true; +#else + /* can't happen */ + WARN_ON_ONCE(1); + return; +#endif + } else { + ip_offset = skb_network_offset(skb); + tcp_offset = skb_transport_offset(skb); + outer_ip_offset = outer_l4_offset = 0; + } + outer_csum = skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM; + + /* subtract TCP payload length from inner checksum */ + tcp = (void *)skb->data + tcp_offset; + paylen = skb->len - tcp_offset; + csum_replace_by_diff(&tcp->check, (__force __wsum)htonl(paylen)); + + EFX_POPULATE_OWORD_19(*txd, + ESF_GZ_TX_DESC_TYPE, ESE_GZ_TX_DESC_TYPE_TSO, + ESF_GZ_TX_TSO_MSS, mss, + ESF_GZ_TX_TSO_HDR_NUM_SEGS, 1, + ESF_GZ_TX_TSO_PAYLOAD_NUM_SEGS, payload_segs, + ESF_GZ_TX_TSO_HDR_LEN_W, buffer->len >> 1, + ESF_GZ_TX_TSO_PAYLOAD_LEN, len, + ESF_GZ_TX_TSO_CSO_OUTER_L4, outer_csum, + ESF_GZ_TX_TSO_CSO_INNER_L4, 1, + ESF_GZ_TX_TSO_INNER_L3_OFF_W, ip_offset >> 1, + ESF_GZ_TX_TSO_INNER_L4_OFF_W, tcp_offset >> 1, + ESF_GZ_TX_TSO_ED_INNER_IP4_ID, mangleid, + ESF_GZ_TX_TSO_ED_INNER_IP_LEN, 1, + ESF_GZ_TX_TSO_OUTER_L3_OFF_W, outer_ip_offset >> 1, + ESF_GZ_TX_TSO_OUTER_L4_OFF_W, outer_l4_offset >> 1, + ESF_GZ_TX_TSO_ED_OUTER_UDP_LEN, udp_encap && !gso_partial, + ESF_GZ_TX_TSO_ED_OUTER_IP_LEN, encap && !gso_partial, + ESF_GZ_TX_TSO_ED_OUTER_IP4_ID, encap ? 
mangleid : + ESE_GZ_TX_DESC_IP4_ID_NO_OP, + ESF_GZ_TX_TSO_VLAN_INSERT_EN, vlan_enable, + ESF_GZ_TX_TSO_VLAN_INSERT_TCI, vlan_tci + ); +} + +static void ef100_make_seg_desc(struct efx_nic *efx, + struct efx_tx_buffer *buffer, efx_oword_t *txd) +{ + dma_addr_t dma_addr = buffer->dma_addr; + + /* TODO: Deal with failure. */ + if (ef100_regionmap_buffer(efx, &dma_addr)) + return; + + /* TX segment descriptor */ + EFX_POPULATE_OWORD_3(*txd, + ESF_GZ_TX_DESC_TYPE, ESE_GZ_TX_DESC_TYPE_SEG, + ESF_GZ_TX_SEG_LEN, buffer->len, + ESF_GZ_TX_SEG_ADDR, dma_addr); +} + +static void ef100_tx_make_descriptors(struct efx_tx_queue *tx_queue, + const struct sk_buff *skb, + unsigned int segment_count, + struct efx_rep *efv) +{ + unsigned int old_write_count = tx_queue->write_count; + unsigned int new_write_count = old_write_count; + struct efx_nic *efx = tx_queue->efx; + struct efx_tx_buffer *buffer; + unsigned int next_desc_type; + unsigned int write_ptr; + efx_oword_t *txd; + unsigned int nr_descs = tx_queue->insert_count - old_write_count; + + if (unlikely(nr_descs == 0)) + return; + +#ifdef EF100_MAX_DESC_BATCH + /* Ensure there is room for the new descriptors. */ + if (old_write_count - tx_queue->notify_count + nr_descs > + EF100_MAX_DESC_BATCH) + ef100_notify_tx_desc(tx_queue); +#endif + + if (segment_count) + next_desc_type = ESE_GZ_TX_DESC_TYPE_TSO; + else + next_desc_type = ESE_GZ_TX_DESC_TYPE_SEND; + + if (unlikely(efv)) { + /* Create TX override descriptor */ + write_ptr = new_write_count & tx_queue->ptr_mask; + txd = ef100_tx_desc(tx_queue, write_ptr); + ++new_write_count; + + tx_queue->packet_write_count = new_write_count; + EFX_POPULATE_OWORD_3(*txd, + ESF_GZ_TX_DESC_TYPE, ESE_GZ_TX_DESC_TYPE_PREFIX, + ESF_GZ_TX_PREFIX_EGRESS_MPORT, efv->mport, + ESF_GZ_TX_PREFIX_EGRESS_MPORT_EN, 1); + nr_descs--; + } + + /* if it's a raw write (such as XDP) then always SEND single frames */ + if (!skb) + nr_descs = 1; + + do { + write_ptr = new_write_count & tx_queue->ptr_mask; + buffer = &tx_queue->buffer[write_ptr]; + + txd = ef100_tx_desc(tx_queue, write_ptr); + ++new_write_count; + /* Create TX descriptor ring entry */ + tx_queue->packet_write_count = new_write_count; + + switch (next_desc_type) { + case ESE_GZ_TX_DESC_TYPE_SEND: + ef100_make_send_desc(efx, skb, buffer, txd, nr_descs); + break; + case ESE_GZ_TX_DESC_TYPE_TSO: + /* TX TSO descriptor */ + WARN_ON_ONCE(!(buffer->flags & EFX_TX_BUF_TSO_V3)); + ef100_make_tso_desc(efx, skb, buffer, txd, nr_descs); + break; + default: + ef100_make_seg_desc(efx, buffer, txd); + } + /* if it's a raw write (such as XDP) then always SEND */ + next_desc_type = skb ? ESE_GZ_TX_DESC_TYPE_SEG : + ESE_GZ_TX_DESC_TYPE_SEND; + /* mark as an EFV buffer if applicable */ + if (unlikely(efv)) + buffer->flags |= EFX_TX_BUF_EFV; + + } while (new_write_count != tx_queue->insert_count); + + wmb(); /* Ensure descriptors are written before they are fetched */ + + tx_queue->write_count = new_write_count; + + /* The write_count above must be updated before reading + * channel->holdoff_doorbell to avoid a race with the + * completion path, so ensure these operations are not + * re-ordered. This also flushes the update of write_count + * back into the cache. 
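+	 * (The flag read after this barrier is channel->holdoff_doorbell,
+	 * checked in ef100_tx_push_buffers().)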
+ */
+	smp_mb();
+}
+
+void ef100_tx_write(struct efx_tx_queue *tx_queue)
+{
+	ef100_tx_make_descriptors(tx_queue, NULL, 0, NULL);
+	ef100_tx_push_buffers(tx_queue);
+}
+
+void ef100_ev_tx(struct efx_channel *channel, const efx_qword_t *p_event)
+{
+	unsigned int tx_done =
+		EFX_QWORD_FIELD(*p_event, ESF_GZ_EV_TXCMPL_NUM_DESC);
+	unsigned int qlabel =
+		EFX_QWORD_FIELD(*p_event, ESF_GZ_EV_TXCMPL_Q_LABEL);
+	struct efx_tx_queue *tx_queue =
+		efx_channel_get_tx_queue(channel, qlabel);
+	unsigned int tx_index = (tx_queue->read_count + tx_done - 1) &
+				tx_queue->ptr_mask;
+
+	efx_xmit_done(tx_queue, tx_index);
+}
+
+/*
+ * Add a socket buffer to a TX queue
+ *
+ * You must hold netif_tx_lock() to call this function.
+ *
+ * Returns 0 on success, error code otherwise. In case of an error this
+ * function will free the SKB.
+ */
+int ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
+{
+	return __ef100_enqueue_skb(tx_queue, skb, NULL);
+}
+
+int __ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
+			struct efx_rep *efv)
+{
+	unsigned int old_insert_count = tx_queue->insert_count;
+	struct efx_nic *efx = tx_queue->efx;
+	bool xmit_more = netdev_xmit_more();
+	unsigned int fill_level;
+	unsigned int segments;
+	int rc;
+
+	if (!tx_queue->buffer || !tx_queue->ptr_mask) {
+		netif_err(efx, tx_err, efx->net_dev,
+			  "Bad TX %s, stopping queue\n",
+			  tx_queue->buffer ? "ring mask" : "buffer array");
+		netif_stop_queue(efx->net_dev);
+		dev_kfree_skb_any(skb);
+		return -ENODEV;
+	}
+
+	segments = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 0;
+	if (segments == 1)
+		segments = 0; /* Don't use TSO/GSO for a single segment. */
+	if (segments && !ef100_tx_can_tso(tx_queue, skb)) {
+		rc = efx_tx_tso_fallback(tx_queue, skb);
+		tx_queue->tso_fallbacks++;
+		if (rc)
+			goto err;
+		else
+			return 0;
+	}
+
+	if (unlikely(efv)) {
+		struct efx_tx_buffer *buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
+
+		/* Drop representor packets if the queue is stopped.
+		 * We currently don't assert backoff to representors so this is
+		 * to make sure representor traffic can't starve the main
+		 * net device.
+		 * And, of course, if there are no TX descriptors left.
+		 */
+		if (netif_tx_queue_stopped(tx_queue->core_txq) ||
+		    unlikely(efx_tx_buffer_in_use(buffer))) {
+			atomic64_inc(&efv->stats.tx_errors);
+			rc = -ENOSPC;
+			goto err;
+		}
+
+		/* Also drop representor traffic if it could cause us to
+		 * stop the queue. If we assert backoff and we haven't
+		 * received traffic on the main net device recently then the
+		 * TX watchdog can go off erroneously.
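+		 * Such drops are counted against the representor's own
+		 * tx_errors statistic rather than stopping the shared queue.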
+ */ + fill_level = efx_channel_tx_old_fill_level(tx_queue->channel); + fill_level += efx->type->tx_max_skb_descs(efx); + if (fill_level > efx->txq_stop_thresh) { + struct efx_tx_queue *txq2; + + /* Refresh cached fill level and re-check */ + efx_for_each_channel_tx_queue(txq2, tx_queue->channel) + txq2->old_read_count = READ_ONCE(txq2->read_count); + + fill_level = efx_channel_tx_old_fill_level(tx_queue->channel); + fill_level += efx->type->tx_max_skb_descs(efx); + if (fill_level > efx->txq_stop_thresh) { + atomic64_inc(&efv->stats.tx_errors); + rc = -ENOSPC; + goto err; + } + } + + buffer->flags = EFX_TX_BUF_OPTION | EFX_TX_BUF_EFV; + tx_queue->insert_count++; + } + + /* Map for DMA and create descriptors */ + rc = efx_tx_map_data(tx_queue, skb, segments); + if (rc) + goto err; + ef100_tx_make_descriptors(tx_queue, skb, segments, efv); + + EFX_WARN_ON_PARANOID(!tx_queue->core_txq); + + fill_level = efx_channel_tx_old_fill_level(tx_queue->channel); + if (fill_level > efx->txq_stop_thresh) { + struct efx_tx_queue *txq2; + + /* Because of checks above, representor traffic should + * not be able to stop the queue. + */ + WARN_ON(efv); + + netif_tx_stop_queue(tx_queue->core_txq); + /* Re-read after a memory barrier in case we've raced with + * the completion path. Otherwise there's a danger we'll never + * restart the queue if all completions have just happened. + */ + smp_mb(); + efx_for_each_channel_tx_queue(txq2, tx_queue->channel) + txq2->old_read_count = READ_ONCE(txq2->read_count); + fill_level = efx_channel_tx_old_fill_level(tx_queue->channel); + if (fill_level < efx->txq_stop_thresh) + netif_tx_start_queue(tx_queue->core_txq); + } + + if (unlikely(efv)) + /* always push for representor traffic */ + tx_queue->xmit_pending = false; + else if (__netdev_tx_sent_queue(tx_queue->core_txq, skb->len, + xmit_more)) + tx_queue->xmit_pending = false; /* push doorbell */ + else if (tx_queue->write_count - tx_queue->notify_count > 255) + /* Ensure we never push more than 256 packets at once */ + tx_queue->xmit_pending = false; /* push */ + else + tx_queue->xmit_pending = true; /* don't push yet */ + + if (!tx_queue->xmit_pending) + ef100_tx_push_buffers(tx_queue); + + if (segments) { + tx_queue->tso_bursts++; + tx_queue->tso_packets += segments; + tx_queue->tx_packets += segments; + } else { + tx_queue->tx_packets++; + } + tx_queue->tx_bytes += skb->len; + return 0; + +err: + efx_enqueue_unwind(tx_queue, old_insert_count); + if (!IS_ERR_OR_NULL(skb)) + dev_kfree_skb_any(skb); + + /* If we're not expecting another transmit and we had something to push + * on this queue then we need to push here to get the previous packets + * out. We only enter this branch from before the xmit_more handling + * above, so xmit_pending still refers to the old state. + */ + if (tx_queue->xmit_pending && !xmit_more) + ef100_tx_push_buffers(tx_queue); + return rc; +} diff --git a/src/drivers/net/ethernet/sfc/ef100_tx.h b/src/drivers/net/ethernet/sfc/ef100_tx.h new file mode 100644 index 0000000..7ea0ebd --- /dev/null +++ b/src/drivers/net/ethernet/sfc/ef100_tx.h @@ -0,0 +1,27 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2019 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
+ */ + +#ifndef EFX_EF100_TX_H +#define EFX_EF100_TX_H + +#include "net_driver.h" +#include "ef100_rep.h" + +int ef100_tx_probe(struct efx_tx_queue *tx_queue); +int ef100_tx_init(struct efx_tx_queue *tx_queue); +void ef100_tx_write(struct efx_tx_queue *tx_queue); +void ef100_notify_tx_desc(struct efx_tx_queue *tx_queue); +unsigned int ef100_tx_max_skb_descs(struct efx_nic *efx); + +void ef100_ev_tx(struct efx_channel *channel, const efx_qword_t *p_event); + +int ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb); +int __ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb, + struct efx_rep *efv); +#endif diff --git a/src/drivers/net/ethernet/sfc/ef100_vdpa.c b/src/drivers/net/ethernet/sfc/ef100_vdpa.c new file mode 100644 index 0000000..f402300 --- /dev/null +++ b/src/drivers/net/ethernet/sfc/ef100_vdpa.c @@ -0,0 +1,859 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Driver for Xilinx network controllers and boards + * Copyright 2020 Xilinx Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#include +#include +#include "ef100_vdpa.h" +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_VDPA_MGMT_INTERFACE) +#include +#endif +#include "mcdi_vdpa.h" +#include "filter.h" +#include "mcdi_functions.h" +#include "ef100_netdev.h" +#include "mcdi_filters.h" +#include "debugfs.h" + +#if defined(CONFIG_SFC_VDPA) +#define EFX_VDPA_NAME_LEN 32 + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_VDPA_MGMT_INTERFACE) + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_VDPA_ALLOC_ASID_NAME_USEVA_PARAMS) +static struct virtio_device_id ef100_vdpa_id_table[] = +#else +static const struct virtio_device_id ef100_vdpa_id_table[] = +#endif +{ + { .device = VIRTIO_ID_NET, .vendor = PCI_VENDOR_ID_REDHAT_QUMRANET }, + { 0 }, +}; + +static void ef100_vdpa_net_dev_del(struct vdpa_mgmt_dev *mgmt_dev, + struct vdpa_device *vdev) +{ + struct ef100_nic_data *nic_data; + struct efx_nic *efx; + int rc; + + efx = pci_get_drvdata(to_pci_dev(mgmt_dev->device)); + nic_data = efx->nic_data; + + rc = efx_ef100_set_bar_config(efx, EF100_BAR_CONFIG_EF100); + if (rc) + pci_err(efx->pci_dev, + "set_bar_config EF100 failed, err: %d\n", rc); + else + pci_dbg(efx->pci_dev, + "vdpa net device deleted, vf: %u\n", + nic_data->vf_index); +} + +static int ef100_vdpa_net_dev_add(struct vdpa_mgmt_dev *mgmt_dev, +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_VDPA_MGMT_OPS_CONFIG_PARAM) + const char *name, + const struct vdpa_dev_set_config *config) +#else + const char *name) +#endif +{ + struct ef100_vdpa_nic *vdpa_nic; + struct ef100_nic_data *nic_data; + struct efx_nic *efx; + int rc, err; + + efx = pci_get_drvdata(to_pci_dev(mgmt_dev->device)); + nic_data = efx->nic_data; + + if (efx->vdpa_nic) { + pci_warn(efx->pci_dev, + "vDPA device already exists on this VF\n"); + return -EEXIST; + } +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_VDPA_MGMT_OPS_CONFIG_PARAM) + if (config->mask & BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR)) { + if (!is_valid_ether_addr(config->net.mac)) { + pci_err(efx->pci_dev, "Invalid MAC address %pM\n", + config->net.mac); + return -EINVAL; + } + } +#endif + + rc = efx_ef100_set_bar_config(efx, EF100_BAR_CONFIG_VDPA); + if (rc) { + pci_err(efx->pci_dev, + "set_bar_config vDPA failed, err: %d\n", rc); + goto err_set_bar_config; + } + + /* TODO: handle in_order feature with management interface + * 
This will be done in VDPALINUX-242 + */ + + vdpa_nic = ef100_vdpa_create(efx, name); + if (IS_ERR(vdpa_nic)) { + pci_err(efx->pci_dev, + "vDPA device creation failed, vf: %u, err: %ld\n", + nic_data->vf_index, PTR_ERR(vdpa_nic)); + rc = PTR_ERR(vdpa_nic); + goto err_set_bar_config; + } else { + pci_info(efx->pci_dev, + "vDPA net device created, vf: %u\n", + nic_data->vf_index); + } + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_VDPA_MGMT_OPS_CONFIG_PARAM) + if (config->mask & BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR)) { + ether_addr_copy(vdpa_nic->mac_address, config->net.mac); + + /* It has been observed during testing with virtio-vdpa + * that virtio feature negotiation and vring creation + * happens before control reaches here. + */ + if (vdpa_nic->vring[0].vring_state & EF100_VRING_CREATED) + ef100_vdpa_insert_filter(efx); + } +#endif + + /* TODO: handle MTU configuration from config parameter */ + return 0; + +err_set_bar_config: + err = efx_ef100_set_bar_config(efx, EF100_BAR_CONFIG_EF100); + if (err) + pci_err(efx->pci_dev, + "set_bar_config EF100 failed, err: %d\n", err); + + return rc; +} + +static const struct vdpa_mgmtdev_ops ef100_vdpa_net_mgmtdev_ops = { + .dev_add = ef100_vdpa_net_dev_add, + .dev_del = ef100_vdpa_net_dev_del +}; + +int ef100_vdpa_register_mgmtdev(struct efx_nic *efx) +{ + struct vdpa_mgmt_dev *mgmt_dev; + u64 features; + int rc; + + mgmt_dev = kzalloc(sizeof(*mgmt_dev), GFP_KERNEL); + if (!mgmt_dev) + return -ENOMEM; + + rc = efx_vdpa_get_features(efx, EF100_VDPA_DEVICE_TYPE_NET, &features); + if (rc) { + pci_err(efx->pci_dev, "%s: MCDI get features error:%d\n", + __func__, rc); + goto free_mgmt_dev; + } + + efx->mgmt_dev = mgmt_dev; + mgmt_dev->device = &efx->pci_dev->dev; + mgmt_dev->id_table = ef100_vdpa_id_table; + mgmt_dev->ops = &ef100_vdpa_net_mgmtdev_ops; +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_VDPA_SUPPORTED_FEATURES) + mgmt_dev->supported_features = features; +#endif +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_VDPA_MAX_SUPPORTED_VQS) + mgmt_dev->max_supported_vqs = EF100_VDPA_MAX_QUEUES_PAIRS * 2; +#endif +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_VDPA_MGMT_OPS_CONFIG_PARAM) + mgmt_dev->config_attr_mask = BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR); +#endif + rc = vdpa_mgmtdev_register(mgmt_dev); + if (rc) { + pci_err(efx->pci_dev, + "vdpa management device register failed, err: %d\n", + rc); + goto free_mgmt_dev; +#ifdef EFX_NOT_UPSTREAM + } else { + pci_dbg(efx->pci_dev, "vdpa management device created\n"); +#endif + } + + return 0; + +free_mgmt_dev: + kfree(mgmt_dev); + efx->mgmt_dev = NULL; + return rc; +} + +void ef100_vdpa_unregister_mgmtdev(struct efx_nic *efx) +{ +#ifdef EFX_NOT_UPSTREAM + pci_dbg(efx->pci_dev, "Unregister vdpa_management_device\n"); +#endif + if (efx->mgmt_dev) { + vdpa_mgmtdev_unregister(efx->mgmt_dev); + kfree(efx->mgmt_dev); + efx->mgmt_dev = NULL; + } +} +#endif + +static void +ef100_vdpa_get_addrs(struct efx_nic *efx, + bool *uc_promisc, + bool *mc_promisc) +{ + struct ef100_vdpa_nic *vdpa_nic = efx->vdpa_nic; + struct efx_mcdi_dev_addr addr; + + *uc_promisc = false; + /* As Control Virtqueue is yet to be implemented in vDPA + * driver, it is not possible to pass on the MAC address + * corresponding to Solicited-Node Multicast Address via + * VIRTIO_NET_CTRL_MAC_TABLE_SET to hypervisor. To deal + * with this limitation, enable multicast promiscuous to + * receive neighbor solicitation messages in order to + * allow the IPv6 ping to succeed. 
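+	 * Once a control virtqueue implementation lets the guest program
+	 * its multicast list, this could be narrowed to exact-match
+	 * multicast filters.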
+ */ + *mc_promisc = true; + + if (is_valid_ether_addr(vdpa_nic->mac_address)) { + dev_dbg(&vdpa_nic->vdpa_dev.dev, "ucast mac: %pM\n", + vdpa_nic->mac_address); + ether_addr_copy(addr.addr, vdpa_nic->mac_address); + efx_mcdi_filter_uc_addr(efx, &addr); + } else { + dev_dbg(&vdpa_nic->vdpa_dev.dev, "Invalid MAC address %pM\n", + vdpa_nic->mac_address); + } +} + +void ef100_vdpa_insert_filter(struct efx_nic *efx) +{ + mutex_lock(&efx->mac_lock); + down_read(&efx->filter_sem); + efx_mcdi_filter_sync_rx_mode(efx); + up_read(&efx->filter_sem); + mutex_unlock(&efx->mac_lock); +} + +#ifdef EFX_NOT_UPSTREAM +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_VDPA_MGMT_OPS_CONFIG_PARAM) +static ssize_t vdpa_mac_show(struct device *dev, + struct device_attribute *attr, + char *buf_out) +{ + struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); + struct ef100_vdpa_nic *vdpa_nic = efx->vdpa_nic; + int len; + + /* print MAC in big-endian format */ + len = scnprintf(buf_out, PAGE_SIZE, "%pM\n", vdpa_nic->mac_address); + + return len; +} + +static ssize_t vdpa_mac_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_VDPA_MGMT_INTERFACE) + struct ef100_nic_data *nic_data = efx->nic_data; +#endif + struct ef100_vdpa_nic *vdpa_nic; + struct vdpa_device *vdev; + u8 mac_address[ETH_ALEN]; + int rc = 0; + + vdpa_nic = efx->vdpa_nic; + if (vdpa_nic == NULL) { + pci_err(efx->pci_dev, + "vDPA device doesn't exist!\n"); + return -ENOENT; + } + + mutex_lock(&vdpa_nic->lock); + vdev = &vdpa_nic->vdpa_dev; +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_VDPA_MGMT_INTERFACE) + if (nic_data->vdpa_class != EF100_VDPA_CLASS_NET) { + dev_err(&vdev->dev, + "Invalid vDPA device class: %u\n", nic_data->vdpa_class); + rc = -EINVAL; + goto err; + } +#endif + + rc = sscanf(buf, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx", + &mac_address[0], &mac_address[1], &mac_address[2], + &mac_address[3], &mac_address[4], &mac_address[5]); + + if (rc != ETH_ALEN) { + dev_err(&vdev->dev, + "Invalid MAC address %s\n", buf); + rc = -EINVAL; + goto err; + } + + ether_addr_copy(vdpa_nic->mac_address, (const u8 *)&mac_address); + + if (vdpa_nic->vring[0].vring_state & EF100_VRING_CREATED) + ef100_vdpa_insert_filter(efx); + +err: + mutex_unlock(&vdpa_nic->lock); + if (rc < 0) + return rc; + return count; +} + +static DEVICE_ATTR_RW(vdpa_mac); +#endif +#endif /* EFX_NOT_UPSTREAM */ + +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_VDPA_MGMT_INTERFACE) +static ssize_t vdpa_class_show(struct device *dev, + struct device_attribute *attr, + char *buf_out) +{ + struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); + struct ef100_nic_data *nic_data = efx->nic_data; + int len = 0; + + mutex_lock(&nic_data->bar_config_lock); + switch (nic_data->vdpa_class) { + case EF100_VDPA_CLASS_NONE: + len = scnprintf(buf_out, PAGE_SIZE, + "VDPA_CLASS_NONE\n"); + break; + case EF100_VDPA_CLASS_NET: + len = scnprintf(buf_out, PAGE_SIZE, + "VDPA_CLASS_NET\n"); + break; + default: + len = scnprintf(buf_out, PAGE_SIZE, + "VDPA_CLASS_INVALID\n"); + } + + mutex_unlock(&nic_data->bar_config_lock); + return len; +} + +static ssize_t vdpa_class_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); + struct ef100_nic_data *nic_data = efx->nic_data; + enum ef100_vdpa_class vdpa_class; + struct ef100_vdpa_nic *vdpa_nic; + 
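+	/* Writing "net_io" below selects the same net device class but
+	 * additionally requests VIRTIO_F_IN_ORDER negotiation via the
+	 * in_order flag of struct ef100_vdpa_nic.
+	 */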
bool in_order = false; + int rc = 0; + + mutex_lock(&nic_data->bar_config_lock); + if (sysfs_streq(buf, "net")) { + vdpa_class = EF100_VDPA_CLASS_NET; + } else if (sysfs_streq(buf, "net_io")) { + vdpa_class = EF100_VDPA_CLASS_NET; + in_order = true; + } else if (sysfs_streq(buf, "none")) { + vdpa_class = EF100_VDPA_CLASS_NONE; + } else { + rc = -EINVAL; + goto fail; + } + + switch (nic_data->vdpa_class) { + case EF100_VDPA_CLASS_NONE: + if (vdpa_class == EF100_VDPA_CLASS_NET) { + char name[EFX_VDPA_NAME_LEN]; + + snprintf(name, sizeof(name), EFX_VDPA_NAME(nic_data)); + /* only vdpa net devices are supported as of now */ + vdpa_nic = ef100_vdpa_create(efx, name, EF100_VDPA_CLASS_NET); + if (IS_ERR(vdpa_nic)) { + pci_err(efx->pci_dev, + "vDPA device creation failed, err: %ld", + PTR_ERR(vdpa_nic)); + rc = PTR_ERR(vdpa_nic); + goto fail; + } + + vdpa_nic->in_order = in_order; + pci_info(efx->pci_dev, + "vDPA net device created, vf: %u\n", + nic_data->vf_index); + } else { + pci_err(efx->pci_dev, + "Invalid vdpa class transition %u->%u", + EF100_VDPA_CLASS_NONE, vdpa_class); + rc = -EINVAL; + goto fail; + } + break; + + case EF100_VDPA_CLASS_NET: + + if (vdpa_class == EF100_VDPA_CLASS_NONE) { + if (ef100_vdpa_dev_in_use(efx)) { + pci_warn(efx->pci_dev, + "Device in use cannot change class"); + rc = -EBUSY; + goto fail; + } + ef100_vdpa_delete(efx); + nic_data->vdpa_class = EF100_VDPA_CLASS_NONE; + pci_info(efx->pci_dev, + "vDPA net device removed, vf: %u\n", + nic_data->vf_index); + } else { + pci_err(efx->pci_dev, + "Invalid vdpa class transition %u->%u", + EF100_VDPA_CLASS_NET, vdpa_class); + rc = -EINVAL; + goto fail; + } + break; + + default: + break; + } + +fail: + mutex_unlock(&nic_data->bar_config_lock); + if (rc) + return rc; + return count; +} + +static DEVICE_ATTR_RW(vdpa_class); +#endif + +static int vdpa_create_files(struct efx_nic *efx) +{ +#if (defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_VDPA_MGMT_INTERFACE)) ||\ +defined(EFX_NOT_UPSTREAM) +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_VDPA_MGMT_OPS_CONFIG_PARAM) + int rc; +#endif +#endif + +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_VDPA_MGMT_INTERFACE) + rc = device_create_file(&efx->pci_dev->dev, &dev_attr_vdpa_class); + if (rc) { + pci_err(efx->pci_dev, "vdpa_class file creation failed\n"); + return rc; + } +#endif + +#ifdef EFX_NOT_UPSTREAM +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_VDPA_MGMT_OPS_CONFIG_PARAM) + rc = device_create_file(&efx->pci_dev->dev, &dev_attr_vdpa_mac); + if (rc) { + pci_err(efx->pci_dev, "vdpa_mac file creation failed\n"); +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_VDPA_MGMT_INTERFACE) + device_remove_file(&efx->pci_dev->dev, &dev_attr_vdpa_class); +#endif + return rc; + } +#endif +#endif + + return 0; +} + +static void vdpa_remove_files(struct efx_nic *efx) +{ +#ifdef EFX_NOT_UPSTREAM +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_VDPA_MGMT_OPS_CONFIG_PARAM) + device_remove_file(&efx->pci_dev->dev, &dev_attr_vdpa_mac); +#endif +#endif +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_VDPA_MGMT_INTERFACE) + device_remove_file(&efx->pci_dev->dev, &dev_attr_vdpa_class); +#endif +} + +static const struct efx_mcdi_filter_addr_ops vdpa_addr_ops = { + .get_addrs = ef100_vdpa_get_addrs, +}; + +int ef100_vdpa_init(struct efx_probe_data *probe_data) +{ + struct efx_nic *efx = &probe_data->efx; + int rc = 0; + + if (efx->state != STATE_PROBED) { + pci_err(efx->pci_dev, "Invalid efx state %u", efx->state); + return -EBUSY; + } + + rc = vdpa_create_files(efx); + if (rc) { + 
pci_err(efx->pci_dev, "vdpa_create_files failed, err: %d\n", rc);
+		return rc;
+	}
+	efx->state = STATE_VDPA;
+	rc = ef100_filter_table_probe(efx);
+	if (rc) {
+		pci_err(efx->pci_dev, "filter probe failed, err: %d\n", rc);
+		goto err_remove_vdpa_files;
+	}
+
+	/* Add unspecified VID to support VLAN filtering being disabled */
+	rc = efx_mcdi_filter_add_vlan(efx, EFX_FILTER_VID_UNSPEC);
+	if (rc)
+		goto err_remove_filter_table;
+
+	efx_mcdi_filter_set_addr_ops(efx, &vdpa_addr_ops);
+
+	if (!efx->type->filter_table_probe) {
+		rc = -EOPNOTSUPP;
+		goto err_remove_filter_table;
+	}
+
+	rc = efx->type->filter_table_probe(efx);
+	if (!rc)
+		return 0;
+
+	pci_err(efx->pci_dev, "filter_table_probe failed, err: %d\n", rc);
+
+err_remove_filter_table:
+	efx_mcdi_filter_table_remove(efx);
+
+err_remove_vdpa_files:
+	vdpa_remove_files(efx);
+	efx->state = STATE_PROBED;
+	return rc;
+}
+
+void ef100_vdpa_fini(struct efx_probe_data *probe_data)
+{
+	struct efx_nic *efx = &probe_data->efx;
+#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_VDPA_MGMT_INTERFACE)
+	struct ef100_nic_data *nic_data = efx->nic_data;
+#endif
+
+	if (efx->state != STATE_VDPA && efx->state != STATE_DISABLED) {
+		pci_err(efx->pci_dev, "%s: Invalid efx state %u",
+			__func__, efx->state);
+		return;
+	}
+
+	/* Handle vdpa device deletion, if not done explicitly */
+	ef100_vdpa_delete(efx);
+#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_VDPA_MGMT_INTERFACE)
+	nic_data->vdpa_class = EF100_VDPA_CLASS_NONE;
+#endif
+
+	vdpa_remove_files(efx);
+
+	efx->state = STATE_PROBED;
+
+	if (efx->type->filter_table_remove)
+		efx->type->filter_table_remove(efx);
+	efx_mcdi_filter_table_remove(efx);
+}
+
+static int get_net_config(struct ef100_vdpa_nic *vdpa_nic)
+{
+	struct efx_nic *efx = vdpa_nic->efx;
+	u16 mtu, link_up;
+	u32 speed;
+	u8 duplex;
+	int rc = 0;
+
+	rc = efx_vdpa_get_mac_address(efx,
+				      vdpa_nic->net_config.mac);
+	if (rc) {
+		dev_err(&vdpa_nic->vdpa_dev.dev,
+			"%s: Get MAC address for vf:%u failed, rc:%d\n",
+			__func__, vdpa_nic->vf_index, rc);
+		return rc;
+	}
+
+	vdpa_nic->net_config.max_virtqueue_pairs =
+		cpu_to_efx_vdpa16(vdpa_nic, vdpa_nic->max_queue_pairs);
+
+	rc = efx_vdpa_get_mtu(efx, &mtu);
+	if (rc) {
+		dev_err(&vdpa_nic->vdpa_dev.dev,
+			"%s: Get MTU for vf:%u failed:%d\n", __func__,
+			vdpa_nic->vf_index, rc);
+		return rc;
+	}
+	vdpa_nic->net_config.mtu = cpu_to_efx_vdpa16(vdpa_nic, mtu);
+
+	rc = efx_vdpa_get_link_details(efx, &link_up, &speed, &duplex);
+	if (rc) {
+		dev_err(&vdpa_nic->vdpa_dev.dev,
+			"%s: Get Link details for vf:%u failed:%d\n", __func__,
+			vdpa_nic->vf_index, rc);
+		return rc;
+	}
+	vdpa_nic->net_config.status = cpu_to_efx_vdpa16(vdpa_nic, link_up);
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_VIRTIO_NET_SPEED_LE32)
+	vdpa_nic->net_config.speed = cpu_to_le32(speed);
+#else
+	vdpa_nic->net_config.speed = cpu_to_efx_vdpa32(vdpa_nic, speed);
+#endif
+	vdpa_nic->net_config.duplex = duplex;
+
+#ifdef EFX_NOT_UPSTREAM
+	dev_info(&vdpa_nic->vdpa_dev.dev, "%s: mac address: %pM\n", __func__,
+		 vdpa_nic->net_config.mac);
+	dev_info(&vdpa_nic->vdpa_dev.dev, "%s: MTU:%u\n", __func__,
+		 vdpa_nic->net_config.mtu);
+	dev_info(&vdpa_nic->vdpa_dev.dev,
+		 "%s: Status:%u Link Speed:%u Duplex:%u\n", __func__,
+		 vdpa_nic->net_config.status,
+		 vdpa_nic->net_config.speed,
+		 vdpa_nic->net_config.duplex);
+#endif
+	return 0;
+}
+
+static int vdpa_allocate_vis(struct efx_nic *efx, unsigned int *allocated_vis)
+{
+	/* The first VI is reserved for MCDI
+	 * 1 VI each for rx + tx ring
+	 */
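+	/* For example, with EF100_VDPA_MAX_QUEUES_PAIRS == 1 this works
+	 * out as min_vis = max_vis = 2: VI 0 for MCDI plus one VI shared
+	 * by the single rx/tx vring pair.
+	 */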
unsigned int max_vis = 1 + EF100_VDPA_MAX_QUEUES_PAIRS; + unsigned int min_vis = 1 + 1; + int rc = 0; + + rc = efx_mcdi_alloc_vis(efx, min_vis, max_vis, + NULL, NULL, + allocated_vis); + if (!rc) + return rc; + if (*allocated_vis < min_vis) + return -ENOSPC; + return 0; +} + +static void vdpa_free_vis(struct efx_nic *efx) +{ + struct ef100_nic_data *nic_data = efx->nic_data; + int rc; + + rc = efx_mcdi_free_vis(efx); + if (rc) + pci_err(efx->pci_dev, "vDPA free vis failed for vf: %u\n", + nic_data->vf_index); +} + +static void unmap_mcdi_buffer(struct efx_nic *efx) +{ + struct ef100_nic_data *nic_data = efx->nic_data; + struct efx_mcdi_iface *mcdi; + + mcdi = efx_mcdi(efx); + spin_lock_bh(&mcdi->iface_lock); + /* Save current MCDI mode to be restored later */ + efx->vdpa_nic->mcdi_mode = mcdi->mode; + efx->mcdi_buf_mode = EFX_BUF_MODE_VDPA; + mcdi->mode = MCDI_MODE_FAIL; + spin_unlock_bh(&mcdi->iface_lock); + efx_mcdi_wait_for_cleanup(efx); + efx_nic_free_buffer(efx, &nic_data->mcdi_buf); +} + +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_VDPA_MGMT_INTERFACE) +struct ef100_vdpa_nic *ef100_vdpa_create(struct efx_nic *efx, + const char *dev_name, + enum ef100_vdpa_class dev_type) +#else +struct ef100_vdpa_nic *ef100_vdpa_create(struct efx_nic *efx, + const char *dev_name) +#endif +{ + struct ef100_nic_data *nic_data = efx->nic_data; + struct ef100_vdpa_nic *vdpa_nic; + unsigned int allocated_vis; + struct device *dev; + int rc; + u16 i; + +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_VDPA_MGMT_INTERFACE) + if (dev_type != EF100_VDPA_CLASS_NET) { + /* only vdpa net devices are supported as of now */ + pci_err(efx->pci_dev, + "Invalid vDPA device class: %u\n", dev_type); + return ERR_PTR(-EINVAL); + } else { + nic_data->vdpa_class = dev_type; + } +#endif + + rc = vdpa_allocate_vis(efx, &allocated_vis); + if (rc) { + pci_err(efx->pci_dev, + "%s Alloc VIs failed for vf:%u error:%d\n", + __func__, nic_data->vf_index, rc); + return ERR_PTR(rc); + } + + vdpa_nic = vdpa_alloc_device(struct ef100_vdpa_nic, + vdpa_dev, &efx->pci_dev->dev, + &ef100_vdpa_config_ops +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_VDPA_REGISTER_NVQS_PARAM) && defined(EFX_HAVE_VDPA_ALLOC_NVQS_PARAM) + , (allocated_vis - 1) * 2 +#endif +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_VDPA_ALLOC_ASID_NAME_USEVA_PARAMS) + , 1, 1 +#endif +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_VDPA_ALLOC_NAME_PARAM) || defined(EFX_HAVE_VDPA_ALLOC_NAME_USEVA_PARAMS) || defined(EFX_HAVE_VDPA_ALLOC_ASID_NAME_USEVA_PARAMS) + , dev_name +#endif +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_VDPA_ALLOC_NAME_USEVA_PARAMS) || defined(EFX_HAVE_VDPA_ALLOC_ASID_NAME_USEVA_PARAMS) + , false +#endif + ); + if (!vdpa_nic) { + pci_err(efx->pci_dev, + "vDPA device allocation failed for vf: %u\n", + nic_data->vf_index); + rc = -ENOMEM; + goto err_alloc_vis_free; + } + + mutex_init(&vdpa_nic->lock); + vdpa_nic->vdpa_dev.dma_dev = &efx->pci_dev->dev; + efx->vdpa_nic = vdpa_nic; + vdpa_nic->efx = efx; + vdpa_nic->max_queue_pairs = allocated_vis - 1; + vdpa_nic->pf_index = nic_data->pf_index; + vdpa_nic->vf_index = nic_data->vf_index; + vdpa_nic->vdpa_state = EF100_VDPA_STATE_INITIALIZED; + vdpa_nic->mac_address = (u8 *)&vdpa_nic->net_config.mac; + + dev = &vdpa_nic->vdpa_dev.dev; +#ifdef EFX_NOT_UPSTREAM + dev_info(dev, "%s: vDPA dev pf_index:%u vf_index:%u max_queues:%u\n", + __func__, vdpa_nic->pf_index, vdpa_nic->vf_index, + vdpa_nic->max_queue_pairs); +#endif + + for (i = 0; i < (2 * vdpa_nic->max_queue_pairs); i++) { + rc = 
ef100_vdpa_init_vring(vdpa_nic, i);
+		if (rc) {
+			pci_err(efx->pci_dev,
+				"vring init idx: %u failed, rc: %d\n", i, rc);
+			goto err_put_device;
+		}
+	}
+
+	rc = devm_add_action_or_reset(&efx->pci_dev->dev,
+				      ef100_vdpa_irq_vectors_free,
+				      efx->pci_dev);
+	if (rc) {
+		pci_err(efx->pci_dev,
+			"Failed adding devres for freeing irq vectors\n");
+		goto err_put_device;
+	}
+
+	unmap_mcdi_buffer(efx);
+
+	rc = efx_init_debugfs_vdpa(vdpa_nic);
+	if (rc)
+		goto err_put_device;
+
+	rc = get_net_config(vdpa_nic);
+	if (rc)
+		goto err_put_device;
+
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_VDPA_MGMT_INTERFACE)
+	vdpa_nic->vdpa_dev.mdev = efx->mgmt_dev;
+	/* Caller must invoke this routine in the management device
+	 * dev_add() callback
+	 */
+	rc = _vdpa_register_device(&vdpa_nic->vdpa_dev,
+				   (allocated_vis - 1) * 2);
+#else
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_VDPA_REGISTER_NVQS_PARAM)
+	rc = vdpa_register_device(&vdpa_nic->vdpa_dev,
+				  (allocated_vis - 1) * 2);
+#else
+	rc = vdpa_register_device(&vdpa_nic->vdpa_dev);
+#endif
+#endif
+	if (rc) {
+		pci_err(efx->pci_dev,
+			"vDPA device registration failed for vf: %u\n",
+			nic_data->vf_index);
+		goto err_put_device;
+	}
+
+	return vdpa_nic;
+
+err_put_device:
+	/* put_device invokes ef100_vdpa_free */
+	put_device(&vdpa_nic->vdpa_dev.dev);
+
+err_alloc_vis_free:
+	vdpa_free_vis(efx);
+	return ERR_PTR(rc);
+}
+
+void ef100_vdpa_delete(struct efx_nic *efx)
+{
+	int rc;
+
+	if (efx->vdpa_nic) {
+		mutex_lock(&efx->vdpa_nic->lock);
+		if (efx->mcdi_buf_mode == EFX_BUF_MODE_VDPA) {
+			rc = ef100_vdpa_map_mcdi_buffer(efx);
+			if (rc) {
+				pci_err(efx->pci_dev,
+					"map_mcdi_buffer failed, err: %d\n",
+					rc);
+			}
+		}
+		reset_vdpa_device(efx->vdpa_nic);
+		mutex_unlock(&efx->vdpa_nic->lock);
+
+#ifdef EFX_NOT_UPSTREAM
+		pci_info(efx->pci_dev,
+			 "%s: Calling vdpa unregister device\n", __func__);
+#endif
+
+#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_VDPA_MGMT_INTERFACE)
+		vdpa_unregister_device(&efx->vdpa_nic->vdpa_dev);
+#else
+		/* Caller must invoke this routine as part of
+		 * management device dev_del() callback
+		 */
+		_vdpa_unregister_device(&efx->vdpa_nic->vdpa_dev);
+#endif
+
+#ifdef EFX_NOT_UPSTREAM
+		pci_info(efx->pci_dev,
+			 "%s: vdpa unregister device completed\n", __func__);
+#endif
+		efx->vdpa_nic = NULL;
+		vdpa_free_vis(efx);
+	}
+}
+
+/* A non-zero vdpa status value signifies that the vDPA device is in
+ * use and hence cannot be removed. Note that when QEMU is killed, a
+ * device reset is issued with status equal to 0.
+ */
+bool ef100_vdpa_dev_in_use(struct efx_nic *efx)
+{
+	struct ef100_vdpa_nic *vdpa_nic = efx->vdpa_nic;
+
+	if (vdpa_nic)
+		if (vdpa_nic->status >= VIRTIO_CONFIG_S_DRIVER)
+			return true;
+
+	return false;
+}
+
+#endif
+
diff --git a/src/drivers/net/ethernet/sfc/ef100_vdpa.h b/src/drivers/net/ethernet/sfc/ef100_vdpa.h
new file mode 100644
index 0000000..854317e
--- /dev/null
+++ b/src/drivers/net/ethernet/sfc/ef100_vdpa.h
@@ -0,0 +1,222 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Driver for Xilinx network controllers and boards
+ * Copyright 2020 Xilinx Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */ + +#ifndef __EF100_VDPA_H__ +#define __EF100_VDPA_H__ + +#include +#include +#include +#include +#include "net_driver.h" +#include "ef100_nic.h" + +#if defined(CONFIG_SFC_VDPA) + +/* Device ID of a virtio net device */ +#define EF100_VDPA_VIRTIO_NET_DEVICE_ID VIRTIO_ID_NET + +/* Vendor ID of Xilinx vDPA NIC */ +#define EF100_VDPA_VENDOR_ID PCI_VENDOR_ID_XILINX + +/* Max Queue pairs currently supported */ +#define EF100_VDPA_MAX_QUEUES_PAIRS 1 + +/* Vector 0 assigned for vring */ +#define EF100_VDPA_VRING_VECTOR_BASE 0 + +/* Max number of Buffers supported in the virtqueue */ +#define EF100_VDPA_VQ_NUM_MAX_SIZE 512 + +/* Alignment requirement of the Virtqueue */ +#define EF100_VDPA_VQ_ALIGN 4096 + +/* Vring configuration definitions */ +#define EF100_VRING_ADDRESS_CONFIGURED 0x1 +#define EF100_VRING_SIZE_CONFIGURED 0x2 +#define EF100_VRING_READY_CONFIGURED 0x4 +#define EF100_VRING_CONFIGURED (EF100_VRING_ADDRESS_CONFIGURED | \ + EF100_VRING_SIZE_CONFIGURED | \ + EF100_VRING_READY_CONFIGURED) + +#define EF100_VRING_CREATED 0x8 + +/* Maximum size of msix name */ +#define EF100_VDPA_MAX_MSIX_NAME_SIZE 256 + +/* Default high IOVA for MCDI buffer */ +#define EF100_VDPA_IOVA_BASE_ADDR 0x20000000000 + +#define EFX_VDPA_NAME(_vdpa) "vdpa_%d_%d", (_vdpa)->pf_index, (_vdpa)->vf_index +#define EFX_VDPA_VRING_NAME(_idx) "vring_%d", _idx + +/** + * enum ef100_vdpa_nic_state - possible states for a vDPA NIC + * + * @EF100_VDPA_STATE_INITIALIZED: State after vDPA NIC created + * @EF100_VDPA_STATE_NEGOTIATED: State after feature negotiation + * @EF100_VDPA_STATE_STARTED: State after driver ok + * @EF100_VDPA_STATE_SUSPENDED: State after device suspend + * @EF100_VDPA_STATE_NSTATES: Number of VDPA states + */ +enum ef100_vdpa_nic_state { + EF100_VDPA_STATE_INITIALIZED, + EF100_VDPA_STATE_NEGOTIATED, + EF100_VDPA_STATE_STARTED, + EF100_VDPA_STATE_SUSPENDED, + EF100_VDPA_STATE_NSTATES +}; + +enum ef100_vdpa_device_type { + EF100_VDPA_DEVICE_TYPE_NET, + EF100_VDPA_DEVICE_NTYPES +}; + +enum ef100_vdpa_vq_type { + EF100_VDPA_VQ_TYPE_NET_RXQ, + EF100_VDPA_VQ_TYPE_NET_TXQ, + EF100_VDPA_VQ_NTYPES +}; + +/** + * struct ef100_vdpa_vring_info - vDPA vring data structure + * + * @desc: Descriptor area address of the vring + * @avail: Available area address of the vring + * @used: Device area address of the vring + * @size: Number of entries in the vring + * @vring_state: bit map to track vring configuration + * @last_avail_idx: last available index of the vring + * @last_used_idx: last used index of the vring + * @doorbell_offset: doorbell offset + * @doorbell_offset_valid: true if @doorbell_offset is updated + * @vring_type: type of vring created + * @vring_ctx: vring context information + * @msix_name: device name for vring irq handler + * @irq: irq number for vring irq handler + * @cb: callback for vring interrupts + * @debug_dir: vDPA vring debugfs directory + */ +struct ef100_vdpa_vring_info { + dma_addr_t desc; + dma_addr_t avail; + dma_addr_t used; + u32 size; + u16 vring_state; + u32 last_avail_idx; + u32 last_used_idx; + u32 doorbell_offset; + bool doorbell_offset_valid; + enum ef100_vdpa_vq_type vring_type; + struct efx_vring_ctx *vring_ctx; + char msix_name[EF100_VDPA_MAX_MSIX_NAME_SIZE]; + u32 irq; + struct vdpa_callback cb; +#ifdef CONFIG_DEBUG_FS + struct dentry *debug_dir; +#endif +}; + +/** + * struct ef100_vdpa_nic - vDPA NIC data structure + * + * @vdpa_dev: vdpa_device object which registers on the vDPA bus. 
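+ *	The vdpa core hands this embedded structure back to the driver,
+ *	which recovers the containing ef100_vdpa_nic via container_of()
+ *	(see get_vdpa_nic() in ef100_vdpa_ops.c).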
+ * @vdpa_state: ensures correct device status transitions via set_status cb + * @mcdi_mode: MCDI mode at the time of unmapping VF mcdi buffer + * @efx: pointer to the VF's efx_nic object + * @lock: Managing access to vdpa config operations + * @pf_index: PF index of the vDPA VF + * @vf_index: VF index of the vDPA VF + * @status: device status as per VIRTIO spec + * @features: negotiated feature bits + * @max_queue_pairs: maximum number of queue pairs supported + * @net_config: virtio_net_config data + * @vring: vring information of the vDPA device. + * @mac_address: mac address of interface associated with this vdpa device + * @cfg_cb: callback for config change + * @debug_dir: vDPA debugfs directory + * @in_order: if true, allow VIRTIO_F_IN_ORDER feature negotiation + */ +struct ef100_vdpa_nic { + struct vdpa_device vdpa_dev; + enum ef100_vdpa_nic_state vdpa_state; + enum efx_mcdi_mode mcdi_mode; + struct efx_nic *efx; + /* for synchronizing access to vdpa config operations */ + struct mutex lock; + u32 pf_index; + u32 vf_index; + u8 status; + u64 features; + u32 max_queue_pairs; + struct virtio_net_config net_config; + struct ef100_vdpa_vring_info vring[EF100_VDPA_MAX_QUEUES_PAIRS * 2]; + u8 *mac_address; + struct vdpa_callback cfg_cb; +#ifdef CONFIG_DEBUG_FS + struct dentry *debug_dir; +#endif + bool in_order; +}; + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_VDPA_MGMT_INTERFACE) +int ef100_vdpa_register_mgmtdev(struct efx_nic *efx); +void ef100_vdpa_unregister_mgmtdev(struct efx_nic *efx); +struct ef100_vdpa_nic *ef100_vdpa_create(struct efx_nic *efx, + const char *dev_name); +#else +struct ef100_vdpa_nic *ef100_vdpa_create(struct efx_nic *efx, + const char *dev_name, + enum ef100_vdpa_class dev_type); +#endif +int ef100_vdpa_init(struct efx_probe_data *probe_data); +void ef100_vdpa_fini(struct efx_probe_data *probe_data); +void ef100_vdpa_delete(struct efx_nic *efx); +void ef100_vdpa_insert_filter(struct efx_nic *efx); +void ef100_vdpa_irq_vectors_free(void *data); +void reset_vdpa_device(struct ef100_vdpa_nic *vdpa_nic); +int ef100_vdpa_reset(struct vdpa_device *vdev); +bool ef100_vdpa_dev_in_use(struct efx_nic *efx); +int ef100_vdpa_init_vring(struct ef100_vdpa_nic *vdpa_nic, u16 idx); +int ef100_vdpa_map_mcdi_buffer(struct efx_nic *efx); + +static inline bool efx_vdpa_is_little_endian(struct ef100_vdpa_nic *vdpa_nic) +{ + return virtio_legacy_is_little_endian() || + (vdpa_nic->features & (1ULL << VIRTIO_F_VERSION_1)); +} + +static inline u16 efx_vdpa16_to_cpu(struct ef100_vdpa_nic *vdpa_nic, + __virtio16 val) +{ + return __virtio16_to_cpu(efx_vdpa_is_little_endian(vdpa_nic), val); +} + +static inline __virtio16 cpu_to_efx_vdpa16(struct ef100_vdpa_nic *vdpa_nic, + u16 val) +{ + return __cpu_to_virtio16(efx_vdpa_is_little_endian(vdpa_nic), val); +} + +static inline u32 efx_vdpa32_to_cpu(struct ef100_vdpa_nic *vdpa_nic, + __virtio32 val) +{ + return __virtio32_to_cpu(efx_vdpa_is_little_endian(vdpa_nic), val); +} + +static inline __virtio32 cpu_to_efx_vdpa32(struct ef100_vdpa_nic *vdpa_nic, + u32 val) +{ + return __cpu_to_virtio32(efx_vdpa_is_little_endian(vdpa_nic), val); +} + +extern const struct vdpa_config_ops ef100_vdpa_config_ops; +#endif /* CONFIG_SFC_VDPA */ + +#endif /* __EF100_VDPA_H__ */ diff --git a/src/drivers/net/ethernet/sfc/ef100_vdpa_dbg.c b/src/drivers/net/ethernet/sfc/ef100_vdpa_dbg.c new file mode 100644 index 0000000..df497ba --- /dev/null +++ b/src/drivers/net/ethernet/sfc/ef100_vdpa_dbg.c @@ -0,0 +1,121 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Driver 
for AMD network controllers and boards + * Copyright(C) 2023, Advanced Micro Devices, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#include "ef100_vdpa.h" +#include "ef100_vdpa_dbg.h" + +#define SIZE 100 + +struct bit_label { + u8 bit; + char *str; +}; + +static const struct bit_label virtio_net_features[] = { + {VIRTIO_F_NOTIFY_ON_EMPTY, "VIRTIO_F_NOTIFY_ON_EMPTY"}, + {VIRTIO_F_ANY_LAYOUT, "VIRTIO_F_ANY_LAYOUT"}, + {VIRTIO_F_VERSION_1, "VIRTIO_F_VERSION_1"}, + {VIRTIO_F_ACCESS_PLATFORM, "VIRTIO_F_ACCESS_PLATFORM"}, + {VIRTIO_F_RING_PACKED, "VIRTIO_F_RING_PACKED"}, + {VIRTIO_F_ORDER_PLATFORM, "VIRTIO_F_ORDER_PLATFORM"}, + {VIRTIO_F_IN_ORDER, "VIRTIO_F_IN_ORDER"}, + {VIRTIO_F_SR_IOV, "VIRTIO_F_SR_IOV"}, + {VIRTIO_NET_F_CSUM, "VIRTIO_NET_F_CSUM"}, + {VIRTIO_NET_F_GUEST_CSUM, "VIRTIO_NET_F_GUEST_CSUM"}, + {VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, "VIRTIO_NET_F_CTRL_GUEST_OFFLOADS"}, + {VIRTIO_NET_F_MTU, "VIRTIO_NET_F_MTU"}, + {VIRTIO_NET_F_MAC, "VIRTIO_NET_F_MAC"}, + {VIRTIO_NET_F_GUEST_TSO4, "VIRTIO_NET_F_GUEST_TSO4"}, + {VIRTIO_NET_F_GUEST_TSO6, "VIRTIO_NET_F_GUEST_TSO6"}, + {VIRTIO_NET_F_GUEST_ECN, "VIRTIO_NET_F_GUEST_ECN"}, + {VIRTIO_NET_F_GUEST_UFO, "VIRTIO_NET_F_GUEST_UFO"}, + {VIRTIO_NET_F_HOST_TSO4, "VIRTIO_NET_F_HOST_TSO4"}, + {VIRTIO_NET_F_HOST_TSO6, "VIRTIO_NET_F_HOST_TSO6"}, + {VIRTIO_NET_F_HOST_ECN, "VIRTIO_NET_F_HOST_ECN"}, + {VIRTIO_NET_F_HOST_UFO, "VIRTIO_NET_F_HOST_UFO"}, + {VIRTIO_NET_F_MRG_RXBUF, "VIRTIO_NET_F_MRG_RXBUF"}, + {VIRTIO_NET_F_STATUS, "VIRTIO_NET_F_STATUS"}, + {VIRTIO_NET_F_CTRL_VQ, "VIRTIO_NET_F_CTRL_VQ"}, + {VIRTIO_NET_F_CTRL_RX, "VIRTIO_NET_F_CTRL_RX"}, + {VIRTIO_NET_F_CTRL_VLAN, "VIRTIO_NET_F_CTRL_VLAN"}, + {VIRTIO_NET_F_CTRL_RX_EXTRA, "VIRTIO_NET_F_CTRL_RX_EXTRA"}, + {VIRTIO_NET_F_GUEST_ANNOUNCE, "VIRTIO_NET_F_GUEST_ANNOUNCE"}, + {VIRTIO_NET_F_MQ, "VIRTIO_NET_F_MQ"}, + {VIRTIO_NET_F_CTRL_MAC_ADDR, "VIRTIO_NET_F_CTRL_MAC_ADDR"}, + {VIRTIO_NET_F_HASH_REPORT, "VIRTIO_NET_F_HASH_REPORT"}, + {VIRTIO_NET_F_RSS, "VIRTIO_NET_F_RSS"}, + {VIRTIO_NET_F_RSC_EXT, "VIRTIO_NET_F_RSC_EXT"}, + {VIRTIO_NET_F_STANDBY, "VIRTIO_NET_F_STANDBY"}, + {VIRTIO_NET_F_SPEED_DUPLEX, "VIRTIO_NET_F_SPEED_DUPLEX"}, + {VIRTIO_NET_F_GSO, "VIRTIO_NET_F_GSO"}, +}; + +static const struct bit_label virtio_net_status[] = { + {VIRTIO_CONFIG_S_ACKNOWLEDGE, "ACKNOWLEDGE"}, + {VIRTIO_CONFIG_S_DRIVER, "DRIVER"}, + {VIRTIO_CONFIG_S_FEATURES_OK, "FEATURES_OK"}, + {VIRTIO_CONFIG_S_DRIVER_OK, "DRIVER_OK"}, + {VIRTIO_CONFIG_S_FAILED, "FAILED"} +}; + +void print_status_str(u8 status, struct vdpa_device *vdev) +{ + u16 table_len = ARRAY_SIZE(virtio_net_status); + char concat_str[] = ", "; + char buf[SIZE] = {0}; + u16 i; + + if (status == 0) { + dev_info(&vdev->dev, "RESET\n"); + return; + } + for (i = 0; (i < table_len) && status; i++) { + if (status & virtio_net_status[i].bit) { + snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), + "%s", virtio_net_status[i].str); + status &= ~virtio_net_status[i].bit; + snprintf(buf + strlen(buf), + sizeof(buf) - strlen(buf), "%s", concat_str); + } + } + dev_info(&vdev->dev, "%s\n", buf); + if (status) + dev_info(&vdev->dev, "Unknown status:0x%x\n", status); +} + +void print_features_str(u64 features, struct vdpa_device *vdev) +{ + int table_len = ARRAY_SIZE(virtio_net_features); + int i; + + for (i = 0; (i < table_len) && features; i++) { + if (features & 
BIT_ULL(virtio_net_features[i].bit)) { + dev_info(&vdev->dev, "%s: %s\n", __func__, + virtio_net_features[i].str); + features &= ~BIT_ULL(virtio_net_features[i].bit); + } + } + if (features) { + dev_info(&vdev->dev, + "%s: Unknown Features:0x%llx\n", + __func__, features); + } +} + +void print_vring_state(u16 state, struct vdpa_device *vdev) +{ + bool addr_conf = state & EF100_VRING_ADDRESS_CONFIGURED; + bool size_conf = state & EF100_VRING_SIZE_CONFIGURED; + bool ready_conf = state & EF100_VRING_READY_CONFIGURED; + bool vring_created = state & EF100_VRING_CREATED; + + dev_info(&vdev->dev, + "%s: addr_conf: %u, sz_conf: %u, rdy_conf: %u, created: %u\n", + __func__, addr_conf, size_conf, ready_conf, vring_created); +} diff --git a/src/drivers/net/ethernet/sfc/ef100_vdpa_dbg.h b/src/drivers/net/ethernet/sfc/ef100_vdpa_dbg.h new file mode 100644 index 0000000..1dd5b7d --- /dev/null +++ b/src/drivers/net/ethernet/sfc/ef100_vdpa_dbg.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Driver for AMD network controllers and boards + * Copyright(C) 2023, Advanced Micro Devices, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#ifndef EFX_EF100_VDPA_DBG_H +#define EFX_EF100_VDPA_DBG_H + +void print_status_str(u8 status, struct vdpa_device *vdev); +void print_vring_state(u16 state, struct vdpa_device *vdev); +void print_features_str(u64 features, struct vdpa_device *vdev); + +#endif /* EFX_EF100_VDPA_DBG_H */ diff --git a/src/drivers/net/ethernet/sfc/ef100_vdpa_ops.c b/src/drivers/net/ethernet/sfc/ef100_vdpa_ops.c new file mode 100644 index 0000000..a0f5f63 --- /dev/null +++ b/src/drivers/net/ethernet/sfc/ef100_vdpa_ops.c @@ -0,0 +1,1079 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Driver for Xilinx network controllers and boards + * Copyright 2020 Xilinx Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
+ */
+
+#include
+#include "ef100_nic.h"
+#include "io.h"
+#include "ef100_vdpa.h"
+#include "mcdi_vdpa.h"
+#include "mcdi_filters.h"
+#include "debugfs.h"
+#ifdef EFX_NOT_UPSTREAM
+#include "ef100_vdpa_dbg.h"
+#endif
+
+#if defined(CONFIG_SFC_VDPA)
+
+/* Get the function-local index of the VI backing a given virtqueue
+ * number; VI 0 of the function is reserved for MCDI, so virtqueue
+ * pair n maps to VI n + 1.
+ */
+#define EFX_GET_VI_INDEX(vq_num) (((vq_num) / 2) + 1)
+
+static const char *get_vdpa_state_str(enum ef100_vdpa_nic_state state)
+{
+	static const char *const vdpa_state_str[] = {"INITIALIZED", "NEGOTIATED",
+						     "STARTED", "SUSPENDED"};
+
+	if (state < EF100_VDPA_STATE_NSTATES)
+		return vdpa_state_str[state];
+	return "UNKNOWN";
+}
+
+static struct ef100_vdpa_nic *get_vdpa_nic(struct vdpa_device *vdev)
+{
+	return container_of(vdev, struct ef100_vdpa_nic, vdpa_dev);
+}
+
+static irqreturn_t vring_intr_handler(int irq, void *arg)
+{
+	struct ef100_vdpa_vring_info *vring = arg;
+
+	if (vring->cb.callback)
+		return vring->cb.callback(vring->cb.private);
+
+	return IRQ_NONE;
+}
+
+static int ef100_vdpa_irq_vectors_alloc(struct pci_dev *pci_dev, u16 nvqs)
+{
+	int rc;
+
+	rc = pci_alloc_irq_vectors(pci_dev, nvqs, nvqs, PCI_IRQ_MSIX);
+	if (rc < 0)
+		pci_err(pci_dev,
+			"Failed to alloc %d IRQ vectors, err:%d\n", nvqs, rc);
+	return rc;
+}
+
+void ef100_vdpa_irq_vectors_free(void *data)
+{
+	pci_free_irq_vectors(data);
+}
+
+static bool is_qid_invalid(struct ef100_vdpa_nic *vdpa_nic, u16 idx,
+			   const char *caller)
+{
+	if (unlikely(idx >= (vdpa_nic->max_queue_pairs * 2))) {
+		dev_err(&vdpa_nic->vdpa_dev.dev,
+			"%s: Invalid qid %u\n", caller, idx);
+		return true;
+	}
+	return false;
+}
+
+int ef100_vdpa_map_mcdi_buffer(struct efx_nic *efx)
+{
+	struct ef100_nic_data *nic_data = efx->nic_data;
+	struct efx_mcdi_iface *mcdi;
+	int rc;
+
+	/* Update VF's MCDI buffer when switching out of vdpa mode */
+	rc = efx_nic_alloc_buffer(efx, &nic_data->mcdi_buf,
+				  MCDI_BUF_LEN, GFP_KERNEL);
+	if (rc)
+		return rc;
+
+	mcdi = efx_mcdi(efx);
+	spin_lock_bh(&mcdi->iface_lock);
+	mcdi->mode = efx->vdpa_nic->mcdi_mode;
+	efx->mcdi_buf_mode = EFX_BUF_MODE_EF100;
+	spin_unlock_bh(&mcdi->iface_lock);
+
+	return 0;
+}
+
+static int irq_vring_init(struct ef100_vdpa_nic *vdpa_nic, u16 idx)
+{
+	struct ef100_vdpa_vring_info *vring = &vdpa_nic->vring[idx];
+	struct pci_dev *pci_dev = vdpa_nic->efx->pci_dev;
+	int irq;
+	int rc;
+
+	snprintf(vring->msix_name, sizeof(vring->msix_name), "x_vdpa[%s]-%d",
+		 pci_name(pci_dev), idx);
+	irq = pci_irq_vector(pci_dev, idx);
+	rc = devm_request_irq(&pci_dev->dev, irq, vring_intr_handler, 0,
+			      vring->msix_name, vring);
+	if (rc)
+		pci_err(pci_dev,
+			"Failed to request irq for vring %d, rc %d\n", idx, rc);
+	else
+		vring->irq = irq;
+
+	return rc;
+}
+
+static void irq_vring_fini(struct ef100_vdpa_nic *vdpa_nic, u16 idx)
+{
+	struct ef100_vdpa_vring_info *vring = &vdpa_nic->vring[idx];
+	struct pci_dev *pci_dev = vdpa_nic->efx->pci_dev;
+
+	devm_free_irq(&pci_dev->dev, vring->irq, vring);
+	vring->irq = -EINVAL;
+}
+
+static int ef100_vdpa_filter_configure(struct ef100_vdpa_nic *vdpa_nic)
+{
+	struct efx_nic *efx = vdpa_nic->efx;
+	int rc;
+
+	if (efx->type->filter_table_up) {
+		rc = efx->type->filter_table_up(efx);
+		if (rc)
+			return rc;
+	}
+	efx_mcdi_push_default_indir_table(efx,
+					  vdpa_nic->max_queue_pairs);
+	ef100_vdpa_insert_filter(vdpa_nic->efx);
+
+	return 0;
+}
+
+static bool can_create_vring(struct ef100_vdpa_nic *vdpa_nic, u16 idx)
+{
+	if (vdpa_nic->vring[idx].vring_state ==
EF100_VRING_CONFIGURED &&
+	    vdpa_nic->status & VIRTIO_CONFIG_S_DRIVER_OK &&
+	    !(vdpa_nic->vring[idx].vring_state & EF100_VRING_CREATED)) {
+#ifdef EFX_NOT_UPSTREAM
+		dev_info(&vdpa_nic->vdpa_dev.dev,
+			 "%s: vring to be created for Index:%u\n", __func__,
+			 idx);
+#endif
+		return true;
+	}
+#ifdef EFX_NOT_UPSTREAM
+	dev_info(&vdpa_nic->vdpa_dev.dev,
+		 "%s: vring cannot be created for Index:%u\n", __func__,
+		 idx);
+	print_vring_state(vdpa_nic->vring[idx].vring_state,
+			  &vdpa_nic->vdpa_dev);
+	dev_info(&vdpa_nic->vdpa_dev.dev, "%s: Device status:\n",
+		 __func__);
+	print_status_str(vdpa_nic->status, &vdpa_nic->vdpa_dev);
+	dev_info(&vdpa_nic->vdpa_dev.dev,
+		 "%s: Vring %u not created\n", __func__, idx);
+#endif
+	return false;
+}
+
+static int create_vring_ctx(struct ef100_vdpa_nic *vdpa_nic, u16 idx)
+{
+	struct efx_vring_ctx *vring_ctx;
+	u32 vi_index;
+	int rc = 0;
+
+	if (!vdpa_nic->efx) {
+		dev_err(&vdpa_nic->vdpa_dev.dev,
+			"%s: Invalid efx for idx:%u\n", __func__, idx);
+		return -EINVAL;
+	}
+	if (idx % 2) /* Even VQ for RX and odd for TX */
+		vdpa_nic->vring[idx].vring_type = EF100_VDPA_VQ_TYPE_NET_TXQ;
+	else
+		vdpa_nic->vring[idx].vring_type = EF100_VDPA_VQ_TYPE_NET_RXQ;
+	vi_index = EFX_GET_VI_INDEX(idx);
+	vring_ctx = efx_vdpa_vring_init(vdpa_nic->efx, vi_index,
+					vdpa_nic->vring[idx].vring_type);
+	if (IS_ERR(vring_ctx)) {
+		rc = PTR_ERR(vring_ctx);
+		return rc;
+	}
+	vdpa_nic->vring[idx].vring_ctx = vring_ctx;
+	return 0;
+}
+
+static void delete_vring_ctx(struct ef100_vdpa_nic *vdpa_nic, u16 idx)
+{
+	efx_vdpa_vring_fini(vdpa_nic->vring[idx].vring_ctx);
+	vdpa_nic->vring[idx].vring_ctx = NULL;
+}
+
+static int delete_vring(struct ef100_vdpa_nic *vdpa_nic, u16 idx)
+{
+	struct efx_vring_dyn_cfg vring_dyn_cfg;
+	int rc = 0;
+
+	if (!(vdpa_nic->vring[idx].vring_state & EF100_VRING_CREATED))
+		return 0;
+
+	/* delete vring debugfs directory */
+	efx_fini_debugfs_vdpa_vring(&vdpa_nic->vring[idx]);
+#ifdef EFX_NOT_UPSTREAM
+	dev_info(&vdpa_nic->vdpa_dev.dev,
+		 "%s: Called for %u\n", __func__, idx);
+#endif
+	rc = efx_vdpa_vring_destroy(vdpa_nic->vring[idx].vring_ctx,
+				    &vring_dyn_cfg);
+	if (rc)
+		dev_err(&vdpa_nic->vdpa_dev.dev,
+			"%s: Queue delete failed index:%u Err:%d\n",
+			__func__, idx, rc);
+	vdpa_nic->vring[idx].last_avail_idx = vring_dyn_cfg.avail_idx;
+	vdpa_nic->vring[idx].last_used_idx = vring_dyn_cfg.used_idx;
+	vdpa_nic->vring[idx].vring_state &= ~EF100_VRING_CREATED;
+
+	irq_vring_fini(vdpa_nic, idx);
+
+	return rc;
+}
+
+int ef100_vdpa_init_vring(struct ef100_vdpa_nic *vdpa_nic, u16 idx)
+{
+	u32 offset;
+	int rc;
+
+	vdpa_nic->vring[idx].irq = -EINVAL;
+	rc = create_vring_ctx(vdpa_nic, idx);
+	if (rc) {
+		dev_err(&vdpa_nic->vdpa_dev.dev,
+			"%s: create_vring_ctx failed, idx:%u, err:%d\n",
+			__func__, idx, rc);
+		return rc;
+	}
+
+	rc = efx_vdpa_get_doorbell_offset(vdpa_nic->vring[idx].vring_ctx,
+					  &offset);
+	if (rc) {
+		dev_err(&vdpa_nic->vdpa_dev.dev,
+			"%s: get_doorbell failed idx:%u, err:%d\n",
+			__func__, idx, rc);
+		goto err_get_doorbell_offset;
+	}
+	vdpa_nic->vring[idx].doorbell_offset = offset;
+	vdpa_nic->vring[idx].doorbell_offset_valid = true;
+
+	return 0;
+
+err_get_doorbell_offset:
+	delete_vring_ctx(vdpa_nic, idx);
+	return rc;
+}
+
+static void ef100_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx)
+{
+	struct ef100_vdpa_nic *vdpa_nic = get_vdpa_nic(vdev);
+	u32 idx_val;
+
+	if (is_qid_invalid(vdpa_nic, idx, __func__))
+		return;
+
+	if (!(vdpa_nic->vring[idx].vring_state & EF100_VRING_CREATED)) {
+		dev_err(&vdev->dev, "%s: Invalid vring %u\n", __func__,
idx); + return; + } + idx_val = idx; +#ifdef EFX_NOT_UPSTREAM + dev_vdbg(&vdev->dev, "%s: Writing value:%u in offset register:%u\n", + __func__, idx_val, vdpa_nic->vring[idx].doorbell_offset); +#endif + _efx_writed(vdpa_nic->efx, cpu_to_le32(idx_val), + vdpa_nic->vring[idx].doorbell_offset); +} + +static int create_vring(struct ef100_vdpa_nic *vdpa_nic, u16 idx) +{ + struct efx_vring_dyn_cfg vring_dyn_cfg; + struct efx_vring_cfg vring_cfg; + int rc; + +#ifdef EFX_NOT_UPSTREAM + dev_info(&vdpa_nic->vdpa_dev.dev, + "%s: Called for %u\n", __func__, idx); +#endif + + rc = irq_vring_init(vdpa_nic, idx); + if (rc) { + dev_err(&vdpa_nic->vdpa_dev.dev, + "%s: irq_vring_init failed. index:%u Err:%d\n", + __func__, idx, rc); + return rc; + } + vring_cfg.desc = vdpa_nic->vring[idx].desc; + vring_cfg.avail = vdpa_nic->vring[idx].avail; + vring_cfg.used = vdpa_nic->vring[idx].used; + vring_cfg.size = vdpa_nic->vring[idx].size; + vring_cfg.features = vdpa_nic->features; + vring_cfg.use_pasid = false; + vring_cfg.pasid = 0; + vring_cfg.msix_vector = idx; + vring_dyn_cfg.avail_idx = vdpa_nic->vring[idx].last_avail_idx; + vring_dyn_cfg.used_idx = vdpa_nic->vring[idx].last_used_idx; + + rc = efx_vdpa_vring_create(vdpa_nic->vring[idx].vring_ctx, &vring_cfg, + &vring_dyn_cfg); + if (rc != 0) { + dev_err(&vdpa_nic->vdpa_dev.dev, + "%s: Queue create failed index:%u Err:%d\n", + __func__, idx, rc); + goto err_vring_create; + } + vdpa_nic->vring[idx].vring_state |= EF100_VRING_CREATED; + + + rc = efx_init_debugfs_vdpa_vring(vdpa_nic, &vdpa_nic->vring[idx], idx); + if (rc) + goto err_debugfs_vdpa_init; + + /* A VQ kick allows the device to read the avail_idx, which will be + * required at the destination after live migration. + */ + ef100_vdpa_kick_vq(&vdpa_nic->vdpa_dev, idx); + + return 0; + +err_debugfs_vdpa_init: + efx_vdpa_vring_destroy(vdpa_nic->vring[idx].vring_ctx, + &vring_dyn_cfg); + vdpa_nic->vring[idx].vring_state &= ~EF100_VRING_CREATED; +err_vring_create: + irq_vring_fini(vdpa_nic, idx); + + return rc; +} + +static void reset_vring(struct ef100_vdpa_nic *vdpa_nic, u16 idx) +{ + delete_vring(vdpa_nic, idx); + vdpa_nic->vring[idx].vring_type = EF100_VDPA_VQ_NTYPES; + vdpa_nic->vring[idx].vring_state = 0; + vdpa_nic->vring[idx].last_avail_idx = 0; + vdpa_nic->vring[idx].last_used_idx = 0; +} + +void reset_vdpa_device(struct ef100_vdpa_nic *vdpa_nic) +{ + struct efx_nic *efx = vdpa_nic->efx; + int i, rc; + + WARN_ON(!mutex_is_locked(&vdpa_nic->lock)); + + if (!vdpa_nic->status) + return; + + rc = efx_mcdi_filter_remove_all(vdpa_nic->efx, + EFX_FILTER_PRI_AUTO); + if (rc < 0) { + dev_err(&vdpa_nic->vdpa_dev.dev, + "%s: vdpa remove filter failed, err:%d\n", + __func__, rc); + } + efx->type->filter_table_down(efx); + vdpa_nic->vdpa_state = EF100_VDPA_STATE_INITIALIZED; + vdpa_nic->status = 0; + vdpa_nic->features = 0; + for (i = 0; i < (vdpa_nic->max_queue_pairs * 2); i++) + reset_vring(vdpa_nic, i); + ef100_vdpa_irq_vectors_free(vdpa_nic->efx->pci_dev); +} + +/* May be called under the rtnl lock */ +int ef100_vdpa_reset(struct vdpa_device *vdev) +{ + struct ef100_vdpa_nic *vdpa_nic = get_vdpa_nic(vdev); + + /* vdpa device can be deleted anytime but the bar_config + * could still be vdpa and hence efx->state would be STATE_VDPA. 
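+	 * (ef100_vdpa_delete() clears efx->vdpa_nic when the device is
+	 * removed.)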
+ * Accordingly, ensure vdpa device exists before reset handling + */ + if (!vdpa_nic) + return -ENODEV; + + mutex_lock(&vdpa_nic->lock); + reset_vdpa_device(vdpa_nic); + mutex_unlock(&vdpa_nic->lock); + return 0; +} + +static int start_vdpa_device(struct ef100_vdpa_nic *vdpa_nic) +{ + struct efx_nic *efx = vdpa_nic->efx; + struct ef100_nic_data *nic_data; + int rc, i, j; + + nic_data = efx->nic_data; + rc = ef100_vdpa_irq_vectors_alloc(efx->pci_dev, + vdpa_nic->max_queue_pairs * 2); + if (rc < 0) { + dev_err(&vdpa_nic->vdpa_dev.dev, + "vDPA IRQ alloc failed for vf: %u err:%d\n", + nic_data->vf_index, rc); + return rc; + } + + for (i = 0; i < (vdpa_nic->max_queue_pairs * 2); i++) { + if (can_create_vring(vdpa_nic, i)) { + rc = create_vring(vdpa_nic, i); + if (rc) + goto clear_vring; + } + } + + rc = ef100_vdpa_filter_configure(vdpa_nic); + if (rc) + goto clear_vring; + + vdpa_nic->vdpa_state = EF100_VDPA_STATE_STARTED; + return 0; + +clear_vring: + for (j = 0; j < i; j++) + delete_vring(vdpa_nic, j); + + ef100_vdpa_irq_vectors_free(efx->pci_dev); + return rc; +} + +static int ef100_vdpa_set_vq_address(struct vdpa_device *vdev, + u16 idx, u64 desc_area, u64 driver_area, + u64 device_area) +{ + struct ef100_vdpa_nic *vdpa_nic = NULL; + int rc = 0; + + vdpa_nic = get_vdpa_nic(vdev); + if (is_qid_invalid(vdpa_nic, idx, __func__)) + return -EINVAL; + +#ifdef EFX_NOT_UPSTREAM + dev_info(&vdev->dev, "%s: Invoked for index %u\n", __func__, idx); +#endif + mutex_lock(&vdpa_nic->lock); + vdpa_nic->vring[idx].desc = desc_area; + vdpa_nic->vring[idx].avail = driver_area; + vdpa_nic->vring[idx].used = device_area; + vdpa_nic->vring[idx].vring_state |= EF100_VRING_ADDRESS_CONFIGURED; + mutex_unlock(&vdpa_nic->lock); + return rc; +} + +static void ef100_vdpa_set_vq_num(struct vdpa_device *vdev, u16 idx, u32 num) +{ + struct ef100_vdpa_nic *vdpa_nic; + + vdpa_nic = get_vdpa_nic(vdev); + if (is_qid_invalid(vdpa_nic, idx, __func__)) + return; + +#ifdef EFX_NOT_UPSTREAM + dev_info(&vdev->dev, "%s: Invoked for index:%u size:%u\n", __func__, + idx, num); +#endif + if (!is_power_of_2(num)) { + dev_err(&vdev->dev, "%s: Index:%u size:%u not power of 2\n", + __func__, idx, num); + return; + } + if (num > EF100_VDPA_VQ_NUM_MAX_SIZE) { + dev_err(&vdev->dev, "%s: Index:%u size:%u more than max:%u\n", + __func__, idx, num, EF100_VDPA_VQ_NUM_MAX_SIZE); + return; + } + mutex_lock(&vdpa_nic->lock); + vdpa_nic->vring[idx].size = num; + vdpa_nic->vring[idx].vring_state |= EF100_VRING_SIZE_CONFIGURED; + mutex_unlock(&vdpa_nic->lock); +} + +static void ef100_vdpa_set_vq_cb(struct vdpa_device *vdev, u16 idx, + struct vdpa_callback *cb) +{ + struct ef100_vdpa_nic *vdpa_nic; + + vdpa_nic = get_vdpa_nic(vdev); + if (is_qid_invalid(vdpa_nic, idx, __func__)) + return; + + if (cb) + vdpa_nic->vring[idx].cb = *cb; + +#ifdef EFX_NOT_UPSTREAM + dev_info(&vdev->dev, "%s: Setting vq callback for vring %u\n", + __func__, idx); +#endif +} + +static void ef100_vdpa_set_vq_ready(struct vdpa_device *vdev, u16 idx, + bool ready) +{ + struct ef100_vdpa_nic *vdpa_nic; + int rc; + + vdpa_nic = get_vdpa_nic(vdev); + if (is_qid_invalid(vdpa_nic, idx, __func__)) + return; + +#ifdef EFX_NOT_UPSTREAM + dev_info(&vdev->dev, "%s: Queue Id: %u Ready :%u\n", __func__, + idx, ready); +#endif + mutex_lock(&vdpa_nic->lock); + if (ready) { + vdpa_nic->vring[idx].vring_state |= + EF100_VRING_READY_CONFIGURED; + if (vdpa_nic->vdpa_state == EF100_VDPA_STATE_STARTED && + can_create_vring(vdpa_nic, idx)) { + rc = create_vring(vdpa_nic, idx); + if (rc) + /* 
Rollback ready configuration + * So that the above layer driver + * can make another attempt to set ready + */ + vdpa_nic->vring[idx].vring_state &= + ~EF100_VRING_READY_CONFIGURED; + } + } else { + vdpa_nic->vring[idx].vring_state &= + ~EF100_VRING_READY_CONFIGURED; + delete_vring(vdpa_nic, idx); + } + mutex_unlock(&vdpa_nic->lock); +} + +static bool ef100_vdpa_get_vq_ready(struct vdpa_device *vdev, u16 idx) +{ + struct ef100_vdpa_nic *vdpa_nic; + bool ready; + + vdpa_nic = get_vdpa_nic(vdev); + if (is_qid_invalid(vdpa_nic, idx, __func__)) + return false; + + mutex_lock(&vdpa_nic->lock); +#ifdef EFX_NOT_UPSTREAM + dev_info(&vdev->dev, "%s: Index:%u Value returned: %u\n", __func__, + idx, vdpa_nic->vring[idx].vring_state & + EF100_VRING_READY_CONFIGURED); +#endif + ready = vdpa_nic->vring[idx].vring_state & EF100_VRING_READY_CONFIGURED; + mutex_unlock(&vdpa_nic->lock); + return ready; +} + +static int ef100_vdpa_set_vq_state(struct vdpa_device *vdev, u16 idx, +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_VDPA_VQ_STATE) + const struct vdpa_vq_state *state) +#else + u64 state) +#endif +{ + struct ef100_vdpa_nic *vdpa_nic; + + vdpa_nic = get_vdpa_nic(vdev); + if (is_qid_invalid(vdpa_nic, idx, __func__)) + return -EINVAL; + + mutex_lock(&vdpa_nic->lock); +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_VDPA_VQ_STATE) +#ifdef EFX_NOT_UPSTREAM + dev_info(&vdev->dev, "%s: Queue:%u State:0x%x", __func__, idx, +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_VDPA_VQ_STATE_SPLIT) + state->split.avail_index); +#else + state->avail_index); +#endif +#endif +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_VDPA_VQ_STATE_SPLIT) + vdpa_nic->vring[idx].last_avail_idx = state->split.avail_index; + vdpa_nic->vring[idx].last_used_idx = state->split.avail_index; +#else + vdpa_nic->vring[idx].last_avail_idx = state->avail_index; + vdpa_nic->vring[idx].last_used_idx = state->avail_index; +#endif +#else +#ifdef EFX_NOT_UPSTREAM + dev_info(&vdev->dev, "%s: Queue:%u State:0x%llx", __func__, idx, state); +#endif + vdpa_nic->vring[idx].last_avail_idx = state; + vdpa_nic->vring[idx].last_used_idx = state; +#endif + mutex_unlock(&vdpa_nic->lock); + return 0; +} + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_VDPA_VQ_STATE) +static int ef100_vdpa_get_vq_state(struct vdpa_device *vdev, + u16 idx, struct vdpa_vq_state *state) +#else +static u64 ef100_vdpa_get_vq_state(struct vdpa_device *vdev, u16 idx) +#endif +{ + struct ef100_vdpa_nic *vdpa_nic; + u32 last_avail_index = 0; + + vdpa_nic = get_vdpa_nic(vdev); + if (is_qid_invalid(vdpa_nic, idx, __func__)) + return -EINVAL; + +#ifdef EFX_NOT_UPSTREAM + dev_info(&vdev->dev, "%s: Queue:%u State:0x%x", __func__, idx, + vdpa_nic->vring[idx].last_avail_idx); +#endif + + mutex_lock(&vdpa_nic->lock); +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_VDPA_VQ_STATE) +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_VDPA_VQ_STATE_SPLIT) + /* In get_vq_state, we have to return the indices of the + * last processed descriptor buffer by the device. 
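+	 * For a split virtqueue this is reported via avail_index, so the
+	 * saved last_used_idx is returned below.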
+ */ + state->split.avail_index = (u16)vdpa_nic->vring[idx].last_used_idx; +#else + state->avail_index = (u16)vdpa_nic->vring[idx].last_used_idx; +#endif +#else + last_avail_index = vdpa_nic->vring[idx].last_used_idx; +#endif + mutex_unlock(&vdpa_nic->lock); + + return last_avail_index; +} + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_GET_VQ_NOTIFY) +static struct vdpa_notification_area + ef100_vdpa_get_vq_notification(struct vdpa_device *vdev, u16 idx) +{ + struct vdpa_notification_area notify_area = {0, 0}; + struct ef100_vdpa_nic *vdpa_nic; + struct efx_nic *efx; + + vdpa_nic = get_vdpa_nic(vdev); + if (is_qid_invalid(vdpa_nic, idx, __func__)) + return notify_area; + + mutex_lock(&vdpa_nic->lock); + + efx = vdpa_nic->efx; + notify_area.addr = (uintptr_t)(efx->membase_phys + + vdpa_nic->vring[idx].doorbell_offset); + + /* VDPA doorbells are at a stride of VI/2 + * One VI stride is shared by both rx & tx doorbells + */ + notify_area.size = efx->vi_stride / 2; + +#ifdef EFX_NOT_UPSTREAM + dev_info(&vdev->dev, "%s: Queue Id:%u Notification addr:0x%x size:0x%x", + __func__, idx, (u32)notify_area.addr, (u32)notify_area.size); +#endif + mutex_unlock(&vdpa_nic->lock); + + return notify_area; +} +#endif + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_GET_VQ_IRQ) +static int ef100_get_vq_irq(struct vdpa_device *vdev, u16 idx) +{ + struct ef100_vdpa_nic *vdpa_nic = get_vdpa_nic(vdev); + u32 irq; + + if (is_qid_invalid(vdpa_nic, idx, __func__)) + return -EINVAL; + + mutex_lock(&vdpa_nic->lock); + irq = vdpa_nic->vring[idx].irq; + mutex_unlock(&vdpa_nic->lock); +#ifdef EFX_NOT_UPSTREAM + dev_info(&vdev->dev, "%s: Queue Id %u, irq: %d\n", __func__, idx, irq); +#endif + + return irq; +} +#endif + +static u32 ef100_vdpa_get_vq_align(struct vdpa_device *vdev) +{ +#ifdef EFX_NOT_UPSTREAM + dev_info(&vdev->dev, "%s: Returning value:%u\n", __func__, + EF100_VDPA_VQ_ALIGN); +#endif + return EF100_VDPA_VQ_ALIGN; +} + +static u64 ef100_vdpa_get_device_features(struct vdpa_device *vdev) +{ + struct ef100_vdpa_nic *vdpa_nic; + u64 features = 0; + int rc = 0; + + vdpa_nic = get_vdpa_nic(vdev); + rc = efx_vdpa_get_features(vdpa_nic->efx, + EF100_VDPA_DEVICE_TYPE_NET, &features); + if (rc != 0) { + dev_err(&vdev->dev, "%s: MCDI get features error:%d\n", + __func__, rc); + /* Returning 0 as value of features will lead to failure + * of feature negotiation. 
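+	 * This is the desired outcome when the device features cannot be
+	 * read from the firmware.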
+ */ + return 0; + } + if (!vdpa_nic->in_order) + features &= ~BIT_ULL(VIRTIO_F_IN_ORDER); + features |= (1ULL << VIRTIO_NET_F_MAC); + /* TODO: QEMU Shadow VirtQueue (SVQ) doesn't support + * VIRTIO_F_ORDER_PLATFORM, so masking it off to allow Live Migration + */ + features &= ~(1ULL << VIRTIO_F_ORDER_PLATFORM); +#ifdef EFX_NOT_UPSTREAM + dev_info(&vdev->dev, "%s: Features returned:\n", __func__); + print_features_str(features, vdev); +#endif + return features; +} + +static int ef100_vdpa_set_driver_features(struct vdpa_device *vdev, + u64 features) +{ + struct ef100_vdpa_nic *vdpa_nic; + u64 verify_features = features; + int rc = 0; + +#ifdef EFX_NOT_UPSTREAM + dev_info(&vdev->dev, "%s: Features received:\n", __func__); + print_features_str(features, vdev); +#endif + vdpa_nic = get_vdpa_nic(vdev); + mutex_lock(&vdpa_nic->lock); + if (vdpa_nic->vdpa_state != EF100_VDPA_STATE_INITIALIZED) { + dev_err(&vdev->dev, "%s: Invalid current state %s\n", + __func__, get_vdpa_state_str(vdpa_nic->vdpa_state)); + rc = -EINVAL; + goto err; + } + verify_features = features & ~(1ULL << VIRTIO_NET_F_MAC); + rc = efx_vdpa_verify_features(vdpa_nic->efx, + EF100_VDPA_DEVICE_TYPE_NET, + verify_features); + + if (rc != 0) { + dev_err(&vdev->dev, "%s: MCDI verify features error:%d\n", + __func__, rc); + goto err; + } + + vdpa_nic->features = features; +err: + mutex_unlock(&vdpa_nic->lock); + return rc; +} + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_GET_DEVICE_FEATURES) +static u64 ef100_vdpa_get_driver_features(struct vdpa_device *vdev) +{ + struct ef100_vdpa_nic *vdpa_nic = get_vdpa_nic(vdev); + + return vdpa_nic->features; +} +#endif + +static void ef100_vdpa_set_config_cb(struct vdpa_device *vdev, + struct vdpa_callback *cb) +{ + struct ef100_vdpa_nic *vdpa_nic = get_vdpa_nic(vdev); + + if (cb) + vdpa_nic->cfg_cb = *cb; + +#ifdef EFX_NOT_UPSTREAM + dev_info(&vdev->dev, "%s: Setting config callback\n", __func__); +#endif +} + +static u16 ef100_vdpa_get_vq_num_max(struct vdpa_device *vdev) +{ +#ifdef EFX_NOT_UPSTREAM + dev_info(&vdev->dev, "%s: Returning value:%u\n", __func__, + EF100_VDPA_VQ_NUM_MAX_SIZE); +#endif + return EF100_VDPA_VQ_NUM_MAX_SIZE; +} + +static u32 ef100_vdpa_get_device_id(struct vdpa_device *vdev) +{ +#ifdef EFX_NOT_UPSTREAM + dev_info(&vdev->dev, "%s: Returning value:%u\n", __func__, + EF100_VDPA_VIRTIO_NET_DEVICE_ID); +#endif + return EF100_VDPA_VIRTIO_NET_DEVICE_ID; +} + +static u32 ef100_vdpa_get_vendor_id(struct vdpa_device *vdev) +{ +#ifdef EFX_NOT_UPSTREAM + dev_info(&vdev->dev, "%s: Returning value:0x%x\n", __func__, + EF100_VDPA_VENDOR_ID); +#endif + return EF100_VDPA_VENDOR_ID; +} + +static u8 ef100_vdpa_get_status(struct vdpa_device *vdev) +{ + struct ef100_vdpa_nic *vdpa_nic = get_vdpa_nic(vdev); + u8 status; + + mutex_lock(&vdpa_nic->lock); + status = vdpa_nic->status; + mutex_unlock(&vdpa_nic->lock); +#ifdef EFX_NOT_UPSTREAM + dev_info(&vdev->dev, "%s: Returning current status bit(s):\n", + __func__); + print_status_str(status, vdev); +#endif + return status; +} + +static void ef100_vdpa_set_status(struct vdpa_device *vdev, u8 status) +{ + struct ef100_vdpa_nic *vdpa_nic = get_vdpa_nic(vdev); + u8 new_status; + int rc = 0; + + mutex_lock(&vdpa_nic->lock); + if (!status) { + dev_info(&vdev->dev, + "%s: Status received is 0. 
Device reset being done\n", + __func__); + reset_vdpa_device(vdpa_nic); + goto unlock_return; + } + new_status = status & ~vdpa_nic->status; + if (new_status == 0) { + dev_info(&vdev->dev, + "%s: New status equal/subset of existing status:\n", + __func__); + dev_info(&vdev->dev, "%s: New status bits:\n", __func__); +#ifdef EFX_NOT_UPSTREAM + print_status_str(status, vdev); + dev_info(&vdev->dev, "%s: Existing status bits:\n", __func__); + print_status_str(vdpa_nic->status, vdev); +#endif + goto unlock_return; + } + if (new_status & VIRTIO_CONFIG_S_FAILED) { + reset_vdpa_device(vdpa_nic); + goto unlock_return; + } +#ifdef EFX_NOT_UPSTREAM + dev_info(&vdev->dev, "%s: New status:\n", __func__); + print_status_str(new_status, vdev); +#endif + while (new_status) { + if (new_status & VIRTIO_CONFIG_S_ACKNOWLEDGE && + vdpa_nic->vdpa_state == EF100_VDPA_STATE_INITIALIZED) { + vdpa_nic->status |= VIRTIO_CONFIG_S_ACKNOWLEDGE; + new_status = new_status & ~VIRTIO_CONFIG_S_ACKNOWLEDGE; + } else if (new_status & VIRTIO_CONFIG_S_DRIVER && + vdpa_nic->vdpa_state == + EF100_VDPA_STATE_INITIALIZED) { + vdpa_nic->status |= VIRTIO_CONFIG_S_DRIVER; + new_status = new_status & ~VIRTIO_CONFIG_S_DRIVER; + } else if (new_status & VIRTIO_CONFIG_S_FEATURES_OK && + vdpa_nic->vdpa_state == + EF100_VDPA_STATE_INITIALIZED) { + vdpa_nic->status |= VIRTIO_CONFIG_S_FEATURES_OK; + vdpa_nic->vdpa_state = EF100_VDPA_STATE_NEGOTIATED; + new_status = new_status & ~VIRTIO_CONFIG_S_FEATURES_OK; + } else if (new_status & VIRTIO_CONFIG_S_DRIVER_OK && + vdpa_nic->vdpa_state == + EF100_VDPA_STATE_NEGOTIATED) { + vdpa_nic->status |= VIRTIO_CONFIG_S_DRIVER_OK; + rc = start_vdpa_device(vdpa_nic); + if (rc) { + dev_err(&vdpa_nic->vdpa_dev.dev, + "%s: vDPA device failed:%d\n", + __func__, rc); + vdpa_nic->status &= + ~VIRTIO_CONFIG_S_DRIVER_OK; + goto unlock_return; + } + new_status = new_status & ~VIRTIO_CONFIG_S_DRIVER_OK; + } else { + dev_warn(&vdev->dev, "%s: Mismatch Status & State\n", + __func__); + dev_warn(&vdev->dev, "%s: New status Bits:\n", __func__); +#ifdef EFX_NOT_UPSTREAM + print_status_str(new_status, &vdpa_nic->vdpa_dev); +#endif + dev_warn(&vdev->dev, "%s: Current vDPA State: %s\n", + __func__, + get_vdpa_state_str(vdpa_nic->vdpa_state)); + break; + } + } +unlock_return: + mutex_unlock(&vdpa_nic->lock); + return; +} + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_GET_CONFIG_SIZE) +static size_t ef100_vdpa_get_config_size(struct vdpa_device *vdev) +{ +#ifdef EFX_NOT_UPSTREAM + dev_info(&vdev->dev, "%s: config size:%lu\n", __func__, + sizeof(struct virtio_net_config)); +#endif + return sizeof(struct virtio_net_config); +} +#endif + +static void ef100_vdpa_get_config(struct vdpa_device *vdev, unsigned int offset, + void *buf, unsigned int len) +{ + struct ef100_vdpa_nic *vdpa_nic = get_vdpa_nic(vdev); + +#ifdef EFX_NOT_UPSTREAM + dev_info(&vdev->dev, "%s: offset:%u len:%u\n", __func__, offset, len); +#endif + /* Avoid the possibility of wrap-up after the sum exceeds U32_MAX */ + if (WARN_ON(((u64)offset + len) > sizeof(struct virtio_net_config))) { + dev_err(&vdev->dev, + "%s: Offset + len exceeds config size\n", __func__); + return; + } + memcpy(buf, (u8 *)&vdpa_nic->net_config + offset, len); +} + +static void ef100_vdpa_set_config(struct vdpa_device *vdev, unsigned int offset, + const void *buf, unsigned int len) +{ + struct ef100_vdpa_nic *vdpa_nic = get_vdpa_nic(vdev); + + if (vdpa_nic->vdpa_state == EF100_VDPA_STATE_SUSPENDED) { + dev_err(&vdev->dev, + "config update not allowed in suspended state\n"); + return; + 
	}
+#ifdef EFX_NOT_UPSTREAM
+	dev_info(&vdev->dev, "%s: offset:%u len:%u config size:%lu\n",
+		 __func__, offset, len, sizeof(vdpa_nic->net_config));
+#endif
+	/* Avoid the possibility of wrap-around after the sum exceeds U32_MAX */
+	if (WARN_ON(((u64)offset + len) > sizeof(vdpa_nic->net_config))) {
+		dev_err(&vdev->dev,
+			"%s: Offset + len exceeds config size\n", __func__);
+		return;
+	}
+
+	memcpy((u8 *)&vdpa_nic->net_config + offset, buf, len);
+	ef100_vdpa_insert_filter(vdpa_nic->efx);
+
+#ifdef EFX_NOT_UPSTREAM
+	dev_dbg(&vdpa_nic->vdpa_dev.dev,
+		"%s: Status:%u MAC:%pM max_qps:%u MTU:%u\n",
+		__func__, vdpa_nic->net_config.status,
+		vdpa_nic->net_config.mac,
+		vdpa_nic->net_config.max_virtqueue_pairs,
+		vdpa_nic->net_config.mtu);
+#endif
+}
+
+static void ef100_vdpa_free(struct vdpa_device *vdev)
+{
+	struct ef100_vdpa_nic *vdpa_nic = get_vdpa_nic(vdev);
+	int rc;
+	int i;
+
+#ifdef EFX_NOT_UPSTREAM
+	dev_info(&vdev->dev, "%s: Releasing vDPA resources\n", __func__);
+#endif
+	if (vdpa_nic) {
+		efx_fini_debugfs_vdpa(vdpa_nic);
+		if (vdpa_nic->efx->mcdi_buf_mode == EFX_BUF_MODE_VDPA) {
+			rc = ef100_vdpa_map_mcdi_buffer(vdpa_nic->efx);
+			if (rc) {
+				dev_err(&vdev->dev,
+					"map_mcdi_buffer failed, err: %d\n",
+					rc);
+			}
+		}
+		for (i = 0; i < (vdpa_nic->max_queue_pairs * 2); i++) {
+			reset_vring(vdpa_nic, i);
+			if (vdpa_nic->vring[i].vring_ctx)
+				delete_vring_ctx(vdpa_nic, i);
+		}
+		mutex_destroy(&vdpa_nic->lock);
+		vdpa_nic->efx->vdpa_nic = NULL;
+	}
+}
+
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_VDPA_CONFIG_OP_SUSPEND)
+static int ef100_vdpa_suspend(struct vdpa_device *vdev)
+{
+	struct ef100_vdpa_nic *vdpa_nic = get_vdpa_nic(vdev);
+	int i, rc = 0;
+
+	mutex_lock(&vdpa_nic->lock);
+	for (i = 0; i < vdpa_nic->max_queue_pairs * 2; i++) {
+		rc = delete_vring(vdpa_nic, i);
+		if (rc)
+			break;
+	}
+	vdpa_nic->vdpa_state = EF100_VDPA_STATE_SUSPENDED;
+	mutex_unlock(&vdpa_nic->lock);
+	return rc;
+}
+#endif
+
+const struct vdpa_config_ops ef100_vdpa_config_ops = {
+	.set_vq_address = ef100_vdpa_set_vq_address,
+	.set_vq_num = ef100_vdpa_set_vq_num,
+	.kick_vq = ef100_vdpa_kick_vq,
+	.set_vq_cb = ef100_vdpa_set_vq_cb,
+	.set_vq_ready = ef100_vdpa_set_vq_ready,
+	.get_vq_ready = ef100_vdpa_get_vq_ready,
+	.set_vq_state = ef100_vdpa_set_vq_state,
+	.get_vq_state = ef100_vdpa_get_vq_state,
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_GET_VQ_NOTIFY)
+	.get_vq_notification = ef100_vdpa_get_vq_notification,
+#endif
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_GET_VQ_IRQ)
+	.get_vq_irq = ef100_get_vq_irq,
+#endif
+	.get_vq_align = ef100_vdpa_get_vq_align,
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_GET_DEVICE_FEATURES)
+	.get_device_features = ef100_vdpa_get_device_features,
+	.set_driver_features = ef100_vdpa_set_driver_features,
+	.get_driver_features = ef100_vdpa_get_driver_features,
+#else
+	.get_features = ef100_vdpa_get_device_features,
+	.set_features = ef100_vdpa_set_driver_features,
+#endif
+	.set_config_cb = ef100_vdpa_set_config_cb,
+	.get_vq_num_max = ef100_vdpa_get_vq_num_max,
+	.get_device_id = ef100_vdpa_get_device_id,
+	.get_vendor_id = ef100_vdpa_get_vendor_id,
+	.get_status = ef100_vdpa_get_status,
+	.set_status = ef100_vdpa_set_status,
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_VDPA_RESET)
+	.reset = ef100_vdpa_reset,
+#endif
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_GET_CONFIG_SIZE)
+	.get_config_size = ef100_vdpa_get_config_size,
+#endif
+	.get_config = ef100_vdpa_get_config,
+	.set_config = ef100_vdpa_set_config,
+	.get_generation = NULL,
+	
.set_map = NULL, +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_VDPA_CONFIG_OP_SUSPEND) + .suspend = ef100_vdpa_suspend, +#endif + .free = ef100_vdpa_free, +}; +#endif diff --git a/src/drivers/net/ethernet/sfc/ef10_regs.h b/src/drivers/net/ethernet/sfc/ef10_regs.h new file mode 100644 index 0000000..2090b99 --- /dev/null +++ b/src/drivers/net/ethernet/sfc/ef10_regs.h @@ -0,0 +1,439 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2012-2017 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#ifndef EFX_EF10_REGS_H +#define EFX_EF10_REGS_H + +/* EF10 hardware architecture definitions have a name prefix following + * the format: + * + * E__ + * + * The following strings are used: + * + * MMIO register Host memory structure + * ------------------------------------------------------------- + * Address R + * Bitfield RF SF + * Enumerator FE SE + * + * is the first revision to which the definition applies: + * + * D: Huntington A0 + * + * If the definition has been changed or removed in later revisions + * then is the last revision to which the definition applies; + * otherwise it is "Z". + */ + +/************************************************************************** + * + * EF10 registers and descriptors + * + ************************************************************************** + */ + +/* BIU_HW_REV_ID_REG: */ +#define ER_DZ_BIU_HW_REV_ID 0x00000000 +#define ERF_DZ_HW_REV_ID_LBN 0 +#define ERF_DZ_HW_REV_ID_WIDTH 32 + +/* BIU_MC_SFT_STATUS_REG: */ +#define ER_DZ_BIU_MC_SFT_STATUS 0x00000010 +#define ER_DZ_BIU_MC_SFT_STATUS_STEP 4 +#define ER_DZ_BIU_MC_SFT_STATUS_ROWS 8 +#define ERF_DZ_MC_SFT_STATUS_LBN 0 +#define ERF_DZ_MC_SFT_STATUS_WIDTH 32 + +/* BIU_INT_ISR_REG: */ +#define ER_DZ_BIU_INT_ISR 0x00000090 +#define ERF_DZ_ISR_REG_LBN 0 +#define ERF_DZ_ISR_REG_WIDTH 32 + +/* MC_DB_LWRD_REG: */ +#define ER_DZ_MC_DB_LWRD 0x00000200 +#define ERF_DZ_MC_DOORBELL_L_LBN 0 +#define ERF_DZ_MC_DOORBELL_L_WIDTH 32 + +/* MC_DB_HWRD_REG: */ +#define ER_DZ_MC_DB_HWRD 0x00000204 +#define ERF_DZ_MC_DOORBELL_H_LBN 0 +#define ERF_DZ_MC_DOORBELL_H_WIDTH 32 + +/* EVQ_RPTR_REG: */ +#define ER_DZ_EVQ_RPTR 0x00000400 +#define ER_DZ_EVQ_RPTR_STEP 8192 +#define ER_DZ_EVQ_RPTR_ROWS 2048 +#define ERF_DZ_EVQ_RPTR_VLD_LBN 15 +#define ERF_DZ_EVQ_RPTR_VLD_WIDTH 1 +#define ERF_DZ_EVQ_RPTR_LBN 0 +#define ERF_DZ_EVQ_RPTR_WIDTH 15 + +/* EVQ_TMR_REG: */ +#define ER_DZ_EVQ_TMR 0x00000420 +#define ER_DZ_EVQ_TMR_STEP 8192 +#define ER_DZ_EVQ_TMR_ROWS 2048 +#define ERF_FZ_TC_TMR_REL_VAL_LBN 16 +#define ERF_FZ_TC_TMR_REL_VAL_WIDTH 14 +#define ERF_DZ_TC_TIMER_MODE_LBN 14 +#define ERF_DZ_TC_TIMER_MODE_WIDTH 2 +#define ERF_DZ_TC_TIMER_VAL_LBN 0 +#define ERF_DZ_TC_TIMER_VAL_WIDTH 14 + +/* RX_DESC_UPD_REG: */ +#define ER_DZ_RX_DESC_UPD 0x00000830 +#define ER_DZ_RX_DESC_UPD_STEP 8192 +#define ER_DZ_RX_DESC_UPD_ROWS 2048 +#define ERF_DZ_RX_DESC_WPTR_LBN 0 +#define ERF_DZ_RX_DESC_WPTR_WIDTH 12 + +/* TX_DESC_UPD_REG: */ +#define ER_DZ_TX_DESC_UPD 0x00000a10 +#define ER_DZ_TX_DESC_UPD_STEP 8192 +#define ER_DZ_TX_DESC_UPD_ROWS 2048 +#define ERF_DZ_RSVD_LBN 76 +#define ERF_DZ_RSVD_WIDTH 20 +#define ERF_DZ_TX_DESC_WPTR_LBN 64 +#define ERF_DZ_TX_DESC_WPTR_WIDTH 12 +#define ERF_DZ_TX_DESC_HWORD_LBN 32 +#define 
ERF_DZ_TX_DESC_HWORD_WIDTH 32 +#define ERF_DZ_TX_DESC_LWORD_LBN 0 +#define ERF_DZ_TX_DESC_LWORD_WIDTH 32 + +/* DRIVER_EV */ +#define ESF_DZ_DRV_CODE_LBN 60 +#define ESF_DZ_DRV_CODE_WIDTH 4 +#define ESF_DZ_DRV_SUB_CODE_LBN 56 +#define ESF_DZ_DRV_SUB_CODE_WIDTH 4 +#define ESE_DZ_DRV_TIMER_EV 3 +#define ESE_DZ_DRV_START_UP_EV 2 +#define ESE_DZ_DRV_WAKE_UP_EV 1 +#define ESF_DZ_DRV_SUB_DATA_LBN 0 +#define ESF_DZ_DRV_SUB_DATA_WIDTH 56 +#define ESF_DZ_DRV_EVQ_ID_LBN 0 +#define ESF_DZ_DRV_EVQ_ID_WIDTH 14 +#define ESF_DZ_DRV_TMR_ID_LBN 0 +#define ESF_DZ_DRV_TMR_ID_WIDTH 14 + +/* EVENT_ENTRY */ +#define ESF_DZ_EV_CODE_LBN 60 +#define ESF_DZ_EV_CODE_WIDTH 4 +#define ESE_DZ_EV_CODE_MCDI_EV 12 +#define ESE_DZ_EV_CODE_DRIVER_EV 5 +#define ESE_DZ_EV_CODE_TX_EV 2 +#define ESE_DZ_EV_CODE_RX_EV 0 +#define ESE_DZ_OTHER other +#define ESF_DZ_EV_DATA_LBN 0 +#define ESF_DZ_EV_DATA_WIDTH 60 + +/* MC_EVENT */ +#define ESF_DZ_MC_CODE_LBN 60 +#define ESF_DZ_MC_CODE_WIDTH 4 +#define ESF_DZ_MC_OVERRIDE_HOLDOFF_LBN 59 +#define ESF_DZ_MC_OVERRIDE_HOLDOFF_WIDTH 1 +#define ESF_DZ_MC_DROP_EVENT_LBN 58 +#define ESF_DZ_MC_DROP_EVENT_WIDTH 1 +#define ESF_DZ_MC_SOFT_LBN 0 +#define ESF_DZ_MC_SOFT_WIDTH 58 + +/* RX_EVENT */ +#define ESF_DZ_RX_CODE_LBN 60 +#define ESF_DZ_RX_CODE_WIDTH 4 +#define ESF_DZ_RX_OVERRIDE_HOLDOFF_LBN 59 +#define ESF_DZ_RX_OVERRIDE_HOLDOFF_WIDTH 1 +#define ESF_DZ_RX_DROP_EVENT_LBN 58 +#define ESF_DZ_RX_DROP_EVENT_WIDTH 1 +#define ESF_DD_RX_EV_RSVD2_LBN 54 +#define ESF_DD_RX_EV_RSVD2_WIDTH 4 +#define ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR_LBN 57 +#define ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR_WIDTH 1 +#define ESF_EZ_RX_IP_INNER_CHKSUM_ERR_LBN 56 +#define ESF_EZ_RX_IP_INNER_CHKSUM_ERR_WIDTH 1 +#define ESF_EZ_RX_EV_RSVD2_LBN 54 +#define ESF_EZ_RX_EV_RSVD2_WIDTH 2 +#define ESF_DZ_RX_EV_SOFT2_LBN 52 +#define ESF_DZ_RX_EV_SOFT2_WIDTH 2 +#define ESF_DZ_RX_DSC_PTR_LBITS_LBN 48 +#define ESF_DZ_RX_DSC_PTR_LBITS_WIDTH 4 +#define ESF_DE_RX_L4_CLASS_LBN 45 +#define ESF_DE_RX_L4_CLASS_WIDTH 3 +#define ESE_DE_L4_CLASS_RSVD7 7 +#define ESE_DE_L4_CLASS_RSVD6 6 +#define ESE_DE_L4_CLASS_RSVD5 5 +#define ESE_DE_L4_CLASS_RSVD4 4 +#define ESE_DE_L4_CLASS_RSVD3 3 +#define ESE_DE_L4_CLASS_UDP 2 +#define ESE_DE_L4_CLASS_TCP 1 +#define ESE_DE_L4_CLASS_UNKNOWN 0 +#define ESF_FZ_RX_FASTPD_INDCTR_LBN 47 +#define ESF_FZ_RX_FASTPD_INDCTR_WIDTH 1 +#define ESF_FZ_RX_L4_CLASS_LBN 45 +#define ESF_FZ_RX_L4_CLASS_WIDTH 2 +#define ESE_FZ_L4_CLASS_RSVD3 3 +#define ESE_FZ_L4_CLASS_UDP 2 +#define ESE_FZ_L4_CLASS_TCP 1 +#define ESE_FZ_L4_CLASS_UNKNOWN 0 +#define ESF_DZ_RX_L3_CLASS_LBN 42 +#define ESF_DZ_RX_L3_CLASS_WIDTH 3 +#define ESE_DZ_L3_CLASS_RSVD7 7 +#define ESE_DZ_L3_CLASS_IP6_FRAG 6 +#define ESE_DZ_L3_CLASS_ARP 5 +#define ESE_DZ_L3_CLASS_IP4_FRAG 4 +#define ESE_DZ_L3_CLASS_FCOE 3 +#define ESE_DZ_L3_CLASS_IP6 2 +#define ESE_DZ_L3_CLASS_IP4 1 +#define ESE_DZ_L3_CLASS_UNKNOWN 0 +#define ESF_DZ_RX_ETH_TAG_CLASS_LBN 39 +#define ESF_DZ_RX_ETH_TAG_CLASS_WIDTH 3 +#define ESE_DZ_ETH_TAG_CLASS_RSVD7 7 +#define ESE_DZ_ETH_TAG_CLASS_RSVD6 6 +#define ESE_DZ_ETH_TAG_CLASS_RSVD5 5 +#define ESE_DZ_ETH_TAG_CLASS_RSVD4 4 +#define ESE_DZ_ETH_TAG_CLASS_RSVD3 3 +#define ESE_DZ_ETH_TAG_CLASS_VLAN2 2 +#define ESE_DZ_ETH_TAG_CLASS_VLAN1 1 +#define ESE_DZ_ETH_TAG_CLASS_NONE 0 +#define ESF_DZ_RX_ETH_BASE_CLASS_LBN 36 +#define ESF_DZ_RX_ETH_BASE_CLASS_WIDTH 3 +#define ESE_DZ_ETH_BASE_CLASS_LLC_SNAP 2 +#define ESE_DZ_ETH_BASE_CLASS_LLC 1 +#define ESE_DZ_ETH_BASE_CLASS_ETH2 0 +#define ESF_DZ_RX_MAC_CLASS_LBN 35 +#define ESF_DZ_RX_MAC_CLASS_WIDTH 1 +#define 
ESE_DZ_MAC_CLASS_MCAST 1 +#define ESE_DZ_MAC_CLASS_UCAST 0 +#define ESF_DD_RX_EV_SOFT1_LBN 32 +#define ESF_DD_RX_EV_SOFT1_WIDTH 3 +#define ESF_EZ_RX_EV_SOFT1_LBN 34 +#define ESF_EZ_RX_EV_SOFT1_WIDTH 1 +#define ESF_EZ_RX_ENCAP_HDR_LBN 32 +#define ESF_EZ_RX_ENCAP_HDR_WIDTH 2 +#define ESE_EZ_ENCAP_HDR_GRE 2 +#define ESE_EZ_ENCAP_HDR_VXLAN 1 +#define ESE_EZ_ENCAP_HDR_NONE 0 +#define ESF_DD_RX_EV_RSVD1_LBN 30 +#define ESF_DD_RX_EV_RSVD1_WIDTH 2 +#define ESF_EZ_RX_EV_RSVD1_LBN 31 +#define ESF_EZ_RX_EV_RSVD1_WIDTH 1 +#define ESF_EZ_RX_ABORT_LBN 30 +#define ESF_EZ_RX_ABORT_WIDTH 1 +#define ESF_DZ_RX_ECC_ERR_LBN 29 +#define ESF_DZ_RX_ECC_ERR_WIDTH 1 +#define ESF_DZ_RX_TRUNC_ERR_LBN 29 +#define ESF_DZ_RX_TRUNC_ERR_WIDTH 1 +#define ESF_DZ_RX_CRC1_ERR_LBN 28 +#define ESF_DZ_RX_CRC1_ERR_WIDTH 1 +#define ESF_DZ_RX_CRC0_ERR_LBN 27 +#define ESF_DZ_RX_CRC0_ERR_WIDTH 1 +#define ESF_DZ_RX_TCPUDP_CKSUM_ERR_LBN 26 +#define ESF_DZ_RX_TCPUDP_CKSUM_ERR_WIDTH 1 +#define ESF_DZ_RX_IPCKSUM_ERR_LBN 25 +#define ESF_DZ_RX_IPCKSUM_ERR_WIDTH 1 +#define ESF_DZ_RX_ECRC_ERR_LBN 24 +#define ESF_DZ_RX_ECRC_ERR_WIDTH 1 +#define ESF_DZ_RX_QLABEL_LBN 16 +#define ESF_DZ_RX_QLABEL_WIDTH 5 +#define ESF_DZ_RX_PARSE_INCOMPLETE_LBN 15 +#define ESF_DZ_RX_PARSE_INCOMPLETE_WIDTH 1 +#define ESF_DZ_RX_CONT_LBN 14 +#define ESF_DZ_RX_CONT_WIDTH 1 +#define ESF_DZ_RX_BYTES_LBN 0 +#define ESF_DZ_RX_BYTES_WIDTH 14 + +/* RX_KER_DESC */ +#define ESF_DZ_RX_KER_RESERVED_LBN 62 +#define ESF_DZ_RX_KER_RESERVED_WIDTH 2 +#define ESF_DZ_RX_KER_BYTE_CNT_LBN 48 +#define ESF_DZ_RX_KER_BYTE_CNT_WIDTH 14 +#define ESF_DZ_RX_KER_BUF_ADDR_LBN 0 +#define ESF_DZ_RX_KER_BUF_ADDR_WIDTH 48 + +/* TX_CSUM_TSTAMP_DESC */ +#define ESF_DZ_TX_DESC_IS_OPT_LBN 63 +#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1 +#define ESF_DZ_TX_OPTION_TYPE_LBN 60 +#define ESF_DZ_TX_OPTION_TYPE_WIDTH 3 +#define ESE_DZ_TX_OPTION_DESC_TSO 7 +#define ESE_DZ_TX_OPTION_DESC_VLAN 6 +#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0 +#define ESF_DZ_TX_OPTION_TS_AT_TXDP_LBN 8 +#define ESF_DZ_TX_OPTION_TS_AT_TXDP_WIDTH 1 +#define ESF_DZ_TX_OPTION_INNER_UDP_TCP_CSUM_LBN 7 +#define ESF_DZ_TX_OPTION_INNER_UDP_TCP_CSUM_WIDTH 1 +#define ESF_DZ_TX_OPTION_INNER_IP_CSUM_LBN 6 +#define ESF_DZ_TX_OPTION_INNER_IP_CSUM_WIDTH 1 +#define ESF_DZ_TX_TIMESTAMP_LBN 5 +#define ESF_DZ_TX_TIMESTAMP_WIDTH 1 +#define ESF_DZ_TX_OPTION_CRC_MODE_LBN 2 +#define ESF_DZ_TX_OPTION_CRC_MODE_WIDTH 3 +#define ESE_DZ_TX_OPTION_CRC_FCOIP_MPA 5 +#define ESE_DZ_TX_OPTION_CRC_FCOIP_FCOE 4 +#define ESE_DZ_TX_OPTION_CRC_ISCSI_HDR_AND_PYLD 3 +#define ESE_DZ_TX_OPTION_CRC_ISCSI_HDR 2 +#define ESE_DZ_TX_OPTION_CRC_FCOE 1 +#define ESE_DZ_TX_OPTION_CRC_OFF 0 +#define ESF_DZ_TX_OPTION_UDP_TCP_CSUM_LBN 1 +#define ESF_DZ_TX_OPTION_UDP_TCP_CSUM_WIDTH 1 +#define ESF_DZ_TX_OPTION_IP_CSUM_LBN 0 +#define ESF_DZ_TX_OPTION_IP_CSUM_WIDTH 1 + +/* TX_EVENT */ +#define ESF_DZ_TX_CODE_LBN 60 +#define ESF_DZ_TX_CODE_WIDTH 4 +#define ESF_DZ_TX_OVERRIDE_HOLDOFF_LBN 59 +#define ESF_DZ_TX_OVERRIDE_HOLDOFF_WIDTH 1 +#define ESF_DZ_TX_DROP_EVENT_LBN 58 +#define ESF_DZ_TX_DROP_EVENT_WIDTH 1 +#define ESF_DD_TX_EV_RSVD_LBN 48 +#define ESF_DD_TX_EV_RSVD_WIDTH 10 +#define ESF_EZ_TCP_UDP_INNER_CHKSUM_ERR_LBN 57 +#define ESF_EZ_TCP_UDP_INNER_CHKSUM_ERR_WIDTH 1 +#define ESF_EZ_IP_INNER_CHKSUM_ERR_LBN 56 +#define ESF_EZ_IP_INNER_CHKSUM_ERR_WIDTH 1 +#define ESF_EZ_TX_EV_RSVD_LBN 48 +#define ESF_EZ_TX_EV_RSVD_WIDTH 8 +#define ESF_DZ_TX_SOFT2_LBN 32 +#define ESF_DZ_TX_SOFT2_WIDTH 16 +#define ESF_DD_TX_SOFT1_LBN 24 +#define ESF_DD_TX_SOFT1_WIDTH 8 +#define ESF_EZ_TX_CAN_MERGE_LBN 31 +#define 
ESF_EZ_TX_CAN_MERGE_WIDTH 1 +#define ESF_EZ_TX_SOFT1_LBN 24 +#define ESF_EZ_TX_SOFT1_WIDTH 7 +#define ESF_DZ_TX_QLABEL_LBN 16 +#define ESF_DZ_TX_QLABEL_WIDTH 5 +#define ESF_DZ_TX_DESCR_INDX_LBN 0 +#define ESF_DZ_TX_DESCR_INDX_WIDTH 16 + +/* TX_KER_DESC */ +#define ESF_DZ_TX_KER_TYPE_LBN 63 +#define ESF_DZ_TX_KER_TYPE_WIDTH 1 +#define ESF_DZ_TX_KER_CONT_LBN 62 +#define ESF_DZ_TX_KER_CONT_WIDTH 1 +#define ESF_DZ_TX_KER_BYTE_CNT_LBN 48 +#define ESF_DZ_TX_KER_BYTE_CNT_WIDTH 14 +#define ESF_DZ_TX_KER_BUF_ADDR_LBN 0 +#define ESF_DZ_TX_KER_BUF_ADDR_WIDTH 48 + +/* TX_PIO_DESC */ +#define ESF_DZ_TX_PIO_TYPE_LBN 63 +#define ESF_DZ_TX_PIO_TYPE_WIDTH 1 +#define ESF_DZ_TX_PIO_OPT_LBN 60 +#define ESF_DZ_TX_PIO_OPT_WIDTH 3 +#define ESF_DZ_TX_PIO_CONT_LBN 59 +#define ESF_DZ_TX_PIO_CONT_WIDTH 1 +#define ESF_DZ_TX_PIO_BYTE_CNT_LBN 32 +#define ESF_DZ_TX_PIO_BYTE_CNT_WIDTH 12 +#define ESF_DZ_TX_PIO_BUF_ADDR_LBN 0 +#define ESF_DZ_TX_PIO_BUF_ADDR_WIDTH 12 + +/* TX_TSO_DESC */ +#define ESF_DZ_TX_DESC_IS_OPT_LBN 63 +#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1 +#define ESF_DZ_TX_OPTION_TYPE_LBN 60 +#define ESF_DZ_TX_OPTION_TYPE_WIDTH 3 +#define ESE_DZ_TX_OPTION_DESC_TSO 7 +#define ESE_DZ_TX_OPTION_DESC_VLAN 6 +#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0 +#define ESF_DZ_TX_TSO_OPTION_TYPE_LBN 56 +#define ESF_DZ_TX_TSO_OPTION_TYPE_WIDTH 4 +#define ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B 3 +#define ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A 2 +#define ESE_DZ_TX_TSO_OPTION_DESC_ENCAP 1 +#define ESE_DZ_TX_TSO_OPTION_DESC_NORMAL 0 +#define ESF_DZ_TX_TSO_TCP_FLAGS_LBN 48 +#define ESF_DZ_TX_TSO_TCP_FLAGS_WIDTH 8 +#define ESF_DZ_TX_TSO_IP_ID_LBN 32 +#define ESF_DZ_TX_TSO_IP_ID_WIDTH 16 +#define ESF_DZ_TX_TSO_TCP_SEQNO_LBN 0 +#define ESF_DZ_TX_TSO_TCP_SEQNO_WIDTH 32 + +/* TX_TSO_V2_DESC_A */ +#define ESF_DZ_TX_DESC_IS_OPT_LBN 63 +#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1 +#define ESF_DZ_TX_OPTION_TYPE_LBN 60 +#define ESF_DZ_TX_OPTION_TYPE_WIDTH 3 +#define ESE_DZ_TX_OPTION_DESC_TSO 7 +#define ESE_DZ_TX_OPTION_DESC_VLAN 6 +#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0 +#define ESF_DZ_TX_TSO_OPTION_TYPE_LBN 56 +#define ESF_DZ_TX_TSO_OPTION_TYPE_WIDTH 4 +#define ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B 3 +#define ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A 2 +#define ESE_DZ_TX_TSO_OPTION_DESC_ENCAP 1 +#define ESE_DZ_TX_TSO_OPTION_DESC_NORMAL 0 +#define ESF_DZ_TX_TSO_IP_ID_LBN 32 +#define ESF_DZ_TX_TSO_IP_ID_WIDTH 16 +#define ESF_DZ_TX_TSO_TCP_SEQNO_LBN 0 +#define ESF_DZ_TX_TSO_TCP_SEQNO_WIDTH 32 + +/* TX_TSO_V2_DESC_B */ +#define ESF_DZ_TX_DESC_IS_OPT_LBN 63 +#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1 +#define ESF_DZ_TX_OPTION_TYPE_LBN 60 +#define ESF_DZ_TX_OPTION_TYPE_WIDTH 3 +#define ESE_DZ_TX_OPTION_DESC_TSO 7 +#define ESE_DZ_TX_OPTION_DESC_VLAN 6 +#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0 +#define ESF_DZ_TX_TSO_OPTION_TYPE_LBN 56 +#define ESF_DZ_TX_TSO_OPTION_TYPE_WIDTH 4 +#define ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B 3 +#define ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A 2 +#define ESE_DZ_TX_TSO_OPTION_DESC_ENCAP 1 +#define ESE_DZ_TX_TSO_OPTION_DESC_NORMAL 0 +#define ESF_DZ_TX_TSO_TCP_MSS_LBN 32 +#define ESF_DZ_TX_TSO_TCP_MSS_WIDTH 16 +#define ESF_DZ_TX_TSO_OUTER_IPID_LBN 0 +#define ESF_DZ_TX_TSO_OUTER_IPID_WIDTH 16 + +/*************************************************************************/ + +/* TX_DESC_UPD_REG: Transmit descriptor update register. + * We may write just one dword of these registers. 
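+ * ER_DZ_TX_DESC_UPD_DWORD below addresses the dword that holds the
+ * write pointer, so the WPTR field position is rebased by 2 * 32 bits.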
+ */ +#define ER_DZ_TX_DESC_UPD_DWORD (ER_DZ_TX_DESC_UPD + 2 * 4) +#define ERF_DZ_TX_DESC_WPTR_DWORD_LBN (ERF_DZ_TX_DESC_WPTR_LBN - 2 * 32) +#define ERF_DZ_TX_DESC_WPTR_DWORD_WIDTH ERF_DZ_TX_DESC_WPTR_WIDTH + +/* The workaround for bug 35388 requires multiplexing writes through + * the TX_DESC_UPD_DWORD address. + * TX_DESC_UPD: 0ppppppppppp (bit 11 lost) + * EVQ_RPTR: 1000hhhhhhhh, 1001llllllll (split into high and low bits) + * EVQ_TMR: 11mmvvvvvvvv (bits 8:13 of value lost) + */ +#define ER_DD_EVQ_INDIRECT ER_DZ_TX_DESC_UPD_DWORD +#define ERF_DD_EVQ_IND_RPTR_FLAGS_LBN 8 +#define ERF_DD_EVQ_IND_RPTR_FLAGS_WIDTH 4 +#define EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH 8 +#define EFE_DD_EVQ_IND_RPTR_FLAGS_LOW 9 +#define ERF_DD_EVQ_IND_RPTR_LBN 0 +#define ERF_DD_EVQ_IND_RPTR_WIDTH 8 +#define ERF_DD_EVQ_IND_TIMER_FLAGS_LBN 10 +#define ERF_DD_EVQ_IND_TIMER_FLAGS_WIDTH 2 +#define EFE_DD_EVQ_IND_TIMER_FLAGS 3 +#define ERF_DD_EVQ_IND_TIMER_MODE_LBN 8 +#define ERF_DD_EVQ_IND_TIMER_MODE_WIDTH 2 +#define ERF_DD_EVQ_IND_TIMER_VAL_LBN 0 +#define ERF_DD_EVQ_IND_TIMER_VAL_WIDTH 8 + +/* TX_PIOBUF + * PIO buffer aperture (paged) + */ +#define ER_DZ_TX_PIOBUF 4096 +#define ER_DZ_TX_PIOBUF_SIZE 2048 + +/* RX packet prefix */ +#define ES_DZ_RX_PREFIX_HASH_OFST 0 +#define ES_DZ_RX_PREFIX_VLAN1_OFST 4 +#define ES_DZ_RX_PREFIX_VLAN2_OFST 6 +#define ES_DZ_RX_PREFIX_PKTLEN_OFST 8 +#define ES_DZ_RX_PREFIX_TSTAMP_OFST 10 +#define ES_DZ_RX_PREFIX_SIZE 14 + +#endif /* EFX_EF10_REGS_H */ diff --git a/src/drivers/net/ethernet/sfc/ef10_sriov.c b/src/drivers/net/ethernet/sfc/ef10_sriov.c new file mode 100644 index 0000000..45a9000 --- /dev/null +++ b/src/drivers/net/ethernet/sfc/ef10_sriov.c @@ -0,0 +1,1020 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2014-2017 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ +#include +#include +#include "net_driver.h" +#include "efx.h" +#include "nic.h" +#include "efx_common.h" +#include "mcdi_pcol.h" +#include "sriov.h" +#include "ef10_sriov.h" +#include "workarounds.h" + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_SRIOV_GET_TOTALVFS) || defined(CONFIG_SFC_SRIOV) +/* + * Force allocation of a vswitch. + */ +static bool enable_vswitch; +module_param(enable_vswitch, bool, 0444); +MODULE_PARM_DESC(enable_vswitch, + "Force allocation of a VEB vswitch on supported adapters"); +#endif + +#ifdef CONFIG_SFC_SRIOV + +static bool vfs_vlan_restrict; +module_param(vfs_vlan_restrict, bool, 0444); +MODULE_PARM_DESC(vfs_vlan_restrict, + "[SFC9100-family] Restrict VLANs usage on VFs. 
VF driver " + "needs to use HW VLAN filtering to get VLAN tagged traffic; " + "default=N"); + + +static void efx_ef10_sriov_free_vf_vports(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + int i; + + if (!nic_data->vf) + return; + + for (i = 0; i < nic_data->vf_count; i++) { + struct ef10_vf *vf = nic_data->vf + i; + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_PCI_DEV_FLAGS_ASSIGNED) + /* If VF is assigned, do not free the vport */ + if (vf->pci_dev && + vf->pci_dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) + continue; +#endif + + if (vf->vport_assigned) { + efx_ef10_evb_port_assign(efx, EVB_PORT_ID_NULL, i); + vf->vport_assigned = 0; + } + + if (!is_zero_ether_addr(vf->mac)) { + efx_ef10_vport_del_mac(efx, vf->vport_id, vf->mac); + eth_zero_addr(vf->mac); + } + + if (vf->vport_id) { + efx_ef10_vport_free(efx, vf->vport_id); + vf->vport_id = 0; + } + + vf->efx = NULL; + } +} + +static void efx_ef10_sriov_free_vf_vswitching(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + + if (!nic_data) + return; + + efx_ef10_sriov_free_vf_vports(efx); + kfree(nic_data->vf); + nic_data->vf = NULL; +} + +static int efx_ef10_sriov_assign_vf_vport(struct efx_nic *efx, + unsigned int vf_i) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + struct ef10_vf *vf = nic_data->vf + vf_i; + int rc; + + if (WARN_ON_ONCE(nic_data->vf == NULL)) + return -EOPNOTSUPP; + + rc = efx_ef10_vport_alloc(efx, vf->vlan, vf->vlan_restrict, + &vf->vport_id); + if (rc) + return rc; + + rc = efx_ef10_vport_add_mac(efx, vf->vport_id, vf->mac); + if (rc) { + eth_zero_addr(vf->mac); + return rc; + } + + rc = efx_ef10_evb_port_assign(efx, vf->vport_id, vf_i); + if (rc) + return rc; + + vf->vport_assigned = 1; + return 0; +} + +static int efx_ef10_sriov_alloc_vf_vswitching(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + unsigned int i; + int rc; + + nic_data->vf = kcalloc(nic_data->vf_count, sizeof(struct ef10_vf), + GFP_KERNEL); + if (!nic_data->vf) + return -ENOMEM; + + for (i = 0; i < nic_data->vf_count; i++) { + eth_random_addr(nic_data->vf[i].mac); + nic_data->vf[i].efx = NULL; + nic_data->vf[i].vlan = EFX_VF_VID_DEFAULT; + nic_data->vf[i].vlan_restrict = vfs_vlan_restrict; + + rc = efx_ef10_sriov_assign_vf_vport(efx, i); + if (rc) + goto fail; + + } + + return 0; +fail: + efx_ef10_sriov_free_vf_vswitching(efx); + return rc; +} + +static int efx_ef10_sriov_restore_vf_vswitching(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + unsigned int i; + int rc; + + for (i = 0; i < nic_data->vf_count; i++) { + rc = efx_ef10_sriov_assign_vf_vport(efx, i); + if (rc) + goto fail; + } + + return 0; +fail: + efx_ef10_sriov_free_vf_vswitching(efx); + return rc; +} + +static int efx_ef10_vadaptor_alloc_set_features(struct efx_nic *efx) +{ + int rc; + u32 port_flags; + + rc = efx_ef10_vadaptor_alloc(efx, efx->vport.vport_id); + if (rc) + goto fail_vadaptor_alloc; + + rc = efx_ef10_vadaptor_query(efx, efx->vport.vport_id, + &port_flags, NULL, NULL); + if (rc) + goto fail_vadaptor_query; + + if (efx_supported_features(efx) & NETIF_F_HW_VLAN_CTAG_FILTER) { + if (port_flags & (1 << MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT_LBN)) + efx->fixed_features |= NETIF_F_HW_VLAN_CTAG_FILTER; + else + efx->fixed_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; + } + + return 0; + +fail_vadaptor_query: + efx_ef10_vadaptor_free(efx, EVB_PORT_ID_ASSIGNED); +fail_vadaptor_alloc: + return rc; +} +#endif + +/* On top of the default firmware vswitch 
setup, create a VEB vswitch and + * expansion vport for use by this function. + */ +int efx_ef10_vswitching_probe_pf(struct efx_nic *efx) +{ +#ifdef CONFIG_SFC_SRIOV + struct efx_ef10_nic_data *nic_data = efx->nic_data; + struct net_device *net_dev = efx->net_dev; + int rc; + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_SRIOV_GET_TOTALVFS) + if (pci_sriov_get_totalvfs(efx->pci_dev) <= 0 && !enable_vswitch) { +#else + if (nic_data->max_vfs <= 0 && !enable_vswitch) { +#endif + /* vswitch not needed as we have no VFs */ + efx_ef10_vadaptor_alloc_set_features(efx); + return 0; + } + + rc = efx_ef10_vswitch_alloc(efx, EVB_PORT_ID_ASSIGNED, + MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEB); + if (rc) + goto fail1; + + rc = efx_ef10_vport_alloc(efx, EFX_FILTER_VID_UNSPEC, false, + &efx->vport.vport_id); + if (rc) + goto fail2; + + rc = efx_ef10_vport_add_mac(efx, efx->vport.vport_id, net_dev->dev_addr); + if (rc) + goto fail3; + ether_addr_copy(nic_data->vport_mac, net_dev->dev_addr); + + rc = efx_ef10_vadaptor_alloc_set_features(efx); + if (rc) + goto fail4; + + return 0; +fail4: + efx_ef10_vport_del_mac(efx, efx->vport.vport_id, nic_data->vport_mac); + eth_zero_addr(nic_data->vport_mac); +fail3: + efx_ef10_vport_free(efx, efx->vport.vport_id); + efx->vport.vport_id = EVB_PORT_ID_ASSIGNED; +fail2: + efx_ef10_vswitch_free(efx, EVB_PORT_ID_ASSIGNED); +fail1: + return rc; +#else + return 0; +#endif +} + +int efx_ef10_vswitching_probe_vf(struct efx_nic *efx) +{ +#ifdef CONFIG_SFC_SRIOV + return efx_ef10_vadaptor_alloc_set_features(efx); +#else + return 0; +#endif +} + +int efx_ef10_vswitching_restore_pf(struct efx_nic *efx) +{ +#ifdef CONFIG_SFC_SRIOV + struct efx_ef10_nic_data *nic_data = efx->nic_data; + int rc; + + if (!nic_data->must_probe_vswitching) + return 0; + + rc = efx_ef10_vswitching_probe_pf(efx); + if (rc) + return rc; + + rc = efx_ef10_sriov_restore_vf_vswitching(efx); + if (rc) + return rc; + + nic_data->must_probe_vswitching = false; + return rc; +#else + return 0; +#endif +} + +int efx_ef10_vswitching_restore_vf(struct efx_nic *efx) +{ +#ifdef CONFIG_SFC_SRIOV + struct efx_ef10_nic_data *nic_data = efx->nic_data; + int rc; + u8 new_addr[ETH_ALEN]; + u8 *perm_addr; + + rc = efx->type->get_mac_address(efx, new_addr); + if (rc) + return rc; + + perm_addr = efx->net_dev->perm_addr; + if (!ether_addr_equal(perm_addr, new_addr)) { + netif_warn(efx, drv, efx->net_dev, + "PF has changed my MAC to %pM\n", + new_addr); + ether_addr_copy(perm_addr, new_addr); + eth_hw_addr_set(efx->net_dev, new_addr); + } + + if (!nic_data->must_probe_vswitching) + return 0; + + rc = efx_ef10_vswitching_probe_vf(efx); + if (rc) + return rc; + + nic_data->must_probe_vswitching = false; +#endif + return 0; +} + +void efx_ef10_vswitching_remove_pf(struct efx_nic *efx) +{ +#ifdef CONFIG_SFC_SRIOV + struct efx_ef10_nic_data *nic_data = efx->nic_data; + + efx_ef10_sriov_free_vf_vswitching(efx); + + efx_ef10_vadaptor_free(efx, efx->vport.vport_id); + + if (efx->vport.vport_id == EVB_PORT_ID_ASSIGNED) + return; /* No vswitch was ever created */ + + if (!is_zero_ether_addr(nic_data->vport_mac)) { + efx_ef10_vport_del_mac(efx, efx->vport.vport_id, + efx->net_dev->dev_addr); + eth_zero_addr(nic_data->vport_mac); + } + + efx_ef10_vport_free(efx, efx->vport.vport_id); + efx->vport.vport_id = EVB_PORT_ID_ASSIGNED; + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_PCI_DEV_FLAGS_ASSIGNED) + /* Only free the vswitch if no VFs are assigned */ + if (!pci_vfs_assigned(efx->pci_dev)) +#endif + efx_ef10_vswitch_free(efx, 
efx->vport.vport_id); +#endif +} + +void efx_ef10_vswitching_remove_vf(struct efx_nic *efx) +{ +#ifdef CONFIG_SFC_SRIOV + efx_ef10_vadaptor_free(efx, EVB_PORT_ID_ASSIGNED); +#endif +} + + +#ifdef CONFIG_SFC_SRIOV +static int efx_ef10_pci_sriov_enable(struct efx_nic *efx, int num_vfs) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + struct pci_dev *dev = efx->pci_dev; + int rc; + + nic_data->vf_count = num_vfs; + + rc = efx_ef10_sriov_alloc_vf_vswitching(efx); + if (rc) + goto fail1; + + rc = pci_enable_sriov(dev, num_vfs); + if (!rc) + return 0; + + efx_ef10_sriov_free_vf_vswitching(efx); +fail1: + nic_data->vf_count = 0; + netif_err(efx, probe, efx->net_dev, "Failed to enable SRIOV VFs\n"); + return rc; +} + +static int efx_ef10_pci_sriov_disable(struct efx_nic *efx, bool force) +{ + struct pci_dev *dev = efx->pci_dev; + struct efx_ef10_nic_data *nic_data = efx->nic_data; + unsigned int vfs_assigned = 0; + int i; + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_PCI_DEV_FLAGS_ASSIGNED) + vfs_assigned = pci_vfs_assigned(dev); + + if (vfs_assigned && !force) { + netif_info(efx, drv, efx->net_dev, "VFs are assigned to guests; " + "please detach them before disabling SR-IOV\n"); + return -EBUSY; + } +#endif + + if (!vfs_assigned) { + for (i = 0; i < nic_data->vf_count; i++) + nic_data->vf[i].pci_dev = NULL; + pci_disable_sriov(dev); + } + + efx_ef10_sriov_free_vf_vswitching(efx); + nic_data->vf_count = 0; + return 0; +} +#endif + +int efx_ef10_sriov_configure(struct efx_nic *efx, int num_vfs) +{ +#ifdef CONFIG_SFC_SRIOV + if (num_vfs == 0) + return efx_ef10_pci_sriov_disable(efx, false); + else + return efx_ef10_pci_sriov_enable(efx, num_vfs); +#else + return -EOPNOTSUPP; +#endif +} + +#ifdef CONFIG_SFC_SRIOV +static int efx_ef10_sriov_vf_max(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_SRIOV_CFG_OUT_LEN); + size_t outlen; + int rc; + int vf_max; + + BUILD_BUG_ON(MC_CMD_GET_SRIOV_CFG_IN_LEN != 0); + rc = efx_mcdi_rpc(efx, MC_CMD_GET_SRIOV_CFG, NULL, 0, + outbuf, sizeof(outbuf), &outlen); + if (rc) + vf_max = 0; + else + vf_max = MCDI_DWORD(outbuf, GET_SRIOV_CFG_OUT_VF_MAX); + + return vf_max; +} +#endif + +int efx_ef10_sriov_init(struct efx_nic *efx) +{ +#ifdef CONFIG_SFC_SRIOV + struct efx_ef10_nic_data *nic_data = efx->nic_data; + int vf_max = efx_ef10_sriov_vf_max(efx); + int vf_count; + int rc; + + vf_count = min(vf_max, nic_data->max_vfs); + if (vf_count > 0) { + rc = efx->type->sriov_configure(efx, vf_count); + if (rc) + return rc; + } + + + return 0; +#else + return -EOPNOTSUPP; +#endif +} + +void efx_ef10_sriov_fini(struct efx_nic *efx) +{ +#ifdef CONFIG_SFC_SRIOV + struct efx_ef10_nic_data *nic_data = efx->nic_data; + unsigned int i; + int rc; + + if (!nic_data) + return; + + if (!nic_data->vf) { + /* Remove any un-assigned orphaned VFs */ +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_PCI_DEV_FLAGS_ASSIGNED) + if (pci_num_vf(efx->pci_dev) && !pci_vfs_assigned(efx->pci_dev)) +#endif + pci_disable_sriov(efx->pci_dev); + return; + } + + /* Remove any VFs in the host */ + for (i = 0; i < nic_data->vf_count; ++i) { + struct efx_nic *vf_efx = nic_data->vf[i].efx; + + if (vf_efx) { + efx_device_detach_sync(vf_efx); + rtnl_lock(); + efx_net_stop(vf_efx->net_dev); + rtnl_unlock(); + efx_ef10_vadaptor_free(vf_efx, EVB_PORT_ID_ASSIGNED); + vf_efx->pci_dev->driver->remove(vf_efx->pci_dev); + } + } + + rc = efx_ef10_pci_sriov_disable(efx, true); + if (rc) + netif_dbg(efx, drv, efx->net_dev, "Disabling SRIOV was not successful rc=%d\n", rc); + else + 
netif_dbg(efx, drv, efx->net_dev, "SRIOV disabled\n"); +#endif +} + +#ifdef CONFIG_SFC_SRIOV + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_NDO_SET_VF_MAC) +int efx_ef10_sriov_get_vf_config(struct efx_nic *efx, int vf_i, + struct ifla_vf_info *ivf) +{ + struct ef10_vf *vf; +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_VF_LINK_STATE) + MCDI_DECLARE_BUF(inbuf, MC_CMD_LINK_STATE_MODE_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_LINK_STATE_MODE_OUT_LEN); + struct efx_ef10_nic_data *nic_data = efx->nic_data; + size_t outlen; + int rc; +#endif + + vf = efx_ef10_vf_info(efx, vf_i); + if (!vf) + return -EINVAL; + + ivf->vf = vf_i; +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_VF_INFO_MIN_TX_RATE) + ivf->min_tx_rate = 0; + ivf->max_tx_rate = 0; +#else + ivf->tx_rate = 0; +#endif + ether_addr_copy(ivf->mac, vf->mac); + ivf->vlan = (vf->vlan == EFX_FILTER_VID_UNSPEC) ? 0 : vf->vlan; + ivf->qos = 0; + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_VF_LINK_STATE) + MCDI_POPULATE_DWORD_2(inbuf, LINK_STATE_MODE_IN_FUNCTION, + LINK_STATE_MODE_IN_FUNCTION_PF, nic_data->pf_index, + LINK_STATE_MODE_IN_FUNCTION_VF, vf_i); + MCDI_SET_DWORD(inbuf, LINK_STATE_MODE_IN_NEW_MODE, + MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE); + rc = efx_mcdi_rpc(efx, MC_CMD_LINK_STATE_MODE, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + if (outlen < MC_CMD_LINK_STATE_MODE_OUT_LEN) + return -EIO; + ivf->linkstate = MCDI_DWORD(outbuf, LINK_STATE_MODE_OUT_OLD_MODE); +#endif + + return 0; +} +#endif + +static int efx_ef10_vport_del_vf_mac(struct efx_nic *efx, unsigned int port_id, + u8 *mac) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN); + MCDI_DECLARE_BUF_ERR(outbuf); + size_t outlen; + int rc; + + MCDI_SET_DWORD(inbuf, VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID, port_id); + ether_addr_copy(MCDI_PTR(inbuf, VPORT_DEL_MAC_ADDRESS_IN_MACADDR), mac); + + rc = efx_mcdi_rpc(efx, MC_CMD_VPORT_DEL_MAC_ADDRESS, inbuf, + sizeof(inbuf), outbuf, sizeof(outbuf), &outlen); + + return rc; +} + +static int efx_ef10_vport_reconfigure(struct efx_nic *efx, unsigned int port_id, + const u16 *vlan, const u8* mac, + bool *reset) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_RECONFIGURE_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_VPORT_RECONFIGURE_OUT_LEN); + struct efx_ef10_nic_data *nic_data = efx->nic_data; + size_t outlen; + int rc; + + if (!efx_ef10_has_cap(nic_data->datapath_caps, VPORT_RECONFIGURE)) + return -EOPNOTSUPP; + + MCDI_SET_DWORD(inbuf, VPORT_RECONFIGURE_IN_VPORT_ID, port_id); + if (vlan) { + MCDI_POPULATE_DWORD_1(inbuf, VPORT_RECONFIGURE_IN_FLAGS, + VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS, + 1); + if (*vlan != EFX_FILTER_VID_UNSPEC) { + MCDI_SET_DWORD(inbuf, + VPORT_RECONFIGURE_IN_NUM_VLAN_TAGS, + 1); + MCDI_POPULATE_DWORD_1(inbuf, + VPORT_RECONFIGURE_IN_VLAN_TAGS, + VPORT_RECONFIGURE_IN_VLAN_TAG_0, + *vlan); + } + } + if (mac) { + MCDI_POPULATE_DWORD_1(inbuf, VPORT_RECONFIGURE_IN_FLAGS, + VPORT_RECONFIGURE_IN_REPLACE_MACADDRS, 1); + MCDI_SET_DWORD(inbuf, VPORT_RECONFIGURE_IN_NUM_MACADDRS, + !is_zero_ether_addr(mac)); + ether_addr_copy(MCDI_PTR(inbuf, VPORT_RECONFIGURE_IN_MACADDRS), + mac); + } + + rc = efx_mcdi_rpc(efx, MC_CMD_VPORT_RECONFIGURE, + inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + if (outlen < MC_CMD_VPORT_RECONFIGURE_OUT_LEN) + return -EIO; + + *reset = ((MCDI_DWORD(outbuf, VPORT_RECONFIGURE_OUT_FLAGS) & + (1 << MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_LBN)) != 0); + + return 0; +} + +static int efx_ef10_sriov_close(struct efx_nic 
*efx) +{ + efx_device_detach_sync(efx); +#ifdef EFX_NOT_UPSTREAM +#if IS_MODULE(CONFIG_SFC_DRIVERLINK) + efx_dl_reset_suspend(&efx->dl_nic); +#endif +#endif + efx_net_stop(efx->net_dev); + + if (efx->state == STATE_NET_UP) + return -EBUSY; + + return efx_ef10_vadaptor_free(efx, EVB_PORT_ID_ASSIGNED); +} + +static int efx_ef10_sriov_reopen(struct efx_nic *efx) +{ + int rc; + + rc = efx_net_open(efx->net_dev); +#ifdef EFX_NOT_UPSTREAM +#if IS_MODULE(CONFIG_SFC_DRIVERLINK) + if (!rc) + efx_dl_reset_resume(&efx->dl_nic, efx->state != STATE_DISABLED); +#endif +#endif + return rc; +} + +int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, const u8 *mac, + bool *reset) +{ + enum nic_state old_state = STATE_UNINIT; + struct ef10_vf *vf; + int rc, rc2 = 0; + + *reset = false; + + vf = efx_ef10_vf_info(efx, vf_i); + if (!vf) + return -EINVAL; + + + if (ether_addr_equal(mac, vf->mac)) + return 0; + + /* If we have control over VF driver, do changes accurately + * without VF datapath reset triggered by VPORT_RECONFIGURE. + */ + if (vf->efx) { + old_state = vf->efx->state; + rc = efx_ef10_sriov_close(vf->efx); + if (rc) + goto reopen; + } else { + rc = efx_ef10_vport_reconfigure(efx, vf->vport_id, NULL, mac, + reset); + if (rc == 0) { + if (*reset) + netif_warn(efx, drv, efx->net_dev, + "VF %d has been reset to reconfigure MAC\n", + vf_i); + /* Successfully reconfigured */ + ether_addr_copy(vf->mac, mac); + return 0; + } else if (rc != -EOPNOTSUPP) { + return rc; + } + /* VPORT_RECONFIGURE is not supported, try to remove old + * MAC and add a new one (may be VF driver is not bound). + */ + } + + rc = efx_ef10_evb_port_assign(efx, EVB_PORT_ID_NULL, vf_i); + if (rc) { + netif_warn(efx, drv, efx->net_dev, + "Failed to change MAC on VF %d.\n", vf_i); + netif_warn(efx, drv, efx->net_dev, + "This is likely because the VF is bound to a driver in a VM.\n"); + netif_warn(efx, drv, efx->net_dev, + "Please unload the driver in the VM.\n"); + goto restore_vadaptor; + } + + if (!is_zero_ether_addr(vf->mac)) { + rc = efx_ef10_vport_del_vf_mac(efx, vf->vport_id, vf->mac); + if (rc) + goto restore_evb_port; + eth_zero_addr(vf->mac); + if (vf->efx) + eth_hw_addr_set(vf->efx->net_dev, vf->mac); + } + + if (!is_zero_ether_addr(mac)) { + rc = efx_ef10_vport_add_mac(efx, vf->vport_id, mac); + if (rc) + goto reset_nic; + ether_addr_copy(vf->mac, mac); + if (vf->efx) + eth_hw_addr_set(vf->efx->net_dev, mac); + } + +restore_evb_port: + rc = efx_ef10_evb_port_assign(efx, vf->vport_id, vf_i); + if (rc) + goto reset_nic; + +restore_vadaptor: + if (vf->efx) { + /* VF cannot use the vport_id that the PF created */ + rc = efx_ef10_vadaptor_alloc(vf->efx, EVB_PORT_ID_ASSIGNED); + if (rc) + goto reset_nic; + } +reopen: + if (vf->efx) { + if (old_state == STATE_NET_UP) { + rc2 = efx_ef10_sriov_reopen(vf->efx); + if (rc2) + goto reset_nic; + } + efx_device_attach_if_not_resetting(vf->efx); + } + + return rc; + +reset_nic: + if (vf->efx) { + netif_err(efx, drv, efx->net_dev, + "Failed to restore the VF - scheduling reset.\n"); + + efx_schedule_reset(vf->efx, RESET_TYPE_DATAPATH); + *reset = true; + } else { + netif_err(efx, drv, efx->net_dev, + "Failed to restore the VF and cannot reset the VF - VF is not functional.\n"); + netif_err(efx, drv, efx->net_dev, + "Please reload the driver attached to the VF.\n"); + } + return rc ? 
rc : rc2; +} + +int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i, u16 vlan, + u8 qos) +{ + enum nic_state old_state = STATE_UNINIT; + struct ef10_vf *vf; + u16 new_vlan; + int rc = 0, rc2 = 0; + + vf = efx_ef10_vf_info(efx, vf_i); + if (!vf) + return -EINVAL; + + new_vlan = (vlan == 0) ? EFX_FILTER_VID_UNSPEC : vlan; + if (new_vlan == vf->vlan) + return 0; + + /* If we have control over VF driver, do changes accurately + * without VF datapath reset triggered by VPORT_RECONFIGURE. + */ + if (vf->efx) { + old_state = vf->efx->state; + rc = efx_ef10_sriov_close(vf->efx); + if (rc) + goto reopen; + } else { + bool reset = false; + + rc = efx_ef10_vport_reconfigure(efx, vf->vport_id, &new_vlan, + NULL, &reset); + if (rc == 0) { + if (reset) + netif_warn(efx, drv, efx->net_dev, + "VF %d has been reset to reconfigure VLAN\n", + vf_i); + /* Successfully reconfigured */ + vf->vlan = new_vlan; + return 0; + } else if (rc != -EOPNOTSUPP) { + return rc; + } + /* VPORT_RECONFIGURE is not supported, try to cleanup + * vport, change VLAN and restore (may be VF driver is not + * bound). + */ + } + + if (vf->vport_assigned) { + rc = efx_ef10_evb_port_assign(efx, EVB_PORT_ID_NULL, vf_i); + if (rc) { + netif_warn(efx, drv, efx->net_dev, + "Failed to change vlan on VF %d.\n", vf_i); + netif_warn(efx, drv, efx->net_dev, + "This is likely because the VF is bound to a driver in a VM.\n"); + netif_warn(efx, drv, efx->net_dev, + "Please unload the driver in the VM.\n"); + goto restore_vadaptor; + } + vf->vport_assigned = 0; + } + + if (!is_zero_ether_addr(vf->mac)) { + rc = efx_ef10_vport_del_mac(efx, vf->vport_id, vf->mac); + if (rc) + goto restore_evb_port; + } + + if (vf->vport_id) { + rc = efx_ef10_vport_free(efx, vf->vport_id); + if (rc) + goto restore_mac; + vf->vport_id = 0; + } + + /* Do the actual vlan change */ + vf->vlan = new_vlan; + + /* Restore everything in reverse order */ + rc = efx_ef10_vport_alloc(efx, vf->vlan, vf->vlan_restrict, + &vf->vport_id); + if (rc) + goto reset_nic; + +restore_mac: + if (!is_zero_ether_addr(vf->mac)) { + rc2 = efx_ef10_vport_add_mac(efx, vf->vport_id, vf->mac); + if (rc2) { + eth_zero_addr(vf->mac); + goto reset_nic; + } + } + +restore_evb_port: + rc2 = efx_ef10_evb_port_assign(efx, vf->vport_id, vf_i); + if (rc2) + goto reset_nic; + vf->vport_assigned = 1; + +restore_vadaptor: + if (vf->efx) { + rc2 = efx_ef10_vadaptor_alloc(vf->efx, EVB_PORT_ID_ASSIGNED); + if (rc2) + goto reset_nic; + } + +reopen: + if (vf->efx) { + if (old_state == STATE_NET_UP) { + rc2 = efx_ef10_sriov_reopen(vf->efx); + if (rc2) + goto reset_nic; + } + efx_device_attach_if_not_resetting(vf->efx); + } + + return rc; + +reset_nic: + if (vf->efx) { + netif_err(efx, drv, efx->net_dev, + "Failed to restore the VF - scheduling reset.\n"); + + efx_schedule_reset(vf->efx, RESET_TYPE_DATAPATH); + } else { + netif_err(efx, drv, efx->net_dev, + "Failed to restore the VF and cannot reset the VF " + "- VF is not functional.\n"); + netif_err(efx, drv, efx->net_dev, + "Please reload the driver attached to the VF.\n"); + } + + return rc ? 
rc : rc2; +} + +static int efx_ef10_sriov_set_privilege_mask(struct efx_nic *efx, int vf_i, + u32 mask, u32 value) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + MCDI_DECLARE_BUF(pm_inbuf, MC_CMD_PRIVILEGE_MASK_IN_LEN); + MCDI_DECLARE_BUF(pm_outbuf, MC_CMD_PRIVILEGE_MASK_OUT_LEN); + size_t outlen; + int rc; + u32 old_mask, new_mask; + + EFX_WARN_ON_PARANOID((value & ~mask) != 0); + + /* Get privilege mask */ + MCDI_POPULATE_DWORD_2(pm_inbuf, PRIVILEGE_MASK_IN_FUNCTION, + PRIVILEGE_MASK_IN_FUNCTION_PF, nic_data->pf_index, + PRIVILEGE_MASK_IN_FUNCTION_VF, vf_i); + + rc = efx_mcdi_rpc(efx, MC_CMD_PRIVILEGE_MASK, + pm_inbuf, sizeof(pm_inbuf), + pm_outbuf, sizeof(pm_outbuf), &outlen); + + if (rc != 0) + return rc; + if (outlen != MC_CMD_PRIVILEGE_MASK_OUT_LEN) + return -EIO; + + old_mask = MCDI_DWORD(pm_outbuf, PRIVILEGE_MASK_OUT_OLD_MASK); + + new_mask = old_mask & ~mask; + new_mask |= value; + + if (new_mask == old_mask) + return 0; + + new_mask |= MC_CMD_PRIVILEGE_MASK_IN_DO_CHANGE; + + /* Set privilege mask */ + MCDI_SET_DWORD(pm_inbuf, PRIVILEGE_MASK_IN_NEW_MASK, new_mask); + + rc = efx_mcdi_rpc(efx, MC_CMD_PRIVILEGE_MASK, + pm_inbuf, sizeof(pm_inbuf), + pm_outbuf, sizeof(pm_outbuf), &outlen); + + if (rc != 0) + return rc; + if (outlen != MC_CMD_PRIVILEGE_MASK_OUT_LEN) + return -EIO; + + return 0; +} + +int efx_ef10_sriov_set_vf_spoofchk(struct efx_nic *efx, int vf_i, + bool spoofchk) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + + /* Can't enable spoofchk if firmware doesn't support it. */ + if (!(nic_data->datapath_caps & + BIT(MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_LBN)) && + spoofchk) + return -EOPNOTSUPP; + + return efx_ef10_sriov_set_privilege_mask(efx, vf_i, + MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING_TX, + spoofchk ? 
0 : MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING_TX);
+}
+
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_VF_LINK_STATE)
+int efx_ef10_sriov_set_vf_link_state(struct efx_nic *efx, int vf_i,
+				     int link_state)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_LINK_STATE_MODE_IN_LEN);
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+	BUILD_BUG_ON(IFLA_VF_LINK_STATE_AUTO != MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_AUTO);
+	BUILD_BUG_ON(IFLA_VF_LINK_STATE_ENABLE != MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_UP);
+	BUILD_BUG_ON(IFLA_VF_LINK_STATE_DISABLE != MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_DOWN);
+	MCDI_POPULATE_DWORD_2(inbuf, LINK_STATE_MODE_IN_FUNCTION,
+			      LINK_STATE_MODE_IN_FUNCTION_PF, nic_data->pf_index,
+			      LINK_STATE_MODE_IN_FUNCTION_VF, vf_i);
+	MCDI_SET_DWORD(inbuf, LINK_STATE_MODE_IN_NEW_MODE, link_state);
+	return efx_mcdi_rpc(efx, MC_CMD_LINK_STATE_MODE, inbuf, sizeof(inbuf),
+			    NULL, 0, NULL); /* don't care what old mode was */
+}
+#endif /* EFX_HAVE_VF_LINK_STATE */
+
+
+#else /* CONFIG_SFC_SRIOV */
+
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_NDO_SET_VF_MAC)
+int efx_ef10_sriov_get_vf_config(struct efx_nic *efx, int vf_i,
+				 struct ifla_vf_info *ivf)
+{
+	return -EOPNOTSUPP;
+}
+#endif
+
+int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, const u8 *mac,
+			      bool *reset)
+{
+	return -EOPNOTSUPP;
+}
+
+int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i, u16 vlan,
+			       u8 qos)
+{
+	return -EOPNOTSUPP;
+}
+
+int efx_ef10_sriov_set_vf_spoofchk(struct efx_nic *efx, int vf_i,
+				   bool spoofchk)
+{
+	return -EOPNOTSUPP;
+}
+
+#endif /* CONFIG_SFC_SRIOV */
+
+bool efx_ef10_sriov_wanted(struct efx_nic *efx)
+{
+#ifdef CONFIG_SFC_SRIOV
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+	return nic_data->max_vfs != 0 && efx_ef10_sriov_vf_max(efx) > 0;
+#else
+	return false;
+#endif
+}
diff --git a/src/drivers/net/ethernet/sfc/ef10_sriov.h b/src/drivers/net/ethernet/sfc/ef10_sriov.h
new file mode 100644
index 0000000..794016a
--- /dev/null
+++ b/src/drivers/net/ethernet/sfc/ef10_sriov.h
@@ -0,0 +1,99 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2014-2017 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EF10_SRIOV_H
+#define EF10_SRIOV_H
+
+#include "net_driver.h"
+
+
+/**
+ * struct ef10_vf - PF's store of VF data
+ * @efx: efx_nic struct for the current VF
+ * @pci_dev: the pci_dev struct for the VF, retained while the VF is assigned
+ * @vport_id: vport ID for the VF
+ * @vport_assigned: record whether the vport is currently assigned to the VF
+ * @mac: MAC address for the VF, zero when address is removed from the vport
+ * @vlan: Default VLAN for the VF or #EFX_FILTER_VID_UNSPEC
+ * @vlan_restrict: Restrict VLAN traffic the VF is allowed to receive and send
+ *	(if the Tx MAC spoofing privilege is not granted). If restricted, the
+ *	VF driver should install a filter with the VLAN to get the
+ *	corresponding traffic. The GRP_UNRESTRICTED_VLAN privilege controls
+ *	whether the filter installation is permitted.
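+ *
+ *	For illustration (hypothetical VID), a restricted VF driver would
+ *	need to insert something like:
+ *		efx_filter_set_eth_local(&spec, 100, vf_mac);
+ *	before traffic tagged with VLAN 100 is delivered to it.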
+ */ +struct ef10_vf { + struct efx_nic *efx; + struct pci_dev *pci_dev; + unsigned int vport_id; + unsigned int vport_assigned; + u8 mac[ETH_ALEN]; + u16 vlan; +/* Default VF VLAN ID on creation */ +#define EFX_VF_VID_DEFAULT EFX_FILTER_VID_UNSPEC + bool vlan_restrict; +}; + +#ifdef CONFIG_SFC_SRIOV +static inline struct ef10_vf *efx_ef10_vf_info(struct efx_nic *efx, int vf_i) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + + if (!nic_data->vf || vf_i < 0 || vf_i >= nic_data->vf_count) + return NULL; + + return nic_data->vf + vf_i; +} +#endif + +int efx_ef10_sriov_init(struct efx_nic *efx); +void efx_ef10_sriov_fini(struct efx_nic *efx); +bool efx_ef10_sriov_wanted(struct efx_nic *efx); +int efx_ef10_sriov_configure(struct efx_nic *efx, int num_vfs); +void efx_ef10_sriov_flr(struct efx_nic *efx, unsigned int flr); + +int efx_ef10_vswitching_probe_pf(struct efx_nic *efx); +int efx_ef10_vswitching_probe_vf(struct efx_nic *efx); +int efx_ef10_vswitching_restore_pf(struct efx_nic *efx); +int efx_ef10_vswitching_restore_vf(struct efx_nic *efx); +void efx_ef10_vswitching_remove_pf(struct efx_nic *efx); +void efx_ef10_vswitching_remove_vf(struct efx_nic *efx); +int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id); +int efx_ef10_vadaptor_query(struct efx_nic *efx, unsigned int port_id, + u32 *port_flags, u32 *vadaptor_flags, + unsigned int *vlan_tags); +int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id); + +int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, const u8 *mac, + bool *reset); +int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i, u16 vlan, + u8 qos); +int efx_ef10_sriov_set_vf_spoofchk(struct efx_nic *efx, int vf, bool spoofchk); +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_NDO_SET_VF_MAC) +int efx_ef10_sriov_get_vf_config(struct efx_nic *efx, int vf_i, + struct ifla_vf_info *ivf); +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_VF_LINK_STATE) +int efx_ef10_sriov_set_vf_link_state(struct efx_nic *efx, int vf_i, + int link_state); +#endif +#endif + + +/* MCFW vswitch operations */ +int efx_ef10_vswitch_alloc(struct efx_nic *efx, unsigned int port_id, + unsigned int vswitch_type); +int efx_ef10_vswitch_free(struct efx_nic *efx, unsigned int port_id); +int efx_ef10_vport_alloc(struct efx_nic *efx, u16 vlan, bool vlan_restrict, + unsigned int *port_id_out); +int efx_ef10_vport_free(struct efx_nic *efx, unsigned int port_id); +int efx_ef10_evb_port_assign(struct efx_nic *efx, unsigned int port_id, + unsigned int vf_fn); +int efx_ef10_vport_add_mac(struct efx_nic *efx, unsigned int port_id, const u8 *mac); +int efx_ef10_vport_del_mac(struct efx_nic *efx, unsigned int port_id, const u8 *mac); + +#endif /* EF10_SRIOV_H */ diff --git a/src/drivers/net/ethernet/sfc/efx.c b/src/drivers/net/ethernet/sfc/efx.c new file mode 100644 index 0000000..8714daa --- /dev/null +++ b/src/drivers/net/ethernet/sfc/efx.c @@ -0,0 +1,1930 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2005-2017 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/notifier.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/in.h>
+#include <linux/ethtool.h>
+#include <linux/topology.h>
+#include <linux/gfp.h>
+#include <linux/interrupt.h>
+#if defined(EFX_USE_KCOMPAT) && defined(EFX_HAVE_PCI_ENABLE_PCIE_ERROR_REPORTING)
+#include <linux/aer.h>
+#endif
+#include <linux/uaccess.h>
+#ifdef EFX_NOT_UPSTREAM
+#include <linux/moduleparam.h>
+#endif
+#if defined(CONFIG_EEH)
+#include <asm/eeh.h>
+#endif
+#include "net_driver.h"
+#include <net/gre.h>
+#if defined(EFX_USE_KCOMPAT) && (defined(EFX_HAVE_NDO_ADD_VXLAN_PORT) || defined(EFX_HAVE_NDO_UDP_TUNNEL_ADD))
+#include <net/vxlan.h>
+#endif
+#if defined(EFX_USE_KCOMPAT) && defined(EFX_HAVE_NDO_UDP_TUNNEL_ADD)
+#include <net/udp_tunnel.h>
+#endif
+#include "debugfs.h"
+#ifdef CONFIG_SFC_DUMP
+#include "dump.h"
+#endif
+#include "efx.h"
+#include "efx_common.h"
+#include "efx_channels.h"
+#include "ef100.h"
+#include "nic.h"
+#include "ef100_nic.h"
+#include "io.h"
+#include "rx_common.h"
+#include "tx_common.h"
+#include "efx_devlink.h"
+#include "efx_auxbus.h"
+#include "selftest.h"
+#include "sriov.h"
+#include "xdp.h"
+#ifdef EFX_USE_KCOMPAT
+#include "efx_ioctl.h"
+#endif
+
+#include "mcdi_port_common.h"
+#include "mcdi_filters.h"
+#include "mcdi_pcol.h"
+#include "workarounds.h"
+
+#ifdef CONFIG_SFC_TRACING
+#define CREATE_TRACE_POINTS
+#include <trace/events/sfc.h>
+#endif
+
+#ifdef EFX_NOT_UPSTREAM
+/* Allocate resources for XDP transmit and redirect functionality.
+ *
+ * This allocates a transmit queue per CPU and enough event queues to cover
+ * those - multiple transmit queues will share a single event queue.
+ */
+static bool xdp_alloc_tx_resources;
+module_param(xdp_alloc_tx_resources, bool, 0444);
+MODULE_PARM_DESC(xdp_alloc_tx_resources,
+		 "[EXPERIMENTAL] Allocate resources for XDP TX");
+#endif
+
+/**************************************************************************
+ *
+ * Type name strings
+ *
+ **************************************************************************
+ */
+
+/* UDP tunnel type names */
+static const char *efx_udp_tunnel_type_names[] = {
+	[TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN] = "vxlan",
+	[TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE] = "geneve",
+};
+
+void efx_get_udp_tunnel_type_name(u16 type, char *buf, size_t buflen)
+{
+	if (type < ARRAY_SIZE(efx_udp_tunnel_type_names) &&
+	    efx_udp_tunnel_type_names[type] != NULL)
+		snprintf(buf, buflen, "%s", efx_udp_tunnel_type_names[type]);
+	else
+		snprintf(buf, buflen, "type %d", type);
+}
+
+/**************************************************************************
+ *
+ * Configurable values
+ *
+ *************************************************************************/
+
+#if defined(EFX_USE_KCOMPAT) && (defined(EFX_USE_GRO) || defined(EFX_USE_SFC_LRO))
+/*
+ * Enable large receive offload (LRO) aka soft segment reassembly (SSR)
+ *
+ * This sets the default for new devices. It can be controlled later
+ * using ethtool.
+ */
+static bool lro = true;
+module_param(lro, bool, 0444);
+MODULE_PARM_DESC(lro, "Large receive offload acceleration");
+#endif
+
+extern bool separate_tx_channels;
+module_param(separate_tx_channels, bool, 0444);
+MODULE_PARM_DESC(separate_tx_channels,
+		 "Use separate channels for TX and RX");
+
+/* Initial interrupt moderation settings. They can be modified after
+ * module load with ethtool.
+ *
+ * The default for RX should strike a balance between increasing the
+ * round-trip latency and reducing overhead.
+ */
+static unsigned int rx_irq_mod_usec = 60;
+
+/* Initial interrupt moderation settings. They can be modified after
+ * module load with ethtool.
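+ * (For example, `ethtool -C <interface> tx-usecs 150' adjusts the TX
+ * setting at runtime.)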
+ * + * This default is chosen to ensure that a 10G link does not go idle + * while a TX queue is stopped after it has become full. A queue is + * restarted when it drops below half full. The time this takes (assuming + * worst case 3 descriptors per packet and 1024 descriptors) is + * 512 / 3 * 1.2 = 205 usec. + */ +static unsigned int tx_irq_mod_usec = 150; + +module_param_named(interrupt_mode, efx_interrupt_mode, uint, 0444); +MODULE_PARM_DESC(interrupt_mode, + "Interrupt mode (0=>MSIX 1=>MSI)"); + +#if !defined(EFX_NOT_UPSTREAM) +module_param(rss_cpus, uint, 0444); +MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling"); + +#endif +static bool irq_adapt_enable = true; +module_param(irq_adapt_enable, bool, 0444); +MODULE_PARM_DESC(irq_adapt_enable, + "Enable adaptive interrupt moderation"); + +static unsigned int rx_ring = EFX_DEFAULT_RX_DMAQ_SIZE; +module_param(rx_ring, uint, 0644); +MODULE_PARM_DESC(rx_ring, + "Maximum number of descriptors in a receive ring"); + +static unsigned int tx_ring = EFX_DEFAULT_TX_DMAQ_SIZE; +module_param(tx_ring, uint, 0644); +MODULE_PARM_DESC(tx_ring, + "Maximum number of descriptors in a transmit ring"); + +#ifdef EFX_NOT_UPSTREAM +int efx_target_num_vis = -1; +module_param_named(num_vis, efx_target_num_vis, int, 0644); +MODULE_PARM_DESC(num_vis, "Set number of VIs"); +#endif + +#ifdef EFX_NOT_UPSTREAM +static char *performance_profile; +module_param(performance_profile, charp, 0444); +MODULE_PARM_DESC(performance_profile, + "Tune settings for different performance profiles: 'throughput', 'latency' or (default) 'auto'"); +#endif + +/************************************************************************** + * + * Port handling + * + **************************************************************************/ + +static void efx_fini_port(struct efx_nic *efx); + +static int efx_init_port(struct efx_nic *efx) +{ + int rc; + + netif_dbg(efx, drv, efx->net_dev, "init port\n"); + + mutex_lock(&efx->mac_lock); + + efx->port_initialized = true; + + /* Ensure the PHY advertises the correct flow control settings */ + rc = efx_mcdi_port_reconfigure(efx); + if (rc && rc != -EPERM) + goto fail; + + mutex_unlock(&efx->mac_lock); + return 0; + +fail: + efx->port_initialized = false; + mutex_unlock(&efx->mac_lock); + return rc; +} + +static void efx_fini_port(struct efx_nic *efx) +{ + netif_dbg(efx, drv, efx->net_dev, "shut down port\n"); + + if (!efx->port_initialized) + return; + + efx->port_initialized = false; + + efx->link_state.up = false; + efx_link_status_changed(efx); +} + +/************************************************************************** + * + * Interrupt moderation + * + **************************************************************************/ +unsigned int efx_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs) +{ + if (usecs == 0) + return 0; + if (usecs * 1000 < efx->timer_quantum_ns) + return 1; /* never round down to 0 */ + return usecs * 1000 / efx->timer_quantum_ns; +} + +unsigned int efx_ticks_to_usecs(struct efx_nic *efx, unsigned int ticks) +{ + /* We must round up when converting ticks to microseconds + * because we round down when converting the other way. 
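+	 *
+	 * Worked example (hypothetical timer_quantum_ns of 6144, i.e. one
+	 * tick is 6.144 us):
+	 *	efx_ticks_to_usecs(efx, 3) = DIV_ROUND_UP(3 * 6144, 1000) = 19 us
+	 *	efx_usecs_to_ticks(efx, 19) = 19000 / 6144 = 3 ticks
+	 * Rounding down to 18 us instead would read back as
+	 * 18000 / 6144 = 2 ticks, silently losing a tick on the round trip.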
+ */ + return DIV_ROUND_UP(ticks * efx->timer_quantum_ns, 1000); +} + +/* Set interrupt moderation parameters */ +int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs, + unsigned int rx_usecs, bool rx_adaptive, + bool rx_may_override_tx) +{ + struct efx_channel *channel; + unsigned int timer_max_us; + + EFX_ASSERT_RESET_SERIALISED(efx); + + timer_max_us = efx->timer_max_ns / 1000; + + if (tx_usecs > timer_max_us || rx_usecs > timer_max_us) + return -EINVAL; + + if (tx_usecs != rx_usecs && efx->tx_channel_offset == 0 && + !rx_may_override_tx) { + netif_err(efx, drv, efx->net_dev, "Channels are shared. " + "RX and TX IRQ moderation must be equal\n"); + return -EINVAL; + } + + efx->irq_rx_adaptive = rx_adaptive; + efx->irq_rx_moderation_us = rx_usecs; + efx_for_each_channel(channel, efx) { + if (efx_channel_has_rx_queue(channel)) + channel->irq_moderation_us = rx_usecs; + else if (efx_channel_has_tx_queues(channel)) + channel->irq_moderation_us = tx_usecs; + else if (efx_channel_is_xdp_tx(channel)) + channel->irq_moderation_us = tx_usecs; + } + + return 0; +} + +void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs, + unsigned int *rx_usecs, bool *rx_adaptive) +{ + *rx_adaptive = efx->irq_rx_adaptive; + *rx_usecs = efx->irq_rx_moderation_us; + + /* If channels are shared between RX and TX, so is IRQ + * moderation. Otherwise, IRQ moderation is the same for all + * TX channels and is not adaptive. + */ + if (efx->tx_channel_offset == 0) { + *tx_usecs = *rx_usecs; + } else { + struct efx_channel *tx_channel; + + tx_channel = efx_get_channel(efx, efx->tx_channel_offset); + *tx_usecs = tx_channel->irq_moderation_us; + } +} + +/************************************************************************** + * + * ioctls + * + *************************************************************************/ + +#if defined(EFX_NOT_UPSTREAM) && defined(EFX_USE_PRIVATE_IOCTL) +static int efx_do_siocefx(struct net_device *net_dev, struct ifreq *ifr, + struct efx_sock_ioctl __user *user_data) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + u16 efx_cmd; + + if (copy_from_user(&efx_cmd, &user_data->cmd, sizeof(efx_cmd))) + return -EFAULT; + return efx_private_ioctl(efx, efx_cmd, &user_data->u); +} +#endif + +#if defined(EFX_NOT_UPSTREAM) && defined(EFX_USE_PRIVATE_IOCTL) +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_NDO_SIOCDEVPRIVATE) +static int efx_siocdevprivate(struct net_device *net_dev, struct ifreq *ifr, + void __user *data, int cmd) +{ + if (cmd == SIOCEFX) + return efx_do_siocefx(net_dev, ifr, data); + return -EOPNOTSUPP; +} +#endif +#endif + +static int efx_eth_ioctl(struct net_device *net_dev, struct ifreq *ifr, + int cmd) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + + switch (cmd) { + case SIOCSHWTSTAMP: + return efx_ptp_set_ts_config(efx, ifr); + case SIOCGHWTSTAMP: + return efx_ptp_get_ts_config(efx, ifr); + default: + return -EOPNOTSUPP; + } +} + +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_NDO_SIOCDEVPRIVATE) && !defined(EFX_HAVE_NDO_ETH_IOCTL) +/* Net device ioctl + * Context: process, rtnl_lock() held. 
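+ *
+ * SIOCSHWTSTAMP/SIOCGHWTSTAMP are forwarded to the PTP code. A minimal
+ * userspace sketch (illustrative only; "eth0" and sock_fd are
+ * placeholders):
+ *
+ *	struct hwtstamp_config cfg = { .tx_type = HWTSTAMP_TX_ON,
+ *				       .rx_filter = HWTSTAMP_FILTER_ALL };
+ *	struct ifreq ifr = {};
+ *
+ *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
+ *	ifr.ifr_data = (char *)&cfg;
+ *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);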
+ */ +static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { +#if defined(EFX_NOT_UPSTREAM) && defined(EFX_USE_PRIVATE_IOCTL) +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_NDO_SIOCDEVPRIVATE) + case SIOCEFX: + return efx_do_siocefx(net_dev, ifr, ifr->ifr_data); +#endif +#endif +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_NDO_ETH_IOCTL) + case SIOCSHWTSTAMP: + case SIOCGHWTSTAMP: + return efx_eth_ioctl(net_dev, ifr, cmd); +#endif + default: + return -EOPNOTSUPP; + } +} +#endif + +/************************************************************************** + * + * Kernel net device interface + * + *************************************************************************/ + +/* Context: process, rtnl_lock() held. */ +int efx_net_open(struct net_device *net_dev) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + int rc; + + netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n", + raw_smp_processor_id()); + + rc = __efx_net_alloc(efx); + if (rc) + goto fail; + + /* Notify the kernel of the link state polled during driver load, + * before the monitor starts running + */ + efx_link_status_changed(efx); + + rc = efx_start_all(efx); + if (rc) + goto fail; + + if (efx->state == STATE_DISABLED || efx->reset_pending) { +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_TC_OFFLOAD) + if (efx->type->detach_reps) + efx->type->detach_reps(efx); +#endif + netif_device_detach(efx->net_dev); + } else { + efx->state = STATE_NET_UP; + } + + efx_selftest_async_start(efx); + + return 0; + +fail: + efx_net_stop(net_dev); + return rc; +} + +/* Allocate resources and transition state from + * STATE_NET_DOWN to STATE_ALLOCATED. + * After calling this function, you must call efx_net_dealloc + * *even if it fails*. + */ +int __efx_net_alloc(struct efx_nic *efx) +{ + unsigned int loops = 2; + int rc; + +#ifdef EFX_NOT_UPSTREAM +#if IS_MODULE(CONFIG_SFC_DRIVERLINK) + if (efx->open_count++) { + netif_dbg(efx, drv, efx->net_dev, + "already open, now by %hu clients\n", efx->open_count); + /* inform the kernel about link state again */ + efx_link_status_changed(efx); + return 0; + } +#endif +#endif + + rc = efx_check_disabled(efx); + if (rc) + return rc; + if (efx->phy_mode & PHY_MODE_SPECIAL) + return -EBUSY; + if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL)) + return -EIO; + efx->reset_count = 0; + + efx->stats_initialised = false; + + rc = efx_init_channels(efx); + if (rc) + return rc; + + do { + if (!efx->max_channels || !efx->max_tx_channels) { + netif_err(efx, drv, efx->net_dev, + "Insufficient resources to allocate any channels\n"); + return -ENOSPC; + } + + /* Determine the number of channels and queues by trying to hook + * in MSI-X interrupts. 
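+		 * (The interrupt_mode module parameter can force plain MSI
+		 * instead; see its MODULE_PARM_DESC earlier in this file.)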
+		 */
+		rc = efx_probe_interrupts(efx);
+		if (rc)
+			return rc;
+
+		rc = efx_set_channels(efx);
+		if (rc)
+			return rc;
+
+		/* dimension_resources can fail with EAGAIN */
+		rc = efx->type->dimension_resources(efx);
+		if (rc != 0 && rc != -EAGAIN)
+			return rc;
+
+		if (rc == -EAGAIN) {
+			/* try again with new max_channels */
+			efx_unset_channels(efx);
+			efx_remove_interrupts(efx);
+		}
+	} while (rc == -EAGAIN && --loops);
+	/* rc should be 0 here; it can only be -EAGAIN if we ran out of
+	 * retries, since any other error was returned above.
+	 */
+	WARN_ON(rc);
+
+	rc = efx_probe_channels(efx);
+	if (rc)
+		return rc;
+
+	rc = efx_init_napi(efx);
+	if (rc)
+		return rc;
+
+	rc = efx_init_port(efx);
+	if (rc)
+		return rc;
+
+	rc = efx_init_filters(efx);
+	if (rc)
+		return rc;
+
+	rc = efx_nic_init_interrupt(efx);
+	if (rc)
+		return rc;
+	efx_set_interrupt_affinity(efx);
+#ifdef EFX_USE_IRQ_NOTIFIERS
+	efx_register_irq_notifiers(efx);
+#endif
+
+	down_write(&efx->filter_sem);
+	rc = efx->type->init(efx);
+	up_write(&efx->filter_sem);
+	if (rc)
+		return rc;
+
+	rc = efx_enable_interrupts(efx);
+	if (rc)
+		return rc;
+
+	efx->state = STATE_NET_ALLOCATED;
+
+	return 0;
+}
+
+/* Context: process, rtnl_lock() held.
+ * Note that the kernel will ignore our return code; this method
+ * should really be a void.
+ */
+int efx_net_stop(struct net_device *net_dev)
+{
+	struct efx_nic *efx = efx_netdev_priv(net_dev);
+
+	netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
+		  raw_smp_processor_id());
+
+	netif_stop_queue(efx->net_dev);
+	efx_stop_all(efx);
+
+	efx->state = STATE_NET_ALLOCATED;
+
+	efx_net_dealloc(efx);
+
+	return 0;
+}
+
+void __efx_net_dealloc(struct efx_nic *efx)
+{
+#ifdef EFX_NOT_UPSTREAM
+#if IS_MODULE(CONFIG_SFC_DRIVERLINK)
+	if (efx->open_count && --efx->open_count) {
+		netif_dbg(efx, drv, efx->net_dev, "still open by %hu clients\n",
+			  efx->open_count);
+		return;
+	}
+#endif
+#endif
+
+	if (efx->state == STATE_DISABLED)
+		return;
+
+	efx_disable_interrupts(efx);
+	if (efx->type->fini)
+		efx->type->fini(efx);
+	efx_clear_interrupt_affinity(efx);
+#ifdef EFX_USE_IRQ_NOTIFIERS
+	efx_unregister_irq_notifiers(efx);
+#endif
+	efx_nic_fini_interrupt(efx);
+	efx_fini_filters(efx);
+	efx_fini_port(efx);
+	efx_fini_napi(efx);
+	efx_remove_channels(efx);
+	if (efx->type->free_resources)
+		efx->type->free_resources(efx);
+	efx_unset_channels(efx);
+	efx_remove_interrupts(efx);
+	efx_fini_channels(efx);
+
+	efx->state = STATE_NET_DOWN;
+}
+
+#if defined(EFX_NOT_UPSTREAM) && defined(EFX_HAVE_VLAN_RX_PATH)
+void efx_vlan_rx_register(struct net_device *dev, struct vlan_group *vlan_group)
+{
+	struct efx_nic *efx = efx_netdev_priv(dev);
+	struct efx_channel *channel;
+
+	/* Before changing efx_nic::vlan_group to null, we must flush
+	 * out all VLAN-tagged skbs currently in the software RX
+	 * pipeline. Changing it to non-null might be safe, but we
+	 * conservatively pause the RX path in both cases.
+ */ + efx_for_each_channel(channel, efx) + if (efx_channel_has_rx_queue(channel)) + efx_stop_eventq(channel); + + efx->vlan_group = vlan_group; + + efx_for_each_channel(channel, efx) + if (efx_channel_has_rx_queue(channel)) + efx_start_eventq(channel); +} + +#endif /* EFX_NOT_UPSTREAM */ + +#if defined(EFX_USE_KCOMPAT) +#if defined(EFX_HAVE_NDO_UDP_TUNNEL_ADD) && !defined(EFX_HAVE_UDP_TUNNEL_NIC_INFO) +static int efx_udp_tunnel_type_map(enum udp_parsable_tunnel_type in) +{ + switch (in) { + case UDP_TUNNEL_TYPE_VXLAN: + return TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN; + case UDP_TUNNEL_TYPE_GENEVE: + return TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE; + default: + return -1; + } +} + +static void efx_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti) +{ + struct efx_nic *efx = efx_netdev_priv(dev); + struct efx_udp_tunnel tnl; + int efx_tunnel_type; + + efx_tunnel_type = efx_udp_tunnel_type_map(ti->type); + if (efx_tunnel_type < 0) + return; + + tnl.type = (u16)efx_tunnel_type; + tnl.port = ti->port; + + if (efx->type->udp_tnl_add_port) + efx->type->udp_tnl_add_port(efx, tnl); +} + +static void efx_udp_tunnel_del(struct net_device *dev, struct udp_tunnel_info *ti) +{ + struct efx_nic *efx = efx_netdev_priv(dev); + struct efx_udp_tunnel tnl; + int efx_tunnel_type; + + efx_tunnel_type = efx_udp_tunnel_type_map(ti->type); + if (efx_tunnel_type < 0) + return; + + tnl.type = (u16)efx_tunnel_type; + tnl.port = ti->port; + + if (efx->type->udp_tnl_del_port) + efx->type->udp_tnl_del_port(efx, tnl); +} +#else +#if defined(EFX_HAVE_NDO_ADD_VXLAN_PORT) +static void efx_vxlan_add_port(struct net_device *dev, sa_family_t sa_family, + __be16 port) +{ + struct efx_udp_tunnel tnl = {.port = port, + .type = TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN}; + struct efx_nic *efx = efx_netdev_priv(dev); + + if (efx->type->udp_tnl_add_port) + efx->type->udp_tnl_add_port(efx, tnl); +} + +static void efx_vxlan_del_port(struct net_device *dev, sa_family_t sa_family, + __be16 port) +{ + struct efx_udp_tunnel tnl = {.port = port, + .type = TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN}; + struct efx_nic *efx = efx_netdev_priv(dev); + + if (efx->type->udp_tnl_del_port) + efx->type->udp_tnl_del_port(efx, tnl); +} +#endif +#if defined(EFX_HAVE_NDO_ADD_GENEVE_PORT) +static void efx_geneve_add_port(struct net_device *dev, sa_family_t sa_family, + __be16 port) +{ + struct efx_udp_tunnel tnl = {.port = port, + .type = TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE}; + struct efx_nic *efx = efx_netdev_priv(dev); + + if (efx->type->udp_tnl_add_port) + efx->type->udp_tnl_add_port(efx, tnl); +} + +static void efx_geneve_del_port(struct net_device *dev, sa_family_t sa_family, + __be16 port) +{ + struct efx_udp_tunnel tnl = {.port = port, + .type = TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE}; + struct efx_nic *efx = efx_netdev_priv(dev); + + if (efx->type->udp_tnl_del_port) + efx->type->udp_tnl_del_port(efx, tnl); +} +#endif +#endif +#endif + +extern const struct net_device_ops efx_netdev_ops; + +#if defined(EFX_USE_KCOMPAT) && defined(EFX_HAVE_NET_DEVICE_OPS_EXT) +extern const struct net_device_ops_ext efx_net_device_ops_ext; +#endif + +static void efx_update_name(struct efx_nic *efx) +{ + strcpy(efx->name, efx->net_dev->name); + +#if defined(CONFIG_SFC_MTD) && !defined(EFX_WORKAROUND_87308) + efx_mtd_rename(efx); +#endif + + efx_set_channel_names(efx); + efx_update_debugfs_netdev(efx); +} + +static int efx_netdev_event(struct notifier_block *this, + unsigned long event, void *ptr) +{ + struct efx_nic *efx = container_of(this, struct efx_nic, netdev_notifier); + struct 
net_device *net_dev = netdev_notifier_info_to_dev(ptr); + + if (efx->net_dev == net_dev && + (event == NETDEV_CHANGENAME || event == NETDEV_REGISTER)) { + efx_update_name(efx); + +#if defined(CONFIG_SFC_MTD) && defined(EFX_WORKAROUND_87308) + if (efx->mtd_struct && + (atomic_xchg(&efx->mtd_struct->probed_flag, 1) == 0)) + (void)efx_mtd_probe(efx); +#endif + } + + return NOTIFY_DONE; +} + +#if defined(EFX_NOT_UPSTREAM) && defined(EFX_USE_SFC_LRO) +static ssize_t lro_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); + return scnprintf(buf, PAGE_SIZE, "%d\n", efx_ssr_enabled(efx)); +} +static ssize_t lro_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); + bool enable = count > 0 && *buf != '0'; + ssize_t rc; + + rtnl_lock(); + if (!efx->lro_available && enable) { + rc = -EINVAL; + goto out; + } +#ifdef NETIF_F_LRO + if (enable != !!(efx->net_dev->features & NETIF_F_LRO)) { + efx->net_dev->features ^= NETIF_F_LRO; + netdev_features_change(efx->net_dev); + } +#else + efx->lro_enabled = enable; +#endif + rc = count; +out: + rtnl_unlock(); + return rc; +} +static DEVICE_ATTR_RW(lro); +#endif + +static ssize_t phy_type_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); + return scnprintf(buf, PAGE_SIZE, "%d\n", efx->phy_type); +} +static DEVICE_ATTR_RO(phy_type); + +static void efx_init_features(struct efx_nic *efx) +{ + struct net_device *net_dev = efx->net_dev; + + efx->fixed_features |= NETIF_F_HIGHDMA; + net_dev->features |= (efx->type->offload_features | NETIF_F_SG | + NETIF_F_TSO | NETIF_F_TSO_ECN | + NETIF_F_RXCSUM | NETIF_F_RXALL); + if (efx->type->offload_features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM)) + net_dev->features |= NETIF_F_TSO6; +#if defined(EFX_USE_KCOMPAT) && defined(EFX_USE_GRO) + if (lro) + net_dev->features |= NETIF_F_GRO; +#endif +#if defined(EFX_NOT_UPSTREAM) && defined(EFX_USE_SFC_LRO) + if (lro) { +#if defined(NETIF_F_LRO) + net_dev->features |= NETIF_F_LRO; +#endif + } +#endif +#if defined(EFX_NOT_UPSTREAM) && defined(EFX_USE_FAKE_VLAN_RX_ACCEL) + /* soft VLAN acceleration cannot be disabled at runtime */ + efx->fixed_features |= NETIF_F_HW_VLAN_CTAG_RX; +#endif +#if defined(EFX_NOT_UPSTREAM) && defined(EFX_USE_FAKE_VLAN_TX_ACCEL) + efx->fixed_features |= NETIF_F_HW_VLAN_CTAG_TX; +#endif + /* Mask for features that also apply to VLAN devices */ + net_dev->vlan_features |= (NETIF_F_CSUM_MASK | NETIF_F_SG | + NETIF_F_HIGHDMA | NETIF_F_ALL_TSO | + NETIF_F_RXCSUM); + + efx_add_hw_features(efx, net_dev->features & ~efx->fixed_features); + + /* Disable receiving frames with bad FCS, by default. */ + net_dev->features &= ~NETIF_F_RXALL; + + /* Disable VLAN filtering by default. It may be enforced if + * the feature is fixed (i.e. VLAN filters are required to + * receive VLAN tagged packets due to vPort restrictions). 
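+	 * When the feature is not fixed, userspace can re-enable it at
+	 * runtime, e.g. `ethtool -K <interface> rx-vlan-filter on'
+	 * (NETIF_F_HW_VLAN_CTAG_FILTER is ethtool's rx-vlan-filter).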
+	 */
+	net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+	net_dev->features |= efx->fixed_features;
+}
+
+static int efx_register_netdev(struct efx_nic *efx)
+{
+	struct net_device *net_dev = efx->net_dev;
+	int rc;
+
+	net_dev->watchdog_timeo = 5 * HZ;
+	net_dev->irq = efx->pci_dev->irq;
+	net_dev->netdev_ops = &efx_netdev_ops;
+#if !defined(EFX_USE_KCOMPAT) || !defined(EFX_HAVE_NDO_SET_MULTICAST_LIST)
+	if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
+		net_dev->priv_flags |= IFF_UNICAST_FLT;
+#endif
+#if defined(EFX_USE_KCOMPAT) && defined(EFX_HAVE_NETDEV_RFS_INFO)
+#ifdef CONFIG_RFS_ACCEL
+	netdev_extended(net_dev)->rfs_data.ndo_rx_flow_steer = efx_filter_rfs;
+#endif
+#endif
+#if !defined(EFX_USE_KCOMPAT) || !defined(SET_ETHTOOL_OPS)
+	net_dev->ethtool_ops = &efx_ethtool_ops;
+#else
+	SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);
+#endif
+#if defined(EFX_USE_KCOMPAT) && defined(EFX_USE_ETHTOOL_OPS_EXT)
+	set_ethtool_ops_ext(net_dev, &efx_ethtool_ops_ext);
+#endif
+	netif_set_tso_max_segs(net_dev, EFX_TSO_MAX_SEGS);
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_NETDEV_MTU_LIMITS)
+	net_dev->min_mtu = EFX_MIN_MTU;
+	net_dev->max_mtu = EFX_MAX_MTU;
+#elif defined(EFX_HAVE_NETDEV_EXT_MTU_LIMITS)
+	net_dev->extended->min_mtu = EFX_MIN_MTU;
+	net_dev->extended->max_mtu = EFX_MAX_MTU;
+#endif
+
+#if defined(EFX_USE_KCOMPAT) && defined(EFX_HAVE_NDO_EXT_BUSY_POLL)
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	netdev_extended(net_dev)->ndo_busy_poll = efx_busy_poll;
+#endif
+#endif
+
+#if defined(EFX_USE_KCOMPAT) && defined(EFX_HAVE_NET_DEVICE_OPS_EXT)
+	set_netdev_ops_ext(net_dev, &efx_net_device_ops_ext);
+#endif
+
+	rtnl_lock();
+
+	/* If there was a scheduled reset during probe, the NIC is
+	 * probably hosed anyway. We must do this in the same locked
+	 * section as we set the initial device state.
+	 */
+	if (efx->reset_pending) {
+		pci_err(efx->pci_dev, "aborting probe due to scheduled reset\n");
+		rc = -EIO;
+		goto fail_locked;
+	}
+
+	rc = dev_alloc_name(net_dev, net_dev->name);
+	if (rc < 0)
+		goto fail_locked;
+	efx_update_name(efx);
+
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_SET_NETDEV_DEVLINK_PORT)
+#ifdef CONFIG_NET_DEVLINK
+	SET_NETDEV_DEVLINK_PORT(net_dev, efx->devlink_port);
+#endif
+#endif
+	rc = register_netdevice(net_dev);
+	if (rc)
+		goto fail_locked;
+
+	/* Always start with carrier off; PHY events will detect the link */
+	netif_carrier_off(net_dev);
+
+	efx->state = STATE_NET_DOWN;
+
+	rtnl_unlock();
+
+	efx_init_mcdi_logging(efx);
+	efx_probe_devlink(efx);
+
+	return 0;
+
+fail_locked:
+	rtnl_unlock();
+	netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
+	return rc;
+}
+
+static void efx_unregister_netdev(struct efx_nic *efx)
+{
+	if (WARN_ON(efx_netdev_priv(efx->net_dev) != efx))
+		return;
+
+#if defined(EFX_NOT_UPSTREAM)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
+	/* bug11519: This has only been seen on fc4, but the bug has never
+	 * been fully understood - so this workaround is applied to a range
+	 * of kernels. The issue is that if dev_close() is run too close
+	 * to a driver unload, then netlink can allow userspace to leak a
+	 * reference count. Sleeping here for a bit lowers the probability
+	 * of seeing this failure.
*/ + schedule_timeout_uninterruptible(HZ * 2); + +#endif +#endif + if (efx_dev_registered(efx)) { + strscpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name)); + efx_fini_devlink(efx); + efx_fini_mcdi_logging(efx); + rtnl_lock(); + unregister_netdevice(efx->net_dev); + efx->state = STATE_UNINIT; + rtnl_unlock(); + } +} + +/************************************************************************** + * + * List of NICs we support + * + **************************************************************************/ + +/* PCI device ID table. + * On changes make sure to update sfc_pci_table, below + */ +static const struct pci_device_id efx_pci_table[] = { + {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0903), /* SFC9120 PF */ + .driver_data = (unsigned long) &efx_hunt_a0_nic_type}, + {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1903), /* SFC9120 VF */ + .class = PCI_CLASS_NETWORK_ETHERNET << 8, + .class_mask = 0xffff00, + .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type}, + {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0923), /* SFC9140 PF */ + .driver_data = (unsigned long) &efx_hunt_a0_nic_type}, + {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1923), /* SFC9140 VF */ + .class = PCI_CLASS_NETWORK_ETHERNET << 8, + .class_mask = 0xffff00, + .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type}, + {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0a03), /* SFC9220 PF */ + .driver_data = (unsigned long) &efx_hunt_a0_nic_type}, + {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1a03), /* SFC9220 VF */ + .class = PCI_CLASS_NETWORK_ETHERNET << 8, + .class_mask = 0xffff00, + .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type}, + {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0b03), /* SFC9250 PF */ + .driver_data = (unsigned long) &efx_hunt_a0_nic_type}, + {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1b03), /* SFC9250 VF */ + .class = PCI_CLASS_NETWORK_ETHERNET << 8, + .class_mask = 0xffff00, + .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type}, + {0} /* end of list */ +}; + +/* Module device ID table - efx_pci_table + ef100_pci_table */ +static const struct pci_device_id sfc_pci_table[] = { + {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0903), /* SFC9120 PF */ + .driver_data = (unsigned long) &efx_hunt_a0_nic_type}, + {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1903), /* SFC9120 VF */ + .class = PCI_CLASS_NETWORK_ETHERNET << 8, + .class_mask = 0xffff00, + .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type}, + {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0923), /* SFC9140 PF */ + .driver_data = (unsigned long) &efx_hunt_a0_nic_type}, + {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1923), /* SFC9140 VF */ + .class = PCI_CLASS_NETWORK_ETHERNET << 8, + .class_mask = 0xffff00, + .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type}, + {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0a03), /* SFC9220 PF */ + .driver_data = (unsigned long) &efx_hunt_a0_nic_type}, + {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1a03), /* SFC9220 VF */ + .class = PCI_CLASS_NETWORK_ETHERNET << 8, + .class_mask = 0xffff00, + .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type}, + {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0b03), /* SFC9250 PF */ + .driver_data = (unsigned long) &efx_hunt_a0_nic_type}, + {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1b03), /* SFC9250 VF */ + .class = PCI_CLASS_NETWORK_ETHERNET << 8, + .class_mask = 0xffff00, + .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type}, + {PCI_DEVICE(PCI_VENDOR_ID_XILINX, 0x0100), /* Riverhead PF */ + .driver_data = (unsigned long) &ef100_pf_nic_type }, + {PCI_DEVICE(PCI_VENDOR_ID_XILINX, 0x1100), /* Riverhead VF */ + .class = 
PCI_CLASS_NETWORK_ETHERNET << 8, + .class_mask = 0xffff00, + .driver_data = (unsigned long) &ef100_vf_nic_type }, + {0} /* end of list */ +}; + +void efx_update_sw_stats(struct efx_nic *efx, u64 *stats) +{ + struct efx_rx_queue *rx_queue; + struct efx_channel *channel; + u64 n_rx_nodesc_trunc = 0; + + efx_for_each_channel(channel, efx) + efx_for_each_channel_rx_queue(rx_queue, channel) + n_rx_nodesc_trunc += rx_queue->n_rx_nodesc_trunc; + stats[GENERIC_STAT_rx_nodesc_trunc] = n_rx_nodesc_trunc; + stats[GENERIC_STAT_rx_noskb_drops] = atomic_read(&efx->n_rx_noskb_drops); +} + +/************************************************************************** + * + * PCI interface + * + **************************************************************************/ + +void efx_pci_remove_post_io(struct efx_nic *efx, + void (*nic_remove)(struct efx_nic *efx)) +{ + efx_unregister_netdev(efx); + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP) + rtnl_lock(); + efx_xdp_setup_prog(efx, NULL); + rtnl_unlock(); +#endif + if (efx->type->sriov_fini) + efx->type->sriov_fini(efx); + if (efx->type->vswitching_remove) + efx->type->vswitching_remove(efx); + efx_fini_interrupts(efx); +#ifdef CONFIG_SFC_PTP + efx_ptp_remove_post_io(efx); +#endif + efx->type->remove_port(efx); + nic_remove(efx); + efx_remove_common(efx); +#ifdef CONFIG_DEBUG_FS + mutex_lock(&efx->debugfs_symlink_mutex); + efx_fini_debugfs_netdev(efx->net_dev); + mutex_unlock(&efx->debugfs_symlink_mutex); +#endif +} + +int efx_pci_probe_post_io(struct efx_nic *efx, + int (*nic_probe)(struct efx_nic *efx)) +{ + int rc; + +#ifdef EFX_NOT_UPSTREAM + if (!performance_profile) + efx->performance_profile = EFX_PERFORMANCE_PROFILE_AUTO; + else if (strcmp(performance_profile, "throughput") == 0) + efx->performance_profile = EFX_PERFORMANCE_PROFILE_THROUGHPUT; + else if (strcmp(performance_profile, "latency") == 0) + efx->performance_profile = EFX_PERFORMANCE_PROFILE_LATENCY; + else + efx->performance_profile = EFX_PERFORMANCE_PROFILE_AUTO; +#endif + + rc = efx_probe_common(efx); + if (rc) + return rc; +#ifdef EFX_NOT_UPSTREAM + if (efx->mcdi->fn_flags & + (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_NO_ACTIVE_PORT)) + return 0; +#endif + + pci_dbg(efx->pci_dev, "creating NIC\n"); + +#ifdef EFX_NOT_UPSTREAM +#if IS_MODULE(CONFIG_SFC_DRIVERLINK) + /* Initialise NIC resource information */ + efx->ef10_resources = efx->type->ef10_resources; +#endif +#endif + + /* Carry out hardware-type specific initialisation */ + rc = nic_probe(efx); + if (rc) + return rc; + + efx->txq_min_entries = + roundup_pow_of_two(2 * efx->type->tx_max_skb_descs(efx)); + + /* Initialise the interrupt moderation settings */ + efx->irq_mod_step_us = DIV_ROUND_UP(efx->timer_quantum_ns, 1000); + efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, + irq_adapt_enable, true); + + pci_dbg(efx->pci_dev, "create port\n"); + + /* Connect up MAC/PHY operations table */ + rc = efx->type->probe_port(efx); + if (rc) + return rc; + + /* Initialise MAC address to permanent address */ + eth_hw_addr_set(efx->net_dev, efx->net_dev->perm_addr); + + rc = efx_check_queue_size(efx, &rx_ring, + EFX_RXQ_MIN_ENT, efx_max_dmaq_size(efx), + true); + if (rc == -ERANGE) + pci_warn(efx->pci_dev, + "rx_ring parameter must be between %u and %lu; clamped to %u\n", + EFX_RXQ_MIN_ENT, efx_max_dmaq_size(efx), rx_ring); + else if (rc == -EINVAL) + pci_warn(efx->pci_dev, + "rx_ring parameter must be a power of two; rounded to %u\n", + rx_ring); + efx->rxq_entries = rx_ring; + + rc = efx_check_queue_size(efx, &tx_ring, + 
efx->txq_min_entries, EFX_TXQ_MAX_ENT(efx), + true); + if (rc == -ERANGE) + pci_warn(efx->pci_dev, + "tx_ring parameter must be between %u and %lu; clamped to %u\n", + efx->txq_min_entries, EFX_TXQ_MAX_ENT(efx), tx_ring); + else if (rc == -EINVAL) + pci_warn(efx->pci_dev, + "tx_ring parameter must be a power of two; rounded to %u\n", + tx_ring); + efx->txq_entries = tx_ring; + + rc = efx_ptp_defer_probe_with_channel(efx); + if (rc) + return rc; + + rc = efx_init_interrupts(efx); + if (rc < 0) + return rc; + + /* Update maximum channel count for ethtool */ + efx->max_channels = min_t(u16, efx->max_channels, efx->max_irqs); + efx->max_tx_channels = efx->max_channels; + + rc = efx->type->vswitching_probe(efx); + if (rc) /* not fatal; the PF will still work fine */ + pci_warn(efx->pci_dev, + "failed to setup vswitching rc=%d, VFs may not function\n", + rc); + + if (efx->type->sriov_init) { + rc = efx->type->sriov_init(efx); + if (rc) + pci_err(efx->pci_dev, "SR-IOV can't be enabled rc %d\n", + rc); + } + + return efx_register_netdev(efx); +} + +/* Final NIC shutdown + * This is called only at module unload (or hotplug removal). A PF can call + * this on its VFs to ensure they are unbound first. + */ +static void efx_pci_remove(struct pci_dev *pci_dev) +{ + struct efx_probe_data *probe_data; + struct efx_nic *efx; + + efx = pci_get_drvdata(pci_dev); + if (!efx) + return; + + /* Mark the NIC as fini, then stop the interface */ + rtnl_lock(); + dev_close(efx->net_dev); + + if (!efx_nic_hw_unavailable(efx)) + efx->state = STATE_UNINIT; + +#ifdef EFX_NOT_UPSTREAM +#if IS_MODULE(CONFIG_SFC_DRIVERLINK) + if (efx_dl_supported(efx)) + efx_dl_unregister_nic(&efx->dl_nic); +#endif +#endif + + /* Allow any queued efx_resets() to complete */ + rtnl_unlock(); + efx_flush_reset_workqueue(efx); + +#if defined(CONFIG_SFC_MTD) && defined(EFX_WORKAROUND_87308) + if (efx->mtd_struct) + (void)cancel_delayed_work_sync(&efx->mtd_struct->creation_work); +#endif + + efx_auxbus_unregister(efx); +#if defined(EFX_NOT_UPSTREAM) && defined(EFX_USE_SFC_LRO) + device_remove_file(&efx->pci_dev->dev, &dev_attr_lro); +#endif + device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type); + + efx->type->remove(efx); + +#ifdef CONFIG_SFC_MTD +#ifdef EFX_WORKAROUND_87308 + if (efx->mtd_struct && + atomic_read(&efx->mtd_struct->probed_flag) == 1) + efx_mtd_remove(efx); +#else + efx_mtd_remove(efx); +#endif +#endif + +#ifdef CONFIG_SFC_DUMP + efx_dump_fini(efx); +#endif + + unregister_netdevice_notifier(&efx->netdev_notifier); + + efx_fini_io(efx); + pci_dbg(efx->pci_dev, "shutdown successful\n"); + + efx_fini_struct(efx); + pci_set_drvdata(pci_dev, NULL); + free_netdev(efx->net_dev); + probe_data = container_of(efx, struct efx_probe_data, efx); + kfree(probe_data); +#if defined(EFX_USE_KCOMPAT) && defined(EFX_HAVE_PCI_ENABLE_PCIE_ERROR_REPORTING) + + pci_disable_pcie_error_reporting(pci_dev); +#endif +}; + +/* NIC initialisation + * + * This is called at module load (or hotplug insertion, + * theoretically). It sets up PCI mappings, resets the NIC, + * sets up and registers the network devices with the kernel and hooks + * the interrupt service routine. It does not prepare the device for + * transmission; this is left to the first time one of the network + * interfaces is brought up (i.e. efx_net_open). 
+ */ +static int efx_pci_probe(struct pci_dev *pci_dev, + const struct pci_device_id *entry) +{ + struct efx_probe_data *probe_data, **probe_ptr; + struct net_device *net_dev; + struct efx_nic *efx; + int rc; + + /* Allocate probe data and struct efx_nic */ + probe_data = kzalloc(sizeof(*probe_data), GFP_KERNEL); + if (!probe_data) + return -ENOMEM; + probe_data->pci_dev = pci_dev; + efx = &probe_data->efx; + + /* Allocate and initialise a struct net_device */ + net_dev = alloc_etherdev_mq(sizeof(probe_data), EFX_MAX_CORE_TX_QUEUES); + if (!net_dev) + return -ENOMEM; + probe_ptr = netdev_priv(net_dev); + *probe_ptr = probe_data; + efx->net_dev = net_dev; + efx->type = (const struct efx_nic_type *) entry->driver_data; + + efx_init_features(efx); + + pci_set_drvdata(pci_dev, efx); + SET_NETDEV_DEV(net_dev, &pci_dev->dev); + rc = efx_init_struct(efx, pci_dev); + if (rc) + goto fail; +#ifdef CONFIG_SFC_MTD + if (efx_mtd_init(efx) < 0) + goto fail; +#endif + + pci_info(pci_dev, + "Solarflare NIC detected: device %04x:%04x subsys %04x:%04x\n", + efx->pci_dev->vendor, efx->pci_dev->device, + efx->pci_dev->subsystem_vendor, + efx->pci_dev->subsystem_device); + +#ifdef EFX_NOT_UPSTREAM + efx->xdp_tx = xdp_alloc_tx_resources; +#else + efx->xdp_tx = true; +#endif + + /* Set up basic I/O (BAR mappings etc) */ + rc = efx_init_io(efx, efx->type->mem_bar(efx), efx->type->max_dma_mask, + efx->type->mem_map_size(efx)); + if (rc) + goto fail; + + efx->netdev_notifier.notifier_call = efx_netdev_event; + rc = register_netdevice_notifier(&efx->netdev_notifier); + if (rc) + goto fail; + +#ifdef CONFIG_SFC_DUMP + rc = efx_dump_init(efx); + if (rc) + goto fail; +#endif + + rc = efx->type->probe(efx); + if (rc) + goto fail; + + rc = efx_auxbus_register(efx); + if (rc) + pci_warn(efx->pci_dev, + "Unable to register auxiliary bus driver (%d)\n", rc); + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_SOCK) + efx->tx_queues_per_channel++; +#endif +#ifdef EFX_NOT_UPSTREAM + if (efx->mcdi->fn_flags & + (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_NO_ACTIVE_PORT)) { + netif_dbg(efx, probe, efx->net_dev, + "initialisation successful (no active port)\n"); + return 0; + } +#endif + + rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type); + if (rc) { + netif_err(efx, drv, efx->net_dev, + "failed to init net dev attributes\n"); + goto fail; + } +#if defined(EFX_NOT_UPSTREAM) && defined(EFX_USE_SFC_LRO) + rc = device_create_file(&efx->pci_dev->dev, &dev_attr_lro); + if (rc) { + netif_err(efx, drv, efx->net_dev, + "failed to init net dev attributes\n"); + goto fail; + } +#endif + + netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n"); + if (PCI_FUNC(pci_dev->devfn) == 0) + efx_mcdi_log_puts(efx, "probe"); + +#ifdef CONFIG_SFC_MTD +#ifdef EFX_WORKAROUND_87308 + if (efx->mtd_struct) + schedule_delayed_work(&efx->mtd_struct->creation_work, 5 * HZ); +#else + /* Try to create MTDs, but allow this to fail */ + rtnl_lock(); + rc = efx_mtd_probe(efx); + rtnl_unlock(); +#endif +#endif + +#if defined(EFX_USE_KCOMPAT) && defined(EFX_HAVE_MTD_TABLE) + if (rc == -EBUSY) + netif_warn(efx, probe, efx->net_dev, + "kernel MTD table is full; flash will not be " + "accessible\n"); + else +#endif + if (rc && rc != -EPERM) + netif_warn(efx, probe, efx->net_dev, + "failed to create MTDs (%d)\n", rc); + +#if defined(EFX_USE_KCOMPAT) && defined(EFX_HAVE_PCI_ENABLE_PCIE_ERROR_REPORTING) + (void)pci_enable_pcie_error_reporting(pci_dev); +#endif +#ifdef EFX_NOT_UPSTREAM +#if IS_MODULE(CONFIG_SFC_DRIVERLINK) + efx_dl_probe(efx); + 
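/* Register with the driverlink layer so that driverlink
+	 * clients (e.g. Onload) can attach to this NIC; note that
+	 * registration runs under the RTNL lock.
+	 */
+	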
if (efx_dl_supported(efx)) { + rtnl_lock(); + efx_dl_register_nic(&efx->dl_nic); + rtnl_unlock(); + } +#endif +#endif + + if (efx->type->udp_tnl_push_ports) + efx->type->udp_tnl_push_ports(efx); + + return 0; + +fail: + efx_pci_remove(pci_dev); + return rc; +} + +/* efx_pci_sriov_configure returns the actual number of Virtual Functions enabled + on success*/ + +#ifdef CONFIG_SFC_SRIOV +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_SRIOV_CONFIGURE) || defined(EFX_HAVE_PCI_DRIVER_RH) + +static int efx_pci_sriov_configure(struct pci_dev *dev, + int num_vfs) +{ + int rc; + struct efx_nic *efx = pci_get_drvdata(dev); + + if (efx->type->sriov_configure) { + rc = efx->type->sriov_configure(efx, num_vfs); + if (rc) + return rc; + else + return num_vfs; + } + else + return -ENOSYS; +} +#endif +#endif + +static int efx_pm_freeze(struct device *dev) +{ + struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); + + rtnl_lock(); + +#ifdef EFX_NOT_UPSTREAM +#if IS_MODULE(CONFIG_SFC_DRIVERLINK) + efx_dl_reset_suspend(&efx->dl_nic); +#endif +#endif + + if (efx->state == STATE_NET_UP) { + efx_device_detach_sync(efx); + + efx_stop_all(efx); + efx_disable_interrupts(efx); + } + + if (efx_net_active(efx->state)) { + efx->state = efx_freeze(efx->state); + + efx_mcdi_port_reconfigure(efx); + } + + rtnl_unlock(); + + return 0; +} + +static void efx_pci_shutdown(struct pci_dev *pci_dev) +{ + struct efx_nic *efx = pci_get_drvdata(pci_dev); + + if (!efx) + return; + + efx_pm_freeze(&pci_dev->dev); + pci_disable_device(pci_dev); +} + +static int efx_pm_thaw(struct device *dev) +{ + struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); + int rc; + + rtnl_lock(); + + if (efx->state == (STATE_NET_UP | STATE_FROZEN)) { + rc = efx_enable_interrupts(efx); + if (rc) + goto fail; + + mutex_lock(&efx->mac_lock); + efx_mcdi_port_reconfigure(efx); + mutex_unlock(&efx->mac_lock); + + efx_start_all(efx); + + efx_device_attach_if_not_resetting(efx); + } + + if (efx_frozen(efx->state)) { + efx->state = efx_thaw(efx->state); + + efx_mcdi_port_reconfigure(efx); + + efx->type->resume_wol(efx); + } + +#ifdef EFX_NOT_UPSTREAM +#if IS_MODULE(CONFIG_SFC_DRIVERLINK) + efx_dl_reset_resume(&efx->dl_nic, efx->state != STATE_DISABLED); +#endif +#endif + + rtnl_unlock(); + + /* Reschedule any quenched resets scheduled during efx_pm_freeze() */ + efx_queue_reset_work(efx); + + return 0; + +fail: +#ifdef EFX_NOT_UPSTREAM +#if IS_MODULE(CONFIG_SFC_DRIVERLINK) + efx_dl_reset_resume(&efx->dl_nic, false); +#endif +#endif + + rtnl_unlock(); + + return rc; +} + +static int efx_pm_poweroff(struct device *dev) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + struct efx_nic *efx = pci_get_drvdata(pci_dev); + + if (efx->type->fini) + efx->type->fini(efx); + + efx->reset_pending = 0; + + pci_save_state(pci_dev); + return pci_set_power_state(pci_dev, PCI_D3hot); +} + +/* Used for both resume and restore */ +static int efx_pm_resume(struct device *dev) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + struct efx_nic *efx = pci_get_drvdata(pci_dev); + int rc; + + rc = pci_set_power_state(pci_dev, PCI_D0); + if (rc) + goto fail; + pci_restore_state(pci_dev); + rc = pci_enable_device(pci_dev); + if (rc) + goto fail; + pci_set_master(efx->pci_dev); + rc = efx->type->reset(efx, RESET_TYPE_ALL); + if (rc) + goto fail; + down_write(&efx->filter_sem); + rc = efx->type->init(efx); + up_write(&efx->filter_sem); + if (rc) + goto fail; + rc = efx_pm_thaw(dev); + return rc; + +fail: +#ifdef EFX_NOT_UPSTREAM +#if IS_MODULE(CONFIG_SFC_DRIVERLINK) + 
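/* The resume attempt failed: tell driverlink clients that the
+	 * device did not come back.
+	 */
+	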
efx_dl_reset_resume(&efx->dl_nic, false); +#endif +#endif + return rc; +} + +static int efx_pm_suspend(struct device *dev) +{ + int rc; + + efx_pm_freeze(dev); + rc = efx_pm_poweroff(dev); + if (rc) + efx_pm_resume(dev); + return rc; +} + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_USE_DEV_PM_OPS) + +static const struct dev_pm_ops efx_pm_ops = { + .suspend = efx_pm_suspend, + .resume = efx_pm_resume, + .freeze = efx_pm_freeze, + .thaw = efx_pm_thaw, + .poweroff = efx_pm_poweroff, + .restore = efx_pm_resume, +}; + +#elif defined(EFX_USE_PM_EXT_OPS) + +static struct pm_ext_ops efx_pm_ops = { + .base = { + .suspend = efx_pm_suspend, + .resume = efx_pm_resume, + .freeze = efx_pm_freeze, + .thaw = efx_pm_thaw, + .poweroff = efx_pm_poweroff, + .restore = efx_pm_resume, + } +}; + +#else /* !EFX_USE_DEV_PM_OPS && !EFX_USE_PM_EXT_OPS */ + +static int efx_pm_old_suspend(struct pci_dev *dev, pm_message_t state) +{ + switch (state.event) { + case PM_EVENT_FREEZE: +#if defined(PM_EVENT_QUIESCE) + case PM_EVENT_QUIESCE: +#elif defined(PM_EVENT_PRETHAW) + case PM_EVENT_PRETHAW: +#endif + return efx_pm_freeze(&dev->dev); + default: + return efx_pm_suspend(&dev->dev); + } +} + +static int efx_pm_old_resume(struct pci_dev *dev) +{ + return efx_pm_resume(&dev->dev); +} + +#endif /* EFX_USE_PM_EXT_OPS */ + +#if defined(CONFIG_SFC_SRIOV) && defined(EFX_HAVE_PCI_DRIVER_RH) && !defined(EFX_HAVE_SRIOV_CONFIGURE) +static struct pci_driver_rh efx_pci_driver_rh = { + .sriov_configure = efx_pci_sriov_configure, +}; +#endif + +static struct pci_driver efx_pci_driver = { + .name = KBUILD_MODNAME, + .id_table = efx_pci_table, + .probe = efx_pci_probe, + .remove = efx_pci_remove, +#if !defined(EFX_USE_KCOMPAT) + .driver.pm = &efx_pm_ops, +#elif defined(EFX_USE_DEV_PM_OPS) + /* May need to cast away const */ + .driver.pm = (struct dev_pm_ops *)&efx_pm_ops, +#elif defined(EFX_USE_PM_EXT_OPS) + .pm = &efx_pm_ops, +#else + .suspend = efx_pm_old_suspend, + .resume = efx_pm_old_resume, +#endif + .shutdown = efx_pci_shutdown, + .err_handler = &efx_err_handlers, +#ifdef CONFIG_SFC_SRIOV +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_SRIOV_CONFIGURE) + .sriov_configure = efx_pci_sriov_configure, +#elif defined(EFX_HAVE_PCI_DRIVER_RH) + .rh_reserved = &efx_pci_driver_rh, +#endif +#endif +}; + +const struct net_device_ops efx_netdev_ops = { + .ndo_open = efx_net_open, + .ndo_stop = efx_net_stop, +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_USE_NETDEV_STATS64) + .ndo_get_stats64 = efx_net_stats, +#else + .ndo_get_stats = efx_net_stats, +#endif + .ndo_tx_timeout = efx_watchdog, + .ndo_start_xmit = efx_hard_start_xmit, + .ndo_validate_addr = eth_validate_addr, +#if defined(EFX_NOT_UPSTREAM) && defined(EFX_USE_PRIVATE_IOCTL) +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_NDO_SIOCDEVPRIVATE) + .ndo_siocdevprivate = efx_siocdevprivate, +#endif +#endif +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_NDO_ETH_IOCTL) + .ndo_eth_ioctl = efx_eth_ioctl, +#endif +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_NDO_SIOCDEVPRIVATE) && !defined(EFX_HAVE_NDO_ETH_IOCTL) + .ndo_do_ioctl = efx_ioctl, +#endif +#if defined(EFX_USE_KCOMPAT) && defined(EFX_HAVE_NDO_EXT_CHANGE_MTU) + .extended.ndo_change_mtu = efx_change_mtu, +#else + .ndo_change_mtu = efx_change_mtu, +#endif + .ndo_set_mac_address = efx_set_mac_address, +#if !defined(EFX_USE_KCOMPAT) || !defined(EFX_HAVE_NDO_SET_MULTICAST_LIST) + .ndo_set_rx_mode = efx_set_rx_mode, /* Lookout */ +#else + /* On older kernel versions, set_rx_mode is expected to + * support multiple unicast 
addresses and set_multicast_list + * is expected to support only one. On newer versions the + * IFF_UNICAST_FLT flag distinguishes these. + */ + .ndo_set_multicast_list = efx_set_rx_mode, +#endif +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_NDO_SET_FEATURES) +#if defined(EFX_NOT_UPSTREAM) && defined(EFX_USE_SFC_LRO) + .ndo_fix_features = efx_fix_features, +#endif + .ndo_set_features = efx_set_features, +#endif +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_NDO_FEATURES_CHECK) + .ndo_features_check = efx_features_check, +#endif + .ndo_vlan_rx_add_vid = efx_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = efx_vlan_rx_kill_vid, +#ifdef CONFIG_SFC_SRIOV +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_NDO_SET_VF_MAC) + .ndo_set_vf_mac = efx_sriov_set_vf_mac, +#if defined(EFX_USE_KCOMPAT) && defined(EFX_HAVE_NDO_EXT_SET_VF_VLAN_PROTO) + .extended.ndo_set_vf_vlan = efx_sriov_set_vf_vlan, +#else + .ndo_set_vf_vlan = efx_sriov_set_vf_vlan, +#endif + .ndo_get_vf_config = efx_sriov_get_vf_config, +#endif +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_VF_LINK_STATE) + .ndo_set_vf_link_state = efx_sriov_set_vf_link_state, +#endif +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_NDO_SET_VF_SPOOFCHK) + .ndo_set_vf_spoofchk = efx_sriov_set_vf_spoofchk, +#endif +#endif +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_NDO_GET_PHYS_PORT_ID) + .ndo_get_phys_port_id = efx_get_phys_port_id, +#endif +#if defined(EFX_NOT_UPSTREAM) && defined(EFX_HAVE_VLAN_RX_PATH) + .ndo_vlan_rx_register = efx_vlan_rx_register, +#endif +#if defined(EFX_USE_KCOMPAT) && defined(EFX_WANT_NDO_POLL_CONTROLLER) +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = efx_netpoll, +#endif +#endif +#if defined(EFX_USE_KCOMPAT) && defined(EFX_HAVE_NDO_BUSY_POLL) +#ifdef CONFIG_NET_RX_BUSY_POLL + .ndo_busy_poll = efx_busy_poll, +#endif +#endif +#if !defined(EFX_USE_KCOMPAT) || !defined(EFX_HAVE_NETDEV_RFS_INFO) +#ifdef CONFIG_RFS_ACCEL + .ndo_rx_flow_steer = efx_filter_rfs, +#endif +#endif + +#if defined(EFX_USE_KCOMPAT) +#if defined(EFX_HAVE_NDO_UDP_TUNNEL_ADD) +#if defined(EFX_HAVE_UDP_TUNNEL_NIC_INFO) + .ndo_udp_tunnel_add = udp_tunnel_nic_add_port, + .ndo_udp_tunnel_del = udp_tunnel_nic_del_port, +#else + .ndo_udp_tunnel_add = efx_udp_tunnel_add, + .ndo_udp_tunnel_del = efx_udp_tunnel_del, +#endif +#else +#if defined(EFX_HAVE_NDO_ADD_VXLAN_PORT) + .ndo_add_vxlan_port = efx_vxlan_add_port, + .ndo_del_vxlan_port = efx_vxlan_del_port, +#endif +#if defined(EFX_HAVE_NDO_ADD_GENEVE_PORT) + .ndo_add_geneve_port = efx_geneve_add_port, + .ndo_del_geneve_port = efx_geneve_del_port, +#endif +#endif +#endif +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP) + .ndo_bpf = efx_xdp, +#endif +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_SOCK) +#if defined(CONFIG_XDP_SOCKETS) +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XSK_NEED_WAKEUP) + .ndo_xsk_wakeup = efx_xsk_wakeup, +#else + .ndo_xsk_async_xmit = efx_xsk_async_xmit, +#endif +#endif +#endif +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_REDIR) + .ndo_xdp_xmit = efx_xdp_xmit, +#if defined(EFX_USE_KCOMPAT) && defined(EFX_NEED_XDP_FLUSH) + .ndo_xdp_flush = efx_xdp_flush, +#endif +#endif +#if defined(EFX_USE_KCOMPAT) && defined(EFX_HAVE_NDO_GET_DEVLINK_PORT) +#ifdef CONFIG_NET_DEVLINK + .ndo_get_devlink_port = efx_get_devlink_port, +#endif +#endif + +#if defined(EFX_USE_KCOMPAT) && defined(EFX_HAVE_NDO_SIZE) + .ndo_size = sizeof(struct net_device_ops), +#endif +#if defined(EFX_USE_KCOMPAT) && defined(EFX_HAVE_NDO_SIZE_RH) + .ndo_size_rh = sizeof(struct 
net_device_ops),
+#endif
+};
+
+#if defined(EFX_USE_KCOMPAT) && defined(EFX_HAVE_NET_DEVICE_OPS_EXT)
+const struct net_device_ops_ext efx_net_device_ops_ext = {
+#ifdef EFX_HAVE_EXT_NDO_SET_FEATURES
+#if defined(EFX_NOT_UPSTREAM) && defined(EFX_USE_SFC_LRO)
+	.ndo_fix_features = efx_fix_features,
+#endif
+	.ndo_set_features = efx_set_features,
+#endif
+
+#ifdef EFX_HAVE_NET_DEVICE_OPS_EXT_GET_PHYS_PORT_ID
+	.ndo_get_phys_port_id = efx_get_phys_port_id,
+#endif
+#ifdef CONFIG_SFC_SRIOV
+#ifdef EFX_HAVE_NET_DEVICE_OPS_EXT_SET_VF_SPOOFCHK
+	.ndo_set_vf_spoofchk = efx_sriov_set_vf_spoofchk,
+#endif
+#ifdef EFX_HAVE_NET_DEVICE_OPS_EXT_SET_VF_LINK_STATE
+	.ndo_set_vf_link_state = efx_sriov_set_vf_link_state,
+#endif
+#endif /* CONFIG_SFC_SRIOV */
+};
+#endif
+/**************************************************************************
+ *
+ * Kernel module interface
+ *
+ *************************************************************************/
+
+#ifdef EFX_NOT_UPSTREAM
+
+module_param(rx_irq_mod_usec, uint, 0444);
+MODULE_PARM_DESC(rx_irq_mod_usec,
+		 "Receive interrupt moderation (microseconds)");
+
+module_param(tx_irq_mod_usec, uint, 0444);
+MODULE_PARM_DESC(tx_irq_mod_usec,
+		 "Transmit interrupt moderation (microseconds)");
+
+#endif /* EFX_NOT_UPSTREAM */
+
+static int __init efx_init_module(void)
+{
+	int rc;
+
+#ifdef EFX_NOT_UPSTREAM
+	printk(KERN_INFO "Solarflare NET driver v" EFX_DRIVER_VERSION "\n");
+#else
+	printk(KERN_INFO "Solarflare NET driver\n");
+#endif
+
+	rc = efx_init_debugfs();
+	if (rc)
+		goto err_debugfs;
+
+	rc = efx_create_reset_workqueue();
+	if (rc)
+		goto err_reset;
+
+	rc = efx_channels_init_module();
+	if (rc)
+		goto err_channels_init;
+
+	rc = pci_register_driver(&efx_pci_driver);
+	if (rc < 0) {
+		printk(KERN_ERR "pci_register_driver failed, rc=%d\n", rc);
+		goto err_pci;
+	}
+
+	rc = pci_register_driver(&ef100_pci_driver);
+	if (rc < 0) {
+		printk(KERN_ERR "pci_register_driver (ef100) failed, rc=%d\n", rc);
+		goto err_pci_ef100;
+	}
+
+	return 0;
+
+ err_pci_ef100:
+	pci_unregister_driver(&efx_pci_driver);
+ err_pci:
+	efx_channels_fini_module();
+ err_channels_init:
+	efx_destroy_reset_workqueue();
+ err_reset:
+	efx_fini_debugfs();
+ err_debugfs:
+	return rc;
+}
+
+static void __exit efx_exit_module(void)
+{
+	printk(KERN_INFO "Solarflare NET driver unloading\n");
+
+	pci_unregister_driver(&ef100_pci_driver);
+	pci_unregister_driver(&efx_pci_driver);
+	efx_channels_fini_module();
+	efx_destroy_reset_workqueue();
+	efx_fini_debugfs();
+}
+
+module_init(efx_init_module);
+module_exit(efx_exit_module);
+
+MODULE_AUTHOR("Solarflare Communications and "
+	      "Michael Brown <mbrown@fensystems.co.uk>");
+MODULE_DESCRIPTION("Solarflare network driver");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, sfc_pci_table);
+#ifdef EFX_NOT_UPSTREAM
+MODULE_VERSION(EFX_DRIVER_VERSION);
+#endif
diff --git a/src/drivers/net/ethernet/sfc/efx.h b/src/drivers/net/ethernet/sfc/efx.h
new file mode 100644
index 0000000..ffee9b5
--- /dev/null
+++ b/src/drivers/net/ethernet/sfc/efx.h
@@ -0,0 +1,429 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2017 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */ + +#ifndef EFX_EFX_H +#define EFX_EFX_H + +#include "net_driver.h" +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_INDIRECT_CALL_WRAPPERS) +#include +#endif +#include "ef100_rx.h" +#include "ef100_tx.h" +#include "filter.h" +#include "workarounds.h" +#include "efx_common.h" + +/* netdevice_ops */ +int efx_net_open(struct net_device *net_dev); +int efx_net_stop(struct net_device *net_dev); +int efx_change_mtu(struct net_device *net_dev, int new_mtu); +int __efx_net_alloc(struct efx_nic *efx); +void __efx_net_dealloc(struct efx_nic *efx); + +static inline int efx_net_alloc(struct efx_nic *efx) +{ + return efx->type->net_alloc(efx); +} + +static inline void efx_net_dealloc(struct efx_nic *efx) +{ + efx->type->net_dealloc(efx); +} + +#if defined(EFX_NOT_UPSTREAM) && defined(EFX_HAVE_VLAN_RX_PATH) +void efx_vlan_rx_register(struct net_device *dev, struct vlan_group *vlan_group); +#endif + +int efx_pci_probe_post_io(struct efx_nic *efx, + int (*nic_probe)(struct efx_nic *efx)); +void efx_pci_remove_post_io(struct efx_nic *efx, + void (*nic_remove)(struct efx_nic *efx)); + +/* TX */ +netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb, + struct net_device *net_dev); +int __efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb); + +static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) +{ + return INDIRECT_CALL_2(tx_queue->efx->type->tx_enqueue, + ef100_enqueue_skb, __efx_enqueue_skb, + tx_queue, skb); +} + +void efx_xmit_done_single(struct efx_tx_queue *tx_queue); +extern unsigned int efx_piobuf_size; +extern bool separate_tx_channels; + +/* RX */ +void efx_set_default_rx_indir_table(struct efx_nic *efx, + struct efx_rss_context *ctx); +void __efx_rx_packet(struct efx_rx_queue *rx_queue); +void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, + unsigned int n_frags, unsigned int len, u16 flags); + +static inline void efx_rx_flush_packet(struct efx_rx_queue *rx_queue) +{ + if (rx_queue->rx_pkt_n_frags) + INDIRECT_CALL_2(rx_queue->efx->type->rx_packet, + __ef100_rx_packet, __efx_rx_packet, + rx_queue); +} + +static inline bool efx_rx_buf_hash_valid(struct efx_nic *efx, const u8 *prefix) +{ + if (efx->type->rx_buf_hash_valid) + return INDIRECT_CALL_1(efx->type->rx_buf_hash_valid, + ef100_rx_buf_hash_valid, + prefix); + return true; +} + +#define EFX_MAX_DMAQ_SIZE 4096UL +#define EFX_DEFAULT_RX_DMAQ_SIZE 1024UL +#define EFX_DEFAULT_TX_DMAQ_SIZE 1024UL +#define EFX_MIN_DMAQ_SIZE 512UL + +#define EFX_MAX_EVQ_SIZE 16384UL +#define EFX_MIN_EVQ_SIZE 512UL +#ifdef EFX_NOT_UPSTREAM +#if IS_MODULE(CONFIG_SFC_DRIVERLINK) +/* Additional event queue entries to add on channel zero for driverlink. */ +#define EFX_EVQ_DL_EXTRA_ENTRIES 7936UL +#endif +#endif + +static inline unsigned long efx_min_dmaq_size(struct efx_nic *efx) +{ + return (efx->supported_bitmap ? + (1 << (ffs(efx->supported_bitmap) - 1)) : EFX_MIN_DMAQ_SIZE); +} + +static inline unsigned long efx_max_dmaq_size(struct efx_nic *efx) +{ + return (efx->supported_bitmap ? + (1 << (fls(efx->supported_bitmap) - 1)) : EFX_MAX_DMAQ_SIZE); +} + +static inline unsigned long efx_min_evtq_size(struct efx_nic *efx) +{ + return (efx->supported_bitmap ? + (1 << (ffs(efx->supported_bitmap) - 1)) : EFX_MIN_EVQ_SIZE); +} + +static inline unsigned long efx_max_evtq_size(struct efx_nic *efx) +{ + return (efx->supported_bitmap ? 
+ (1 << (fls(efx->supported_bitmap) - 1)) : EFX_MAX_EVQ_SIZE); +} + +/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */ +#define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \ + EFX_RX_USR_BUF_SIZE) + +/* Maximum number of TCP segments we support for soft-TSO */ +#define EFX_TSO_MAX_SEGS 100 + +/* The smallest rxq_entries that the driver supports. Somewhat arbitrary. + */ +#define EFX_RXQ_MIN_ENT 16U + +/* All EF10 architecture NICs steal one bit of the DMAQ size for various + * other purposes when counting TxQ entries, so we halve the queue size. + */ +#define EFX_TXQ_MAX_ENT(efx) (EFX_WORKAROUND_EF10(efx) ? \ + efx_max_dmaq_size(efx) / 2 : \ + efx_max_dmaq_size(efx)) + +#ifdef EFX_NOT_UPSTREAM +/* PCIe link bandwidth measure: + * bw = (width << (speed - 1)) + */ +#define EFX_BW_PCIE_GEN1_X8 (8 << (1 - 1)) +#define EFX_BW_PCIE_GEN2_X8 (8 << (2 - 1)) +#define EFX_BW_PCIE_GEN3_X8 (8 << (3 - 1)) +#define EFX_BW_PCIE_GEN3_X16 (16 << (3 - 1)) +#endif + +static inline bool efx_rss_enabled(struct efx_nic *efx) +{ + return efx->n_rss_channels > 1; +} + +#if defined(EFX_NOT_UPSTREAM) && defined(EFX_USE_SFC_LRO) + +static inline bool efx_ssr_enabled(struct efx_nic *efx) +{ +#ifdef NETIF_F_LRO + return !!(efx->net_dev->features & NETIF_F_LRO); +#else + return efx->lro_enabled; +#endif +} + +int efx_ssr_init(struct efx_rx_queue *rx_queue, struct efx_nic *efx); +void efx_ssr_fini(struct efx_rx_queue *rx_queue); +void __efx_ssr_end_of_burst(struct efx_rx_queue *rx_queue); +void efx_ssr(struct efx_rx_queue *rx_queue, struct efx_rx_buffer *rx_buf, u8 *eh); + +static inline void efx_ssr_end_of_burst(struct efx_rx_queue *rx_queue) +{ + if (rx_queue->ssr.conns && !list_empty(&rx_queue->ssr.active_conns)) + __efx_ssr_end_of_burst(rx_queue); +} + +#endif /* EFX_USE_SFC_LRO */ + +/* Filters */ +/** + * efx_filter_insert_filter - add or replace a filter + * @efx: NIC in which to insert the filter + * @spec: Specification for the filter + * @replace_equal: Flag for whether the specified filter may replace an + * existing filter with equal priority + * + * If existing filters have equal match values to the new filter spec, + * then the new filter might replace them or the function might fail, + * as follows. + * + * 1. If the existing filters have lower priority, or @replace_equal + * is set and they have equal priority, replace them. + * + * 2. If the existing filters have higher priority, return -%EPERM. + * + * 3. If !efx_filter_is_mc_recipient(@spec), or the NIC does not + * support delivery to multiple recipients, return -%EEXIST. + * + * This implies that filters for multiple multicast recipients must + * all be inserted with the same priority and @replace_equal = %false. + * + * Return: + * * On success, return the filter ID. + * * On failure, return a negative error code. + */ +static inline s32 efx_filter_insert_filter(struct efx_nic *efx, + const struct efx_filter_spec *spec, + bool replace_equal) +{ + return efx->type->filter_insert(efx, spec, replace_equal); +} + +/** + * efx_filter_remove_id_safe - remove a filter by ID, carefully + * @efx: NIC from which to remove the filter + * @priority: Priority of filter, as passed to @efx_filter_insert_filter + * @filter_id: ID of filter, as returned by @efx_filter_insert_filter + * + * This function will range-check @filter_id, so it is safe to call + * with a value passed from userland. + * + * Return: a negative error code or 0 on success. 
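+ *
+ * A purely illustrative caller (a sketch, not driver code; it assumes a
+ * TCP/IPv4 local-address filter with @ip and @port already in network
+ * byte order, and elides error handling) might pair this with
+ * efx_filter_insert_filter():
+ *
+ *	struct efx_filter_spec spec;
+ *	s32 id;
+ *
+ *	efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0, rxq_index);
+ *	efx_filter_set_ipv4_local(&spec, IPPROTO_TCP, ip, port);
+ *	id = efx_filter_insert_filter(efx, &spec, false);
+ *	if (id >= 0)
+ *		efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_MANUAL, id);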
+ */ +static inline int efx_filter_remove_id_safe(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 filter_id) +{ + return efx->type->filter_remove_safe(efx, priority, filter_id); +} + +/** + * efx_filter_get_filter_safe - retrieve a filter by ID, carefully + * @efx: NIC from which to remove the filter + * @priority: Priority of filter, as passed to @efx_filter_insert_filter + * @filter_id: ID of filter, as returned by @efx_filter_insert_filter + * @spec: Buffer in which to store filter specification + * + * This function will range-check @filter_id, so it is safe to call + * with a value passed from userland. + * + * Return: a negative error code or 0 on success. + */ +static inline int +efx_filter_get_filter_safe(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 filter_id, struct efx_filter_spec *spec) +{ + return efx->type->filter_get_safe(efx, priority, filter_id, spec); +} + +static inline u32 efx_filter_count_rx_used(struct efx_nic *efx, + enum efx_filter_priority priority) +{ + return efx->type->filter_count_rx_used(efx, priority); +} +static inline u32 efx_filter_get_rx_id_limit(struct efx_nic *efx) +{ + return efx->type->filter_get_rx_id_limit(efx); +} +static inline s32 efx_filter_get_rx_ids(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 *buf, u32 size) +{ + return efx->type->filter_get_rx_ids(efx, priority, buf, size); +} +#ifdef CONFIG_RFS_ACCEL +int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, + u16 rxq_index, u32 flow_id); +bool __efx_filter_rfs_expire(struct efx_channel *channel, unsigned int quota); +static inline void efx_filter_rfs_expire(struct work_struct *data) +{ + struct delayed_work *dwork = to_delayed_work(data); + struct efx_channel *channel; + unsigned int time, quota; + + channel = container_of(dwork, struct efx_channel, filter_work); + time = jiffies - channel->rfs_last_expiry; + quota = channel->rfs_filter_count * time / (30 * HZ); + if (quota >= 20 && __efx_filter_rfs_expire(channel, min(channel->rfs_filter_count, quota))) + channel->rfs_last_expiry += time; + /* Ensure we do more work eventually even if NAPI poll is not happening */ + schedule_delayed_work(dwork, 30 * HZ); +} +#define efx_filter_rfs_enabled() 1 +#else +static inline void efx_filter_rfs_expire(struct work_struct *data) {} +#define efx_filter_rfs_enabled() 0 +#endif + +/* RSS contexts */ +struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx); +struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id); +void efx_free_rss_context_entry(struct efx_rss_context *ctx); +static inline bool efx_rss_active(struct efx_rss_context *ctx) +{ + return ctx->context_id != EFX_MCDI_RSS_CONTEXT_INVALID; +} + +/* Ethtool support */ +#ifdef EFX_USE_KCOMPAT +int efx_ethtool_get_rxnfc(struct net_device *net_dev, + struct efx_ethtool_rxnfc *info, u32 *rules); +int efx_ethtool_set_rxnfc(struct net_device *net_dev, + struct efx_ethtool_rxnfc *info); +#else +int efx_ethtool_set_rxnfc(struct net_device *net_dev, + struct ethtool_rxnfc *info); +#endif +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_ETHTOOL_RXFH_INDIR) +int efx_ethtool_old_get_rxfh_indir(struct net_device *net_dev, + struct ethtool_rxfh_indir *indir); +int efx_ethtool_old_set_rxfh_indir(struct net_device *net_dev, + const struct ethtool_rxfh_indir *indir); +#endif +#ifdef CONFIG_SFC_DUMP +struct ethtool_dump; +int efx_ethtool_get_dump_flag(struct net_device *net_dev, + struct ethtool_dump *dump); +int efx_ethtool_get_dump_data(struct 
net_device *net_dev, + struct ethtool_dump *dump, void *buffer); +int efx_ethtool_set_dump(struct net_device *net_dev, struct ethtool_dump *val); +#endif +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_ETHTOOL_GET_TS_INFO) && !defined(EFX_HAVE_ETHTOOL_EXT_GET_TS_INFO) +int efx_ethtool_get_ts_info(struct net_device *net_dev, + struct ethtool_ts_info *ts_info); +#endif +extern const struct ethtool_ops efx_ethtool_ops; +#if defined(EFX_USE_KCOMPAT) && defined(EFX_USE_ETHTOOL_OPS_EXT) +extern const struct ethtool_ops_ext efx_ethtool_ops_ext; +#endif + +/* Global */ +unsigned int efx_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs); +unsigned int efx_ticks_to_usecs(struct efx_nic *efx, unsigned int ticks); +int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs, + unsigned int rx_usecs, bool rx_adaptive, + bool rx_may_override_tx); +void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs, + unsigned int *rx_usecs, bool *rx_adaptive); +#ifdef EFX_NOT_UPSTREAM +extern int efx_target_num_vis; +#endif + +/* Update the generic software stats in the passed stats array */ +void efx_update_sw_stats(struct efx_nic *efx, u64 *stats); + +/* MTD */ +#ifdef CONFIG_SFC_MTD +extern bool efx_allow_nvconfig_writes; +int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts, + size_t n_parts); +static inline int efx_mtd_probe(struct efx_nic *efx) +{ + return efx->type->mtd_probe(efx); +} +int efx_mtd_init(struct efx_nic *efx); +void efx_mtd_free(struct efx_nic *efx); +void efx_mtd_rename(struct efx_nic *efx); +void efx_mtd_remove(struct efx_nic *efx); +#ifdef EFX_WORKAROUND_87308 +void efx_mtd_creation_work(struct work_struct *data); +#endif +#else +static inline int efx_mtd_probe(struct efx_nic *efx) { return 0; } +static inline void efx_mtd_rename(struct efx_nic *efx) {} +static inline void efx_mtd_remove(struct efx_nic *efx) {} +#endif + +static inline void efx_schedule_channel(struct efx_channel *channel) +{ + netif_vdbg(channel->efx, intr, channel->efx->net_dev, + "channel %d scheduling NAPI poll on CPU%d\n", + channel->channel, raw_smp_processor_id()); + + napi_schedule(&channel->napi_str); +} + +static inline void efx_schedule_channel_irq(struct efx_channel *channel) +{ + channel->event_test_cpu = raw_smp_processor_id(); + efx_schedule_channel(channel); +} + +static inline void efx_device_detach_sync(struct efx_nic *efx) +{ + struct net_device *dev = efx->net_dev; + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_TC_OFFLOAD) + /* We must stop reps (which use our TX) before we stop ourselves. */ + if (efx->type->detach_reps) + efx->type->detach_reps(efx); +#endif + /* Lock/freeze all TX queues so that we can be sure the + * TX scheduler is stopped when we're done and before + * netif_device_present() becomes false. 
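+ * The reverse path is efx_device_attach_if_not_resetting() below,
+ * which re-attaches the device (and any representors) once no reset
+ * is pending.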
+ */ + netif_tx_lock_bh(dev); + netif_device_detach(dev); + netif_tx_unlock_bh(dev); +} + +static inline void efx_device_attach_if_not_resetting(struct efx_nic *efx) +{ + if ((efx->state != STATE_DISABLED) && !efx->reset_pending) { + netif_device_attach(efx->net_dev); +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_TC_OFFLOAD) + if (efx->type->attach_reps && efx->state == STATE_NET_UP) + efx->type->attach_reps(efx); +#endif + } +} + +static inline void efx_rwsem_assert_write_locked(struct rw_semaphore *sem) +{ +#ifdef DEBUG + if (down_read_trylock(sem)) { + up_read(sem); + WARN_ON(1); + } +#endif +} + +#endif /* EFX_EFX_H */ diff --git a/src/drivers/net/ethernet/sfc/efx_auxbus.c b/src/drivers/net/ethernet/sfc/efx_auxbus.c new file mode 100644 index 0000000..a0a872f --- /dev/null +++ b/src/drivers/net/ethernet/sfc/efx_auxbus.c @@ -0,0 +1,203 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Driver for Solarflare and Xilinx network controllers and boards + * Copyright 2019 Solarflare Communications Inc. + * Copyright 2019-2020 Xilinx Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#include "net_driver.h" +#include "nic.h" +#include +#include +#include "efx_auxbus.h" + +static DEFINE_IDA(efx_auxbus_ida); + +struct sfc_rdma_dev { + struct sfc_rdma_device dev; /* Must be first, drivers use this */ + struct list_head clients; + struct efx_nic *efx; +}; + +struct sfc_rdma_client { + struct sfc_rdma_dev *rdev; + const struct sfc_rdma_drvops *ops; + struct list_head list; +}; + +static struct efx_nic *rdma_client_to_efx(struct sfc_rdma_client *client) +{ + if (client && client->rdev) + return client->rdev->efx; + return NULL; +} + +static struct device *rdma_client_to_dev(struct sfc_rdma_client *client) +{ + if (client && client->rdev) + return &client->rdev->dev.auxdev.dev; + return NULL; +} + +static struct sfc_rdma_client *efx_rdma_open(struct auxiliary_device *auxdev, + const struct sfc_rdma_drvops *ops) +{ + struct sfc_rdma_client *client; + + if (!auxdev || !ops || !ops->handle_event) + return ERR_PTR(-EINVAL); + + client = kmalloc(sizeof(*client), GFP_KERNEL); + if (!client) + return ERR_PTR(-ENOMEM); + + client->rdev = container_of(auxdev, struct sfc_rdma_dev, dev.auxdev); + client->ops = ops; + list_add(&client->list, &client->rdev->clients); + return client; +} + +static int efx_rdma_close(struct sfc_rdma_client *client) +{ + if (!client) + return -EINVAL; + + list_del(&client->list); + kfree(client); + return 0; +} + +static int efx_rdma_get_param(struct sfc_rdma_client *client, + enum sfc_rdma_param param, + struct sfc_rdma_param_value *data) +{ + struct efx_nic *efx = rdma_client_to_efx(client); + int rc; + + if (!efx) + return -ENODEV; + + switch (param) { + case SFC_RDMA_NETDEV: + data->net_dev = efx->net_dev; + rc = 0; + break; + default: + dev_info(rdma_client_to_dev(client), + "Unknown parameter %u\n", param); + rc = -EOPNOTSUPP; + } + return rc; +} + +static int efx_rdma_fw_rpc(struct sfc_rdma_client *client, + struct sfc_rdma_rpc *rpc) +{ + struct efx_nic *efx = rdma_client_to_efx(client); + int rc; + + if (!efx) + return -ENODEV; + + rc = efx_mcdi_rpc_quiet(efx, rpc->cmd, + (const efx_dword_t *) rpc->inbuf, rpc->inlen, + (efx_dword_t *) rpc->outbuf, rpc->outlen, + rpc->outlen_actual); + return rc; +} + +static void efx_rdma_send_event(struct sfc_rdma_client *client, + enum sfc_event_type 
type, u64 value) +{ + struct sfc_rdma_event ev; + + ev.type = type; + ev.value = value; + (*client->ops->handle_event)(&client->rdev->dev.auxdev, &ev); +} + +static const struct sfc_rdma_devops rdma_devops = { + .open = efx_rdma_open, + .close = efx_rdma_close, + .get_param = efx_rdma_get_param, + .fw_rpc = efx_rdma_fw_rpc, +}; + +static void efx_auxbus_release(struct device *dev) +{ + struct auxiliary_device *auxdev = to_auxiliary_dev(dev); + struct sfc_rdma_dev *rdev; + + ida_free(&efx_auxbus_ida, auxdev->id); + rdev = container_of(auxdev, struct sfc_rdma_dev, dev.auxdev); + rdev->efx = NULL; + kfree(rdev); +} + +void efx_auxbus_unregister(struct efx_nic *efx) +{ + struct sfc_rdma_dev *rdev = efx->rdev; + struct sfc_rdma_client *client, *temp; + struct auxiliary_device *auxdev; + + if (!rdev) + return; + + /* Disconnect all users */ + list_for_each_entry_safe(client, temp, &rdev->clients, list) { + efx_rdma_send_event(client, SFC_EVENT_UNREGISTER, 0); + (void) efx_rdma_close(client); + } + + efx->rdev = NULL; + auxdev = &rdev->dev.auxdev; + auxiliary_device_delete(auxdev); + auxiliary_device_uninit(auxdev); +} + +int efx_auxbus_register(struct efx_nic *efx) +{ + struct auxiliary_device *auxdev; + struct sfc_rdma_dev *rdev; + int rc; + + rdev = kzalloc(sizeof(*rdev), GFP_KERNEL); + if (!rdev) + return -ENOMEM; + auxdev = &rdev->dev.auxdev; + + rc = ida_alloc(&efx_auxbus_ida, GFP_KERNEL); + if (rc < 0) { + kfree(rdev); + return rc; + } + auxdev->id = rc; + + auxdev->name = SFC_RDMA_DEVNAME; + auxdev->dev.release = efx_auxbus_release; + auxdev->dev.parent = &efx->pci_dev->dev; + rdev->dev.ops = &rdma_devops; + INIT_LIST_HEAD(&rdev->clients); + + rc = auxiliary_device_init(auxdev); + if (rc) + goto fail; + + rc = auxiliary_device_add(auxdev); + if (rc) { + auxiliary_device_uninit(auxdev); + goto fail; + } + + rdev->efx = efx; + efx->rdev = rdev; + return 0; +fail: + ida_free(&efx_auxbus_ida, auxdev->id); + kfree(rdev); + return rc; +} diff --git a/src/drivers/net/ethernet/sfc/efx_auxbus.h b/src/drivers/net/ethernet/sfc/efx_auxbus.h new file mode 100644 index 0000000..46769c8 --- /dev/null +++ b/src/drivers/net/ethernet/sfc/efx_auxbus.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Driver for Solarflare and Xilinx network controllers and boards + * Copyright 2019 Solarflare Communications Inc. + * Copyright 2019-2020 Xilinx Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#ifndef EFX_AUXBUS_H +#define EFX_AUXBUS_H + +/* Driver API */ +#ifdef CONFIG_AUXILIARY_BUS +int efx_auxbus_register(struct efx_nic *efx); +void efx_auxbus_unregister(struct efx_nic *efx); +#else +static inline int efx_auxbus_register(struct efx_nic *efx) +{ + return 0; +} + +static inline void efx_auxbus_unregister(struct efx_nic *efx) {} +#endif +#endif diff --git a/src/drivers/net/ethernet/sfc/efx_channels.c b/src/drivers/net/ethernet/sfc/efx_channels.c new file mode 100644 index 0000000..7dc9294 --- /dev/null +++ b/src/drivers/net/ethernet/sfc/efx_channels.c @@ -0,0 +1,2471 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2019 Solarflare Communications Inc. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#include "net_driver.h" +#include +#include +#ifndef EFX_USE_KCOMPAT +#include +#endif +#include "efx_channels.h" +#include "efx.h" +#include "efx_common.h" +#include "rx_common.h" +#include "tx_common.h" +#include "nic.h" +#include "sriov.h" +#include "mcdi_functions.h" +#include "mcdi_filters.h" +#include "debugfs.h" + +/* Interrupt mode names (see INT_MODE())) */ +const unsigned int efx_interrupt_mode_max = EFX_INT_MODE_MAX; +const char *const efx_interrupt_mode_names[] = { + [EFX_INT_MODE_MSIX] = "MSI-X", + [EFX_INT_MODE_MSI] = "MSI", +#if defined(EFX_NOT_UPSTREAM) && defined(CONFIG_SFC_BUSYPOLL) + [EFX_INT_MODE_POLLED] = "busy-poll", +#endif +}; + + +/* + * Use separate channels for TX and RX events + * + * Set this to 1 to use separate channels for TX and RX. It allows us + * to control interrupt affinity separately for TX and RX. + * + * This is only used by sfc.ko in MSI-X interrupt mode + */ +bool separate_tx_channels; + +/* This is the first interrupt mode to try out of: + * 0 => MSI-X + * 1 => MSI + */ +unsigned int efx_interrupt_mode; + +#if defined(EFX_NOT_UPSTREAM) +#define HAVE_EFX_NUM_PACKAGES +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_EXPORTED_CPU_SIBLING_MAP) +#define HAVE_EFX_NUM_CORES +#endif + +/* This is the requested number of CPUs to use for Receive-Side Scaling + * (RSS), i.e. the number of CPUs among which we may distribute + * simultaneous interrupt handling. Or alternatively it may be set to + * "packages", "cores", "hyperthreads", "numa_local_cores" or + * "numa_local_hyperthreads" to get one receive channel per package, core, + * hyperthread, numa local core or numa local hyperthread. The default + * is "cores". + * + * Systems without MSI-X will only target one CPU via legacy or MSI + * interrupt. + */ +static char *efx_rss_cpus_str; +module_param_named(rss_cpus, efx_rss_cpus_str, charp, 0444); +MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling, or 'packages', 'cores', 'hyperthreads', 'numa_local_cores' or 'numa_local_hyperthreads'"); + +#if defined(HAVE_EFX_NUM_PACKAGES) +static bool rss_numa_local = true; +module_param(rss_numa_local, bool, 0444); +MODULE_PARM_DESC(rss_numa_local, "Restrict RSS to CPUs on the local NUMA node"); +#endif + +static enum efx_rss_mode rss_mode; +#else +/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS), + * i.e. the number of CPUs among which we may distribute simultaneous + * interrupt handling. + * + * Cards without MSI-X will only target one CPU via legacy or MSI interrupt. + * The default (0) means to assign an interrupt to each core. + */ +unsigned int rss_cpus; +#endif + +static unsigned int irq_adapt_low_thresh = 8000; +module_param(irq_adapt_low_thresh, uint, 0644); +MODULE_PARM_DESC(irq_adapt_low_thresh, + "Threshold score for reducing IRQ moderation"); + +static unsigned int irq_adapt_high_thresh = 16000; +module_param(irq_adapt_high_thresh, uint, 0644); +MODULE_PARM_DESC(irq_adapt_high_thresh, + "Threshold score for increasing IRQ moderation"); + +static unsigned int irq_adapt_irqs = 1000; +module_param(irq_adapt_irqs, uint, 0644); +MODULE_PARM_DESC(irq_adapt_irqs, + "Number of IRQs per IRQ moderation adaptation"); + +/* This is the weight assigned to each of the (per-channel) virtual + * NAPI devices. 
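+ * The default is NAPI_POLL_WEIGHT (normally 64), i.e. the budget of
+ * packets a channel may process per poll; EFX_NOT_UPSTREAM builds can
+ * override it with the napi_weight module parameter below.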
+ */ +static int napi_weight = NAPI_POLL_WEIGHT; +#ifdef EFX_NOT_UPSTREAM +module_param(napi_weight, int, 0444); +MODULE_PARM_DESC(napi_weight, "NAPI weighting"); +#endif + +static const struct efx_channel_type efx_default_channel_type; +static void efx_remove_channel(struct efx_channel *channel); +#ifdef EFX_NOT_UPSTREAM +static void efx_schedule_all_channels(struct work_struct *data); +#endif +static int efx_soft_enable_interrupts(struct efx_nic *efx); +static void efx_soft_disable_interrupts(struct efx_nic *efx); +static int efx_init_napi_channel(struct efx_channel *channel); +static void efx_fini_napi_channel(struct efx_channel *channel); + +/************* + * INTERRUPTS + *************/ + +#if defined(HAVE_EFX_NUM_CORES) +static unsigned int count_online_cores(struct efx_nic *efx, bool local_node) +{ + cpumask_var_t filter_mask; + unsigned int count; + int cpu; + + if (unlikely(!zalloc_cpumask_var(&filter_mask, GFP_KERNEL))) { + netif_warn(efx, probe, efx->net_dev, + "RSS disabled due to allocation failure\n"); + return 1; + } + + cpumask_copy(filter_mask, cpu_online_mask); + if (local_node) + cpumask_and(filter_mask, filter_mask, + cpumask_of_pcibus(efx->pci_dev->bus)); + + count = 0; + for_each_cpu(cpu, filter_mask) { + ++count; + cpumask_andnot(filter_mask, filter_mask, topology_sibling_cpumask(cpu)); + } + + free_cpumask_var(filter_mask); + + return count; +} +#endif + +#ifdef HAVE_EFX_NUM_PACKAGES +/* Count the number of unique packages in the given cpumask */ +static unsigned int efx_num_packages(const cpumask_t *in) +{ + cpumask_var_t core_mask; + unsigned int count; + int cpu, cpu2; + + if (unlikely(!zalloc_cpumask_var(&core_mask, GFP_KERNEL))) { + printk(KERN_WARNING + "sfc: RSS disabled due to allocation failure\n"); + return 1; + } + + count = 0; + for_each_cpu(cpu, in) { + if (!cpumask_test_cpu(cpu, core_mask)) { + ++count; + + /* Treat each numa node as a separate package */ + for_each_cpu(cpu2, topology_core_cpumask(cpu)) { + if (cpu_to_node(cpu) == cpu_to_node(cpu2)) + cpumask_set_cpu(cpu2, core_mask); + } + } + } + + free_cpumask_var(core_mask); + + return count; +} +#endif + +static unsigned int efx_wanted_parallelism(struct efx_nic *efx) +{ +#if defined(EFX_NOT_UPSTREAM) + static unsigned int n_rxq; + bool selected = false; +#else + unsigned int n_rxq; + enum efx_rss_mode rss_mode; +#endif + +#if defined(EFX_NOT_UPSTREAM) + if (n_rxq) + return n_rxq; + rss_mode = EFX_RSS_CORES; + + if (!efx_rss_cpus_str) { + /* Leave at default. 
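+ * rss_mode stays EFX_RSS_CORES, the documented default of one RSS
+ * channel per physical core. An illustrative override would be
+ * loading the module with "modprobe sfc rss_cpus=numa_local_cores".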
*/ + } else if (strcmp(efx_rss_cpus_str, "packages") == 0) { + rss_mode = EFX_RSS_PACKAGES; + selected = true; + } else if (strcmp(efx_rss_cpus_str, "cores") == 0) { + rss_mode = EFX_RSS_CORES; + selected = true; + } else if (strcmp(efx_rss_cpus_str, "hyperthreads") == 0) { + rss_mode = EFX_RSS_HYPERTHREADS; + selected = true; + } else if (strcmp(efx_rss_cpus_str, "numa_local_cores") == 0) { + rss_mode = EFX_RSS_NUMA_LOCAL_CORES; + selected = true; + } else if (strcmp(efx_rss_cpus_str, "numa_local_hyperthreads") == 0) { + rss_mode = EFX_RSS_NUMA_LOCAL_HYPERTHREADS; + selected = true; + } else if (sscanf(efx_rss_cpus_str, "%u", &n_rxq) == 1 && n_rxq > 0) { + rss_mode = EFX_RSS_CUSTOM; + selected = true; + } else { + pci_err(efx->pci_dev, + "Bad value for module parameter rss_cpus='%s'\n", + efx_rss_cpus_str); + } +#else + if (rss_cpus) { + rss_mode = EFX_RSS_CUSTOM; + n_rxq = rss_cpus; + } else { + rss_mode = EFX_RSS_CORES; + } +#endif + + switch (rss_mode) { +#if defined(HAVE_EFX_NUM_PACKAGES) + case EFX_RSS_PACKAGES: + if (xen_domain()) { + pci_warn(efx->pci_dev, + "Unable to determine CPU topology on Xen reliably. Using 4 rss channels.\n"); + n_rxq = 4; + } else { + pci_dbg(efx->pci_dev, "using efx_num_packages()\n"); + n_rxq = efx_num_packages(cpu_online_mask); + /* Create two RSS queues even with a single package */ + if (n_rxq == 1) + n_rxq = 2; + } + break; +#endif +#if defined(HAVE_EFX_NUM_CORES) + case EFX_RSS_CORES: + if (xen_domain()) { + pci_warn(efx->pci_dev, + "Unable to determine CPU topology on Xen reliably. Assuming hyperthreading enabled.\n"); + n_rxq = max_t(int, 1, num_online_cpus() / 2); + } else { + pci_dbg(efx->pci_dev, "using count_online_cores()\n"); + n_rxq = count_online_cores(efx, false); + } + break; +#endif + case EFX_RSS_HYPERTHREADS: + n_rxq = num_online_cpus(); + break; +#if defined(HAVE_EFX_NUM_CORES) + case EFX_RSS_NUMA_LOCAL_CORES: + if (xen_domain()) { + pci_warn(efx->pci_dev, + "Unable to determine CPU topology on Xen reliably. Creating rss channels for half of cores/hyperthreads.\n"); + n_rxq = max_t(int, 1, num_online_cpus() / 2); + } else { + n_rxq = count_online_cores(efx, true); + + /* If no online CPUs in local node, fallback to all + * online CPUs. + */ + if (n_rxq == 0) + n_rxq = count_online_cores(efx, false); + } + break; +#endif + case EFX_RSS_NUMA_LOCAL_HYPERTHREADS: + if (xen_domain()) { + pci_warn(efx->pci_dev, + "Unable to determine CPU topology on Xen reliably. Creating rss channels for all cores/hyperthreads.\n"); + n_rxq = num_online_cpus(); + } else { + n_rxq = min(num_online_cpus(), + cpumask_weight(cpumask_of_pcibus(efx->pci_dev->bus))); + } + break; + case EFX_RSS_CUSTOM: + break; + default: +#if defined(EFX_NOT_UPSTREAM) + if (selected) + pci_err(efx->pci_dev, + "Selected rss mode '%s' not available\n", + efx_rss_cpus_str); +#endif + rss_mode = EFX_RSS_HYPERTHREADS; + n_rxq = num_online_cpus(); + break; + } + + if (n_rxq > EFX_MAX_RX_QUEUES) { + pci_warn(efx->pci_dev, + "Reducing number of rss channels from %u to %u.\n", + n_rxq, EFX_MAX_RX_QUEUES); + n_rxq = EFX_MAX_RX_QUEUES; + } + +#ifdef CONFIG_SFC_SRIOV + /* If RSS is requested for the PF *and* VFs then we can't write RSS + * table entries that are inaccessible to VFs + */ + if (efx_sriov_wanted(efx) && n_rxq > 1) { + pci_warn(efx->pci_dev, + "Reducing number of RSS channels from %u to 1 for VF support. 
Increase vf-msix-limit to use more channels on the PF.\n", + n_rxq); + n_rxq = 1; + } +#endif + +#if !defined(EFX_NOT_UPSTREAM) + efx->rss_mode = rss_mode; +#endif + + return n_rxq; +} + +static unsigned int efx_num_rss_channels(struct efx_nic *efx) +{ + /* do not RSS to extra_channels, such as PTP */ + unsigned int rss_channels = efx_rx_channels(efx) - + efx->n_extra_channels; +#if defined(HAVE_EFX_NUM_PACKAGES) +#if !defined(EFX_NOT_UPSTREAM) + enum efx_rss_mode rss_mode = efx->rss_mode; +#endif + + if (rss_numa_local) { + cpumask_var_t local_online_cpus; + + if (unlikely(!zalloc_cpumask_var(&local_online_cpus, + GFP_KERNEL))) { + netif_err(efx, drv, efx->net_dev, + "Not enough temporary memory to determine local CPUs - using all CPUs for RSS.\n"); + rss_numa_local = false; + return rss_channels; + } + + cpumask_and(local_online_cpus, cpu_online_mask, + cpumask_of_pcibus(efx->pci_dev->bus)); + + if (unlikely(!cpumask_weight(local_online_cpus))) { + netif_info(efx, drv, efx->net_dev, "No local CPUs online - using all CPUs for RSS.\n"); + rss_numa_local = false; + free_cpumask_var(local_online_cpus); + return rss_channels; + } + + if (rss_mode == EFX_RSS_PACKAGES) + rss_channels = min(rss_channels, + efx_num_packages(local_online_cpus)); +#ifdef HAVE_EFX_NUM_CORES + else if (rss_mode == EFX_RSS_CORES) + rss_channels = min(rss_channels, + count_online_cores(efx, true)); +#endif + else + rss_channels = min(rss_channels, + cpumask_weight(local_online_cpus)); + free_cpumask_var(local_online_cpus); + } +#endif + + return rss_channels; +} + +static void efx_allocate_xdp_channels(struct efx_nic *efx, + unsigned int max_channels, + unsigned int n_channels) +{ + /* To allow XDP transmit to happen from arbitrary NAPI contexts + * we allocate a TX queue per CPU. We share event queues across + * multiple tx queues, assuming tx and ev queues are both + * maximum size. + */ + int tx_per_ev = efx_max_evtq_size(efx) / EFX_TXQ_MAX_ENT(efx); + int n_xdp_tx; + int n_xdp_ev; + + if (!efx->xdp_tx) + goto xdp_disabled; + + n_xdp_tx = num_possible_cpus(); + n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, tx_per_ev); + + /* Check resources. + * We need a channel per event queue, plus a VI per tx queue. + * This may be more pessimistic than it needs to be. 
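+ * As an illustrative example: with 16384-entry event queues and
+ * 2048-entry TX queues, tx_per_ev is 8, so a 32-CPU system asks for
+ * n_xdp_tx = 32 TX queues served by n_xdp_ev = DIV_ROUND_UP(32, 8) = 4
+ * extra event queues/channels.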
+ */ + if (n_channels + n_xdp_ev > max_channels) { + pci_err(efx->pci_dev, + "Insufficient resources for %d XDP event queues (%d current channels, max %d)\n", + n_xdp_ev, n_channels, max_channels); + goto xdp_disabled; + } else if (n_channels + n_xdp_tx > efx->max_vis) { + pci_err(efx->pci_dev, + "Insufficient resources for %d XDP TX queues (%d current channels, max VIs %d)\n", + n_xdp_tx, n_channels, efx->max_vis); + goto xdp_disabled; + } + + efx->n_xdp_channels = n_xdp_ev; + efx->xdp_tx_per_channel = tx_per_ev; + efx->xdp_tx_queue_count = n_xdp_tx; + pci_dbg(efx->pci_dev, + "Allocating %d TX and %d event queues for XDP\n", + n_xdp_tx, n_xdp_ev); + return; + +xdp_disabled: + efx->n_xdp_channels = 0; + efx->xdp_tx_per_channel = 0; + efx->xdp_tx_queue_count = 0; + return; +} + +static int efx_allocate_msix_channels(struct efx_nic *efx, + unsigned int max_channels) +{ + unsigned int n_channels = efx_wanted_parallelism(efx); + unsigned int extra_channel_type; +#ifdef EFX_HAVE_MSIX_CAP + int vec_count; +#endif + unsigned int min_channels = 1; + + if (separate_tx_channels) { + n_channels *= 2; + min_channels = 2; + } + + if (max_channels < min_channels) { + pci_err(efx->pci_dev, + "Unable to satisfy minimum channel requirements\n"); + return -ENOSPC; + } + + for (extra_channel_type = 0; + extra_channel_type < EFX_MAX_EXTRA_CHANNELS && + !separate_tx_channels && n_channels < max_channels; + extra_channel_type++) + n_channels += !!efx->extra_channel_type[extra_channel_type]; + + efx_allocate_xdp_channels(efx, max_channels, n_channels); + n_channels += efx->n_xdp_channels; + +#ifdef EFX_HAVE_MSIX_CAP +#ifdef EFX_NOT_UPSTREAM +#if IS_MODULE(CONFIG_SFC_DRIVERLINK) + /* dl IRQs don't need VIs, so no need to clamp to max_channels */ + n_channels += efx->n_dl_irqs; +#endif +#endif + + vec_count = pci_msix_vec_count(efx->pci_dev); + if (vec_count < 0) + return vec_count; + if (vec_count < min_channels) { + pci_err(efx->pci_dev, + "Unable to satisfy minimum channel requirements\n"); + return -ENOSPC; + } + if (vec_count < n_channels) { + pci_err(efx->pci_dev, + "WARNING: Insufficient MSI-X vectors available (%d < %u).\n", + vec_count, n_channels); + pci_err(efx->pci_dev, + "WARNING: Performance may be reduced.\n"); + +#ifdef EFX_NOT_UPSTREAM +#if IS_MODULE(CONFIG_SFC_DRIVERLINK) + /* reduce driverlink allocated IRQs first, to a minimum of 1 */ + n_channels -= efx->n_dl_irqs; + efx->n_dl_irqs = vec_count == min_channels ? 0 : + min(max(vec_count - (int)n_channels, 1), + efx->n_dl_irqs); + /* Make driverlink-consumed IRQ vectors unavailable for net + * driver use. + */ + vec_count -= efx->n_dl_irqs; +#endif +#endif + /* reduce XDP channels */ + n_channels -= efx->n_xdp_channels; + efx->n_xdp_channels = max(vec_count - (int)n_channels, 0); + + n_channels = vec_count; + } +#ifdef EFX_NOT_UPSTREAM +#if IS_MODULE(CONFIG_SFC_DRIVERLINK) + else { + /* driverlink IRQs don't have a channel in the net driver, so + * remove them from further calculations. + */ + n_channels -= efx->n_dl_irqs; + } +#endif +#endif +#endif + + /* Ignore XDP tx channels when creating rx channels. 
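+ * They were added to n_channels above only so that interrupt vectors
+ * would be reserved for them; the RX/TX split below is over the
+ * remaining channels.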
*/ + n_channels -= efx->n_xdp_channels; + + if (separate_tx_channels) { + efx->n_extra_channels = 0; + efx->n_combined_channels = 0; + efx->n_tx_only_channels = + min(max(n_channels / 2, 1U), + efx->max_tx_channels); + efx->tx_channel_offset = + n_channels - efx->n_tx_only_channels; + efx->n_rx_only_channels = + max(n_channels - + efx->n_tx_only_channels, 1U); + } else { + efx->n_extra_channels = 0; + /* allocate other channels, leaving at least one channel */ + for (extra_channel_type = 0; + extra_channel_type < EFX_MAX_EXTRA_CHANNELS && + n_channels > 1 && + efx->n_extra_channels + 1 < efx->max_tx_channels; + extra_channel_type++) + efx->n_extra_channels += !!efx->extra_channel_type[extra_channel_type]; + n_channels -= efx->n_extra_channels; + + efx->n_combined_channels = min(n_channels, + efx->max_tx_channels); + efx->n_tx_only_channels = 0; + efx->tx_channel_offset = 0; + /* if we have other channels then don't use RX only channels */ + efx->n_rx_only_channels = efx->n_extra_channels ? 0 : + n_channels - efx->n_combined_channels; + } + efx->n_rss_channels = efx_num_rss_channels(efx); + + EFX_WARN_ON_PARANOID(efx->n_rx_only_channels && + efx->n_tx_only_channels && + (efx->n_combined_channels || + efx->n_extra_channels)); + + return 0; +} + +#ifdef EFX_NOT_UPSTREAM +#if IS_MODULE(CONFIG_SFC_DRIVERLINK) +static u16 efx_dl_make_irq_resources_(struct efx_dl_irq_resources *res, + size_t entries, + struct msix_entry *xentries) +{ + u16 n_ranges = 0; + size_t i = 0, j; + + while (i < entries) { + j = i + 1; + + while (j < entries && + xentries[j - 1].vector + 1 == xentries[j].vector) + ++j; + + if (res) { + res->irq_ranges[n_ranges].vector = xentries[i].vector; + res->irq_ranges[n_ranges].range = j - i; + } + n_ranges++; + i = j; + } + + return n_ranges; +} + +static void efx_dl_make_irq_resources(struct efx_nic *efx, size_t entries, + struct msix_entry *xentries) +{ + u16 n_ranges = efx_dl_make_irq_resources_(NULL, entries, xentries); + + if (n_ranges) { + efx->irq_resources = kzalloc(struct_size(efx->irq_resources, + irq_ranges, n_ranges), + GFP_KERNEL); + if (!efx->irq_resources) + netif_warn(efx, drv, efx->net_dev, + "Out of memory exporting interrupts to driverlink\n"); + } + if (efx->irq_resources) { + efx->irq_resources->hdr.type = EFX_DL_IRQ_RESOURCES; + efx->irq_resources->channel_base = xentries[0].entry; + efx->irq_resources->n_ranges = n_ranges; + efx_dl_make_irq_resources_(efx->irq_resources, + entries, xentries); + + netif_dbg(efx, drv, efx->net_dev, + "Exporting %d IRQ range(s) to driverlink for channels %d-%zu. IRQ[0]=%d.\n", + efx->irq_resources->n_ranges, + efx->irq_resources->channel_base, + efx->irq_resources->channel_base + entries - 1, + efx->irq_resources->irq_ranges[0].vector); + } +} +#endif +#endif + +static void efx_assign_msix_vectors(struct efx_nic *efx, + struct msix_entry *xentries, + unsigned int n_irqs) +{ + struct efx_channel *channel; + + efx_for_each_channel(channel, efx) + if (channel->channel < n_irqs) + channel->irq = xentries[channel->channel].vector; + else + channel->irq = 0; +} + +/* Probe the number and type of interrupts we are able to obtain, and + * the resulting numbers of channels and RX queues. 
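+ * The fallback order is MSI-X, then single-channel MSI, then (on
+ * EFX_NOT_UPSTREAM builds with CONFIG_SFC_BUSYPOLL) polled mode,
+ * subject to the modes advertised in
+ * efx->type->supported_interrupt_modes.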
+ */ +int efx_probe_interrupts(struct efx_nic *efx) +{ + unsigned int i; + int rc; + + if (efx->interrupt_mode == EFX_INT_MODE_MSIX) { + struct msix_entry *xentries; + unsigned int n_irqs = efx->max_irqs; + + n_irqs = min(n_irqs, efx_channels(efx)); +#ifdef EFX_NOT_UPSTREAM +#if IS_MODULE(CONFIG_SFC_DRIVERLINK) + n_irqs += efx->n_dl_irqs; +#endif +#endif + netif_dbg(efx, drv, efx->net_dev, + "Allocating %u interrupts\n", n_irqs); + + xentries = kmalloc_array(n_irqs, sizeof(*xentries), GFP_KERNEL); + if (!xentries) { + rc = -ENOMEM; + } else { + for (i = 0; i < n_irqs; i++) + xentries[i].entry = i; + rc = pci_enable_msix_range(efx->pci_dev, xentries, + 1, n_irqs); + } + if (rc < 0) { + /* Fall back to single channel MSI */ + netif_err(efx, drv, efx->net_dev, + "could not enable MSI-X (%d)\n", rc); + if (efx->type->supported_interrupt_modes & BIT(EFX_INT_MODE_MSI)) + efx->interrupt_mode = EFX_INT_MODE_MSI; +#if defined(EFX_NOT_UPSTREAM) && defined(CONFIG_SFC_BUSYPOLL) + else if (efx->type->supported_interrupt_modes & BIT(EFX_INT_MODE_POLLED)) + efx->interrupt_mode = EFX_INT_MODE_POLLED; +#endif + else { + kfree(xentries); + return rc; + } +#ifndef EFX_HAVE_MSIX_CAP + } else if (rc < n_irqs) { + int rc2; + + netif_err(efx, drv, efx->net_dev, + "WARNING: Insufficient MSI-X vectors available (%d < %u).\n", + rc, n_irqs); + netif_err(efx, drv, efx->net_dev, + "WARNING: Performance may be reduced.\n"); + n_irqs = rc; + rc2 = efx_allocate_msix_channels(efx, n_irqs); + if (rc2) + rc = rc2; +#endif + } + + if (rc > 0) { +#ifdef EFX_NOT_UPSTREAM +#if IS_MODULE(CONFIG_SFC_DRIVERLINK) + n_irqs = rc - efx->n_dl_irqs; + efx_dl_make_irq_resources(efx, efx->n_dl_irqs, + xentries + n_irqs); +#endif +#endif + efx_assign_msix_vectors(efx, xentries, n_irqs); + } + + kfree(xentries); + } + + /* Try single interrupt MSI */ + if (efx->interrupt_mode == EFX_INT_MODE_MSI) { + efx->n_extra_channels = 0; + efx->n_combined_channels = 1; + efx->n_rx_only_channels = 0; + efx->n_rss_channels = 1; + efx->rss_spread = 1; + efx->n_tx_only_channels = 0; + efx->tx_channel_offset = 0; + efx->n_xdp_channels = 0; + rc = pci_enable_msi(efx->pci_dev); + if (rc == 0) { + efx_get_channel(efx, 0)->irq = efx->pci_dev->irq; + } else { + netif_err(efx, drv, efx->net_dev, + "could not enable MSI\n"); +#if defined(EFX_NOT_UPSTREAM) && defined(CONFIG_SFC_BUSYPOLL) + if (efx->type->supported_interrupt_modes & BIT(EFX_INT_MODE_POLLED)) + efx->interrupt_mode = EFX_INT_MODE_POLLED; + else +#endif + return rc; + } + } + +#if defined(EFX_NOT_UPSTREAM) && defined(CONFIG_SFC_BUSYPOLL) + if (efx->interrupt_mode == EFX_INT_MODE_POLLED) { + efx->n_extra_channels = 0; + efx->n_combined_channels = 1; + efx->n_rx_only_channels = 0; + efx->n_rss_channels = 1; + efx->rss_spread = 1; + efx->n_tx_only_channels = 0; + efx->tx_channel_offset = 0; + efx->n_xdp_channels = 0; + } +#endif + return 0; +} + +void efx_remove_interrupts(struct efx_nic *efx) +{ + struct efx_channel *channel; + + /* Remove MSI/MSI-X interrupts */ + efx_for_each_channel(channel, efx) + channel->irq = 0; + pci_disable_msi(efx->pci_dev); + pci_disable_msix(efx->pci_dev); +#ifdef EFX_NOT_UPSTREAM +#if IS_MODULE(CONFIG_SFC_DRIVERLINK) + kfree(efx->irq_resources); + efx->irq_resources = NULL; +#endif +#endif +} + +#if !defined(CONFIG_SMP) +void efx_set_interrupt_affinity(struct efx_nic *efx __always_unused) +{ +} + +void efx_clear_interrupt_affinity(struct efx_nic *efx __always_unused) +{ +} +#else +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_NETIF_SET_XPS_QUEUE) +#if 
defined(EFX_NOT_UPSTREAM) && defined(CONFIG_XPS) +static bool auto_config_xps = true; +module_param(auto_config_xps, bool, 0644); +MODULE_PARM_DESC(auto_config_xps, + "Toggle automatic XPS configuration (default is enabled)."); +#endif /* EFX_NOT_UPSTREAM && CONFIG_XPS */ + +static void efx_set_xps_queue(struct efx_channel *channel, + const cpumask_t *mask) +{ + if (!efx_channel_has_tx_queues(channel) || +#if defined(EFX_NOT_UPSTREAM) && defined(CONFIG_XPS) + !auto_config_xps || +#endif + efx_channel_is_xdp_tx(channel) || + channel == efx_ptp_channel(channel->efx)) + return; + + netif_set_xps_queue(channel->efx->net_dev, mask, + channel->channel - channel->efx->tx_channel_offset); +} +#else +static void efx_set_xps_queue(struct efx_channel *channel, + const cpumask_t *mask) +{ +} +#endif + +#if !defined(EFX_NOT_UPSTREAM) +void efx_set_interrupt_affinity(struct efx_nic *efx) +{ + const struct cpumask *numa_mask = cpumask_of_pcibus(efx->pci_dev->bus); + struct efx_channel *channel; + unsigned int cpu; + + /* If no online CPUs in local node, fallback to any online CPU */ + if (cpumask_first_and(cpu_online_mask, numa_mask) >= nr_cpu_ids) + numa_mask = cpu_online_mask; + + cpu = -1; + efx_for_each_channel(channel, efx) { + cpu = cpumask_next_and(cpu, cpu_online_mask, numa_mask); + if (cpu >= nr_cpu_ids) + cpu = cpumask_first_and(cpu_online_mask, numa_mask); + irq_set_affinity_hint(channel->irq, cpumask_of(cpu)); + channel->irq_mem_node = cpu_to_mem(cpu); + efx_set_xps_queue(channel, cpumask_of(cpu)); + } +} + +void efx_clear_interrupt_affinity(struct efx_nic *efx) +{ + struct efx_channel *channel; + + efx_for_each_channel(channel, efx) + irq_set_affinity_hint(channel->irq, NULL); +} +#else +static bool efx_irq_set_affinity = true; +module_param_named(irq_set_affinity, efx_irq_set_affinity, bool, 0444); +MODULE_PARM_DESC(irq_set_affinity, + "Set SMP affinity of IRQs to support RSS (N=>disabled Y=>enabled (default))"); + +static int efx_set_cpu_affinity(struct efx_channel *channel, int cpu) +{ + int rc; + + if (!efx_irq_set_affinity) + return 0; + + rc = irq_set_affinity_hint(channel->irq, cpumask_of(cpu)); + if (rc) { + netif_err(channel->efx, drv, channel->efx->net_dev, + "Unable to set affinity hint for channel %d" + " interrupt %d\n", channel->channel, channel->irq); + return rc; + } + efx_set_xps_queue(channel, cpumask_of(cpu)); + return rc; +} + +/* Count of number of RSS channels allocated to each CPU + * in the system. 
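+ * Each channel is bound to the least-loaded hyperthread of the
+ * least-loaded core of the least-loaded package, as chosen by the
+ * efx_rss_choose_*() helpers below.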
Protected by the rtnl lock + */ +static u16 *rss_cpu_usage; + +#ifdef HAVE_EFX_NUM_PACKAGES +/* Select the package_set with the lowest usage count */ +static void efx_rss_choose_package(cpumask_t *set, cpumask_t *package_set, + cpumask_t *used_set, + const cpumask_t *global_set) +{ + unsigned int thresh, count; + int cpu, cpu2, sibling; + + thresh = 1; + for_each_cpu(cpu, global_set) + thresh += rss_cpu_usage[cpu]; + + cpumask_clear(used_set); + for_each_cpu(cpu, global_set) { + if (!cpumask_test_cpu(cpu, used_set)) { + cpumask_clear(package_set); + /* Treat each numa node as a separate package */ + for_each_cpu(cpu2, topology_core_cpumask(cpu)) { + if (cpu_to_node(cpu) == cpu_to_node(cpu2)) + cpumask_set_cpu(cpu2, package_set); + } + cpumask_and(package_set, package_set, global_set); + cpumask_or(used_set, used_set, package_set); + + count = 0; + for_each_cpu(sibling, package_set) + count += rss_cpu_usage[sibling]; + + if (count < thresh) { + cpumask_copy(set, package_set); + thresh = count; + } + } + } +} +#endif + +#ifdef HAVE_EFX_NUM_CORES +/* Select the thread siblings within the package with the lowest usage count */ +static void efx_rss_choose_core(cpumask_t *set, const cpumask_t *package_set, + cpumask_t *core_set, cpumask_t *used_set) +{ + unsigned int thresh, count; + int cpu, sibling; + + thresh = 1; + for_each_cpu(cpu, package_set) + thresh += rss_cpu_usage[cpu]; + + cpumask_clear(used_set); + for_each_cpu(cpu, package_set) { + if (!cpumask_test_cpu(cpu, used_set)) { + cpumask_copy(core_set, topology_sibling_cpumask(cpu)); + cpumask_or(used_set, used_set, core_set); + + count = 0; + for_each_cpu(sibling, core_set) + count += rss_cpu_usage[sibling]; + + if (count < thresh) { + cpumask_copy(set, core_set); + thresh = count; + } + } + } +} +#endif + +/* Select the thread within the mask with the lowest usage count */ +static int efx_rss_choose_thread(const cpumask_t *set) +{ + int cpu, chosen; + unsigned int thresh; + + thresh = 1; + for_each_cpu(cpu, set) + thresh += rss_cpu_usage[cpu]; + + chosen = 0; + for_each_cpu(cpu, set) { + if (rss_cpu_usage[cpu] < thresh) { + chosen = cpu; + thresh = rss_cpu_usage[cpu]; + } + } + + return chosen; +} + +/* Stripe the RSS vectors across the CPUs. */ +void efx_set_interrupt_affinity(struct efx_nic *efx) +{ + enum {PACKAGE, CORE, TEMP1, TEMP2, LOCAL, SETS_MAX}; + struct efx_channel *channel; + struct cpumask *sets; + int cpu; + + /* Only do this for RSS/MSI-X */ + if (efx->interrupt_mode != EFX_INT_MODE_MSIX) + return; + + sets = kcalloc(SETS_MAX, sizeof(*sets), GFP_KERNEL); + if (!sets) { + netif_err(efx, drv, efx->net_dev, + "Not enough temporary memory to set IRQ affinity\n"); + return; + } + + cpumask_and(&sets[LOCAL], cpu_online_mask, + cpumask_of_pcibus(efx->pci_dev->bus)); + + /* Assign each channel a CPU */ + efx_for_each_channel(channel, efx) { +#ifdef HAVE_EFX_NUM_PACKAGES + /* Force channels 0-RSS to the local package, otherwise select + * the package with the lowest usage count + */ + efx_rss_choose_package(&sets[PACKAGE], &sets[TEMP1], + &sets[TEMP2], + rss_numa_local && + channel->channel < efx->n_rss_channels ? 
+ &sets[LOCAL] : cpu_online_mask); + WARN_ON(!cpumask_weight(&sets[PACKAGE])); +#else + cpumask_copy(&sets[PACKAGE], &cpu_online_map); +#endif + + /* Select the thread siblings within this package with the + * lowest usage count + */ +#ifdef HAVE_EFX_NUM_CORES + efx_rss_choose_core(&sets[CORE], &sets[PACKAGE], &sets[TEMP1], + &sets[TEMP2]); + WARN_ON(!cpumask_weight(&sets[CORE])); +#else + cpumask_copy(&sets[CORE], &sets[PACKAGE]); +#endif + /* Select the thread within this set with the lowest usage + * count + */ + cpu = efx_rss_choose_thread(&sets[CORE]); + ++rss_cpu_usage[cpu]; + efx_set_cpu_affinity(channel, cpu); + channel->irq_mem_node = cpu_to_mem(cpu); + } + + kfree(sets); +} + +void efx_clear_interrupt_affinity(struct efx_nic *efx) +{ + struct efx_channel *channel; + + efx_for_each_channel(channel, efx) + (void)irq_set_affinity_hint(channel->irq, NULL); +} + +#endif /* EFX_NOT_UPSTREAM */ +#endif /* CONFIG_SMP */ + +#ifdef EFX_USE_IRQ_NOTIFIERS +static void efx_irq_release(struct kref *ref) +{ + struct efx_channel *channel = container_of(ref, struct efx_channel, + irq_affinity.notifier.kref); + + complete(&channel->irq_affinity.complete); +} + +static void efx_irq_notify(struct irq_affinity_notify *this, + const cpumask_t *mask) +{ + struct efx_channel *channel = container_of(this, struct efx_channel, + irq_affinity.notifier); + + efx_set_xps_queue(channel, mask); +} + +static void efx_set_affinity_notifier(struct efx_channel *channel) +{ + int rc; + + init_completion(&channel->irq_affinity.complete); + channel->irq_affinity.notifier.notify = efx_irq_notify; + channel->irq_affinity.notifier.release = efx_irq_release; + rc = irq_set_affinity_notifier(channel->irq, + &channel->irq_affinity.notifier); + if (rc) + netif_warn(channel->efx, probe, channel->efx->net_dev, + "Failed to set irq notifier for IRQ %d\n", + channel->irq); +} + +static void efx_clear_affinity_notifier(struct efx_channel *channel) +{ + /* Don't clear if it hasn't been set */ + if (!channel->irq_affinity.notifier.notify) + return; + irq_set_affinity_notifier(channel->irq, NULL); + wait_for_completion(&channel->irq_affinity.complete); + channel->irq_affinity.notifier.notify = NULL; +} + +void efx_register_irq_notifiers(struct efx_nic *efx) +{ + struct efx_channel *channel; + + efx_for_each_channel(channel, efx) + efx_set_affinity_notifier(channel); +} + +void efx_unregister_irq_notifiers(struct efx_nic *efx) +{ + struct efx_channel *channel; + + efx_for_each_channel(channel, efx) + efx_clear_affinity_notifier(channel); +} +#endif + +/*************** + * EVENT QUEUES + ***************/ + +/* Create event queue + * Event queue memory allocations are done only once. If the channel + * is reset, the memory buffer will be reused; this guards against + * errors during channel reset and also simplifies interrupt handling. + */ +static int efx_probe_eventq(struct efx_channel *channel) +{ + struct efx_nic *efx = channel->efx; + struct efx_tx_queue *txq; + unsigned long entries; + + /* When sizing our event queue we need to allow for: + * - one entry per rxq entry. + * - one entry per txq entry - or three if we're using timestamping. + * - some capacity for MCDI and other events. This is mostly on + * channel zero. + */ + if (efx_channel_has_rx_queue(channel)) + entries = efx->rxq_entries; + else + entries = 0; + + efx_for_each_channel_tx_queue(txq, channel) { + entries += txq->timestamping ? + efx->txq_entries * 3 : + efx->txq_entries; + } + + entries += channel->channel == 0 ? 
256 : 128; + +#ifdef EFX_NOT_UPSTREAM +#if IS_MODULE(CONFIG_SFC_DRIVERLINK) + /* Add additional event queue entries for driverlink activity on + * channel zero. + */ + if (channel->channel == 0 && efx_dl_supported(efx)) + entries += EFX_EVQ_DL_EXTRA_ENTRIES; +#endif +#endif + + if (entries > efx_max_evtq_size(efx)) { + netif_warn(efx, probe, efx->net_dev, + "chan %d ev queue too large at %lu, capped at %lu\n", + channel->channel, entries, efx_max_evtq_size(efx)); + entries = efx_max_evtq_size(efx); + } else { + entries = roundup_pow_of_two(entries); + } + if (!efx_is_guaranteed_ringsize(efx, entries)) { + unsigned int new_entries = + efx_next_guaranteed_ringsize(efx, entries, true); + + if (new_entries == entries) + return -ERANGE; + entries = new_entries; + } + netif_dbg(efx, probe, efx->net_dev, + "chan %d ev queue created with %lu entries\n", + channel->channel, entries); + channel->eventq_mask = max(entries, efx_min_evtq_size(efx)) - 1; + + return efx_nic_probe_eventq(channel); +} + +/* Prepare channel's event queue */ +static int efx_init_eventq(struct efx_channel *channel) +{ + struct efx_nic *efx = channel->efx; + int rc; + + EFX_WARN_ON_PARANOID(channel->eventq_init); + + netif_dbg(efx, drv, efx->net_dev, + "chan %d init event queue\n", channel->channel); + + rc = efx_nic_init_eventq(channel); + if (rc == 0) { + efx->type->push_irq_moderation(channel); + channel->eventq_read_ptr = 0; + channel->eventq_init = true; + } + return rc; +} + +/* Enable event queue processing and NAPI */ +void efx_start_eventq(struct efx_channel *channel) +{ + netif_dbg(channel->efx, ifup, channel->efx->net_dev, + "chan %d start event queue\n", channel->channel); + + /* Make sure the NAPI handler sees the enabled flag set */ + channel->enabled = true; + smp_wmb(); + +#if defined(EFX_USE_KCOMPAT) && defined(EFX_WANT_DRIVER_BUSY_POLL) + efx_channel_enable(channel); +#endif + napi_enable(&channel->napi_str); + +#if defined(EFX_NOT_UPSTREAM) && defined(CONFIG_SFC_BUSYPOLL) + if (channel->efx->type->revision == EFX_REV_EF100 && channel->efx->interrupt_mode == EFX_INT_MODE_POLLED) + napi_schedule(&channel->napi_str); +#endif + + efx_nic_eventq_read_ack(channel); +} + +/* Disable event queue processing and NAPI */ +void efx_stop_eventq(struct efx_channel *channel) +{ + if (!channel->enabled) + return; + + netif_dbg(channel->efx, drv, channel->efx->net_dev, + "chan %d stop event queue\n", channel->channel); + + napi_disable(&channel->napi_str); +#if defined(EFX_USE_KCOMPAT) && defined(EFX_WANT_DRIVER_BUSY_POLL) + while (!efx_channel_disable(channel)) + usleep_range(1000, 20000); + +#endif + channel->enabled = false; +} + +static void efx_fini_eventq(struct efx_channel *channel) +{ + if (!channel->eventq_init || efx_nic_hw_unavailable(channel->efx)) + return; + + netif_dbg(channel->efx, drv, channel->efx->net_dev, + "chan %d fini event queue\n", channel->channel); + + efx_nic_fini_eventq(channel); + channel->eventq_init = false; +} + +static void efx_remove_eventq(struct efx_channel *channel) +{ + netif_dbg(channel->efx, drv, channel->efx->net_dev, + "chan %d remove event queue\n", channel->channel); + + efx_nic_remove_eventq(channel); +} + +/* Configure a normal RX channel */ +static int efx_set_channel_rx(struct efx_nic *efx, struct efx_channel *channel) +{ + struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel); + + rx_queue->core_index = channel->channel; + rx_queue->queue = efx_rx_queue_id_internal(efx, channel->channel); + rx_queue->label = channel->channel; + rx_queue->receive_skb = 
channel->type->receive_skb;
+ rx_queue->receive_raw = channel->type->receive_raw;
+ return 0;
+}
+
+/* Configure a normal TX channel - add TX queues */
+static int efx_set_channel_tx(struct efx_nic *efx, struct efx_channel *channel)
+{
+ struct efx_tx_queue *tx_queue;
+ int queue_base;
+ int j;
+
+ EFX_WARN_ON_PARANOID(channel->tx_queues);
+ channel->tx_queues = kcalloc(efx->tx_queues_per_channel,
+ sizeof(*tx_queue),
+ GFP_KERNEL);
+ if (!channel->tx_queues)
+ return -ENOMEM;
+
+ channel->tx_queue_count = efx->tx_queues_per_channel;
+ queue_base = efx->tx_queues_per_channel *
+ (channel->channel - efx->tx_channel_offset);
+
+ for (j = 0; j < channel->tx_queue_count; ++j) {
+ tx_queue = &channel->tx_queues[j];
+ tx_queue->efx = efx;
+ tx_queue->channel = channel;
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_SOCK)
+#if defined(CONFIG_XDP_SOCKETS)
+ /* For the XSK queue no csum_offload is used, as the onus is put
+ * on the application instead.
+ */
+ if (j && (j == channel->tx_queue_count - 1))
+ tx_queue->csum_offload = EFX_TXQ_TYPE_NO_OFFLOAD;
+ else
+#endif
+ tx_queue->csum_offload = j;
+#else
+ tx_queue->csum_offload = j;
+#endif
+ tx_queue->label = j;
+ tx_queue->queue = queue_base + j;
+ /* When using an even number of queues, for even numbered
+ * channels alternate the queues. This stripes events across
+ * the NIC resources more effectively.
+ */
+ if (efx->tx_queues_per_channel % 2 == 0)
+ tx_queue->queue ^= channel->channel & 1;
+ }
+
+ return 0;
+}
+
+/* Configure an XDP TX channel - add TX queues */
+static int efx_set_channel_xdp(struct efx_nic *efx, struct efx_channel *channel)
+{
+ struct efx_tx_queue *tx_queue;
+ int xdp_zero_base;
+ int xdp_base;
+ int j;
+
+ /* TX queue index for first XDP queue overall. */
+ xdp_zero_base = efx->tx_queues_per_channel * efx_tx_channels(efx);
+ /* TX queue index for first queue on this channel. */
+ xdp_base = channel->channel - efx_xdp_channel_offset(efx);
+ xdp_base *= efx->xdp_tx_per_channel;
+
+ /* Do we need the full allowance of XDP tx queues for this channel?
+ * If the total number of queues required is not a multiple of
+ * xdp_tx_per_channel we omit the surplus queues.
+ */
+ if (xdp_base + efx->xdp_tx_per_channel > efx->xdp_tx_queue_count) {
+ channel->tx_queue_count = efx->xdp_tx_queue_count %
+ efx->xdp_tx_per_channel;
+ } else {
+ channel->tx_queue_count = efx->xdp_tx_per_channel;
+ }
+ EFX_WARN_ON_PARANOID(channel->tx_queue_count == 0);
+
+ EFX_WARN_ON_PARANOID(channel->tx_queues);
+ channel->tx_queues = kcalloc(channel->tx_queue_count,
+ sizeof(*tx_queue),
+ GFP_KERNEL);
+ if (!channel->tx_queues) {
+ channel->tx_queue_count = 0;
+ return -ENOMEM;
+ }
+
+ for (j = 0; j < channel->tx_queue_count; ++j) {
+ tx_queue = &channel->tx_queues[j];
+ tx_queue->efx = efx;
+ tx_queue->channel = channel;
+ tx_queue->csum_offload = EFX_TXQ_TYPE_NO_OFFLOAD;
+ tx_queue->label = j;
+ tx_queue->queue = xdp_zero_base + xdp_base + j;
+
+ /* Stash pointer for use by XDP TX */
+ efx->xdp_tx_queues[xdp_base + j] = tx_queue;
+ }
+
+ return 0;
+}
+
+/* Allocate and initialise a channel structure.
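+ * Returns NULL on allocation failure; the caller, efx_init_channels(),
+ * owns the result and links it onto efx->channel_list.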
*/ +static struct efx_channel *efx_alloc_channel(struct efx_nic *efx, int i) +{ + struct efx_channel *channel; + + channel = kzalloc(sizeof(*channel), GFP_KERNEL); + if (!channel) + return NULL; + + channel->efx = efx; + channel->channel = i; + channel->type = &efx_default_channel_type; + channel->holdoff_doorbell = false; + channel->tx_coalesce_doorbell = false; + channel->irq_mem_node = NUMA_NO_NODE; +#ifdef CONFIG_RFS_ACCEL + INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire); +#endif + + channel->rx_queue.efx = efx; + + return channel; +} + +struct efx_channel *efx_get_channel(struct efx_nic *efx, unsigned int index) +{ + struct efx_channel *this = NULL; + + EFX_WARN_ON_ONCE_PARANOID(index >= efx_channels(efx)); + if (list_empty(&efx->channel_list)) + return NULL; + + list_for_each_entry(this, &efx->channel_list, list) + if (this->channel == index) + break; + return this; +} + +void efx_fini_interrupts(struct efx_nic *efx) +{ + kfree(efx->msi_context); + efx->msi_context = NULL; +} + +void efx_fini_channels(struct efx_nic *efx) +{ + struct efx_channel *channel; + + while (!list_empty(&efx->channel_list)) { + channel = list_first_entry(&efx->channel_list, + struct efx_channel, list); + list_del(&channel->list); + kfree(channel); + } +} + +/* Returns the maximum number of interrupts we can use, or a negative number + * on error. + */ +int efx_init_interrupts(struct efx_nic *efx) +{ + int rc = 0; + + if (WARN_ON(efx->type->supported_interrupt_modes == 0)) { + netif_err(efx, drv, efx->net_dev, "no interrupt modes supported\n"); + return -ENOTSUPP; + } + + efx->max_irqs = 1; /* For MSI mode */ + if (BIT(efx_interrupt_mode) & efx->type->supported_interrupt_modes) + efx->interrupt_mode = efx_interrupt_mode; + else + efx->interrupt_mode = ffs(efx->type->supported_interrupt_modes) - 1; + + if (efx->interrupt_mode == EFX_INT_MODE_MSIX) { +#ifdef EFX_HAVE_MSIX_CAP + rc = pci_msix_vec_count(efx->pci_dev); + if (rc <= 0) + rc = efx_wanted_parallelism(efx); + efx->max_irqs = rc; +#else + /* On old kernels fall back to the rss_cpus parameter */ + efx->max_irqs = efx_wanted_parallelism(efx); +#endif + } + + if (efx->max_irqs > 0) { + rc = efx->max_irqs; + /* TODO: At some point limit this to the number of cores + * times the number of clients. 
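+		 *
+		 * Illustrative sizing (assumed numbers): if
+		 * pci_msix_vec_count() reported 32 vectors but
+		 * efx->max_channels is 16, the min_t() below trims rc to
+		 * 16, so only 16 msi_context entries are allocated.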
+ */ + rc = min_t(u16, rc, efx->max_channels); + efx->msi_context = kcalloc(rc, sizeof(struct efx_msi_context), + GFP_KERNEL); + if (!efx->msi_context) + rc = -ENOMEM; + efx->max_irqs = rc; + } + + if (efx->interrupt_mode == EFX_INT_MODE_MSIX) + rc = efx_allocate_msix_channels(efx, efx->max_channels); + + return rc; +} + +int efx_init_channels(struct efx_nic *efx) +{ + unsigned int i; + int rc = 0; + + efx->max_tx_channels = efx->max_channels; + + for (i = 0; i < efx_channels(efx); i++) { + struct efx_channel *channel = efx_alloc_channel(efx, i); + + if (!channel) { + netif_err(efx, drv, efx->net_dev, + "out of memory allocating channel\n"); + rc = -ENOMEM; + break; + } + list_add_tail(&channel->list, &efx->channel_list); + if (i < efx->max_irqs) { + efx->msi_context[i].efx = efx; + efx->msi_context[i].index = i; + } + } + + return rc; +} + +static int efx_calc_queue_entries(struct efx_nic *efx) +{ + unsigned int entries; + + entries = efx->txq_entries; + if (!efx_is_guaranteed_ringsize(efx, entries)) { + efx->txq_entries = efx_best_guaranteed_ringsize(efx, entries, + false); + if (efx->txq_entries == entries) + return -ERANGE; + } + entries = efx->rxq_entries; + if (!efx_is_guaranteed_ringsize(efx, entries)) { + efx->rxq_entries = efx_best_guaranteed_ringsize(efx, entries, + false); + if (efx->rxq_entries == entries) + return -ERANGE; + } + + return 0; +} + +static int efx_probe_channel(struct efx_channel *channel) +{ + struct efx_tx_queue *tx_queue; + struct efx_rx_queue *rx_queue; + struct efx_nic *efx; + int rc; + + efx = channel->efx; + + netif_dbg(efx, probe, efx->net_dev, + "creating channel %d\n", channel->channel); + + rc = channel->type->pre_probe(channel); + if (rc) + goto fail; + + rc = efx_calc_queue_entries(efx); + if (rc) + return rc; + + rc = efx_probe_eventq(channel); + if (rc) + goto fail; + + efx_for_each_channel_tx_queue(tx_queue, channel) { + rc = efx_probe_tx_queue(tx_queue); + if (rc) + goto fail; + } + + efx_for_each_channel_rx_queue(rx_queue, channel) { + rc = efx_probe_rx_queue(rx_queue); + if (rc) + goto fail; + } + + channel->rx_list = NULL; + + return 0; + +fail: + efx_remove_channel(channel); + return rc; +} + +static void +efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len) +{ + struct efx_nic *efx = channel->efx; + const char *type; + int number; + + number = channel->channel; + + if (efx->n_xdp_channels && number >= efx_xdp_channel_offset(efx)) { + type = "-xdp"; + number -= efx_xdp_channel_offset(efx); + } else if (efx->tx_channel_offset == 0) { + type = ""; + } else if (number < efx->tx_channel_offset) { + type = "-rx"; + } else { + type = "-tx"; + number -= efx->tx_channel_offset; + } + snprintf(buf, len, "%s%s-%d", efx->name, type, number); +} + +void efx_set_channel_names(struct efx_nic *efx) +{ + struct efx_channel *channel; + + efx_for_each_channel(channel, efx) { + if (channel->channel >= efx->max_irqs) + continue; + + channel->type->get_name(channel, + efx->msi_context[channel->channel].name, + sizeof(efx->msi_context[0].name)); + } +} + +int efx_probe_channels(struct efx_nic *efx) +{ + struct efx_channel *channel; + int rc; + +#ifdef EFX_NOT_UPSTREAM + INIT_WORK(&efx->schedule_all_channels_work, efx_schedule_all_channels); +#endif + + /* Probe channels in reverse, so that any 'extra' channels + * use the start of the buffer table. This allows the traffic + * channels to be resized without moving them or wasting the + * entries before them. 
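+	 *
+	 * For example (hypothetical layout): with 8 traffic channels and
+	 * two extra channels numbered 8 and 9, probing 9, 8, 7, ... 0 puts
+	 * the extra channels' entries at the bottom of the buffer table,
+	 * so the traffic rings can later grow upwards without disturbing
+	 * them.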
+ */ + efx_for_each_channel_rev(channel, efx) { + rc = efx_probe_channel(channel); + if (rc) { + netif_err(efx, probe, efx->net_dev, + "failed to create channel %d\n", + channel->channel); + return rc; + } + } + efx_set_channel_names(efx); + + /* initialising debugfs is not a fatal error */ + efx_init_debugfs_channels(efx); + + return 0; +} + +static void efx_remove_tx_queue(struct efx_tx_queue *tx_queue) +{ + + netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev, + "removing TX queue %d\n", tx_queue->queue); + efx_nic_free_buffer(tx_queue->efx, &tx_queue->txd); +} + +static void efx_remove_channel(struct efx_channel *channel) +{ + struct efx_tx_queue *tx_queue; + struct efx_rx_queue *rx_queue; + + netif_dbg(channel->efx, drv, channel->efx->net_dev, + "destroy chan %d\n", channel->channel); + + efx_for_each_channel_rx_queue(rx_queue, channel) { + efx_remove_rx_queue(rx_queue); + efx_destroy_rx_queue(rx_queue); + } + efx_for_each_channel_tx_queue(tx_queue, channel) { + efx_remove_tx_queue(tx_queue); + efx_destroy_tx_queue(tx_queue); + } + efx_remove_eventq(channel); + channel->type->post_remove(channel); +} + +void efx_remove_channels(struct efx_nic *efx) +{ + struct efx_channel *channel; + + efx_fini_debugfs_channels(efx); + + efx_for_each_channel(channel, efx) + efx_remove_channel(channel); +} + +int efx_set_channels(struct efx_nic *efx) +{ + unsigned int i, j, hidden_tx_channels = 0; + struct efx_channel *channel; + int rc = 0; + + if (efx->xdp_tx_queue_count) { + EFX_WARN_ON_PARANOID(efx->xdp_tx_queues); + + /* Allocate array for XDP TX queue lookup. */ + efx->xdp_tx_queues = kcalloc(efx->xdp_tx_queue_count, + sizeof(*efx->xdp_tx_queues), + GFP_KERNEL); + if (!efx->xdp_tx_queues) + return -ENOMEM; + } + + /* set all channels to default type. + * will be overridden for extra_channels + */ + efx_for_each_channel(channel, efx) + channel->type = &efx_default_channel_type; + + /* Assign extra channels if possible in to extra_channel range */ + for (i = 0, j = efx_extra_channel_offset(efx); + i < EFX_MAX_EXTRA_CHANNELS; i++) { + if (!efx->extra_channel_type[i]) + continue; + + if (j < efx_extra_channel_offset(efx) + efx->n_extra_channels) { + efx_get_channel(efx, j++)->type = + efx->extra_channel_type[i]; + /* Other TX channels exposed to the kernel must be + * before hidden ones in the extra_channel_type array. + */ + WARN_ON(hidden_tx_channels && + !efx->extra_channel_type[i]->hide_tx); + hidden_tx_channels += + efx->extra_channel_type[i]->hide_tx || + hidden_tx_channels != 0; + } else { + efx->extra_channel_type[i]->handle_no_channel(efx); + } + } + + /* We need to mark which channels really have RX and TX + * queues, and adjust the TX queue numbers if we have separate + * RX-only and TX-only channels. 
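+	 *
+	 * E.g. (an assumed split, for illustration): with four RX-only
+	 * channels and tx_channel_offset == 4, channels 0-3 get an RX
+	 * queue and core_index below, while channel 4 gets TX queues
+	 * numbered from queue_base 0 in efx_set_channel_tx().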
+ */ + efx_for_each_channel(channel, efx) { + if (channel->channel < efx_rx_channels(efx)) + rc = efx_set_channel_rx(efx, channel); + else + channel->rx_queue.core_index = -1; + if (rc) + return rc; + + if (efx_channel_is_xdp_tx(channel)) + rc = efx_set_channel_xdp(efx, channel); + else if (efx_channel_has_tx_queues(channel)) + rc = efx_set_channel_tx(efx, channel); + + if (rc) + return rc; + } + + netif_set_real_num_tx_queues(efx->net_dev, efx_tx_channels(efx) - + hidden_tx_channels); + netif_set_real_num_rx_queues(efx->net_dev, efx_rx_channels(efx)); + + return 0; +} + +void efx_unset_channels(struct efx_nic *efx) +{ + struct efx_channel *channel; + + kfree(efx->xdp_tx_queues); + efx->xdp_tx_queues = NULL; + + efx_for_each_channel(channel, efx) { + channel->tx_queue_count = 0; + kfree(channel->tx_queues); + channel->tx_queues = NULL; + } +} + +/************* + * START/STOP + *************/ +static int efx_soft_enable_interrupts(struct efx_nic *efx) +{ + struct efx_channel *channel, *end_channel; + int rc; + + if (efx->state == STATE_DISABLED) + return -ENETDOWN; + + efx->irq_soft_enabled = true; + smp_wmb(); + + efx_for_each_channel(channel, efx) { + if (!channel->type->keep_eventq) { + rc = efx_init_eventq(channel); + if (rc) + goto fail; + } + efx_start_eventq(channel); + } + + efx_mcdi_mode_event(efx); + + return 0; +fail: + end_channel = channel; + efx_for_each_channel(channel, efx) { + if (channel == end_channel) + break; + efx_stop_eventq(channel); + if (!channel->type->keep_eventq) + efx_fini_eventq(channel); + } + + return rc; +} + +static void efx_soft_disable_interrupts(struct efx_nic *efx) +{ + struct efx_channel *channel; + + if (efx->state == STATE_DISABLED) + return; + + efx_mcdi_mode_poll(efx); + + efx->irq_soft_enabled = false; + smp_wmb(); + + efx_for_each_channel(channel, efx) { + if (channel->irq) + synchronize_irq(channel->irq); + + efx_stop_eventq(channel); + if (!channel->type->keep_eventq) + efx_fini_eventq(channel); + } +} + +int efx_enable_interrupts(struct efx_nic *efx) +{ + struct efx_channel *channel, *end_channel; + int rc; + + if (efx->state == STATE_DISABLED) + return -ENETDOWN; + + efx->type->irq_enable_master(efx); + + efx_for_each_channel(channel, efx) { + if (channel->type->keep_eventq) { + rc = efx_init_eventq(channel); + if (rc) + goto fail; + } + } + + rc = efx_soft_enable_interrupts(efx); + if (rc) + goto fail; + + return 0; + +fail: + end_channel = channel; + efx_for_each_channel(channel, efx) { + if (channel == end_channel) + break; + if (channel->type->keep_eventq) + efx_fini_eventq(channel); + } + + efx->type->irq_disable_non_ev(efx); + + return rc; +} + +void efx_disable_interrupts(struct efx_nic *efx) +{ + struct efx_channel *channel; + + efx_soft_disable_interrupts(efx); + + efx_for_each_channel(channel, efx) { + if (channel->type->keep_eventq) + efx_fini_eventq(channel); + } + + efx->type->irq_disable_non_ev(efx); +} + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_SOCK) +#if defined(CONFIG_XDP_SOCKETS) +int efx_channel_start_xsk_queue(struct efx_channel *channel) +{ + struct efx_rx_queue *rx_queue; + struct efx_tx_queue *tx_queue; + int rc = 0; + + tx_queue = efx_channel_get_xsk_tx_queue(channel); + if (tx_queue) { + rc = efx_init_tx_queue(tx_queue); + if (rc) + goto fail; + atomic_inc(&channel->efx->active_queues); + } + + efx_for_each_channel_rx_queue(rx_queue, channel) { + rc = efx_init_rx_queue(rx_queue); + if (rc) + goto fail; + atomic_inc(&channel->efx->active_queues); + efx_stop_eventq(channel); + rx_queue->refill_enabled = 
true; + efx_fast_push_rx_descriptors(rx_queue, false); + efx_start_eventq(channel); + } + + return 0; +fail: + return rc; +} + +int efx_channel_stop_xsk_queue(struct efx_channel *channel) +{ + struct efx_rx_queue *rx_queue; + struct efx_tx_queue *tx_queue; + unsigned int active_queues; + int pending; + + if (efx_channel_has_rx_queue(channel)) { + /* Stop RX refill */ + efx_for_each_channel_rx_queue(rx_queue, channel) + rx_queue->refill_enabled = false; + } + + /* RX packet processing is pipelined, so wait for the + * NAPI handler to complete. + */ + efx_stop_eventq(channel); + efx_start_eventq(channel); + + active_queues = atomic_read(&channel->efx->active_queues); + efx_for_each_channel_rx_queue(rx_queue, channel) { + efx_mcdi_rx_fini(rx_queue); + active_queues--; + } + tx_queue = efx_channel_get_xsk_tx_queue(channel); + if (tx_queue) { + efx_mcdi_tx_fini(tx_queue); + active_queues--; + } + wait_event_timeout(channel->efx->flush_wq, + atomic_read(&channel->efx->active_queues) == + active_queues, + msecs_to_jiffies(EFX_MAX_FLUSH_TIME)); + pending = atomic_read(&channel->efx->active_queues); + if (pending != active_queues) { + netif_err(channel->efx, hw, channel->efx->net_dev, + "failed to flush %d queues\n", + pending); + return -ETIMEDOUT; + } + efx_for_each_channel_rx_queue(rx_queue, channel) + efx_fini_rx_queue(rx_queue); + if (tx_queue) + efx_fini_tx_queue(tx_queue); + + return 0; +} +#endif +#endif + +int efx_start_channels(struct efx_nic *efx) +{ + struct efx_channel *channel; + struct efx_tx_queue *tx_queue; + struct efx_rx_queue *rx_queue; + int rc; + + efx_for_each_channel(channel, efx) { + if (channel->type->start) { + rc = channel->type->start(channel); + if (rc) + return rc; + } + efx_for_each_channel_tx_queue(tx_queue, channel) { + rc = efx_init_tx_queue(tx_queue); + if (rc) + return rc; + atomic_inc(&efx->active_queues); + } + + efx_for_each_channel_rx_queue(rx_queue, channel) { + rc = efx_init_rx_queue(rx_queue); + if (rc) + return rc; + atomic_inc(&efx->active_queues); + efx_stop_eventq(channel); + rx_queue->refill_enabled = true; + efx_fast_push_rx_descriptors(rx_queue, false); + efx_start_eventq(channel); + } + } + + return 0; +} + +void efx_stop_channels(struct efx_nic *efx) +{ + struct efx_channel *channel; + struct efx_tx_queue *tx_queue; + struct efx_rx_queue *rx_queue; + struct efx_mcdi_iface *mcdi = NULL; + int rc = 0; + + /* Stop special channels and RX refill. + * The channel's stop has to be called first, since it might wait + * for a sentinel RX to indicate the channel has fully drained. + */ + efx_for_each_channel(channel, efx) { + if (channel->type->stop) + channel->type->stop(channel); + efx_for_each_channel_rx_queue(rx_queue, channel) + rx_queue->refill_enabled = false; + } + + efx_for_each_channel(channel, efx) { + /* RX packet processing is pipelined, so wait for the + * NAPI handler to complete. At least event queue 0 + * might be kept active by non-data events, so don't + * use napi_synchronize() but actually disable NAPI + * temporarily. 
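+		 *
+		 * The efx_stop_eventq()/efx_start_eventq() pair below acts
+		 * as a barrier: napi_disable() inside efx_stop_eventq()
+		 * waits for any in-flight poll to finish, so nothing is
+		 * left in the RX pipeline once it returns.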
+ */ + if (efx_channel_has_rx_queue(channel)) { + efx_stop_eventq(channel); + efx_start_eventq(channel); + } + } + + if (efx->type->fini_dmaq) + rc = efx->type->fini_dmaq(efx); + else + rc = efx_fini_dmaq(efx); + if (rc) { + if (efx->mcdi) + mcdi = efx_mcdi(efx); + if (mcdi && mcdi->mode == MCDI_MODE_FAIL) { + netif_info(efx, drv, efx->net_dev, + "Ignoring flush queue failure as we're in MCDI_MODE_FAIL\n"); + } else { + netif_err(efx, drv, efx->net_dev, + "Recover or disable due to flush queue failure\n"); + efx_schedule_reset(efx, RESET_TYPE_RECOVER_OR_ALL); + } + } else { + netif_dbg(efx, drv, efx->net_dev, + "successfully flushed all queues\n"); + } + +#if defined(EFX_NOT_UPSTREAM) + cancel_work_sync(&efx->schedule_all_channels_work); +#endif + + efx_for_each_channel(channel, efx) { + efx_for_each_channel_rx_queue(rx_queue, channel) + efx_fini_rx_queue(rx_queue); + efx_for_each_channel_tx_queue(tx_queue, channel) + efx_fini_tx_queue(tx_queue); + } +} + +/************************************************************************** + * + * NAPI interface + * + **************************************************************************/ +/* Process channel's event queue + * + * This function is responsible for processing the event queue of a + * single channel. The caller must guarantee that this function will + * never be concurrently called more than once on the same channel, + * though different channels may be being processed concurrently. + */ +static int efx_process_channel(struct efx_channel *channel, int budget) +{ + int spent; + struct efx_nic *efx = channel->efx; + struct efx_tx_queue *tx_queue; + struct netdev_queue *core_txq; +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_SKB__LIST) + struct list_head rx_list; +#else + struct sk_buff_head rx_list; +#endif + unsigned int fill_level; + + if (unlikely(!channel->enabled)) + return 0; + + /* Notify the TX path that we are going to ping + * doorbell, do this early to maximise benefit + */ + channel->holdoff_doorbell = channel->tx_coalesce_doorbell; + + efx_for_each_channel_tx_queue(tx_queue, channel) { + tx_queue->pkts_compl = 0; + tx_queue->bytes_compl = 0; + } + + /* Prepare the batch receive list */ + EFX_WARN_ON_PARANOID(channel->rx_list != NULL); +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_SKB__LIST) + INIT_LIST_HEAD(&rx_list); +#else + __skb_queue_head_init(&rx_list); +#endif + channel->rx_list = &rx_list; + + spent = efx_nic_process_eventq(channel, budget); + if (spent && efx_channel_has_rx_queue(channel)) { + struct efx_rx_queue *rx_queue = + efx_channel_get_rx_queue(channel); + + efx_rx_flush_packet(rx_queue); +#if defined(EFX_NOT_UPSTREAM) && defined(EFX_USE_SFC_LRO) + efx_ssr_end_of_burst(rx_queue); +#endif + efx_fast_push_rx_descriptors(rx_queue, true); + } + + /* Receive any packets we queued up */ + netif_receive_skb_list(channel->rx_list); + channel->rx_list = NULL; + + /* See if we need to ping doorbell if there is + * anything on the send queue that NIC has not been + * informed of. + */ + while (unlikely(channel->holdoff_doorbell)) { + unsigned int unsent = 0; + + /* write_count and notify_count can be updated on the Tx path + * so use READ_ONCE() in this loop to avoid optimizations that + * would avoid reading the latest values from memory. 
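+		 *
+		 * Illustrative interleaving: if the xmit thread bumps
+		 * write_count just after the notify pass below, the recount
+		 * of unsent work (after the smp_mb()) still sees
+		 * write_count != notify_count and re-arms holdoff_doorbell
+		 * for another iteration.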
+		 */
+
+		/* For holdoff_doorbell to be set at all, the xmit thread
+		 * must know we are running, and will therefore leave any
+		 * unsent packets' doorbell to us.
+		 */
+		efx_for_each_channel_tx_queue(tx_queue, channel) {
+			if (READ_ONCE(tx_queue->notify_count) !=
+			    READ_ONCE(tx_queue->write_count)) {
+				efx_nic_notify_tx_desc(tx_queue);
+				++tx_queue->doorbell_notify_comp;
+			}
+		}
+		channel->holdoff_doorbell = false;
+		smp_mb();
+		efx_for_each_channel_tx_queue(tx_queue, channel)
+			unsent += READ_ONCE(tx_queue->write_count) -
+				  READ_ONCE(tx_queue->notify_count);
+		if (unsent) {
+			channel->holdoff_doorbell = true;
+
+			/* Ensure that all reads and writes are complete to
+			 * allow the latest values to be read in the next
+			 * iteration, and that the Tx path sees holdoff_doorbell
+			 * true so there are no further updates at this point.
+			 */
+			smp_mb();
+		}
+	}
+
+	/* Update BQL */
+	smp_rmb(); /* ensure netdev_tx_sent updates are seen */
+	efx_for_each_channel_tx_queue(tx_queue, channel)
+		if (tx_queue->bytes_compl && tx_queue->core_txq)
+			netdev_tx_completed_queue(tx_queue->core_txq,
+						  tx_queue->pkts_compl,
+						  tx_queue->bytes_compl);
+
+	if (channel->tx_queues) {
+		core_txq = channel->tx_queues[0].core_txq;
+		fill_level = efx_channel_tx_fill_level(channel);
+
+		/* See if we need to restart the netif queue. */
+		if (fill_level <= efx->txq_wake_thresh &&
+		    likely(core_txq) &&
+		    unlikely(netif_tx_queue_stopped(core_txq)) &&
+		    likely(efx->port_enabled) &&
+		    likely(netif_device_present(efx->net_dev)))
+			netif_tx_wake_queue(core_txq);
+	}
+
+	return spent;
+}
+
+static void efx_update_irq_mod(struct efx_nic *efx, struct efx_channel *channel)
+{
+	int step = efx->irq_mod_step_us;
+
+	if (channel->irq_mod_score < irq_adapt_low_thresh) {
+		if (channel->irq_moderation_us > step) {
+			channel->irq_moderation_us -= step;
+			efx->type->push_irq_moderation(channel);
+		}
+	} else if (channel->irq_mod_score > irq_adapt_high_thresh) {
+		if (channel->irq_moderation_us <
+		    efx->irq_rx_moderation_us) {
+			channel->irq_moderation_us += step;
+			efx->type->push_irq_moderation(channel);
+		}
+	}
+
+	channel->irq_count = 0;
+	channel->irq_mod_score = 0;
+}
+
+/* NAPI poll handler
+ *
+ * NAPI guarantees serialisation of polls of the same device, which
+ * provides the guarantee required by efx_process_channel().
+ */
+static int efx_poll(struct napi_struct *napi, int budget)
+{
+	struct efx_channel *channel =
+		container_of(napi, struct efx_channel, napi_str);
+	struct efx_nic *efx = channel->efx;
+#ifdef CONFIG_RFS_ACCEL
+	unsigned int time;
+#endif
+	int spent;
+
+#ifdef EFX_NOT_UPSTREAM
+#ifdef SFC_NAPI_DEBUG
+	channel->last_napi_poll_jiffies = jiffies;
+#endif
+#endif
+
+#if defined(EFX_USE_KCOMPAT) && defined(EFX_WANT_DRIVER_BUSY_POLL)
+	/* Worst case, this poll waits for as many packets to be processed
+	 * as it would have processed itself without busy polling. If there
+	 * is nothing further to process, this poll is quick. If more
+	 * packets arrive after a busy poll completes, this poll handles
+	 * them. This active wait is simpler than synchronising busy poll
+	 * and NAPI, which would mean scheduling NAPI from the busy-poll
+	 * code.
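+	 *
+	 * Concretely: efx_busy_poll() below holds poll_lock while it calls
+	 * efx_process_channel(), so taking the lock here simply queues this
+	 * NAPI poll behind any busy poll already in progress instead of
+	 * processing events concurrently with it.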
+ */ + spin_lock(&channel->poll_lock); +#endif + + netif_vdbg(efx, intr, efx->net_dev, + "channel %d NAPI poll executing on CPU %d\n", + channel->channel, raw_smp_processor_id()); + + spent = efx_process_channel(channel, budget); + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_REDIR) + xdp_do_flush(); +#endif + +#ifdef EFX_NOT_UPSTREAM +#ifdef SFC_NAPI_DEBUG + channel->last_complete_done = false; +#endif +#endif + + if (spent < budget) { + if (efx_channel_has_rx_queue(channel) && + efx->irq_rx_adaptive && + unlikely(++channel->irq_count == irq_adapt_irqs)) { + efx_update_irq_mod(efx, channel); + } + +#ifdef CONFIG_RFS_ACCEL + /* Perhaps expire some ARFS filters */ + time = jiffies - channel->rfs_last_expiry; + /* Would our quota be >= 20? */ + if (channel->rfs_filter_count * time >= 600 * HZ) + mod_delayed_work(system_wq, &channel->filter_work, 0); +#endif + + /* There is no race here; although napi_disable() will + * only wait for napi_complete(), this isn't a problem + * since efx_nic_eventq_read_ack() will have no effect if + * interrupts have already been disabled. + */ + if (napi_complete_done(napi, spent)) { +#ifdef EFX_NOT_UPSTREAM +#ifdef SFC_NAPI_DEBUG + channel->last_complete_done = true; +#endif +#endif + efx_nic_eventq_read_ack(channel); + } + } + +#if defined(EFX_USE_KCOMPAT) && defined(EFX_WANT_DRIVER_BUSY_POLL) + spin_unlock(&channel->poll_lock); +#endif + +#ifdef EFX_NOT_UPSTREAM +#ifdef SFC_NAPI_DEBUG + channel->last_napi_poll_end_jiffies = jiffies; + channel->last_budget = budget; + channel->last_spent = spent; +#endif +#endif + + return spent; +} + +static int efx_init_napi_channel(struct efx_channel *channel) +{ + struct efx_nic *efx = channel->efx; + + channel->napi_dev = efx->net_dev; + netif_napi_add_weight(channel->napi_dev, &channel->napi_str, + efx_poll, napi_weight); +#if defined(EFX_USE_KCOMPAT) && defined(EFX_WANT_DRIVER_BUSY_POLL) + efx_channel_busy_poll_init(channel); +#endif + + return 0; +} + +int efx_init_napi(struct efx_nic *efx) +{ + struct efx_channel *channel; + int rc; + + efx_for_each_channel(channel, efx) { + rc = efx_init_napi_channel(channel); + if (rc) + return rc; + } + + return 0; +} + +static void efx_fini_napi_channel(struct efx_channel *channel) +{ + if (channel->napi_dev) + netif_napi_del(&channel->napi_str); + channel->napi_dev = NULL; +} + +void efx_fini_napi(struct efx_nic *efx) +{ + struct efx_channel *channel; + + efx_for_each_channel(channel, efx) + efx_fini_napi_channel(channel); +} + +#ifdef EFX_NOT_UPSTREAM +static void efx_schedule_all_channels(struct work_struct *data) +{ + struct efx_nic *efx = container_of(data, struct efx_nic, + schedule_all_channels_work); + struct efx_channel *channel; + + efx_for_each_channel(channel, efx) { + local_bh_disable(); + efx_schedule_channel(channel); + local_bh_enable(); + } +} + +void efx_pause_napi(struct efx_nic *efx) +{ + struct efx_channel *channel; + + if (efx->state != STATE_NET_UP) + return; + + ASSERT_RTNL(); + netif_dbg(efx, drv, efx->net_dev, "Pausing NAPI\n"); + + efx_for_each_channel(channel, efx) { + napi_disable(&channel->napi_str); +#if defined(EFX_USE_KCOMPAT) && defined(EFX_WANT_DRIVER_BUSY_POLL) + while (!efx_channel_disable(channel)) + usleep_range(1000, 20000); + +#endif + } +} + +int efx_resume_napi(struct efx_nic *efx) +{ + struct efx_channel *channel; + + if (efx->state != STATE_NET_UP) + return 0; + + ASSERT_RTNL(); + netif_dbg(efx, drv, efx->net_dev, "Resuming NAPI\n"); + + efx_for_each_channel(channel, efx) { +#if defined(EFX_USE_KCOMPAT) && 
defined(EFX_WANT_DRIVER_BUSY_POLL) + efx_channel_enable(channel); +#endif + napi_enable(&channel->napi_str); + } + + /* Schedule all channels in case we've + * missed something whilst paused. + */ + schedule_work(&efx->schedule_all_channels_work); + + return 0; +} +#endif + +#if defined(EFX_USE_KCOMPAT) && defined(EFX_WANT_NDO_POLL_CONTROLLER) +#ifdef CONFIG_NET_POLL_CONTROLLER + +/* Although in the common case interrupts will be disabled, this is not + * guaranteed. However, all our work happens inside the NAPI callback, + * so no locking is required. + */ +void efx_netpoll(struct net_device *net_dev) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + struct efx_channel *channel; + + efx_for_each_channel(channel, efx) + efx_schedule_channel(channel); +} + +#endif +#endif + +#if defined(EFX_USE_KCOMPAT) && defined(EFX_WANT_DRIVER_BUSY_POLL) +#ifdef CONFIG_NET_RX_BUSY_POLL +int efx_busy_poll(struct napi_struct *napi) +{ + struct efx_channel *channel = + container_of(napi, struct efx_channel, napi_str); + unsigned long old_rx_packets = 0, rx_packets = 0; + struct efx_nic *efx = channel->efx; + struct efx_rx_queue *rx_queue; + int budget = 4; + + if (!netif_running(efx->net_dev)) + return LL_FLUSH_FAILED; + + /* Tell about busy poll in progress if napi channel enabled */ + if (!efx_channel_try_lock_poll(channel)) + return LL_FLUSH_BUSY; + + /* Protect against napi poll scheduled in any core */ + spin_lock_bh(&channel->poll_lock); + + efx_for_each_channel_rx_queue(rx_queue, channel) + old_rx_packets += rx_queue->rx_packets; + efx_process_channel(channel, budget); + + efx_for_each_channel_rx_queue(rx_queue, channel) + rx_packets += rx_queue->rx_packets; + rx_packets -= old_rx_packets; + + /* Tell code disabling napi that busy poll is done */ + efx_channel_unlock_poll(channel); + + /* Allow napi poll to go on if waiting and net_rx_action softirq to + * execute in this core */ + spin_unlock_bh(&channel->poll_lock); + + return rx_packets; +} +#endif +#endif + +/*************** + * Housekeeping + ***************/ + +static int efx_channel_dummy_op_int(struct efx_channel *channel) +{ + return 0; +} + +void efx_channel_dummy_op_void(struct efx_channel *channel) +{ +} + +static const struct efx_channel_type efx_default_channel_type = { + .pre_probe = efx_channel_dummy_op_int, + .post_remove = efx_channel_dummy_op_void, + .get_name = efx_get_channel_name, + .keep_eventq = false, +}; + +int efx_channels_init_module(void) +{ +#if defined(EFX_NOT_UPSTREAM) && defined(CONFIG_SMP) + rss_cpu_usage = kcalloc(NR_CPUS, sizeof(rss_cpu_usage[0]), GFP_KERNEL); + if (!rss_cpu_usage) + return -ENOMEM; +#endif + return 0; +} + +void efx_channels_fini_module(void) +{ +#if defined(EFX_NOT_UPSTREAM) && defined(CONFIG_SMP) + kfree(rss_cpu_usage); + rss_cpu_usage = NULL; +#endif +} + diff --git a/src/drivers/net/ethernet/sfc/efx_channels.h b/src/drivers/net/ethernet/sfc/efx_channels.h new file mode 100644 index 0000000..9684f94 --- /dev/null +++ b/src/drivers/net/ethernet/sfc/efx_channels.h @@ -0,0 +1,76 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2019 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
+ */ + +#ifndef EFX_CHANNELS_H +#define EFX_CHANNELS_H + +extern unsigned int efx_interrupt_mode; +#if !defined(EFX_NOT_UPSTREAM) +extern unsigned int rss_cpus; +#endif + +void efx_channel_dummy_op_void(struct efx_channel *channel); + +int efx_channels_init_module(void); +void efx_channels_fini_module(void); + +int efx_init_interrupts(struct efx_nic *efx); +void efx_fini_interrupts(struct efx_nic *efx); +int efx_probe_interrupts(struct efx_nic *efx); +void efx_remove_interrupts(struct efx_nic *efx); +int efx_enable_interrupts(struct efx_nic *efx); +void efx_disable_interrupts(struct efx_nic *efx); + +void efx_register_irq_notifiers(struct efx_nic *efx); +void efx_unregister_irq_notifiers(struct efx_nic *efx); + +void efx_set_interrupt_affinity(struct efx_nic *efx); +void efx_clear_interrupt_affinity(struct efx_nic *efx); + +int efx_init_channels(struct efx_nic *efx); +int efx_probe_channels(struct efx_nic *efx); +int efx_set_channels(struct efx_nic *efx); +void efx_unset_channels(struct efx_nic *efx); +void efx_remove_channels(struct efx_nic *efx); +void efx_fini_channels(struct efx_nic *efx); + +void efx_set_channel_names(struct efx_nic *efx); + +int efx_init_napi(struct efx_nic *efx); +void efx_fini_napi(struct efx_nic *efx); +#ifdef EFX_NOT_UPSTREAM +/* Only used from driverlink. */ +void efx_pause_napi(struct efx_nic *efx); +int efx_resume_napi(struct efx_nic *efx); +#endif + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_SOCK) +#if defined(CONFIG_XDP_SOCKETS) +int efx_channel_start_xsk_queue(struct efx_channel *channel); +int efx_channel_stop_xsk_queue(struct efx_channel *channel); +#endif +#endif +int efx_start_channels(struct efx_nic *efx); +void efx_stop_channels(struct efx_nic *efx); +void efx_start_eventq(struct efx_channel *channel); +void efx_stop_eventq(struct efx_channel *channel); + +#if defined(EFX_USE_KCOMPAT) && defined(EFX_WANT_NDO_POLL_CONTROLLER) +#ifdef CONFIG_NET_POLL_CONTROLLER +void efx_netpoll(struct net_device *net_dev); +#endif +#endif +#if defined(EFX_USE_KCOMPAT) && defined(EFX_WANT_DRIVER_BUSY_POLL) +#ifdef CONFIG_NET_RX_BUSY_POLL +int efx_busy_poll(struct napi_struct *napi); +#endif +#endif + +#endif + diff --git a/src/drivers/net/ethernet/sfc/efx_common.c b/src/drivers/net/ethernet/sfc/efx_common.c new file mode 100644 index 0000000..89c39b4 --- /dev/null +++ b/src/drivers/net/ethernet/sfc/efx_common.c @@ -0,0 +1,2785 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2018 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
+ */
+
+#include "net_driver.h"
+/* NB: the <...> names of the next four kernel headers, and of
+ * <net/udp_tunnel.h> further down, were lost in this copy of the patch;
+ * they are a best-effort reconstruction and may not match the original
+ * source exactly.
+ */
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/gfp.h>
+#include <net/gre.h>
+#include "efx_common.h"
+#include "efx_channels.h"
+#include "efx.h"
+#include "mcdi.h"
+#include "debugfs.h"
+#include "mcdi_port_common.h"
+#include "selftest.h"
+#include "rx_common.h"
+#include "tx_common.h"
+#include "nic.h"
+#include "io.h"
+#include "tc.h"
+#include "dump.h"
+#include "mcdi_pcol.h"
+#include "xdp.h"
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_NDO_ADD_VXLAN_PORT) || defined(EFX_HAVE_NDO_UDP_TUNNEL_ADD) || defined(EFX_HAVE_UDP_TUNNEL_NIC_INFO)
+#include <net/udp_tunnel.h>
+#endif
+#ifdef EFX_NOT_UPSTREAM
+#include "ef100_dump.h"
+#endif
+#ifdef CONFIG_SFC_VDPA
+#include "ef100_vdpa.h"
+#endif
+
+static unsigned int debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
+			     NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
+			     NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
+			     NETIF_MSG_TX_ERR | NETIF_MSG_HW);
+module_param(debug, uint, 0);
+MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
+
+/* This is the time (in ms) between invocations of the hardware
+ * monitor.
+ */
+unsigned int monitor_interval_ms = 200;
+module_param(monitor_interval_ms, uint, 0644);
+MODULE_PARM_DESC(monitor_interval_ms, "Bus state test interval in ms");
+
+#ifdef EFX_NOT_UPSTREAM
+static bool phy_power_follows_link;
+module_param(phy_power_follows_link, bool, 0444);
+MODULE_PARM_DESC(phy_power_follows_link,
+		 "Power down phy when interface is administratively down");
+
+static bool link_down_on_reset;
+module_param(link_down_on_reset, bool, 0444);
+MODULE_PARM_DESC(link_down_on_reset,
+		 "Signal the link down and up on resets");
+#endif
+
+/* How often and how many times to poll for a reset while waiting for a
+ * BIST that another function started to complete.
+ */
+#define BIST_WAIT_DELAY_MS	100
+#define BIST_WAIT_DELAY_COUNT	300
+
+/* Default stats update time */
+#define STATS_PERIOD_MS_DEFAULT 1000
+
+static const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
+static const char *const efx_reset_type_names[] = {
+	[RESET_TYPE_INVISIBLE]		= "INVISIBLE",
+	[RESET_TYPE_ALL]		= "ALL",
+	[RESET_TYPE_RECOVER_OR_ALL]	= "RECOVER_OR_ALL",
+	[RESET_TYPE_WORLD]		= "WORLD",
+	[RESET_TYPE_DATAPATH]		= "DATAPATH",
+	[RESET_TYPE_MC_BIST]		= "MC_BIST",
+	[RESET_TYPE_DISABLE]		= "DISABLE",
+	[RESET_TYPE_TX_WATCHDOG]	= "TX_WATCHDOG",
+	[RESET_TYPE_INT_ERROR]		= "INT_ERROR",
+	[RESET_TYPE_DMA_ERROR]		= "DMA_ERROR",
+	[RESET_TYPE_TX_SKIP]		= "TX_SKIP",
+	[RESET_TYPE_MC_FAILURE]		= "MC_FAILURE",
+	[RESET_TYPE_MCDI_TIMEOUT]	= "MCDI_TIMEOUT (FLR)",
+};
+#define RESET_TYPE(type) \
+	STRING_TABLE_LOOKUP(type, efx_reset_type)
+
+/* Loopback mode names (see LOOPBACK_MODE()) */
+const unsigned int efx_loopback_mode_max = LOOPBACK_MAX;
+const char *const efx_loopback_mode_names[] = {
+	[LOOPBACK_NONE]		= "NONE",
+	[LOOPBACK_DATA]		= "DATAPATH",
+	[LOOPBACK_GMAC]		= "GMAC",
+	[LOOPBACK_XGMII]	= "XGMII",
+	[LOOPBACK_XGXS]		= "XGXS",
+	[LOOPBACK_XAUI]		= "XAUI",
+	[LOOPBACK_GMII]		= "GMII",
+	[LOOPBACK_SGMII]	= "SGMII",
+	[LOOPBACK_XGBR]		= "XGBR",
+	[LOOPBACK_XFI]		= "XFI",
+	[LOOPBACK_XAUI_FAR]	= "XAUI_FAR",
+	[LOOPBACK_GMII_FAR]	= "GMII_FAR",
+	[LOOPBACK_SGMII_FAR]	= "SGMII_FAR",
+	[LOOPBACK_XFI_FAR]	= "XFI_FAR",
+	[LOOPBACK_GPHY]		= "GPHY",
+	[LOOPBACK_PHYXS]	= "PHYXS",
+	[LOOPBACK_PCS]		= "PCS",
+	[LOOPBACK_PMAPMD]	= "PMA/PMD",
+	[LOOPBACK_XPORT]	= "XPORT",
+	[LOOPBACK_XGMII_WS]	= "XGMII_WS",
+	[LOOPBACK_XAUI_WS]	= "XAUI_WS",
+	[LOOPBACK_XAUI_WS_FAR]	= "XAUI_WS_FAR",
+	[LOOPBACK_XAUI_WS_NEAR]	= "XAUI_WS_NEAR",
+	[LOOPBACK_GMII_WS]	= "GMII_WS",
+	[LOOPBACK_XFI_WS]	=
"XFI_WS", + [LOOPBACK_XFI_WS_FAR] = "XFI_WS_FAR", + [LOOPBACK_PHYXS_WS] = "PHYXS_WS", +}; + +#ifdef EFX_NOT_UPSTREAM +#if IS_MODULE(CONFIG_SFC_DRIVERLINK) +static struct efx_dl_ops efx_driverlink_ops; +#endif +#endif + +/* Reset workqueue. If any NIC has a hardware failure then a reset will be + * queued onto this work queue. This is not a per-nic work queue, because + * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised. + */ +static struct workqueue_struct *reset_workqueue; + +int efx_create_reset_workqueue(void) +{ + reset_workqueue = create_singlethread_workqueue("sfc_reset"); + if (!reset_workqueue) { + printk(KERN_ERR "Failed to create reset workqueue\n"); + return -ENOMEM; + } + + return 0; +} + +void efx_queue_reset_work(struct efx_nic *efx) +{ + queue_work(reset_workqueue, &efx->reset_work); +} + +void efx_flush_reset_workqueue(struct efx_nic *efx) +{ + cancel_work_sync(&efx->reset_work); +} + +void efx_destroy_reset_workqueue(void) +{ + if (reset_workqueue) { + destroy_workqueue(reset_workqueue); + reset_workqueue = NULL; + } +} + +/* We assume that efx->type->reconfigure_mac will always try to sync RX + * filters and therefore needs to read-lock the filter table against freeing + */ +int efx_mac_reconfigure(struct efx_nic *efx, bool mtu_only) +{ + int rc = 0; + + WARN_ON(!mutex_is_locked(&efx->mac_lock)); + + if (efx->type->reconfigure_mac) { + down_read(&efx->filter_sem); + rc = efx->type->reconfigure_mac(efx, mtu_only); + up_read(&efx->filter_sem); + } + return rc; +} + +/* Asynchronous work item for changing MAC promiscuity and multicast + * hash. Avoid a drain/rx_ingress enable by reconfiguring the current + * MAC directly. */ +static void efx_mac_work(struct work_struct *data) +{ + struct efx_nic *efx = container_of(data, struct efx_nic, mac_work); + + mutex_lock(&efx->mac_lock); + if (efx->port_enabled) + (void)efx_mac_reconfigure(efx, false); + mutex_unlock(&efx->mac_lock); +} + +int efx_set_mac_address(struct net_device *net_dev, void *data) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + struct sockaddr *addr = data; + const u8 *new_addr = addr->sa_data; + u8 old_addr[6]; + int rc; + + if (!is_valid_ether_addr(new_addr)) { + netif_err(efx, drv, efx->net_dev, + "invalid ethernet MAC address requested: %pM\n", + new_addr); + return -EADDRNOTAVAIL; + } + + ether_addr_copy(old_addr, net_dev->dev_addr); /* save old address */ + eth_hw_addr_set(net_dev, new_addr); + + if (efx->type->set_mac_address) { + rc = efx->type->set_mac_address(efx); + if (rc) { + eth_hw_addr_set(net_dev, old_addr); + return rc; + } + } + + /* Reconfigure the MAC */ + mutex_lock(&efx->mac_lock); + (void)efx_mac_reconfigure(efx, false); + mutex_unlock(&efx->mac_lock); + + return 0; +} + +/* Context: netif_addr_lock held, BHs disabled. */ +void efx_set_rx_mode(struct net_device *net_dev) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + + if (efx->port_enabled) + schedule_work(&efx->mac_work); + + /* Otherwise efx_start_port() will do this */ +} + +/* This ensures that the kernel is kept informed (via + * netif_carrier_on/off) of the link status, and also maintains the + * link status's stop on the port's TX queue. + */ +void efx_link_status_changed(struct efx_nic *efx) +{ + struct efx_link_state *link_state = &efx->link_state; + bool kernel_link_up; + + /* SFC Bug 5356: A net_dev notifier is registered, so we must ensure + * that no events are triggered between unregister_netdev() and the + * driver unloading. 
A more general condition is that NETDEV_CHANGE
+ * can only be generated between NETDEV_UP and NETDEV_DOWN
+ */
+	if ((efx->type->revision != EFX_REV_EF100) &&
+	    !netif_running(efx->net_dev))
+		return;
+
+	kernel_link_up = netif_carrier_ok(efx->net_dev);
+
+	if (link_state->up != kernel_link_up) {
+		efx->n_link_state_changes++;
+
+		if (link_state->up)
+			netif_carrier_on(efx->net_dev);
+		else
+			netif_carrier_off(efx->net_dev);
+	}
+
+	/* Status message for kernel log */
+	if (!net_ratelimit())
+		return;
+
+	if (link_state->up) {
+		netif_info(efx, link, efx->net_dev,
+			   "link up at %uMbps %s-duplex (MTU %d)%s%s%s\n",
+			   link_state->speed, link_state->fd ? "full" : "half",
+			   efx->net_dev->mtu,
+			   (efx->loopback_mode ? " [" : ""),
+			   (efx->loopback_mode ? LOOPBACK_MODE(efx) : ""),
+			   (efx->loopback_mode ? " LOOPBACK]" : ""));
+
+		if ((efx->wanted_fc & EFX_FC_AUTO) &&
+		    (efx->wanted_fc & EFX_FC_TX) &&
+		    (~efx->link_state.fc & EFX_FC_TX))
+			/* There is no way to report this state
+			 * through ethtool, so print this information
+			 * to the kernel log
+			 */
+			netif_info(efx, link, efx->net_dev,
+				   "Flow control autonegotiated tx OFF (wanted ON)\n");
+	} else if (kernel_link_up) {
+		netif_info(efx, link, efx->net_dev, "link down%s\n",
+			   (efx->phy_mode & PHY_MODE_LOW_POWER) ? " [OFF]" : "");
+	}
+}
+
+/* Context: process, rtnl_lock() held. */
+int efx_change_mtu(struct net_device *net_dev, int new_mtu)
+{
+	struct efx_nic *efx = efx_netdev_priv(net_dev);
+	int old_mtu;
+	int rc;
+
+	rc = efx_check_disabled(efx);
+	if (rc)
+		return rc;
+
+#if defined(EFX_USE_KCOMPAT) && !(defined(EFX_HAVE_NETDEV_MTU_LIMITS) || defined(EFX_HAVE_NETDEV_EXT_MTU_LIMITS))
+	if (new_mtu > (efx_nic_rev(efx) == EFX_REV_EF100 ? EFX_100_MAX_MTU :
+							   EFX_MAX_MTU)) {
+		netif_err(efx, drv, efx->net_dev,
+			  "Requested MTU of %d too big (max: %d)\n",
+			  new_mtu,
+			  efx_nic_rev(efx) == EFX_REV_EF100 ?
+				EFX_100_MAX_MTU : EFX_MAX_MTU);
+		return -EINVAL;
+	}
+	if (new_mtu < EFX_MIN_MTU) {
+		netif_err(efx, drv, efx->net_dev,
+			  "Requested MTU of %d too small (min: %d)\n",
+			  new_mtu, EFX_MIN_MTU);
+		return -EINVAL;
+	}
+#endif
+
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP)
+	if (rtnl_dereference(efx->xdp_prog) &&
+	    new_mtu > efx_xdp_max_mtu(efx)) {
+		netif_err(efx, drv, efx->net_dev,
+			  "Requested MTU of %d too big for XDP (max: %d)\n",
+			  new_mtu, efx_xdp_max_mtu(efx));
+		return -EINVAL;
+	}
+#endif
+
+	netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
+
+	efx_device_detach_sync(efx);
+	efx_stop_all(efx);
+
+	mutex_lock(&efx->mac_lock);
+	old_mtu = net_dev->mtu;
+	net_dev->mtu = new_mtu;
+	rc = efx_mac_reconfigure(efx, true);
+	if (rc)
+		net_dev->mtu = old_mtu;
+	mutex_unlock(&efx->mac_lock);
+
+	if (efx->state == STATE_NET_UP)
+		efx_start_all(efx);
+	efx_device_attach_if_not_resetting(efx);
+
+	/* Reinsert filters: the previous call to efx_mac_reconfigure(),
+	 * made while the device was detached, will not have done so.
+	 */
+	if (!rc && (efx->state != STATE_DISABLED) && !efx->reset_pending) {
+		mutex_lock(&efx->mac_lock);
+		rc = efx_mac_reconfigure(efx, false);
+		mutex_unlock(&efx->mac_lock);
+		netif_info(efx, link, net_dev, "MTU changed to %d\n", new_mtu);
+	}
+	return rc;
+}
+
+#ifdef EFX_NOT_UPSTREAM
+#if IS_MODULE(CONFIG_SFC_DRIVERLINK)
+/* Is Driverlink supported on this device? */
+bool efx_dl_supported(struct efx_nic *efx)
+{
+	if (!efx->mcdi || !efx->dl_nic.ops)
+		return false;
+
+	/* VI spreading will confuse driverlink clients, so prevent
+	 * registration if it's in use.
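+	 *
+	 * Whether VI spreading is active comes from the firmware's
+	 * MC_CMD_DRV_ATTACH response; the test below just reads the cached
+	 * copy of those flags in efx->mcdi->fn_flags.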
+ */ + if (efx->mcdi->fn_flags & + (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TX_ONLY_VI_SPREADING_ENABLED)) { + netif_info(efx, drv, efx->net_dev, + "Driverlink disabled: VI spreading in use\n"); + return false; + } + + return true; +} +#endif +#endif + +/************************************************************************** + * + * Hardware monitor + * + **************************************************************************/ + +/* Run periodically off the general workqueue */ +static void efx_monitor(struct work_struct *data) +{ + struct efx_nic *efx = container_of(data, struct efx_nic, + monitor_work.work); + + netif_vdbg(efx, timer, efx->net_dev, + "hardware monitor executing on CPU %d\n", + raw_smp_processor_id()); + WARN_ON_ONCE(!efx->type->monitor); + + /* If the mac_lock is already held then it is likely a port + * reconfiguration is already in place, which will likely do + * most of the work of monitor() anyway. + */ + if (mutex_trylock(&efx->mac_lock)) { + if (efx->port_enabled && efx->type->monitor) + efx->type->monitor(efx); + mutex_unlock(&efx->mac_lock); + } + + efx_start_monitor(efx); +} + +void efx_start_monitor(struct efx_nic *efx) +{ + if (efx->type->monitor != NULL) + schedule_delayed_work(&efx->monitor_work, + msecs_to_jiffies(monitor_interval_ms)); +} + +/************************************************************************** + * + * Device reset and suspend + * + **************************************************************************/ +/* Channels are shutdown and reinitialised whilst the NIC is running + * to propagate configuration changes (mtu, checksum offload), or + * to clear hardware error conditions + */ +static int efx_start_datapath(struct efx_nic *efx) +{ + bool old_rx_scatter = efx->rx_scatter; + size_t rx_page_buf_step; + int rc = 0; +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_NETDEV_FEATURES_CHANGE) + netdev_features_t old_features = efx->net_dev->features; +#endif +#if defined(EFX_NOT_UPSTREAM) && defined(EFX_USE_SFC_LRO) +#if defined(EFX_HAVE_NDO_SET_FEATURES) || defined(EFX_HAVE_EXT_NDO_SET_FEATURES) + bool old_lro_available = efx->lro_available; +#endif + + efx->lro_available = true; +#endif + + /* Calculate the rx buffer allocation parameters required to + * support the current MTU, including padding for header + * alignment and overruns. 
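+	 *
+	 * Worked example with made-up numbers: for a 1500 byte MTU, a
+	 * 14 byte RX prefix and 16 bytes of rx_buffer_padding, rx_dma_len
+	 * comes to roughly 14 + 1518 + 16 bytes. That buffer step fits in
+	 * a single page, so the first branch below keeps rx_buffer_order
+	 * at 0.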
+ */ + efx->rx_dma_len = (efx->rx_prefix_size + + EFX_MAX_FRAME_LEN(efx->net_dev->mtu) + + efx->type->rx_buffer_padding); + rx_page_buf_step = efx_rx_buffer_step(efx); + if (rx_page_buf_step <= PAGE_SIZE) { + efx->rx_scatter = efx->type->always_rx_scatter; + efx->rx_buffer_order = 0; + } else if (efx->type->can_rx_scatter) { + BUILD_BUG_ON(EFX_RX_USR_BUF_SIZE % L1_CACHE_BYTES); +#if !defined(EFX_NOT_UPSTREAM) || defined(EFX_RX_PAGE_SHARE) + BUILD_BUG_ON(sizeof(struct efx_rx_page_state) + + 2 * ALIGN(NET_IP_ALIGN + EFX_RX_USR_BUF_SIZE, + EFX_RX_BUF_ALIGNMENT) + + XDP_PACKET_HEADROOM > PAGE_SIZE); +#endif + efx->rx_scatter = true; + efx->rx_dma_len = EFX_RX_USR_BUF_SIZE; + efx->rx_buffer_order = 0; +#if defined(EFX_NOT_UPSTREAM) && defined(EFX_USE_SFC_LRO) + efx->lro_available = false; +#endif + } else { + efx->rx_scatter = false; + efx->rx_buffer_order = get_order(rx_page_buf_step); + } + + efx_rx_config_page_split(efx); + if (efx->rx_buffer_order) + netif_dbg(efx, drv, efx->net_dev, + "RX buf len=%u; page order=%u batch=%u\n", + efx->rx_dma_len, efx->rx_buffer_order, + efx->rx_pages_per_batch); + else + netif_dbg(efx, drv, efx->net_dev, + "RX buf len=%u step=%u bpp=%u; page batch=%u\n", + efx->rx_dma_len, efx->rx_page_buf_step, + efx->rx_bufs_per_page, efx->rx_pages_per_batch); + +#if defined(EFX_NOT_UPSTREAM) && defined(EFX_USE_SFC_LRO) +#if defined(EFX_HAVE_NDO_SET_FEATURES) || defined(EFX_HAVE_EXT_NDO_SET_FEATURES) + /* This will call back into efx_fix_features() */ + if (efx->lro_available != old_lro_available) + netdev_update_features(efx->net_dev); +#elif defined(NETIF_F_LRO) + if (!efx->lro_available && efx->net_dev->features & NETIF_F_LRO) + efx->net_dev->features &= ~NETIF_F_LRO; +#else + if (!efx->lro_available) + efx->lro_enabled = false; +#endif +#endif + + /* Restore previously fixed features in hw_features and remove + * features which are fixed now. + */ + efx_add_hw_features(efx, efx->net_dev->features); +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_NETDEV_HW_FEATURES) + efx->net_dev->hw_features &= ~efx->fixed_features; +#elif defined(EFX_HAVE_NETDEV_EXTENDED_HW_FEATURES) + netdev_extended(efx->net_dev)->hw_features &= ~efx->fixed_features; +#else + efx->hw_features &= ~efx->fixed_features; +#endif + efx->net_dev->features |= efx->fixed_features; +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_NETDEV_FEATURES_CHANGE) + if (efx->net_dev->features != old_features) + netdev_features_change(efx->net_dev); +#endif + + /* RX filters may also have scatter-enabled flags */ + if ((efx->rx_scatter != old_rx_scatter) && + efx->type->filter_update_rx_scatter) + efx->type->filter_update_rx_scatter(efx); + + if (efx->type->filter_table_up) + rc = efx->type->filter_table_up(efx); + if (rc) + goto fail; + + /* We must keep at least one descriptor in a TX ring empty. + * We could avoid this when the queue size does not exactly + * match the hardware ring size, but it's not that important. + * Therefore we stop the queue when one more skb might fill + * the ring completely. We wake it when half way back to + * empty. 
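+	 *
+	 * E.g. (illustrative numbers): with txq_entries == 1024 and
+	 * tx_max_skb_descs() reporting a worst case of 18 descriptors per
+	 * skb, txq_stop_thresh below comes to 1006 and txq_wake_thresh
+	 * to 503.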
+ */ + efx->txq_stop_thresh = efx->txq_entries - + efx->type->tx_max_skb_descs(efx); + efx->txq_wake_thresh = efx->txq_stop_thresh / 2; + + rc = efx_start_channels(efx); + if (rc) + goto fail; + + efx_ptp_start_datapath(efx); + + efx->datapath_started = true; + + /* trigger MAC reconfiguration again to ensure filters get inserted */ + mutex_lock(&efx->mac_lock); + efx_mac_reconfigure(efx, false); + mutex_unlock(&efx->mac_lock); + + if (netif_device_present(efx->net_dev)) + netif_tx_wake_all_queues(efx->net_dev); + + return 0; +fail: + efx_stop_channels(efx); + + if (efx->type->filter_table_down) + efx->type->filter_table_down(efx); + return rc; +} + +static void efx_stop_datapath(struct efx_nic *efx) +{ + EFX_ASSERT_RESET_SERIALISED(efx); + BUG_ON(efx->port_enabled); + + efx->datapath_started = false; + + efx_ptp_stop_datapath(efx); + + efx_stop_channels(efx); + + if (efx->type->filter_table_down) + efx->type->filter_table_down(efx); +} + +static void efx_start_port(struct efx_nic *efx) +{ + netif_dbg(efx, ifup, efx->net_dev, "start port\n"); + BUG_ON(efx->port_enabled); + + mutex_lock(&efx->mac_lock); + efx->port_enabled = true; + /* Always come out of low power unless we're forced off */ + if (!efx->phy_power_force_off) + efx->phy_mode &= ~PHY_MODE_LOW_POWER; + __efx_reconfigure_port(efx); + + /* Ensure MAC ingress/egress is enabled */ + (void)efx_mac_reconfigure(efx, false); + + mutex_unlock(&efx->mac_lock); +} + +/* Cancel work for MAC reconfiguration, periodic hardware monitoring + * and the async self-test, wait for them to finish and prevent them + * being scheduled again. This doesn't cover online resets, which + * should only be cancelled when removing the device. + */ +static void efx_stop_port(struct efx_nic *efx) +{ + netif_dbg(efx, ifdown, efx->net_dev, "stop port\n"); + + EFX_ASSERT_RESET_SERIALISED(efx); + + mutex_lock(&efx->mac_lock); + efx->port_enabled = false; + if (efx->phy_power_follows_link) + efx->phy_mode |= PHY_MODE_LOW_POWER; + __efx_reconfigure_port(efx); + mutex_unlock(&efx->mac_lock); + + netif_addr_lock_bh(efx->net_dev); + netif_addr_unlock_bh(efx->net_dev); + + cancel_delayed_work_sync(&efx->monitor_work); + efx_selftest_async_cancel(efx); + cancel_work_sync(&efx->mac_work); +} + +/* If the interface is supposed to be running but is not, start + * the hardware and software data path, regular activity for the port + * (MAC statistics, link polling, etc.) and schedule the port to be + * reconfigured. Interrupts must already be enabled. This function + * is safe to call multiple times, so long as the NIC is not disabled. + * Requires the RTNL lock. + */ +int efx_start_all(struct efx_nic *efx) +{ + int rc; + + EFX_ASSERT_RESET_SERIALISED(efx); + + /* Check that it is appropriate to restart the interface. 
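+	 * The checks below look at efx->state, efx->port_enabled and
+	 * efx->reset_pending.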
All + * of these flags are safe to read under just the rtnl lock + */ + if (efx->state == STATE_DISABLED || efx->port_enabled || + efx->reset_pending) + return 0; + + efx_start_port(efx); + rc = efx_start_datapath(efx); + if (rc) { + efx_stop_port(efx); + return rc; + } + + /* Start the hardware monitor if there is one */ + efx_start_monitor(efx); + + /* Link state detection is normally event-driven; we have + * to poll now because we could have missed a change + */ + mutex_lock(&efx->mac_lock); + if (efx_mcdi_phy_poll(efx)) + efx_link_status_changed(efx); + mutex_unlock(&efx->mac_lock); + + if (efx->type->start_stats) + efx->type->start_stats(efx); + else + efx_mcdi_mac_start_stats(efx); + efx->type->pull_stats(efx); + efx->type->update_stats(efx, NULL, NULL); + /* release stats_lock obtained in update_stats */ + spin_unlock_bh(&efx->stats_lock); + + return rc; +} + +/* Quiesce the hardware and software data path, and regular activity + * for the port without bringing the link down. Safe to call multiple + * times with the NIC in almost any state, but interrupts should be + * enabled. Requires the RTNL lock. + */ +void efx_stop_all(struct efx_nic *efx) +{ + EFX_ASSERT_RESET_SERIALISED(efx); + + /* port_enabled can be read safely under the rtnl lock */ + if (!efx->port_enabled) + return; + + /* update stats before we go down so we can accurately count + * rx_nodesc_drops + */ + efx->type->update_stats(efx, NULL, NULL); + /* release stats_lock obtained in update_stats */ + spin_unlock_bh(&efx->stats_lock); + if (efx->type->stop_stats) + efx->type->stop_stats(efx); + else + efx_mcdi_mac_stop_stats(efx); + efx_stop_port(efx); + + /* Stop the kernel transmit interface. This is only valid if + * the device is stopped or detached; otherwise the watchdog + * may fire immediately. + */ + WARN_ON(netif_running(efx->net_dev) && + netif_device_present(efx->net_dev)); + netif_tx_disable(efx->net_dev); + + efx_stop_datapath(efx); +} + +/* Context: process, dev_base_lock or RTNL held, non-blocking. 
*/ +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_NETDEV_STATS64_VOID) +void efx_net_stats(struct net_device *net_dev, struct rtnl_link_stats64 *stats) +#elif defined(EFX_USE_NETDEV_STATS64) +struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev, + struct rtnl_link_stats64 *stats) +#else +struct net_device_stats *efx_net_stats(struct net_device *net_dev) +#endif +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_USE_NETDEV_STATS64) +#if defined(EFX_USE_NETDEV_STATS) + struct net_device_stats *stats = &net_dev->stats; +#else + struct net_device_stats *stats = &efx->stats; +#endif +#endif + + efx->type->update_stats(efx, NULL, stats); + /* release stats_lock obtained in update_stats */ + spin_unlock_bh(&efx->stats_lock); +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_NETDEV_STATS64_VOID) + + return stats; +#endif +} + +void efx_reset_sw_stats(struct efx_nic *efx) +{ + struct efx_channel *channel; + struct efx_tx_queue *tx_queue; + + efx_for_each_channel(channel, efx) { + if (efx_channel_has_rx_queue(channel)) + efx_channel_get_rx_queue(channel)->rx_packets = 0; + efx_for_each_channel_tx_queue(tx_queue, channel) { + tx_queue->tx_packets = 0; + tx_queue->pushes = 0; + tx_queue->pio_packets = 0; + tx_queue->cb_packets = 0; + } + } +} + +static int efx_msecs_since(unsigned long event_jiffies) +{ + if (!event_jiffies) + return -1; + return jiffies_to_msecs(jiffies - event_jiffies); +} + +void efx_print_stopped_queues(struct efx_nic *efx) +{ + struct efx_channel *channel; + + netif_info(efx, tx_err, efx->net_dev, + "TX queue timeout: printing stopped queue data\n"); + + efx_for_each_channel(channel, efx) { + struct netdev_queue *core_txq = channel->tx_queues[0].core_txq; + long unsigned int busy_poll_state = 0xffff; + struct efx_tx_queue *tx_queue; + + if (!efx_channel_has_tx_queues(channel)) + continue; + + /* The netdev watchdog must have triggered on a queue that had + * stopped transmitting, so ignore other queues. + */ + if (!netif_xmit_stopped(core_txq)) + continue; + +#if defined(EFX_USE_KCOMPAT) && defined(EFX_WANT_DRIVER_BUSY_POLL) +#ifdef CONFIG_NET_RX_BUSY_POLL + busy_poll_state = channel->busy_poll_state; +#endif +#endif + netif_info(efx, tx_err, efx->net_dev, + "Channel %u: %senabled Busy poll %#lx NAPI state %#lx Doorbell %sheld %scoalescing Xmit state %#lx\n", + channel->channel, (channel->enabled ? "" : "NOT "), + busy_poll_state, channel->napi_str.state, + (channel->holdoff_doorbell ? "" : "not "), + (channel->tx_coalesce_doorbell ? "" : "not "), + core_txq->state); + efx_for_each_channel_tx_queue(tx_queue, channel) + netif_info(efx, tx_err, efx->net_dev, + "Tx queue: insert %u, write %u (%dms), read %u (%dms)\n", + tx_queue->insert_count, + tx_queue->write_count, + efx_msecs_since(tx_queue->notify_jiffies), + tx_queue->read_count, + efx_msecs_since(tx_queue->read_jiffies)); + } +} + +/* Push loopback/power/transmit disable settings to the PHY, and reconfigure + * the MAC appropriately. All other PHY configuration changes are pushed + * through efx_mcdi_phy_set_settings(), and pushed asynchronously to the MAC + * through efx_monitor(). 
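+ * For example, entering an internal (MAC-level) loopback sets
+ * PHY_MODE_TX_DISABLED below so that the PHY stops transmitting for
+ * the duration of the loopback.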
+ * + * Callers must hold the mac_lock + */ +int __efx_reconfigure_port(struct efx_nic *efx) +{ + enum efx_phy_mode phy_mode; + int rc = 0; + + WARN_ON(!mutex_is_locked(&efx->mac_lock)); + + /* Disable PHY transmit in mac level loopbacks */ + phy_mode = efx->phy_mode; + if (LOOPBACK_INTERNAL(efx)) + efx->phy_mode |= PHY_MODE_TX_DISABLED; + else + efx->phy_mode &= ~PHY_MODE_TX_DISABLED; + + if (efx->type->reconfigure_port) + rc = efx->type->reconfigure_port(efx); + + if (rc) + efx->phy_mode = phy_mode; + + return rc; +} + +/* Reinitialise the MAC to pick up new PHY settings, even if the port is + * disabled. + */ +int efx_reconfigure_port(struct efx_nic *efx) +{ + int rc; + + EFX_ASSERT_RESET_SERIALISED(efx); + + mutex_lock(&efx->mac_lock); + rc = __efx_reconfigure_port(efx); + mutex_unlock(&efx->mac_lock); + + return rc; +} + +static void efx_wait_for_bist_end(struct efx_nic *efx) +{ + int i; + + for (i = 0; i < BIST_WAIT_DELAY_COUNT; ++i) { + if (efx->type->mcdi_poll_bist_end(efx)) + goto out; + msleep(BIST_WAIT_DELAY_MS); + } + + netif_err(efx, drv, efx->net_dev, "Warning: No MC reboot after BIST mode\n"); +out: + /* Either way unset the BIST flag. If we found no reboot we probably + * won't recover, but we should try. + */ + efx->mc_bist_for_other_fn = false; + efx->reset_count = 0; +} + +/* Try recovery mechanisms. + * For now only EEH is supported. + * Returns 0 if the recovery mechanisms are unsuccessful. + * Returns a non-zero value otherwise. + */ +int efx_try_recovery(struct efx_nic *efx) +{ +#ifdef CONFIG_EEH + /* A PCI error can occur and not be seen by EEH because nothing + * happens on the PCI bus. In this case the driver may fail and + * schedule a 'recover or reset', leading to this recovery handler. + * Manually call the eeh failure check function. + */ +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_EEH_DEV_CHECK_FAILURE) + struct eeh_dev *eehdev = pci_dev_to_eeh_dev(efx->pci_dev); + + if (eeh_dev_check_failure(eehdev)) { +#else + struct pci_dev *pcidev = efx->pci_dev; + struct device_node *dn = pci_device_to_OF_node(pcidev); + + if (eeh_dn_check_failure(dn, pcidev)) { +#endif + /* The EEH mechanisms will handle the error and reset + * the device if necessary. + */ + return 1; + } +#endif + return 0; +} + +/* Tears down the entire software state and most of the hardware state + * before reset. + */ +void efx_reset_down(struct efx_nic *efx, enum reset_type method) +{ + EFX_ASSERT_RESET_SERIALISED(efx); + + if (method == RESET_TYPE_MCDI_TIMEOUT) + efx->type->prepare_flr(efx); + + efx_stop_all(efx); + efx_disable_interrupts(efx); + + mutex_lock(&efx->mac_lock); + down_write(&efx->filter_sem); + mutex_lock(&efx->rss_lock); + if (efx->type->fini) + efx->type->fini(efx); +} + +/* Context: netif_tx_lock held, BHs disabled. */ +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_NDO_TX_TIMEOUT_TXQUEUE) +void efx_watchdog(struct net_device *net_dev, unsigned int txqueue) +#else +void efx_watchdog(struct net_device *net_dev) +#endif +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + + efx_print_stopped_queues(efx); + netif_err(efx, tx_err, efx->net_dev, + "TX stuck with port_enabled=%d: resetting channels\n", + efx->port_enabled); + + efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG); +} + +/* This function will always ensure that the locks acquired in + * efx_reset_down() are released. A failure return code indicates + * that we were unable to reinitialise the hardware, and the + * driver should be disabled. 
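+ * (Lock pairing: mac_lock, filter_sem and rss_lock are taken in
+ * efx_reset_down() and released on every path out of this function.)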
If ok is false, then the rx and tx + * engines are not restarted, pending a RESET_DISABLE. + */ +int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok) +{ + u32 attach_flags; + int rc = 0; + + EFX_ASSERT_RESET_SERIALISED(efx); + + if (method == RESET_TYPE_MCDI_TIMEOUT) + efx->type->finish_flr(efx); + + efx_mcdi_post_reset(efx); + + if (efx_net_allocated(efx->state) && efx->type->init) + rc = efx->type->init(efx); + if (rc) { + if (rc != -EAGAIN) + netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n"); + goto fail; + } + + if (!ok) + goto fail; + + if (efx->port_initialized && method != RESET_TYPE_INVISIBLE && + method != RESET_TYPE_DATAPATH) { + rc = efx_mcdi_port_reconfigure(efx); + if (rc && rc != -EPERM) + netif_err(efx, drv, efx->net_dev, + "could not restore PHY settings\n"); + } + + if (efx_net_allocated(efx->state)) { + rc = efx_enable_interrupts(efx); + if (rc) + goto fail; + } + +#ifdef CONFIG_SFC_DUMP + rc = efx_dump_reset(efx); + if (rc) + goto fail; +#endif + + /* If the MC has reset then re-attach the driver to restore the + * firmware state. Note that although there are some ways we can get + * here that aren't the result of an MC reset, it is still safe to + * perform the attach operation. + */ + rc = efx_mcdi_drv_attach(efx, MC_CMD_FW_DONT_CARE, &attach_flags, true); + if (rc) /* not fatal: the PF will still work */ + netif_warn(efx, probe, efx->net_dev, + "failed to re-attach driver to MCPU rc=%d, PPS & NCSI may malfunction\n", + rc); + else + /* Store new attach flags. */ + efx->mcdi->fn_flags = attach_flags; + + if (efx->type->vswitching_restore) { + rc = efx->type->vswitching_restore(efx); + if (rc) /* not fatal; the PF will still work fine */ + netif_warn(efx, probe, efx->net_dev, + "failed to restore vswitching rc=%d, VFs may not function\n", + rc); + } + + if (efx->type->rx_restore_rss_contexts) + efx->type->rx_restore_rss_contexts(efx); + mutex_unlock(&efx->rss_lock); + if (efx->state == STATE_NET_UP) + efx->type->filter_table_restore(efx); + up_write(&efx->filter_sem); + mutex_unlock(&efx->mac_lock); + + if (efx->state == STATE_NET_UP) { + rc = efx_start_all(efx); + if (rc) { + efx->port_initialized = false; + return rc; + } + } else { + efx->port_initialized = false; + } + + if (efx->type->udp_tnl_push_ports) + efx->type->udp_tnl_push_ports(efx); + +#if defined(EFX_NOT_UPSTREAM) && defined(CONFIG_SFC_PTP) + /* If PPS possible re-enable after MC reset */ + if (efx->type->pps_reset) + if (efx->type->pps_reset(efx)) + netif_warn(efx, drv, efx->net_dev, "failed to reset PPS"); +#endif + + return 0; + +fail: + efx->port_initialized = false; + + mutex_unlock(&efx->rss_lock); + up_write(&efx->filter_sem); + mutex_unlock(&efx->mac_lock); + + return rc; +} + +static int efx_do_reset(struct efx_nic *efx, enum reset_type method) +{ + int rc = efx->type->reset(efx, method); + + if (rc) { + netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n"); + return rc; + } + + /* Clear flags for the scopes we covered. We assume the NIC and + * driver are now quiescent so that there is no race here. + */ + if (method < RESET_TYPE_MAX_METHOD) + efx->reset_pending &= -(1 << (method + 1)); + else /* it doesn't fit into the well-ordered scope hierarchy */ + __clear_bit(method, &efx->reset_pending); + + /* Reinitialise bus-mastering, which may have been turned off before + * the reset was scheduled. This is still appropriate, even in the + * RESET_TYPE_DISABLE since this driver generally assumes the hardware + * can respond to requests. 
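+ *
+ * (A worked example of the reset_pending update above, taking the mask
+ *  as 32-bit: for method == 3, -(1 << 4) == 0xfffffff0, so the "&="
+ *  clears bits 0-3, i.e. the reset just performed plus any pending
+ *  narrower-scoped resets that it subsumed.)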
+ */ + pci_set_master(efx->pci_dev); + + return 0; +} + +/* Do post-processing after the reset. + * Returns whether the reset was completed and the device is back up. + */ +static int efx_reset_complete(struct efx_nic *efx, enum reset_type method, + bool retry, bool disabled) +{ + if (disabled) { + netif_err(efx, drv, efx->net_dev, "has been disabled\n"); + efx->state = STATE_DISABLED; + return false; + } + + if (retry) { + netif_info(efx, drv, efx->net_dev, "scheduling retry of reset\n"); + if (method == RESET_TYPE_MC_BIST) + method = RESET_TYPE_DATAPATH; + efx_schedule_reset(efx, method); + return false; + } + + netif_dbg(efx, drv, efx->net_dev, "reset complete\n"); + return true; +} + +/* Reset the NIC using the specified method. Note that the reset may + * fail, in which case the card will be left in an unusable state. + * + * Caller must hold the rtnl_lock. + */ +int efx_reset(struct efx_nic *efx, enum reset_type method) +{ + int rc, rc2 = 0; + bool disabled, retry; + bool link_up = efx->link_state.up; + + ASSERT_RTNL(); + +#ifdef EFX_NOT_UPSTREAM +#if IS_MODULE(CONFIG_SFC_DRIVERLINK) + /* Notify driverlink clients of imminent reset then serialise + * against other driver operations + */ + efx_dl_reset_suspend(&efx->dl_nic); +#endif +#endif + + netif_info(efx, drv, efx->net_dev, "resetting (%s)\n", + RESET_TYPE(method)); + + efx_device_detach_sync(efx); + /* efx_reset_down() grabs locks that prevent recovery on EF100. + * EF100 reset is handled in the efx_nic_type callback below. + */ + if (efx_nic_rev(efx) != EFX_REV_EF100) + efx_reset_down(efx, method); + + if (efx->link_down_on_reset && link_up) { + efx->link_state.up = false; + efx_link_status_changed(efx); + } + + rc = efx_do_reset(efx, method); + + retry = rc == -EAGAIN; + + /* Leave device stopped if necessary */ + disabled = (rc && !retry) || + method == RESET_TYPE_DISABLE; + + if (efx->link_down_on_reset && link_up) { + efx->link_state.up = true; + efx_link_status_changed(efx); + } + + if (efx_nic_rev(efx) != EFX_REV_EF100) + rc2 = efx_reset_up(efx, method, !disabled && !retry); + if (rc2) { + if (rc2 == -EAGAIN) + retry = true; + else + disabled = true; + if (!rc) + rc = rc2; + } + + if (disabled) + dev_close(efx->net_dev); + + if (efx_reset_complete(efx, method, retry, disabled)) { + efx_device_attach_if_not_resetting(efx); + + /* Now reset is finished, reconfigure MAC + * again to ensure filters that weren't inserted while + * resetting are now. + */ + mutex_lock(&efx->mac_lock); + (void)efx_mac_reconfigure(efx, false); + mutex_unlock(&efx->mac_lock); + + if (PCI_FUNC(efx->pci_dev->devfn) == 0) + efx_mcdi_log_puts(efx, efx_reset_type_names[method]); + } +#ifdef EFX_NOT_UPSTREAM +#if IS_MODULE(CONFIG_SFC_DRIVERLINK) + efx_dl_reset_resume(&efx->dl_nic, !disabled); +#endif + +#endif + return rc; +} + +#ifdef CONFIG_SFC_VDPA +static void efx_reset_vdpa(struct efx_nic *efx, enum reset_type method) +{ + bool retry, disabled; + int rc; + + WARN_ON(method != RESET_TYPE_DISABLE); + method = RESET_TYPE_DISABLE; + + pr_info("%s: VDPA resetting (%s)\n", __func__, RESET_TYPE(method)); + + rc = efx_do_reset(efx, method); + + retry = rc == -EAGAIN; + + /* Leave device stopped if necessary */ + disabled = (rc && !retry) || method == RESET_TYPE_DISABLE; + + if (disabled) + ef100_vdpa_reset(&efx->vdpa_nic->vdpa_dev); + + efx_reset_complete(efx, method, retry, disabled); +} +#endif + +/* The worker thread exists so that code that cannot sleep can + * schedule a reset for later. 
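+ * A sketch of the flow implemented below (illustrative, using an
+ * arbitrary reset reason):
+ *
+ *	atomic context:   efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
+ *	                    -> set_bit(method, &efx->reset_pending);
+ *	                    -> efx_queue_reset_work(efx);
+ *	process context:  efx_reset_work() reads reset_pending, takes
+ *	                    rtnl_lock() and calls efx_reset().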
+ */
+static void efx_reset_work(struct work_struct *data)
+{
+	struct efx_nic *efx = container_of(data, struct efx_nic, reset_work);
+	unsigned long pending;
+	enum reset_type method;
+
+	pending = READ_ONCE(efx->reset_pending);
+	method = fls(pending) - 1;
+
+#ifdef EFX_NOT_UPSTREAM
+	if (method == RESET_TYPE_TX_WATCHDOG &&
+	    efx_nic_rev(efx) == EFX_REV_EF100) {
+		WARN_ON(efx->state == STATE_VDPA);
+		efx_ef100_dump_napi_debug(efx);
+		efx_ef100_dump_sss_regs(efx);
+	}
+#endif
+
+	if (method == RESET_TYPE_MC_BIST)
+		efx_wait_for_bist_end(efx);
+
+	if (method == RESET_TYPE_RECOVER_OR_ALL &&
+	    efx_try_recovery(efx))
+		return;
+
+	if (!pending)
+		return;
+
+	rtnl_lock();
+
+	/* We checked the state in efx_schedule_reset() but it may
+	 * have changed by now. Now that we have the RTNL lock,
+	 * it cannot change again.
+	 */
+	if (efx_net_active(efx->state))
+		(void)efx_reset(efx, method);
+#ifdef CONFIG_SFC_VDPA
+	else if (efx->state == STATE_VDPA)
+		efx_reset_vdpa(efx, method);
+#endif
+
+	rtnl_unlock();
+}
+
+void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
+{
+	static const unsigned int RESETS_BEFORE_DISABLE = 5;
+	unsigned long last_reset = READ_ONCE(efx->last_reset);
+	enum reset_type method;
+
+	if (efx_recovering(efx->state)) {
+		netif_dbg(efx, drv, efx->net_dev,
+			  "recovering: skip scheduling %s reset\n",
+			  RESET_TYPE(type));
+		return;
+	}
+
+	method = efx->type->map_reset_reason(type);
+
+	/* Check that we are scheduling a new reset and, if so, that we are
+	 * not scheduling resets too often.
+	 * This part is not atomically safe, but it is ultimately a
+	 * heuristic: if we lose increments due to dirty writes that is
+	 * fine, and if we falsely increment or reset due to an
+	 * inconsistent read of last_reset on a 32-bit arch that is also OK.
+	 */
+	if (time_after(jiffies, last_reset + HZ))
+		efx->reset_count = 0;
+	if (!(efx->reset_pending & (1 << method)) &&
+	    ++efx->reset_count > RESETS_BEFORE_DISABLE) {
+		method = RESET_TYPE_DISABLE;
+		netif_err(efx, drv, efx->net_dev,
+			  "too many resets, scheduling %s\n",
+			  RESET_TYPE(method));
+	}
+
+	/* This is not atomic-safe either, but there is a high chance that
+	 * this code will catch the just-set current_reset value. If we
+	 * fail once, we will pick up the value next time.
+	 */
+	if (time_after(efx->current_reset, last_reset))
+		efx->last_reset = efx->current_reset;
+
+	if (method == type)
+		netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
+			  RESET_TYPE(method));
+	else
+		netif_dbg(efx, drv, efx->net_dev,
+			  "scheduling %s reset for %s\n",
+			  RESET_TYPE(method), RESET_TYPE(type));
+
+	set_bit(method, &efx->reset_pending);
+
+	if (efx->state != STATE_VDPA) {
+		/* If we're not READY then just leave the flags set as the cue
+		 * to abort probing or reschedule the reset later.
+		 */
+		if (!efx_net_active(READ_ONCE(efx->state)))
+			return;
+
+		/* Stop the periodic statistics monitor to prevent it firing
+		 * while we are handling the reset.
+		 */
+		efx_mac_stats_reset_monitor(efx);
+
+		/* We might be resetting because things are broken, so detach
+		 * so we don't get things like the TX watchdog firing while we
+		 * wait to reset.
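+		 * (netif_device_detach() clears the device's "present"
+		 *  flag; the core TX watchdog in dev_watchdog() only fires
+		 *  for devices that are present, running and carrier-up, so
+		 *  detaching quiesces it until the device is re-attached
+		 *  once the reset completes.)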
+		 */
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_TC_OFFLOAD)
+		if (efx->type->detach_reps)
+			efx->type->detach_reps(efx);
+#endif
+		netif_device_detach(efx->net_dev);
+	}
+
+	efx_queue_reset_work(efx);
+}
+
+/**************************************************************************
+ *
+ * Dummy NIC operations
+ *
+ * Can be used for some unimplemented operations.
+ * Needed so that all function pointers are valid and do not have to be
+ * tested before use.
+ *
+ **************************************************************************/
+int efx_port_dummy_op_int(struct efx_nic *efx)
+{
+	return 0;
+}
+void efx_port_dummy_op_void(struct efx_nic *efx) {}
+
+/**************************************************************************
+ *
+ * Data housekeeping
+ *
+ **************************************************************************/
+void efx_fini_struct(struct efx_nic *efx)
+{
+	efx_filter_clear_ntuple(efx);
+#ifdef CONFIG_RFS_ACCEL
+	kfree(efx->rps_hash_table);
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+	mutex_destroy(&efx->debugfs_symlink_mutex);
+#endif
+}
+
+bool efx_is_supported_ringsize(struct efx_nic *efx, unsigned long entries)
+{
+	if (efx->supported_bitmap)
+		return !!(efx->supported_bitmap & entries);
+
+	return true;
+}
+
+bool efx_is_guaranteed_ringsize(struct efx_nic *efx, unsigned long entries)
+{
+	/* supported_bitmap != 0 -- MCDI v10 ring size supported */
+	if (efx->supported_bitmap)
+		return !!(efx->guaranteed_bitmap & entries);
+
+	return true;
+}
+
+/* Tries to find the nearest available guaranteed ring size.
+ * Of the guaranteed ring sizes, larger ones are preferred.
+ * If nothing matches, returns the original value.
+ * The entries parameter is assumed to be a power of two.
+ */
+unsigned long
+efx_best_guaranteed_ringsize(struct efx_nic *efx, unsigned long entries,
+			     bool fallback_to_supported)
+{
+	unsigned long more_entries = entries << 1;
+	unsigned long less_entries = entries >> 1;
+
+	while (1) {
+		if ((more_entries & efx->supported_bitmap) &&
+		    (more_entries & efx->guaranteed_bitmap))
+			return more_entries;
+		more_entries <<= 1;
+		if ((less_entries & efx->supported_bitmap) &&
+		    (less_entries & efx->guaranteed_bitmap))
+			return less_entries;
+		less_entries >>= 1;
+		/* Check whether any further iteration can still match */
+		if ((more_entries | less_entries) & efx->guaranteed_bitmap)
+			continue;
+		else if (fallback_to_supported &&
+			 (more_entries | less_entries) & efx->supported_bitmap)
+			continue;
+		else
+			break;
+	}
+
+	return entries;
+}
+
+unsigned long
+efx_next_guaranteed_ringsize(struct efx_nic *efx, unsigned long entries,
+			     bool fallback_to_supported)
+{
+	unsigned long more_entries = entries << 1;
+
+	while (1) {
+		if ((more_entries & efx->supported_bitmap) &&
+		    (more_entries & efx->guaranteed_bitmap))
+			return more_entries;
+		more_entries <<= 1;
+		if (more_entries & efx->guaranteed_bitmap)
+			continue;
+		else if (fallback_to_supported &&
+			 (more_entries & efx->supported_bitmap))
+			continue;
+		else
+			break;
+	}
+
+	return entries;
+}
+
+/* This zeroes out and then fills in the invariants in a struct
+ * efx_nic (including all sub-structures).
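+ *
+ * A sketch of how this pairs with the other setup helpers at probe and
+ * removal time (ordering illustrative, not quoted from the probe path):
+ *
+ *	rc = efx_init_struct(efx, pci_dev);
+ *	rc = efx_init_io(efx, bar, dma_mask, mem_map_size);
+ *	rc = efx_probe_common(efx);
+ *	...
+ *	efx_remove_common(efx);
+ *	efx_fini_io(efx);
+ *	efx_fini_struct(efx);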
+ */ +int efx_init_struct(struct efx_nic *efx, struct pci_dev *pci_dev) +{ + /* Initialise common structures */ + spin_lock_init(&efx->biu_lock); + INIT_WORK(&efx->reset_work, efx_reset_work); + INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor); + efx_selftest_async_init(efx); + efx->pci_dev = pci_dev; + efx->msg_enable = debug; + efx->state = STATE_UNINIT; + strscpy(efx->name, pci_name(pci_dev), sizeof(efx->name)); + +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_NDO_SET_FEATURES) && !defined(EFX_HAVE_EXT_NDO_SET_FEATURES) + efx->rx_checksum_enabled = true; +#endif +#if defined(EFX_NOT_UPSTREAM) && defined(EFX_USE_SFC_LRO) + efx->lro_available = true; +#ifndef NETIF_F_LRO + efx->lro_enabled = lro; +#endif +#endif +#ifdef EFX_NOT_UPSTREAM + efx->phy_power_follows_link = phy_power_follows_link; + efx->link_down_on_reset = link_down_on_reset; +#endif +#ifdef DEBUG + efx->log_tc_errs = true; +#endif + efx->tc_match_ignore_ttl = true; + INIT_LIST_HEAD(&efx->channel_list); + efx->rx_prefix_size = efx->type->rx_prefix_size; + efx->rx_ip_align = + NET_IP_ALIGN ? (efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0; + efx->rx_packet_hash_offset = + efx->type->rx_hash_offset - efx->type->rx_prefix_size; + efx->rx_packet_ts_offset = + efx->type->rx_ts_offset - efx->type->rx_prefix_size; + INIT_LIST_HEAD(&efx->rss_context.list); + mutex_init(&efx->rss_lock); + INIT_LIST_HEAD(&efx->vport.list); + mutex_init(&efx->vport_lock); + efx->vport.vport_id = EVB_PORT_ID_ASSIGNED; + spin_lock_init(&efx->stats_lock); + efx->num_mac_stats = MC_CMD_MAC_NSTATS; + BUILD_BUG_ON(MC_CMD_MAC_NSTATS - 1 != MC_CMD_MAC_GENERATION_END); + efx->stats_period_ms = STATS_PERIOD_MS_DEFAULT; + INIT_DELAYED_WORK(&efx->stats_monitor_work, efx_mac_stats_monitor); + efx->stats_monitor_generation = EFX_MC_STATS_GENERATION_INVALID; + efx->vi_stride = EFX_DEFAULT_VI_STRIDE; + mutex_init(&efx->mac_lock); + init_rwsem(&efx->filter_sem); + INIT_LIST_HEAD(&efx->ntuple_list); +#ifdef CONFIG_RFS_ACCEL + mutex_init(&efx->rps_mutex); + spin_lock_init(&efx->rps_hash_lock); + /* Failure to allocate is not fatal, but may degrade ARFS performance */ + efx->rps_hash_table = kcalloc(EFX_ARFS_HASH_TABLE_SIZE, + sizeof(*efx->rps_hash_table), GFP_KERNEL); +#endif + INIT_WORK(&efx->mac_work, efx_mac_work); + init_waitqueue_head(&efx->flush_wq); + +#ifdef CONFIG_DEBUG_FS + mutex_init(&efx->debugfs_symlink_mutex); +#endif + efx->tx_queues_per_channel = 1; + efx->rxq_entries = EFX_DEFAULT_RX_DMAQ_SIZE; + efx->txq_entries = EFX_DEFAULT_TX_DMAQ_SIZE; + + efx->rss_context.context_id = EFX_MCDI_RSS_CONTEXT_INVALID; + efx->vport.vport_id = EVB_PORT_ID_ASSIGNED; + + efx->mem_bar = UINT_MAX; + efx->reg_base = 0; + + efx->max_channels = EFX_MAX_CHANNELS; + efx->max_tx_channels = EFX_MAX_CHANNELS; + + mutex_init(&efx->reflash_mutex); + + return 0; +} + +/* This configures the PCI device to enable I/O and DMA. 
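+ *
+ * Example call (the values here are purely illustrative; the real BAR
+ * number, DMA mask and map size come from the NIC-type descriptor):
+ *
+ *	rc = efx_init_io(efx, 2, DMA_BIT_MASK(46), 1UL << 20);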
*/ +int efx_init_io(struct efx_nic *efx, int bar, dma_addr_t dma_mask, unsigned int mem_map_size) +{ + struct pci_dev *pci_dev = efx->pci_dev; + int rc; + + efx->mem_bar = UINT_MAX; + pci_dbg(pci_dev, "initialising I/O bar=%d\n", bar); + + rc = pci_enable_device(pci_dev); + if (rc) { + pci_err(pci_dev, "failed to enable PCI device\n"); + goto fail1; + } + + pci_set_master(pci_dev); + + rc = dma_set_mask_and_coherent(&pci_dev->dev, dma_mask); + if (rc) { + pci_err(pci_dev, "could not find a suitable DMA mask\n"); + goto fail2; + } + pci_dbg(pci_dev, "using DMA mask %llx\n", (unsigned long long)dma_mask); + + efx->membase_phys = pci_resource_start(efx->pci_dev, bar); + if (!efx->membase_phys) { + pci_err(pci_dev, + "ERROR: No BAR%d mapping from the BIOS. Try pci=realloc on the kernel command line\n", + bar); + rc = -ENODEV; + goto fail3; + } + rc = pci_request_region(pci_dev, bar, "sfc"); + + if (rc) { + pci_err(pci_dev, "request for memory BAR[%d] failed\n", bar); + rc = -EIO; + goto fail3; + } + efx->mem_bar = bar; +#if defined(EFX_USE_KCOMPAT) + efx->membase = efx_ioremap(efx->membase_phys, mem_map_size); +#else + efx->membase = ioremap(efx->membase_phys, mem_map_size); +#endif + + if (!efx->membase) { + pci_err(pci_dev, "could not map memory BAR[%d] at %llx+%x\n", + bar, (unsigned long long)efx->membase_phys, + mem_map_size); + rc = -ENOMEM; + goto fail4; + } + pci_dbg(pci_dev, "memory BAR[%d] at %llx+%x (virtual %p)\n", bar, + (unsigned long long)efx->membase_phys, mem_map_size, + efx->membase); + + return 0; + +fail4: + pci_release_region(efx->pci_dev, bar); +fail3: + efx->membase_phys = 0; +fail2: + pci_disable_device(efx->pci_dev); +fail1: + return rc; +} + +void efx_fini_io(struct efx_nic *efx) +{ + pci_dbg(efx->pci_dev, "shutting down I/O\n"); + + if (efx->membase) { + iounmap(efx->membase); + efx->membase = NULL; + } + + if (efx->membase_phys) { + pci_release_region(efx->pci_dev, efx->mem_bar); + efx->membase_phys = 0; + efx->mem_bar = UINT_MAX; + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_PCI_DEV_FLAGS_ASSIGNED) + /* Don't disable bus-mastering if VFs are assigned */ + if (!pci_vfs_assigned(efx->pci_dev)) +#endif + pci_disable_device(efx->pci_dev); + } +} + +int efx_probe_common(struct efx_nic *efx) +{ + int rc = efx_mcdi_init(efx); + + if (rc) + return rc; + if (efx->mcdi->fn_flags & + (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_NO_ACTIVE_PORT) +#if defined(EFX_USE_KCOMPAT) && defined(EFX_TC_OFFLOAD) + && efx_nic_rev(efx) != EFX_REV_EF100 +#endif + ) + return 0; + + /* Reset (most) configuration for this function */ + rc = efx_mcdi_reset(efx, RESET_TYPE_ALL); + if (rc) + return rc; + /* Enable event logging */ + rc = efx_mcdi_log_ctrl(efx, true, false, 0); + if (rc) + return rc; + + /* Create debugfs symlinks */ +#ifdef CONFIG_DEBUG_FS + mutex_lock(&efx->debugfs_symlink_mutex); + rc = efx_init_debugfs_nic(efx); + mutex_unlock(&efx->debugfs_symlink_mutex); + if (rc) + pci_err(efx->pci_dev, "failed to init device debugfs\n"); +#endif + + return 0; +} + +void efx_remove_common(struct efx_nic *efx) +{ +#ifdef CONFIG_DEBUG_FS + mutex_lock(&efx->debugfs_symlink_mutex); + efx_fini_debugfs_nic(efx); + mutex_unlock(&efx->debugfs_symlink_mutex); +#endif + + efx_mcdi_detach(efx); + efx_mcdi_fini(efx); +} + +/** Check queue size for range and rounding. + * + * If #fix is set it will clamp and round as required. + * Regardless of #fix this will return an error code if the value is + * invalid. 
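+ *
+ * Worked examples (illustrative), with min = 512, max = 4096, fix = true:
+ *	*entries == 3000 -> rounded up to 4096, returns -EINVAL;
+ *	*entries == 100  -> clamped to 512, returns -ERANGE;
+ *	*entries == 1024 -> left unchanged, returns 0.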
+ */ +int efx_check_queue_size(struct efx_nic *efx, u32 *entries, + u32 min, u32 max, bool fix) +{ + if (*entries < min || *entries > max) { + if (fix) + *entries = clamp_t(u32, *entries, min, max); + return -ERANGE; + } + + if (!is_power_of_2(*entries)) { + if (fix) + *entries = roundup_pow_of_two(*entries); + return -EINVAL; + } + + return 0; +} + +#ifdef CONFIG_SFC_MCDI_LOGGING +static ssize_t mcdi_logging_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + + return scnprintf(buf, PAGE_SIZE, "%d\n", mcdi->logging_enabled); +} +static ssize_t mcdi_logging_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + bool enable = count > 0 && *buf != '0'; + + mcdi->logging_enabled = enable; + return count; +} +static DEVICE_ATTR_RW(mcdi_logging); + +void efx_init_mcdi_logging(struct efx_nic *efx) +{ + int rc = device_create_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging); + if (rc) { + netif_warn(efx, drv, efx->net_dev, + "failed to init net dev attributes\n"); + } +} + +void efx_fini_mcdi_logging(struct efx_nic *efx) +{ + device_remove_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging); +} +#endif + +/* VLAN acceleration */ +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_NDO_VLAN_RX_ADD_VID_PROTO) +int efx_vlan_rx_add_vid(struct net_device *net_dev, __be16 proto, u16 vid) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + + if (efx->type->vlan_rx_add_vid) + return efx->type->vlan_rx_add_vid(efx, proto, vid); + else + return -EOPNOTSUPP; +} + +int efx_vlan_rx_kill_vid(struct net_device *net_dev, __be16 proto, u16 vid) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + + if (efx->type->vlan_rx_kill_vid) + return efx->type->vlan_rx_kill_vid(efx, proto, vid); + else + return -EOPNOTSUPP; +} +#elif defined(EFX_HAVE_NDO_VLAN_RX_ADD_VID_RC) +int efx_vlan_rx_add_vid(struct net_device *net_dev, u16 vid) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + + if (efx->type->vlan_rx_add_vid) + return efx->type->vlan_rx_add_vid(efx, htons(ETH_P_8021Q), vid); + else + return -EOPNOTSUPP; +} + +int efx_vlan_rx_kill_vid(struct net_device *net_dev, u16 vid) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + + if (efx->type->vlan_rx_kill_vid) + return efx->type->vlan_rx_kill_vid(efx, htons(ETH_P_8021Q), + vid); + else + return -EOPNOTSUPP; +} +#else +void efx_vlan_rx_add_vid(struct net_device *net_dev, unsigned short vid) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + + if (efx->type->vlan_rx_add_vid) + efx->type->vlan_rx_add_vid(efx, htons(ETH_P_8021Q), vid); +} + +void efx_vlan_rx_kill_vid(struct net_device *net_dev, unsigned short vid) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + + if (efx->type->vlan_rx_kill_vid) + efx->type->vlan_rx_kill_vid(efx, htons(ETH_P_8021Q), vid); +} +#endif + +/* V-port allocations. Same algorithms (and justification for them) as RSS + * contexts, above. + */ +struct efx_vport *efx_alloc_vport_entry(struct efx_nic *efx) +{ + struct list_head *head = &efx->vport.list; + struct efx_vport *ctx, *new; + u16 id = 1; /* Don't use zero, that refers to the driver master vport */ + + WARN_ON(!mutex_is_locked(&efx->vport_lock)); + + /* Search for first gap in the numbering */ + list_for_each_entry(ctx, head, list) { + if (ctx->user_id != id) + break; + id++; + /* Check for wrap. 
If this happens, we have nearly 2^16
+		 * allocated vports, which seems unlikely.
+		 */
+		if (WARN_ON_ONCE(!id))
+			return NULL;
+	}
+
+	/* Create the new entry */
+	new = kzalloc(sizeof(struct efx_vport), GFP_KERNEL);
+	if (!new)
+		return NULL;
+
+	/* Insert the new entry into the gap */
+	new->user_id = id;
+	list_add_tail(&new->list, &ctx->list);
+	return new;
+}
+
+struct efx_vport *efx_find_vport_entry(struct efx_nic *efx, u16 id)
+{
+	struct list_head *head = &efx->vport.list;
+	struct efx_vport *ctx;
+
+	WARN_ON(!mutex_is_locked(&efx->vport_lock));
+
+	list_for_each_entry(ctx, head, list)
+		if (ctx->user_id == id)
+			return ctx;
+	return NULL;
+}
+
+void efx_free_vport_entry(struct efx_vport *ctx)
+{
+	list_del(&ctx->list);
+	kfree(ctx);
+}
+
+int efx_vport_add(struct efx_nic *efx, u16 vlan, bool vlan_restrict)
+{
+	struct efx_vport *vpx;
+	int rc;
+
+	if (!efx->type->vport_add)
+		return -EOPNOTSUPP;
+
+	mutex_lock(&efx->vport_lock);
+	vpx = efx_alloc_vport_entry(efx);
+	if (!vpx) {
+		rc = -ENOMEM;
+		goto out_unlock;
+	}
+	vpx->vlan = vlan;
+	vpx->vlan_restrict = vlan_restrict;
+	rc = efx->type->vport_add(efx, vpx->vlan, vpx->vlan_restrict,
+				  &vpx->vport_id);
+	if (rc < 0)
+		efx_free_vport_entry(vpx);
+	else
+		rc = vpx->user_id;
+out_unlock:
+	mutex_unlock(&efx->vport_lock);
+	return rc;
+}
+
+int efx_vport_del(struct efx_nic *efx, u16 port_user_id)
+{
+	struct efx_vport *vpx;
+	int rc;
+
+	if (!efx->type->vport_del)
+		return -EOPNOTSUPP;
+
+	mutex_lock(&efx->vport_lock);
+	vpx = efx_find_vport_entry(efx, port_user_id);
+	if (!vpx) {
+		rc = -ENOENT;
+		goto out_unlock;
+	}
+
+	rc = efx->type->vport_del(efx, vpx->vport_id);
+	if (!rc)
+		efx_free_vport_entry(vpx);
+out_unlock:
+	mutex_unlock(&efx->vport_lock);
+	return rc;
+}
+
+#if defined(EFX_NOT_UPSTREAM) && defined(DEBUG)
+/* Used by load.sh to reliably indicate DEBUG vs RELEASE */
+extern int __efx_enable_debug; /* placate sparse */
+int __efx_enable_debug __attribute__((unused));
+#endif
+
+/**************************************************************************
+ *
+ * PCI error handling
+ *
+ **************************************************************************/
+
+/* A PCI error affecting this device was detected.
+ * At this point MMIO and DMA may be disabled.
+ * Stop the software path and request a slot reset.
+ */
+static pci_ers_result_t efx_io_error_detected(struct pci_dev *pdev,
+					      pci_channel_state_t state)
+{
+	pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
+	struct efx_nic *efx = pci_get_drvdata(pdev);
+
+	if (state == pci_channel_io_perm_failure)
+		return PCI_ERS_RESULT_DISCONNECT;
+
+	rtnl_lock();
+
+	switch (efx->state) {
+#ifdef CONFIG_SFC_VDPA
+	case STATE_VDPA:
+		WARN_ON(efx_nic_rev(efx) != EFX_REV_EF100);
+		efx->state = STATE_DISABLED;
+		ef100_vdpa_reset(&efx->vdpa_nic->vdpa_dev);
+
+		status = PCI_ERS_RESULT_DISCONNECT;
+		break;
+#endif
+	case STATE_DISABLED:
+		/* If the interface is disabled we don't want to do anything
+		 * with it.
+		 */
+		status = PCI_ERS_RESULT_RECOVERED;
+		break;
+	default:
+		efx->state = efx_begin_recovery(efx->state);
+		efx->reset_pending = 0;
+
+		efx_device_detach_sync(efx);
+
+		if (efx_net_active(efx->state)) {
+			efx_stop_all(efx);
+			efx_disable_interrupts(efx);
+		}
+
+		status = PCI_ERS_RESULT_NEED_RESET;
+		break;
+	}
+
+	rtnl_unlock();
+
+	pci_disable_device(pdev);
+
+	return status;
+}
+
+/* Fake a successful reset, which will be performed later in efx_io_resume.
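+ * (In the AER recovery sequence the core calls .error_detected, then
+ *  .slot_reset once the link has been reset, then .resume.  Since
+ *  efx_io_error_detected() normally returns PCI_ERS_RESULT_NEED_RESET,
+ *  recovery arrives here next; returning PCI_ERS_RESULT_RECOVERED lets
+ *  it proceed to efx_io_resume(), where the driver does the real work
+ *  via efx_reset(RESET_TYPE_ALL).)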
*/ +static pci_ers_result_t efx_io_slot_reset(struct pci_dev *pdev) +{ + struct efx_nic *efx = pci_get_drvdata(pdev); + pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED; + int rc; + + if (pci_enable_device(pdev)) { + netif_err(efx, hw, efx->net_dev, + "Cannot re-enable PCI device after reset.\n"); + status = PCI_ERS_RESULT_DISCONNECT; + } + + rc = pci_aer_clear_nonfatal_status(pdev); + if (rc) { + netif_err(efx, hw, efx->net_dev, + "pci_aer_clear_nonfatal_status failed (%d)\n", rc); + /* Non-fatal error. Continue. */ + } + + return status; +} + +/* Perform the actual reset and resume I/O operations. */ +static void efx_io_resume(struct pci_dev *pdev) +{ + struct efx_nic *efx = pci_get_drvdata(pdev); + int rc; + + rtnl_lock(); + + if (efx->state == STATE_DISABLED) + goto out; + + rc = efx_reset(efx, RESET_TYPE_ALL); + if (rc) { + netif_err(efx, hw, efx->net_dev, + "efx_reset failed after PCI error (%d)\n", rc); + } else { + efx->state = efx_end_recovery(efx->state); + efx->reset_count = 0; + netif_dbg(efx, hw, efx->net_dev, + "Done resetting and resuming IO after PCI error.\n"); + } + +out: + rtnl_unlock(); +} + +int efx_rx_queue_id_internal(struct efx_nic *efx, int rxq_id) +{ + if (efx_tx_vi_spreading(efx)) + return rxq_id * 2; + else + return rxq_id; +} + +/* For simplicity and reliability, we always require a slot reset and try to + * reset the hardware when a pci error affecting the device is detected. + * We leave both the link_reset and mmio_enabled callback unimplemented: + * with our request for slot reset the mmio_enabled callback will never be + * called, and the link_reset callback is not used by AER or EEH mechanisms. + */ +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_CONST_PCI_ERR_HANDLER) +const struct pci_error_handlers efx_err_handlers = { +#else +struct pci_error_handlers efx_err_handlers = { +#endif + .error_detected = efx_io_error_detected, + .slot_reset = efx_io_slot_reset, + .resume = efx_io_resume, +}; + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_NDO_FEATURES_CHECK) +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_SKB_ENCAPSULATION) +/* Determine whether the NIC will be able to handle TX offloads for a given + * encapsulated packet. + */ +static bool efx_can_encap_offloads(struct efx_nic *efx, struct sk_buff *skb) +{ +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_NDO_ADD_VXLAN_PORT) && !defined(EFX_HAVE_NDO_UDP_TUNNEL_ADD) && !defined(EFX_HAVE_UDP_TUNNEL_NIC_INFO) + return false; +#else + struct gre_base_hdr *greh; + __be16 dst_port; + u8 ipproto; + + /* Does the NIC support encap offloads? + * If not, we should never get here, because we shouldn't have + * advertised encap offload feature flags in the first place. + */ + if (WARN_ON_ONCE(!efx->type->udp_tnl_has_port)) + return false; + + /* Determine encapsulation protocol in use */ + switch (skb->protocol) { + case htons(ETH_P_IP): + ipproto = ip_hdr(skb)->protocol; + break; + case htons(ETH_P_IPV6): + /* If there are extension headers, this will cause us to + * think we can't offload something that we maybe could have. + */ + ipproto = ipv6_hdr(skb)->nexthdr; + break; + default: + /* Not IP, so can't offload it */ + return false; + } + switch (ipproto) { + case IPPROTO_GRE: + /* We support NVGRE but not IP over GRE or random gretaps. + * Specifically, the NIC will accept GRE as encapsulated if + * the inner protocol is Ethernet, but only handle it + * correctly if the GRE header is 8 bytes long. Moreover, + * it will not update the Checksum or Sequence Number fields + * if they are present. 
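	 * As background, from the kernel's generic GRE definitions (not
	 * part of this driver), the 8-byte NVGRE case is the base header
	 * plus a 4-byte key:
	 *
	 *	struct gre_base_hdr {
	 *		__be16 flags;
	 *		__be16 protocol;
	 *	};
	 *
	 * with GRE_KEY set in flags adding the 4-byte key field.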
(The Routing Present flag, + * GRE_ROUTING, cannot be set else the header would be more + * than 8 bytes long; so we don't have to worry about it.) + */ + if (skb->inner_protocol_type != ENCAP_TYPE_ETHER) + return false; + if (ntohs(skb->inner_protocol) != ETH_P_TEB) + return false; + if (skb_inner_mac_header(skb) - skb_transport_header(skb) != 8) + return false; + greh = (struct gre_base_hdr *)skb_transport_header(skb); + return !(greh->flags & (GRE_CSUM | GRE_SEQ)); + case IPPROTO_UDP: + /* If the port is registered for a UDP tunnel, we assume the + * packet is for that tunnel, and the NIC will handle it as + * such. If not, the NIC won't know what to do with it. + */ + dst_port = udp_hdr(skb)->dest; + return efx->type->udp_tnl_has_port(efx, dst_port); + default: + return false; + } +#endif +} +#endif + +netdev_features_t efx_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_SKB_ENCAPSULATION) + struct efx_nic *efx = efx_netdev_priv(dev); + + if (skb->encapsulation) { + if (features & NETIF_F_GSO_MASK) + /* Hardware can only do TSO with at most 208 bytes + * of headers. + */ + if (skb_inner_transport_offset(skb) > + EFX_TSO2_MAX_HDRLEN) + features &= ~(NETIF_F_GSO_MASK); + if (features & (NETIF_F_GSO_MASK | NETIF_F_CSUM_MASK)) + if (!efx_can_encap_offloads(efx, skb)) + features &= ~(NETIF_F_GSO_MASK | + NETIF_F_CSUM_MASK); + } +#endif + return features; +} +#endif + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_NEED_GET_PHYS_PORT_ID) +int efx_get_phys_port_id(struct net_device *net_dev, + struct netdev_phys_item_id *ppid) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + + if (efx->type->get_phys_port_id) + return efx->type->get_phys_port_id(efx, ppid); + else + return -EOPNOTSUPP; +} +#endif + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_NDO_GET_PHYS_PORT_NAME) +int efx_get_phys_port_name(struct net_device *net_dev, + char *name, size_t len) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + + if (snprintf(name, len, "p%u", efx->port_num) >= len) + return -EINVAL; + return 0; +} +#endif + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_NDO_SET_FEATURES) || defined(EFX_HAVE_EXT_NDO_SET_FEATURES) +#if defined(EFX_NOT_UPSTREAM) && defined(EFX_USE_SFC_LRO) +/* This is called by netdev_update_features() to apply any + * restrictions on offload features. We must disable LRO whenever RX + * scattering is on since our implementation (SSR) does not yet + * support it. + */ +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_NDO_SET_FEATURES) +netdev_features_t efx_fix_features(struct net_device *net_dev, + netdev_features_t data) +#else +u32 efx_fix_features(struct net_device *net_dev, u32 data) +#endif +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + + if (!efx->lro_available) + data &= ~NETIF_F_LRO; + + if (!efx->vlan_filter_available) + data &= ~NETIF_F_HW_VLAN_CTAG_FILTER; + + return data; +} +#endif + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_NDO_SET_FEATURES) +int efx_set_features(struct net_device *net_dev, netdev_features_t data) +#else +int efx_set_features(struct net_device *net_dev, u32 data) +#endif +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + int rc; + + /* If disabling RX n-tuple filtering, clear existing filters */ + if (net_dev->features & ~data & NETIF_F_NTUPLE) { + rc = efx->type->filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL); + if (rc) + return rc; + } + + /* If Rx VLAN filter is changed, update filters via mac_reconfigure. 
+ * If forward-fcs is changed, mac_reconfigure updates that too. + */ + if ((net_dev->features ^ data) & (NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_RXFCS)) { + /* efx_set_rx_mode() will schedule MAC work to update filters + * when a new features are finally set in net_dev. + */ + efx_set_rx_mode(net_dev); + } + + return 0; +} +#endif + +#ifdef EFX_NOT_UPSTREAM +#if IS_MODULE(CONFIG_SFC_DRIVERLINK) +static struct efx_nic *efx_dl_device_priv(struct efx_dl_device *efx_dev) +{ + return container_of(efx_dev->nic, struct efx_nic, dl_nic); +} + +static int __efx_dl_publish(struct efx_dl_device *efx_dev) +{ + struct efx_nic *efx = efx_dl_device_priv(efx_dev); + int rc = efx_net_alloc(efx); + + if (rc) + efx_net_dealloc(efx); + + return rc; +} + +static void __efx_dl_unpublish(struct efx_dl_device *efx_dev) +{ + struct efx_nic *efx = efx_dl_device_priv(efx_dev); + + efx_net_dealloc(efx); +} + +static void __efx_dl_pause(struct efx_dl_device *efx_dev) +{ + struct efx_nic *efx = efx_dl_device_priv(efx_dev); + + efx_pause_napi(efx); +} + +static void __efx_dl_resume(struct efx_dl_device *efx_dev) +{ + struct efx_nic *efx = efx_dl_device_priv(efx_dev); + + efx_resume_napi(efx); +} + +static void __efx_dl_schedule_reset(struct efx_dl_device *efx_dev) +{ + struct efx_nic *efx = efx_dl_device_priv(efx_dev); + + efx_schedule_reset(efx, RESET_TYPE_ALL); +} + +static u32 __efx_dl_rss_flags_default(struct efx_dl_device *efx_dev) +{ + struct efx_nic *efx = efx_dl_device_priv(efx_dev); + + if (efx->type->rx_get_default_rss_flags) + return efx->type->rx_get_default_rss_flags(efx); + /* NIC does not support RSS flags, so any value will do */ + return 0; +} + +static int __efx_dl_rss_context_new(struct efx_dl_device *efx_dev, + const u32 *indir, const u8 *key, u32 flags, + u8 num_queues, u32 *rss_context) +{ + struct efx_nic *efx = efx_dl_device_priv(efx_dev); + struct efx_rss_context *ctx; + int rc; + + /* num_queues=0 is used internally by the driver to represent + * efx->rss_spread, and is not appropriate for driverlink clients + */ + if (!num_queues) + return -EINVAL; + + if (!efx->type->rx_push_rss_context_config) + return -EOPNOTSUPP; + + if (!efx->type->rx_set_rss_flags) + return -EOPNOTSUPP; + + mutex_lock(&efx->rss_lock); + ctx = efx_alloc_rss_context_entry(efx); + if (!ctx) { + rc = -ENOMEM; + goto out_unlock; + } + if (!indir) { + efx_set_default_rx_indir_table(efx, ctx); + indir = ctx->rx_indir_table; + } + if (!key) { + netdev_rss_key_fill(ctx->rx_hash_key, sizeof(ctx->rx_hash_key)); + key = ctx->rx_hash_key; + } + ctx->num_queues = num_queues; + rc = efx->type->rx_push_rss_context_config(efx, ctx, indir, key); + if (rc) + goto out_free; + *rss_context = ctx->user_id; + rc = efx->type->rx_set_rss_flags(efx, ctx, flags); + if (rc) + goto out_delete; + ctx->flags = flags; + +out_unlock: + mutex_unlock(&efx->rss_lock); + return rc; + +out_delete: + efx->type->rx_push_rss_context_config(efx, ctx, NULL, NULL); +out_free: + efx_free_rss_context_entry(ctx); + goto out_unlock; +} + +static int __efx_dl_rss_context_set(struct efx_dl_device *efx_dev, + const u32 *indir, const u8 *key, + u32 flags, u32 rss_context) +{ + struct efx_nic *efx = efx_dl_device_priv(efx_dev); + struct efx_rss_context *ctx; + u32 old_flags; + int rc; + + if (!efx->type->rx_push_rss_context_config) + return -EOPNOTSUPP; + + mutex_lock(&efx->rss_lock); + ctx = efx_find_rss_context_entry(efx, rss_context); + if (!ctx) { + rc = -ENOENT; + goto out_unlock; + } + + if (!indir) /* no change */ + indir = ctx->rx_indir_table; + if (!key) /* no 
change */ + key = ctx->rx_hash_key; + old_flags = ctx->flags; + ctx->flags = flags; + rc = efx->type->rx_push_rss_context_config(efx, ctx, indir, key); + if (rc) /* restore old RSS flags on failure */ + ctx->flags = old_flags; +out_unlock: + mutex_unlock(&efx->rss_lock); + return rc; +} + +static int __efx_dl_rss_context_free(struct efx_dl_device *efx_dev, + u32 rss_context) +{ + struct efx_nic *efx = efx_dl_device_priv(efx_dev); + struct efx_rss_context *ctx; + int rc; + + if (!efx->type->rx_push_rss_context_config) + return -EOPNOTSUPP; + + mutex_lock(&efx->rss_lock); + ctx = efx_find_rss_context_entry(efx, rss_context); + if (!ctx) { + rc = -ENOENT; + goto out_unlock; + } + + rc = efx->type->rx_push_rss_context_config(efx, ctx, NULL, NULL); + if (!rc) + efx_free_rss_context_entry(ctx); +out_unlock: + mutex_unlock(&efx->rss_lock); + return rc; +} + +/* We additionally include priority in the filter ID so that we + * can pass it back into efx_filter_remove_id_safe(). + */ +#define EFX_FILTER_PRI_SHIFT 28 +#define EFX_FILTER_ID_MASK ((1 << EFX_FILTER_PRI_SHIFT) - 1) + +static int __efx_dl_filter_insert(struct efx_dl_device *efx_dev, + const struct efx_filter_spec *spec, + bool replace_equal) +{ + struct efx_nic *efx = efx_dl_device_priv(efx_dev); + s32 filter_id = efx_filter_insert_filter(efx, + spec, replace_equal); + if (filter_id >= 0) { + EFX_WARN_ON_PARANOID(filter_id & ~EFX_FILTER_ID_MASK); + filter_id |= spec->priority << EFX_FILTER_PRI_SHIFT; + } + return filter_id; +} + +static int __efx_dl_filter_remove(struct efx_dl_device *efx_dev, int filter_id) +{ + struct efx_nic *efx = efx_dl_device_priv(efx_dev); + + if (filter_id < 0) + return -EINVAL; + return efx_filter_remove_id_safe(efx, + filter_id >> EFX_FILTER_PRI_SHIFT, + filter_id & EFX_FILTER_ID_MASK); +} + +static int __efx_dl_filter_redirect(struct efx_dl_device *efx_dev, + int filter_id, int rxq_i, u32 *rss_context, + int stack_id) +{ + struct efx_nic *efx = efx_dl_device_priv(efx_dev); + + if (WARN_ON(filter_id < 0)) + return -EINVAL; + return efx->type->filter_redirect(efx, filter_id & EFX_FILTER_ID_MASK, + rss_context, rxq_i, stack_id); +} + +static int __efx_dl_vport_new(struct efx_dl_device *efx_dev, u16 vlan, + bool vlan_restrict) +{ + struct efx_nic *efx = efx_dl_device_priv(efx_dev); + + return efx_vport_add(efx, vlan, vlan_restrict); +} + +static int __efx_dl_vport_free(struct efx_dl_device *efx_dev, u16 port_id) +{ + struct efx_nic *efx = efx_dl_device_priv(efx_dev); + + return efx_vport_del(efx, port_id); +} + +static +int __efx_dl_init_txq(struct efx_dl_device *efx_dev, u32 client_id, + dma_addr_t *dma_addrs, + int n_dma_addrs, u16 vport_id, u8 stack_id, u32 owner_id, + bool timestamp, u8 crc_mode, bool tcp_udp_only, + bool tcp_csum_dis, bool ip_csum_dis, bool inner_tcp_csum, + bool inner_ip_csum, bool buff_mode, bool pacer_bypass, + bool ctpio, bool ctpio_uthresh, bool m2m_d2c, u32 instance, + u32 label, u32 target_evq, u32 num_entries) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_EXT_IN_LEN); + struct efx_nic *efx = efx_dl_device_priv(efx_dev); + struct efx_vport *vpx; + u32 port_id; + int i, rc; + + mutex_lock(&efx->vport_lock); + /* look up vport, convert to hw ID, and OR in stack_id */ + if (vport_id == 0) + vpx = &efx->vport; + else + vpx = efx_find_vport_entry(efx, vport_id); + if (!vpx) { + rc = -ENOENT; + goto out_unlock; + } + if (vpx->vport_id == EVB_PORT_ID_NULL) { + rc = -EOPNOTSUPP; + goto out_unlock; + } + port_id = vpx->vport_id | EVB_STACK_ID(stack_id); + + MCDI_SET_DWORD(inbuf, 
INIT_TXQ_EXT_IN_SIZE, num_entries); + MCDI_SET_DWORD(inbuf, INIT_TXQ_EXT_IN_TARGET_EVQ, target_evq); + MCDI_SET_DWORD(inbuf, INIT_TXQ_EXT_IN_LABEL, label); + MCDI_SET_DWORD(inbuf, INIT_TXQ_EXT_IN_INSTANCE, instance); + MCDI_SET_DWORD(inbuf, INIT_TXQ_EXT_IN_OWNER_ID, owner_id); + MCDI_SET_DWORD(inbuf, INIT_TXQ_EXT_IN_PORT_ID, port_id); + + MCDI_POPULATE_DWORD_12(inbuf, INIT_TXQ_EXT_IN_FLAGS, + INIT_TXQ_EXT_IN_FLAG_BUFF_MODE, !!buff_mode, + INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS, !!ip_csum_dis, + INIT_TXQ_EXT_IN_FLAG_TCP_CSUM_DIS, !!tcp_csum_dis, + INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN, !!inner_ip_csum, + INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN, !!inner_tcp_csum, + INIT_TXQ_EXT_IN_FLAG_TCP_UDP_ONLY, !!tcp_udp_only, + INIT_TXQ_EXT_IN_CRC_MODE, crc_mode, + INIT_TXQ_EXT_IN_FLAG_TIMESTAMP, !!timestamp, + INIT_TXQ_EXT_IN_FLAG_CTPIO, !!ctpio, + INIT_TXQ_EXT_IN_FLAG_CTPIO_UTHRESH, !!ctpio_uthresh, + INIT_TXQ_EXT_IN_FLAG_PACER_BYPASS, !!pacer_bypass, + INIT_TXQ_EXT_IN_FLAG_M2M_D2C, !!m2m_d2c); + + for (i = 0; i < n_dma_addrs; ++i) + MCDI_SET_ARRAY_QWORD(inbuf, INIT_TXQ_EXT_IN_DMA_ADDR, i, + dma_addrs[i]); + + rc = efx_mcdi_rpc_client(efx, client_id, MC_CMD_INIT_TXQ, inbuf, + MC_CMD_INIT_TXQ_EXT_IN_LEN, NULL, 0, NULL); +out_unlock: + mutex_unlock(&efx->vport_lock); + return rc; +} + +static +int __efx_dl_init_rxq(struct efx_dl_device *efx_dev, u32 client_id, + dma_addr_t *dma_addrs, + int n_dma_addrs, u16 vport_id, u8 stack_id, u32 owner_id, + u8 crc_mode, bool timestamp, bool hdr_split, + bool buff_mode, bool rx_prefix, u8 dma_mode, u32 instance, + u32 label, u32 target_evq, u32 num_entries, + u8 ps_buf_size, bool force_rx_merge, int ef100_rx_buffer_size) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_RXQ_V4_IN_LEN); + struct efx_nic *efx = efx_dl_device_priv(efx_dev); + struct efx_vport *vpx; + u32 port_id; + int i, rc; + + mutex_lock(&efx->vport_lock); + /* look up vport, convert to hw ID, and OR in stack_id */ + if (vport_id == 0) + vpx = &efx->vport; + else + vpx = efx_find_vport_entry(efx, vport_id); + if (!vpx) { + rc = -ENOENT; + goto out_unlock; + } + if (vpx->vport_id == EVB_PORT_ID_NULL) { + rc = -EOPNOTSUPP; + goto out_unlock; + } + port_id = vpx->vport_id | EVB_STACK_ID(stack_id); + + MCDI_SET_DWORD(inbuf, INIT_RXQ_EXT_IN_SIZE, num_entries); + MCDI_SET_DWORD(inbuf, INIT_RXQ_EXT_IN_TARGET_EVQ, target_evq); + MCDI_SET_DWORD(inbuf, INIT_RXQ_EXT_IN_LABEL, label); + MCDI_SET_DWORD(inbuf, INIT_RXQ_EXT_IN_INSTANCE, instance); + MCDI_SET_DWORD(inbuf, INIT_RXQ_EXT_IN_OWNER_ID, owner_id); + MCDI_SET_DWORD(inbuf, INIT_RXQ_EXT_IN_PORT_ID, port_id); + + MCDI_POPULATE_DWORD_8(inbuf, INIT_RXQ_EXT_IN_FLAGS, + INIT_RXQ_EXT_IN_FLAG_BUFF_MODE, !!buff_mode, + INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT, !!hdr_split, + INIT_RXQ_EXT_IN_FLAG_TIMESTAMP, !!timestamp, + INIT_RXQ_EXT_IN_FLAG_PREFIX, !!rx_prefix, + INIT_RXQ_EXT_IN_CRC_MODE, crc_mode, + INIT_RXQ_EXT_IN_DMA_MODE, dma_mode, + INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE, ps_buf_size, + INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING, !!force_rx_merge); + + for (i = 0; i < n_dma_addrs; ++i) + MCDI_SET_ARRAY_QWORD(inbuf, INIT_RXQ_EXT_IN_DMA_ADDR, i, + dma_addrs[i]); + + if (efx_nic_rev(efx) == EFX_REV_EF100) { + if (ef100_rx_buffer_size % L1_CACHE_BYTES) { + rc = -EINVAL; + goto out_unlock; + } + MCDI_SET_DWORD(inbuf, INIT_RXQ_V4_IN_BUFFER_SIZE_BYTES, + ef100_rx_buffer_size); + } + + + rc = efx_mcdi_rpc_client(efx, client_id, MC_CMD_INIT_RXQ, inbuf, + MC_CMD_INIT_RXQ_V4_IN_LEN, NULL, 0, NULL); +out_unlock: + mutex_unlock(&efx->vport_lock); + return rc; +} + +static +int 
__efx_dl_set_multicast_loopback_suppression(struct efx_dl_device *efx_dev, + bool suppress, u16 vport_id, + u8 stack_id) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_PARSER_DISP_CONFIG_IN_LEN(1)); + struct efx_nic *efx = efx_dl_device_priv(efx_dev); + struct efx_vport *vpx; + u32 port_id; + int rc; + + mutex_lock(&efx->vport_lock); + /* look up vport, convert to hw ID, and OR in stack_id */ + if (vport_id == 0) + vpx = &efx->vport; + else + vpx = efx_find_vport_entry(efx, vport_id); + if (!vpx) { + rc = -ENOENT; + goto out_unlock; + } + if (vpx->vport_id == EVB_PORT_ID_NULL) { + rc = -EOPNOTSUPP; + goto out_unlock; + } + port_id = vpx->vport_id | EVB_STACK_ID(stack_id); + + MCDI_SET_DWORD(inbuf, SET_PARSER_DISP_CONFIG_IN_TYPE, + MC_CMD_SET_PARSER_DISP_CONFIG_IN_VADAPTOR_SUPPRESS_SELF_TX); + MCDI_SET_DWORD(inbuf, SET_PARSER_DISP_CONFIG_IN_ENTITY, port_id); + MCDI_SET_DWORD(inbuf, SET_PARSER_DISP_CONFIG_IN_VALUE, !!suppress); + rc = efx_mcdi_rpc(efx, MC_CMD_SET_PARSER_DISP_CONFIG, + inbuf, sizeof(inbuf), NULL, 0, NULL); +out_unlock: + mutex_unlock(&efx->vport_lock); + return rc; +} + +static int __efx_dl_mcdi_rpc(struct efx_dl_device *efx_dev, unsigned int cmd, + size_t inlen, size_t outlen, size_t *outlen_actual, + const u8 *inbuf, u8 *outbuf) +{ + struct efx_nic *efx = efx_dl_device_priv(efx_dev); + + /* FIXME: Buffer parameter types should be changed to __le32 * + * so we can reasonably assume they are properly padded even + * if the lengths are not multiples of 4. + */ + if (WARN_ON(inlen & 3 || outlen & 3)) + return -EINVAL; + + return efx_mcdi_rpc_quiet(efx, cmd, (const efx_dword_t *)inbuf, + inlen, (efx_dword_t *)outbuf, outlen, + outlen_actual); +} + +static int __efx_dl_filter_block_kernel(struct efx_dl_device *efx_dev, + enum efx_dl_filter_block_kernel_type type) +{ + struct efx_nic *efx = efx_dl_device_priv(efx_dev); + int rc = 0; + + mutex_lock(&efx->dl_block_kernel_mutex); + + if (efx->dl_block_kernel_count[type] == 0) { + rc = efx->type->filter_block_kernel(efx, type); + if (rc) + goto unlock; + } + ++efx->dl_block_kernel_count[type]; + +unlock: + mutex_unlock(&efx->dl_block_kernel_mutex); + + return rc; +} + +static void __efx_dl_filter_unblock_kernel(struct efx_dl_device *efx_dev, + enum efx_dl_filter_block_kernel_type type) +{ + struct efx_nic *efx = efx_dl_device_priv(efx_dev); + + mutex_lock(&efx->dl_block_kernel_mutex); + + if (--efx->dl_block_kernel_count[type] == 0) + efx->type->filter_unblock_kernel(efx, type); + + mutex_unlock(&efx->dl_block_kernel_mutex); +} + +static long __efx_dl_dma_xlate(struct efx_dl_device *efx_dev, + const dma_addr_t *src, + dma_addr_t *dst, unsigned int n) +{ + struct efx_nic *efx = efx_dl_device_priv(efx_dev); + unsigned i; + long rc = -1; + + if (!efx->type->regionmap_buffer) { + /* This NIC has 1:1 mappings */ + memmove(dst, src, n * sizeof(src[0])); + return n; + } + + for (i = 0; i < n; ++i) { + dma_addr_t addr = src[i]; + if (efx->type->regionmap_buffer(efx, &addr)) { + if (rc < 0) + rc = i; + addr = (dma_addr_t)-1; + } + dst[i] = addr; + } + return rc >= 0 ? 
rc : n; +} + +static int __efx_dl_mcdi_rpc_client(struct efx_dl_device *efx_dev, + u32 client_id, unsigned int cmd, + size_t inlen, size_t outlen, + size_t *outlen_actual, + u32 *inbuf, u32 *outbuf) +{ + struct efx_nic *efx = efx_dl_device_priv(efx_dev); + + return efx_mcdi_rpc_client(efx, client_id, cmd, (efx_dword_t *)inbuf, + inlen, (efx_dword_t *)outbuf, outlen, + outlen_actual); +} + +static int __efx_dl_client_alloc(struct efx_dl_device *efx_dev, u32 parent, + u32 *id) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_CLIENT_ALLOC_OUT_LEN); + struct efx_nic *efx = efx_dl_device_priv(efx_dev); + int rc; + + BUILD_BUG_ON(MC_CMD_CLIENT_ALLOC_IN_LEN != 0); + rc = efx_mcdi_rpc_client(efx, parent, MC_CMD_CLIENT_ALLOC, + NULL, 0, outbuf, sizeof(outbuf), NULL); + if (rc) + return rc; + *id = MCDI_DWORD(outbuf, CLIENT_ALLOC_OUT_CLIENT_ID); + return 0; +} + +static int __efx_dl_client_free(struct efx_dl_device *efx_dev, u32 id) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_CLIENT_FREE_IN_LEN); + struct efx_nic *efx = efx_dl_device_priv(efx_dev); + + MCDI_SET_DWORD(inbuf, CLIENT_FREE_IN_CLIENT_ID, id); + return efx_mcdi_rpc(efx, MC_CMD_CLIENT_FREE, + inbuf, sizeof(inbuf), NULL, 0, NULL); +} + +static int __efx_dl_vi_set_user(struct efx_dl_device *efx_dev, + u32 vi_instance, u32 user) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_VI_USER_IN_LEN); + struct efx_nic *efx = efx_dl_device_priv(efx_dev); + + MCDI_SET_DWORD(inbuf, SET_VI_USER_IN_INSTANCE, vi_instance); + MCDI_SET_DWORD(inbuf, SET_VI_USER_IN_CLIENT_ID, user); + return efx_mcdi_rpc(efx, MC_CMD_SET_VI_USER, + inbuf, sizeof(inbuf), NULL, 0, NULL); +} + +static bool efx_dl_hw_unavailable(struct efx_dl_device *efx_dev) +{ + return efx_nic_hw_unavailable(efx_dl_device_priv(efx_dev)); +} + +static struct efx_dl_ops efx_driverlink_ops = { + .hw_unavailable = efx_dl_hw_unavailable, + .pause = __efx_dl_pause, + .resume = __efx_dl_resume, + .schedule_reset = __efx_dl_schedule_reset, + .rss_flags_default = __efx_dl_rss_flags_default, + .rss_context_new = __efx_dl_rss_context_new, + .rss_context_set = __efx_dl_rss_context_set, + .rss_context_free = __efx_dl_rss_context_free, + .filter_insert = __efx_dl_filter_insert, + .filter_remove = __efx_dl_filter_remove, + .filter_redirect = __efx_dl_filter_redirect, + .vport_new = __efx_dl_vport_new, + .vport_free = __efx_dl_vport_free, + .init_txq = __efx_dl_init_txq, + .init_rxq = __efx_dl_init_rxq, + .set_multicast_loopback_suppression = + __efx_dl_set_multicast_loopback_suppression, + .filter_block_kernel = __efx_dl_filter_block_kernel, + .filter_unblock_kernel = __efx_dl_filter_unblock_kernel, + .mcdi_rpc = __efx_dl_mcdi_rpc, + .publish = __efx_dl_publish, + .unpublish = __efx_dl_unpublish, + .dma_xlate = __efx_dl_dma_xlate, + .mcdi_rpc_client = __efx_dl_mcdi_rpc_client, + .client_alloc = __efx_dl_client_alloc, + .client_free = __efx_dl_client_free, + .vi_set_user = __efx_dl_vi_set_user, +}; + +void efx_dl_probe(struct efx_nic *efx) +{ + mutex_init(&efx->dl_block_kernel_mutex); + efx->dl_nic.pci_dev = efx->pci_dev; + efx->dl_nic.net_dev = efx->net_dev; + efx->dl_nic.ops = &efx_driverlink_ops; + efx->dl_nic.msg_enable = efx->msg_enable; + INIT_LIST_HEAD(&efx->dl_nic.nic_node); + INIT_LIST_HEAD(&efx->dl_nic.device_list); +} +#endif +#endif + diff --git a/src/drivers/net/ethernet/sfc/efx_common.h b/src/drivers/net/ethernet/sfc/efx_common.h new file mode 100644 index 0000000..268650a --- /dev/null +++ b/src/drivers/net/ethernet/sfc/efx_common.h @@ -0,0 +1,162 @@ 
+/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2018 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#ifndef EFX_COMMON_H +#define EFX_COMMON_H + +int efx_port_dummy_op_int(struct efx_nic *efx); +void efx_port_dummy_op_void(struct efx_nic *efx); + +bool efx_is_supported_ringsize(struct efx_nic *efx, unsigned long entries); +bool efx_is_guaranteed_ringsize(struct efx_nic *efx, unsigned long entries); +unsigned long +efx_best_guaranteed_ringsize(struct efx_nic *efx, unsigned long entries, + bool fallback_to_supported); +unsigned long +efx_next_guaranteed_ringsize(struct efx_nic *efx, unsigned long entries, + bool fallback_to_supported); +int efx_init_io(struct efx_nic *efx, int bar, dma_addr_t dma_mask, unsigned int mem_map_size); +void efx_fini_io(struct efx_nic *efx); +int efx_init_struct(struct efx_nic *efx, struct pci_dev *pci_dev); +void efx_fini_struct(struct efx_nic *efx); + +int efx_probe_common(struct efx_nic *efx); +void efx_remove_common(struct efx_nic *efx); + +int efx_start_all(struct efx_nic *efx); +void efx_stop_all(struct efx_nic *efx); +int efx_try_recovery(struct efx_nic *efx); +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_NETDEV_STATS64_VOID) +void efx_net_stats(struct net_device *net_dev, struct rtnl_link_stats64 *stats); +#elif defined(EFX_USE_NETDEV_STATS64) +struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev, + struct rtnl_link_stats64 *stats); +#else +struct net_device_stats *efx_net_stats(struct net_device *net_dev); +#endif +void efx_reset_sw_stats(struct efx_nic *efx); +void efx_print_stopped_queues(struct efx_nic *efx); +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_NDO_TX_TIMEOUT_TXQUEUE) +void efx_watchdog(struct net_device *net_dev, unsigned int txqueue); +#else +void efx_watchdog(struct net_device *net_dev); +#endif + +#define EFX_ASSERT_RESET_SERIALISED(efx) \ + do { \ + if (efx->state != STATE_UNINIT && efx->state != STATE_PROBED) \ + ASSERT_RTNL(); \ + } while (0) + +void efx_reset_down(struct efx_nic *efx, enum reset_type method); +int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok); +int efx_reset(struct efx_nic *efx, enum reset_type method); + + +extern unsigned int monitor_interval_ms; +void efx_start_monitor(struct efx_nic *efx); + +int efx_create_reset_workqueue(void); +void efx_queue_reset_work(struct efx_nic *efx); +void efx_flush_reset_workqueue(struct efx_nic *efx); +void efx_destroy_reset_workqueue(void); +void efx_schedule_reset(struct efx_nic *efx, enum reset_type type); + +static inline int efx_check_disabled(struct efx_nic *efx) +{ + if (efx->state == STATE_DISABLED || efx_recovering(efx->state)) { + netif_err(efx, drv, efx->net_dev, + "device is disabled due to earlier errors\n"); + return -EIO; + } + return 0; +} +int efx_check_queue_size(struct efx_nic *efx, u32 *entries, + u32 min, u32 max, bool fix); + +#ifdef CONFIG_SFC_MCDI_LOGGING +void efx_init_mcdi_logging(struct efx_nic *efx); +void efx_fini_mcdi_logging(struct efx_nic *efx); +#else +static inline void efx_init_mcdi_logging(struct efx_nic *efx) {} +static inline void efx_fini_mcdi_logging(struct efx_nic *efx) {} +#endif + +/* V-ports */ +struct efx_vport *efx_alloc_vport_entry(struct efx_nic *efx); +struct efx_vport 
*efx_find_vport_entry(struct efx_nic *efx, u16 id); +void efx_free_vport_entry(struct efx_vport *ctx); +int efx_vport_add(struct efx_nic *efx, u16 vlan, bool vlan_restrict); +int efx_vport_del(struct efx_nic *efx, u16 port_user_id); + +int efx_mac_reconfigure(struct efx_nic *efx, bool mtu_only); +void efx_link_status_changed(struct efx_nic *efx); +int efx_reconfigure_port(struct efx_nic *efx); +int __efx_reconfigure_port(struct efx_nic *efx); +int efx_change_mtu(struct net_device *net_dev, int new_mtu); +int efx_set_mac_address(struct net_device *net_dev, void *data); +void efx_set_rx_mode(struct net_device *net_dev); + +#ifdef EFX_NOT_UPSTREAM +#if IS_MODULE(CONFIG_SFC_DRIVERLINK) +void efx_dl_probe(struct efx_nic *efx); +bool efx_dl_supported(struct efx_nic *efx); +#endif +#endif + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_CONST_PCI_ERR_HANDLER) +extern const struct pci_error_handlers efx_err_handlers; +#else +extern struct pci_error_handlers efx_err_handlers; +#endif + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_NDO_FEATURES_CHECK) +netdev_features_t efx_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features); +#endif + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_NEED_GET_PHYS_PORT_ID) +int efx_get_phys_port_id(struct net_device *net_dev, + struct netdev_phys_item_id *ppid); +#endif + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_NDO_GET_PHYS_PORT_NAME) +int efx_get_phys_port_name(struct net_device *net_dev, + char *name, size_t len); +#endif + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_NDO_SET_FEATURES) || defined(EFX_HAVE_EXT_NDO_SET_FEATURES) +#if defined(EFX_NOT_UPSTREAM) && defined(EFX_USE_SFC_LRO) +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_NDO_SET_FEATURES) +netdev_features_t efx_fix_features(struct net_device *net_dev, netdev_features_t data); +#else +u32 efx_fix_features(struct net_device *net_dev, u32 data); +#endif +#endif +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_NDO_SET_FEATURES) +int efx_set_features(struct net_device *net_dev, netdev_features_t data); +#else +int efx_set_features(struct net_device *net_dev, u32 data); +#endif +#endif + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_NDO_VLAN_RX_ADD_VID_PROTO) +int efx_vlan_rx_add_vid(struct net_device *net_dev, __be16 proto, u16 vid); +int efx_vlan_rx_kill_vid(struct net_device *net_dev, __be16 proto, u16 vid); +#elif defined(EFX_HAVE_NDO_VLAN_RX_ADD_VID_RC) +int efx_vlan_rx_add_vid(struct net_device *net_dev, u16 vid); +int efx_vlan_rx_kill_vid(struct net_device *net_dev, u16 vid); +#else +void efx_vlan_rx_add_vid(struct net_device *net_dev, unsigned short vid); +void efx_vlan_rx_kill_vid(struct net_device *net_dev, unsigned short vid); +#endif + +#endif + diff --git a/src/drivers/net/ethernet/sfc/efx_devlink.c b/src/drivers/net/ethernet/sfc/efx_devlink.c new file mode 100644 index 0000000..2cada63 --- /dev/null +++ b/src/drivers/net/ethernet/sfc/efx_devlink.c @@ -0,0 +1,575 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2019 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
+ */
+#include <linux/rtc.h>
+#include "net_driver.h"
+#include "efx_devlink.h"
+#include "nic.h"
+#include "mcdi.h"
+#include "mcdi_functions.h"
+#include "mcdi_pcol.h"
+#include "efx_reflash.h"
+
+/* Custom devlink-info version object names for details that do not map to the
+ * generic standardized names.
+ */
+#define EFX_DEVLINK_INFO_VERSION_FW_MGMT_SUC "fw.mgmt.suc"
+#define EFX_DEVLINK_INFO_VERSION_FW_MGMT_CMC "fw.mgmt.cmc"
+#define EFX_DEVLINK_INFO_VERSION_FPGA_REV "fpga.rev"
+#define EFX_DEVLINK_INFO_VERSION_DATAPATH_HW "fpga.app"
+#define EFX_DEVLINK_INFO_VERSION_DATAPATH_FW DEVLINK_INFO_VERSION_GENERIC_FW_APP
+#define EFX_DEVLINK_INFO_VERSION_SOC_BOOT "coproc.boot"
+#define EFX_DEVLINK_INFO_VERSION_SOC_UBOOT "coproc.uboot"
+#define EFX_DEVLINK_INFO_VERSION_SOC_MAIN "coproc.main"
+#define EFX_DEVLINK_INFO_VERSION_SOC_RECOVERY "coproc.recovery"
+#define EFX_DEVLINK_INFO_VERSION_FW_EXPROM "fw.exprom"
+#define EFX_DEVLINK_INFO_VERSION_FW_UEFI "fw.uefi"
+
+#define EFX_MAX_VERSION_INFO_LEN 64
+
+static int efx_devlink_info_nvram_partition(struct efx_nic *efx,
+ struct devlink_info_req *req,
+ unsigned int partition_type,
+ const char *version_name)
+{
+ char buf[EFX_MAX_VERSION_INFO_LEN];
+ u16 version[4];
+ int rc;
+
+ rc = efx_mcdi_nvram_metadata(efx, partition_type, NULL, version, NULL, 0);
+ if (rc)
+ return rc;
+
+ snprintf(buf, EFX_MAX_VERSION_INFO_LEN, "%u.%u.%u.%u", version[0],
+ version[1], version[2], version[3]);
+ devlink_info_version_stored_put(req, version_name, buf);
+
+ return 0;
+}
+
+static void efx_devlink_info_stored_versions(struct efx_nic *efx,
+ struct devlink_info_req *req)
+{
+ efx_devlink_info_nvram_partition(efx, req, NVRAM_PARTITION_TYPE_BUNDLE,
+ DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID);
+ efx_devlink_info_nvram_partition(efx, req,
+ NVRAM_PARTITION_TYPE_MC_FIRMWARE,
+ DEVLINK_INFO_VERSION_GENERIC_FW_MGMT);
+ efx_devlink_info_nvram_partition(efx, req,
+ NVRAM_PARTITION_TYPE_SUC_FIRMWARE,
+ EFX_DEVLINK_INFO_VERSION_FW_MGMT_SUC);
+ efx_devlink_info_nvram_partition(efx, req,
+ NVRAM_PARTITION_TYPE_EXPANSION_ROM,
+ EFX_DEVLINK_INFO_VERSION_FW_EXPROM);
+ efx_devlink_info_nvram_partition(efx, req,
+ NVRAM_PARTITION_TYPE_EXPANSION_UEFI,
+ EFX_DEVLINK_INFO_VERSION_FW_UEFI);
+}
+
+#define EFX_MAX_SERIALNUM_LEN (ETH_ALEN * 2 + 1)
+
+static void efx_devlink_info_board_cfg(struct efx_nic *efx,
+ struct devlink_info_req *req)
+{
+ char sn[EFX_MAX_SERIALNUM_LEN];
+ u8 mac_address[ETH_ALEN];
+ int rc;
+
+ rc = efx_mcdi_get_board_cfg(efx, 0, mac_address, NULL, NULL);
+ if (!rc) {
+ snprintf(sn, EFX_MAX_SERIALNUM_LEN, "%pm", mac_address);
+ devlink_info_serial_number_put(req, sn);
+ }
+}
+
+#define EFX_VER_FLAG(_f) \
+ (MC_CMD_GET_VERSION_V5_OUT_ ## _f ## _PRESENT_LBN)
+
+static void efx_devlink_info_running_versions(struct efx_nic *efx,
+ struct devlink_info_req *req)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_VERSION_EXT_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_VERSION_V5_OUT_LEN);
+ char buf[EFX_MAX_VERSION_INFO_LEN];
+ unsigned int flags, build_id;
+ union {
+ const __le32 *dwords;
+ const __le16 *words;
+ const char *str;
+ } ver;
+ struct rtc_time build_date;
+ size_t outlength, offset;
+ u64 tstamp;
+ int rc;
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlength);
+ if (rc || outlength < MC_CMD_GET_VERSION_OUT_LEN)
+ return;
+
+ /* Handle previous output */
+ if (outlength < MC_CMD_GET_VERSION_V2_OUT_LEN) {
+ ver.words = (__le16 *)MCDI_PTR(outbuf,
+ GET_VERSION_EXT_OUT_VERSION);
+ offset = snprintf(buf,
EFX_MAX_VERSION_INFO_LEN, "%u.%u.%u.%u", + le16_to_cpu(ver.words[0]), + le16_to_cpu(ver.words[1]), + le16_to_cpu(ver.words[2]), + le16_to_cpu(ver.words[3])); + + devlink_info_version_running_put(req, + DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, + buf); + return; + } + + /* Handle V2 additions */ + flags = MCDI_DWORD(outbuf, GET_VERSION_V2_OUT_FLAGS); + if (flags & BIT(EFX_VER_FLAG(BOARD_EXT_INFO))) { + snprintf(buf, EFX_MAX_VERSION_INFO_LEN, "%s", + MCDI_PTR(outbuf, GET_VERSION_V2_OUT_BOARD_NAME)); + devlink_info_version_fixed_put(req, + DEVLINK_INFO_VERSION_GENERIC_BOARD_ID, + buf); + + /* Favour full board version if present (in V5 or later) */ + if (~flags & BIT(EFX_VER_FLAG(BOARD_VERSION))) { + snprintf(buf, EFX_MAX_VERSION_INFO_LEN, "%u", + MCDI_DWORD(outbuf, + GET_VERSION_V2_OUT_BOARD_REVISION)); + devlink_info_version_fixed_put(req, + DEVLINK_INFO_VERSION_GENERIC_BOARD_REV, + buf); + } + + ver.str = MCDI_PTR(outbuf, GET_VERSION_V2_OUT_BOARD_SERIAL); + if (ver.str[0]) + devlink_info_board_serial_number_put(req, ver.str); + } + + if (flags & BIT(EFX_VER_FLAG(FPGA_EXT_INFO))) { + ver.dwords = (__le32 *)MCDI_PTR(outbuf, + GET_VERSION_V2_OUT_FPGA_VERSION); + offset = snprintf(buf, EFX_MAX_VERSION_INFO_LEN, "%u_%c%u", + le32_to_cpu(ver.dwords[0]), + 'A' + le32_to_cpu(ver.dwords[1]), + le32_to_cpu(ver.dwords[2])); + + ver.str = MCDI_PTR(outbuf, GET_VERSION_V2_OUT_FPGA_EXTRA); + if (ver.str[0]) + snprintf(&buf[offset], EFX_MAX_VERSION_INFO_LEN - offset, + " (%s)", ver.str); + + devlink_info_version_running_put(req, + EFX_DEVLINK_INFO_VERSION_FPGA_REV, + buf); + } + + if (flags & BIT(EFX_VER_FLAG(CMC_EXT_INFO))) { + ver.dwords = (__le32 *)MCDI_PTR(outbuf, + GET_VERSION_V2_OUT_CMCFW_VERSION); + offset = snprintf(buf, EFX_MAX_VERSION_INFO_LEN, "%u.%u.%u.%u", + le32_to_cpu(ver.dwords[0]), + le32_to_cpu(ver.dwords[1]), + le32_to_cpu(ver.dwords[2]), + le32_to_cpu(ver.dwords[3])); + + tstamp = MCDI_QWORD(outbuf, + GET_VERSION_V2_OUT_CMCFW_BUILD_DATE); + if (tstamp) { + rtc_time64_to_tm(tstamp, &build_date); + snprintf(&buf[offset], EFX_MAX_VERSION_INFO_LEN - offset, + " (%ptRd)", &build_date); + } + + devlink_info_version_running_put(req, + EFX_DEVLINK_INFO_VERSION_FW_MGMT_CMC, + buf); + } + + ver.words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_V2_OUT_VERSION); + offset = snprintf(buf, EFX_MAX_VERSION_INFO_LEN, "%u.%u.%u.%u", + le16_to_cpu(ver.words[0]), le16_to_cpu(ver.words[1]), + le16_to_cpu(ver.words[2]), le16_to_cpu(ver.words[3])); + if (flags & BIT(EFX_VER_FLAG(MCFW_EXT_INFO))) { + build_id = MCDI_DWORD(outbuf, GET_VERSION_V2_OUT_MCFW_BUILD_ID); + snprintf(&buf[offset], EFX_MAX_VERSION_INFO_LEN - offset, + " (%x) %s", build_id, + MCDI_PTR(outbuf, GET_VERSION_V2_OUT_MCFW_BUILD_NAME)); + } + devlink_info_version_running_put(req, + DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, + buf); + + if (flags & BIT(EFX_VER_FLAG(SUCFW_EXT_INFO))) { + ver.dwords = (__le32 *)MCDI_PTR(outbuf, + GET_VERSION_V2_OUT_SUCFW_VERSION); + tstamp = MCDI_QWORD(outbuf, + GET_VERSION_V2_OUT_SUCFW_BUILD_DATE); + rtc_time64_to_tm(tstamp, &build_date); + build_id = MCDI_DWORD(outbuf, GET_VERSION_V2_OUT_SUCFW_CHIP_ID); + + snprintf(buf, EFX_MAX_VERSION_INFO_LEN, + "%u.%u.%u.%u type %x (%ptRd)", + le32_to_cpu(ver.dwords[0]), le32_to_cpu(ver.dwords[1]), + le32_to_cpu(ver.dwords[2]), le32_to_cpu(ver.dwords[3]), + build_id, &build_date); + + devlink_info_version_running_put(req, + EFX_DEVLINK_INFO_VERSION_FW_MGMT_SUC, + buf); + } + + if (outlength < MC_CMD_GET_VERSION_V3_OUT_LEN) + return; + + /* Handle V3 additions */ + if (flags & 
BIT(EFX_VER_FLAG(DATAPATH_HW_VERSION))) { + ver.dwords = (__le32 *)MCDI_PTR(outbuf, + GET_VERSION_V3_OUT_DATAPATH_HW_VERSION); + + snprintf(buf, EFX_MAX_VERSION_INFO_LEN, "%u.%u.%u", + le32_to_cpu(ver.dwords[0]), le32_to_cpu(ver.dwords[1]), + le32_to_cpu(ver.dwords[2])); + + devlink_info_version_running_put(req, + EFX_DEVLINK_INFO_VERSION_DATAPATH_HW, + buf); + } + + if (flags & BIT(EFX_VER_FLAG(DATAPATH_FW_VERSION))) { + ver.dwords = (__le32 *)MCDI_PTR(outbuf, + GET_VERSION_V3_OUT_DATAPATH_FW_VERSION); + + snprintf(buf, EFX_MAX_VERSION_INFO_LEN, "%u.%u.%u", + le32_to_cpu(ver.dwords[0]), le32_to_cpu(ver.dwords[1]), + le32_to_cpu(ver.dwords[2])); + + devlink_info_version_running_put(req, + EFX_DEVLINK_INFO_VERSION_DATAPATH_FW, + buf); + } + + if (outlength < MC_CMD_GET_VERSION_V4_OUT_LEN) + return; + + /* Handle V4 additions */ + if (flags & BIT(EFX_VER_FLAG(SOC_BOOT_VERSION))) { + ver.dwords = (__le32 *)MCDI_PTR(outbuf, + GET_VERSION_V4_OUT_SOC_BOOT_VERSION); + + snprintf(buf, EFX_MAX_VERSION_INFO_LEN, "%u.%u.%u.%u", + le32_to_cpu(ver.dwords[0]), le32_to_cpu(ver.dwords[1]), + le32_to_cpu(ver.dwords[2]), + le32_to_cpu(ver.dwords[3])); + + devlink_info_version_running_put(req, + EFX_DEVLINK_INFO_VERSION_SOC_BOOT, + buf); + } + + if (flags & BIT(EFX_VER_FLAG(SOC_UBOOT_VERSION))) { + ver.dwords = (__le32 *)MCDI_PTR(outbuf, + GET_VERSION_V4_OUT_SOC_UBOOT_VERSION); + + snprintf(buf, EFX_MAX_VERSION_INFO_LEN, "%u.%u.%u.%u", + le32_to_cpu(ver.dwords[0]), le32_to_cpu(ver.dwords[1]), + le32_to_cpu(ver.dwords[2]), + le32_to_cpu(ver.dwords[3])); + + devlink_info_version_running_put(req, + EFX_DEVLINK_INFO_VERSION_SOC_UBOOT, + buf); + } + + if (flags & BIT(EFX_VER_FLAG(SOC_MAIN_ROOTFS_VERSION))) { + ver.dwords = (__le32 *)MCDI_PTR(outbuf, + GET_VERSION_V4_OUT_SOC_MAIN_ROOTFS_VERSION); + + snprintf(buf, EFX_MAX_VERSION_INFO_LEN, "%u.%u.%u.%u", + le32_to_cpu(ver.dwords[0]), le32_to_cpu(ver.dwords[1]), + le32_to_cpu(ver.dwords[2]), + le32_to_cpu(ver.dwords[3])); + + devlink_info_version_running_put(req, + EFX_DEVLINK_INFO_VERSION_SOC_MAIN, + buf); + } + + if (flags & BIT(EFX_VER_FLAG(SOC_RECOVERY_BUILDROOT_VERSION))) { + ver.dwords = (__le32 *)MCDI_PTR(outbuf, + GET_VERSION_V4_OUT_SOC_RECOVERY_BUILDROOT_VERSION); + + snprintf(buf, EFX_MAX_VERSION_INFO_LEN, "%u.%u.%u.%u", + le32_to_cpu(ver.dwords[0]), le32_to_cpu(ver.dwords[1]), + le32_to_cpu(ver.dwords[2]), + le32_to_cpu(ver.dwords[3])); + + devlink_info_version_running_put(req, + EFX_DEVLINK_INFO_VERSION_SOC_RECOVERY, + buf); + } + + if (flags & BIT(EFX_VER_FLAG(SUCFW_VERSION)) && + ~flags & BIT(EFX_VER_FLAG(SUCFW_EXT_INFO))) { + ver.dwords = (__le32 *)MCDI_PTR(outbuf, + GET_VERSION_V4_OUT_SUCFW_VERSION); + + snprintf(buf, EFX_MAX_VERSION_INFO_LEN, "%u.%u.%u.%u", + le32_to_cpu(ver.dwords[0]), le32_to_cpu(ver.dwords[1]), + le32_to_cpu(ver.dwords[2]), + le32_to_cpu(ver.dwords[3])); + + devlink_info_version_running_put(req, + EFX_DEVLINK_INFO_VERSION_FW_MGMT_SUC, + buf); + } + + if (outlength < MC_CMD_GET_VERSION_V5_OUT_LEN) + return; + + /* Handle V5 additions */ + + if (flags & BIT(EFX_VER_FLAG(BOARD_VERSION))) { + ver.dwords = (__le32 *)MCDI_PTR(outbuf, + GET_VERSION_V5_OUT_BOARD_VERSION); + + snprintf(buf, EFX_MAX_VERSION_INFO_LEN, "%u.%u.%u.%u", + le32_to_cpu(ver.dwords[0]), le32_to_cpu(ver.dwords[1]), + le32_to_cpu(ver.dwords[2]), + le32_to_cpu(ver.dwords[3])); + + devlink_info_version_running_put(req, + DEVLINK_INFO_VERSION_GENERIC_BOARD_REV, + buf); + } + + if (flags & BIT(EFX_VER_FLAG(BUNDLE_VERSION))) { + ver.dwords = (__le32 *)MCDI_PTR(outbuf, + 
GET_VERSION_V5_OUT_BUNDLE_VERSION); + + snprintf(buf, EFX_MAX_VERSION_INFO_LEN, "%u.%u.%u.%u", + le32_to_cpu(ver.dwords[0]), le32_to_cpu(ver.dwords[1]), + le32_to_cpu(ver.dwords[2]), + le32_to_cpu(ver.dwords[3])); + + devlink_info_version_running_put(req, + DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID, + buf); + } +} + +#undef EFX_VER_FLAG + +static void efx_devlink_info_query_all(struct efx_nic *efx, + struct devlink_info_req *req) +{ +#if defined(EFX_USE_KCOMPAT) && defined(EFX_HAVE_DEVLINK_INFO_DRIVER_NAME_PUT) + devlink_info_driver_name_put(req, efx->pci_dev->driver->name); +#endif + efx_devlink_info_board_cfg(efx, req); + efx_devlink_info_stored_versions(efx, req); + efx_devlink_info_running_versions(efx, req); +} + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_USE_DEVLINK) + +/* This is the private data we have in struct devlink */ +struct efx_devlink { + struct efx_nic *efx; +}; + +#if defined(EFX_USE_KCOMPAT) && defined(EFX_HAVE_NDO_GET_DEVLINK_PORT) +struct devlink_port *efx_get_devlink_port(struct net_device *dev) +{ + struct efx_nic *efx = efx_netdev_priv(dev); + struct efx_devlink *devlink_private; + + if (!efx->devlink) + return NULL; + + devlink_private = devlink_priv(efx->devlink); + if (devlink_private) + return efx->devlink_port; + else + return NULL; +} +#endif + +static int efx_devlink_info_get(struct devlink *devlink, + struct devlink_info_req *req, + struct netlink_ext_ack *extack) +{ + struct efx_devlink *devlink_private = devlink_priv(devlink); + struct efx_nic *efx = devlink_private->efx; + + efx_devlink_info_query_all(efx, req); + return 0; +} + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_DEVLINK_FLASH_UPDATE_PARAMS) +static int efx_devlink_flash_update(struct devlink *devlink, + struct devlink_flash_update_params *params, + struct netlink_ext_ack *extack) +#else +static int efx_devlink_flash_update(struct devlink *devlink, + const char *file_name, + const char *component, + struct netlink_ext_ack *extack) +#endif +{ + struct efx_devlink *devlink_private = devlink_priv(devlink); + struct efx_nic *efx = devlink_private->efx; +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_DEVLINK_FLASH_UPDATE_PARAMS_FW) + const struct firmware *fw; + int rc; +#endif + +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_DEVLINK_FLASH_UPDATE_PARAMS) + if (component) { + netif_err(efx, drv, efx->net_dev, + "Updates to NVRAM component %s are not supported\n", + component); + return -EINVAL; + } +#endif + +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_DEVLINK_FLASH_UPDATE_PARAMS_FW) +#ifdef EFX_HAVE_DEVLINK_FLASH_UPDATE_PARAMS + rc = request_firmware(&fw, params->file_name, &efx->pci_dev->dev); +#else + rc = request_firmware(&fw, file_name, &efx->pci_dev->dev); +#endif + if (rc) + return rc; + + rc = efx_reflash_flash_firmware(efx, fw); + + release_firmware(fw); + return rc; +#else + return efx_reflash_flash_firmware(efx, params->fw); +#endif +} + +static const struct devlink_ops sfc_devlink_ops = { +#ifdef EFX_HAVE_DEVLINK_OPS_SUPPORTED_FLASH_UPDATE_PARAMS + .supported_flash_update_params = 0, +#endif + .flash_update = efx_devlink_flash_update, + .info_get = efx_devlink_info_get, +}; + +void efx_fini_devlink(struct efx_nic *efx) +{ + if (efx->devlink) { +#if defined(EFX_USE_KCOMPAT) && defined(EFX_HAVE_SET_NETDEV_DEVLINK_PORT) + efx->net_dev->devlink_port->type = DEVLINK_PORT_TYPE_NOTSET; +#endif + devlink_port_unregister(efx->devlink_port); + kfree(efx->devlink_port); + efx->devlink_port = NULL; +#if defined(EFX_USE_KCOMPAT) && defined(EFX_HAVE_SET_NETDEV_DEVLINK_PORT) + 
efx->net_dev->devlink_port = NULL;
+#endif
+ devlink_unregister(efx->devlink);
+ devlink_free(efx->devlink);
+ }
+ efx->devlink = NULL;
+}
+
+int efx_probe_devlink(struct efx_nic *efx)
+{
+ struct efx_devlink *devlink_private;
+ int rc;
+
+ efx->devlink = devlink_alloc(&sfc_devlink_ops,
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_DEVLINK_ALLOC_DEV)
+ sizeof(struct efx_devlink),
+ &efx->pci_dev->dev);
+#else
+ sizeof(struct efx_devlink));
+#endif
+ if (!efx->devlink)
+ return -ENOMEM;
+ devlink_private = devlink_priv(efx->devlink);
+ devlink_private->efx = efx;
+
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_VOID_DEVLINK_REGISTER)
+ devlink_register(efx->devlink);
+#elif defined(EFX_HAVE_DEVLINK_ALLOC_DEV)
+ rc = devlink_register(efx->devlink);
+ if (rc)
+ goto out_free;
+#else
+ rc = devlink_register(efx->devlink, &efx->pci_dev->dev);
+ if (rc)
+ goto out_free;
+#endif
+
+ efx->devlink_port = kzalloc(sizeof(*efx->devlink_port), GFP_KERNEL);
+ if (!efx->devlink_port) {
+ /* rc is not otherwise set on this path, so report the
+ * allocation failure explicitly.
+ */
+ rc = -ENOMEM;
+ goto out_unreg;
+ }
+
+ rc = devlink_port_register(efx->devlink, efx->devlink_port,
+ efx->port_num);
+ if (rc)
+ goto out_free_port;
+
+#if defined(EFX_USE_KCOMPAT)
+#if defined(EFX_HAVE_SET_NETDEV_DEVLINK_PORT)
+ efx->net_dev->devlink_port = efx->devlink_port;
+ efx->net_dev->devlink_port->type = DEVLINK_PORT_TYPE_ETH;
+#else
+ devlink_port_type_eth_set(efx->devlink_port, efx->net_dev);
+#endif
+#endif
+ return 0;
+
+out_free_port:
+ kfree(efx->devlink_port);
+ efx->devlink_port = NULL;
+
+out_unreg:
+ devlink_unregister(efx->devlink);
+#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_VOID_DEVLINK_REGISTER)
+out_free:
+#endif
+ devlink_free(efx->devlink);
+ efx->devlink = NULL;
+ return rc;
+}
+#else
+
+static ssize_t versions_show(struct device *dev, struct device_attribute *attr,
+ char *buf_out)
+{
+ struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+ struct devlink_info_req req = {
+ .buf = buf_out,
+ .bufsize = PAGE_SIZE
+ };
+
+ buf_out[0] = '\0';
+ efx_devlink_info_query_all(efx, &req);
+ return strlen(buf_out);
+}
+
+static DEVICE_ATTR_RO(versions);
+
+int efx_probe_devlink(struct efx_nic *efx)
+{
+ return device_create_file(&efx->pci_dev->dev, &dev_attr_versions);
+}
+
+void efx_fini_devlink(struct efx_nic *efx)
+{
+ device_remove_file(&efx->pci_dev->dev, &dev_attr_versions);
+}
+
+#endif /* EFX_USE_DEVLINK */
diff --git a/src/drivers/net/ethernet/sfc/efx_devlink.h b/src/drivers/net/ethernet/sfc/efx_devlink.h
new file mode 100644
index 0000000..28a1f7f
--- /dev/null
+++ b/src/drivers/net/ethernet/sfc/efx_devlink.h
@@ -0,0 +1,24 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2019 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+#ifndef _EFX_DEVLINK_H
+#define _EFX_DEVLINK_H
+
+#include "net_driver.h"
+#ifndef EFX_USE_KCOMPAT
+#include <net/devlink.h>
+#endif
+
+int efx_probe_devlink(struct efx_nic *efx);
+void efx_fini_devlink(struct efx_nic *efx);
+
+#if defined(EFX_USE_KCOMPAT) && defined(EFX_HAVE_NDO_GET_DEVLINK_PORT)
+struct devlink_port *efx_get_devlink_port(struct net_device *dev);
+#endif
+
+#endif /* _EFX_DEVLINK_H */
diff --git a/src/drivers/net/ethernet/sfc/efx_ethtool.h b/src/drivers/net/ethernet/sfc/efx_ethtool.h
new file mode 100644
index 0000000..cce8913
--- /dev/null
+++ b/src/drivers/net/ethernet/sfc/efx_ethtool.h
@@ -0,0 +1,17 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2019 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+#ifndef EFX_ETHTOOL_H
+#define EFX_ETHTOOL_H
+
+int efx_ethtool_get_module_info(struct net_device *net_dev,
+ struct ethtool_modinfo *modinfo);
+int efx_ethtool_get_module_eeprom(struct net_device *net_dev,
+ struct ethtool_eeprom *ee,
+ u8 *data);
+#endif
diff --git a/src/drivers/net/ethernet/sfc/efx_ioctl.h b/src/drivers/net/ethernet/sfc/efx_ioctl.h
new file mode 100644
index 0000000..962664c
--- /dev/null
+++ b/src/drivers/net/ethernet/sfc/efx_ioctl.h
@@ -0,0 +1,626 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers
+ * (including support for SFE4001 10GBT NIC)
+ *
+ * Copyright 2005-2006: Fen Systems Ltd.
+ * Copyright 2006-2012: Solarflare Communications Inc,
+ * 9501 Jeronimo Road, Suite 250,
+ * Irvine, CA 92618, USA
+ *
+ * Initially developed by Michael Brown <mbrown@fensystems.co.uk>
+ * Maintained by Solarflare Communications
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ ****************************************************************************
+ */
+
+#ifndef EFX_IOCTL_H
+#define EFX_IOCTL_H
+
+#if defined(__KERNEL__)
+#include <linux/if.h>
+#include <linux/types.h>
+#else
+#include <net/if.h>
+#ifndef _LINUX_IF_H
+#define _LINUX_IF_H /* prevent <linux/if.h> from conflicting with <net/if.h> */
+#endif
+#include "efx_linux_types.h"
+#endif
+#include <linux/sockios.h>
+#include <linux/if_ether.h>
+#include <linux/ethtool.h>
+
+/**
+ * DOC: sfc driver private ioctl
+ *
+ * Various driver features can be controlled through a private ioctl,
+ * which has multiple sub-commands.
+ *
+ * Most of these features are also available through the ethtool API
+ * or other standard kernel API on a sufficiently recent kernel
+ * version. Userland tools should generally use the standard API
+ * first and fall back to the private ioctl in case of an error code
+ * indicating the standard API is not implemented (e.g. %EOPNOTSUPP,
+ * %ENOSYS, or %ENOTTY).
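+ *
+ * As an illustrative sketch only (not part of this interface), a
+ * userland tool following that rule might do:
+ *
+ *	ret = ioctl(fd, SIOCETHTOOL, &ifr);
+ *	if (ret < 0 && (errno == EOPNOTSUPP || errno == ENOTTY))
+ *		ret = ioctl(fd, SIOCEFX, &ifr);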
+ * + * A few features are intended for driver debugging and are not + * included in the production driver. + * + * The private ioctl is numbered %SIOCEFX and is implemented on + * both sockets and a char device (/dev/sfc_control). Sockets are + * more reliable as they do not depend on a device node being + * created on disk. + */ + +/* Efx private ioctl number */ +/* We do not use the first 3 private ioctls because some utilities expect + * them to be old ioctls. */ +#define SIOCEFX (SIOCDEVPRIVATE + 3) + +/* + * Efx private ioctls + */ + +/* For talking MCDI to siena ************************************************/ + +/* Deprecated */ +#define EFX_MCDI_REQUEST 0xef0c +/** + * struct efx_mcdi_request - Parameters for %EFX_MCDI_REQUEST sub-command + * @payload: On entry, the MCDI command parameters. On return, if + * @rc == 0, this is the response. + * @cmd: MCDI command type number. + * @len: On entry, the length of command parameters, in bytes. On + * return, if @rc == 0, this is the length of the response. + * @rc: Linux error code for the request. This may be based on an + * error code reported by the MC or a communication failure + * detected by the driver. + * + * If the driver detects invalid parameters, e.g. @len is out of + * range, the ioctl() call will return -1, errno will be set + * accordingly, and none of the fields will be valid. All other + * errors are reported by setting @rc. + * + * %EFX_MCDI_REQUEST does not support the larger command type numbers, + * error codes and payload lengths of MCDIv2. + */ +struct efx_mcdi_request { + __u32 payload[63]; + __u8 cmd; + __u8 len; + __u8 rc; +}; + +#define EFX_MCDI_REQUEST2 0xef21 +/** + * struct efx_mcdi_request2 - Parameters for %EFX_MCDI_REQUEST2 sub-command + * @cmd: MCDI command type number. + * @inlen: The length of command parameters, in bytes. + * @outlen: On entry, the length available for the response, in bytes. + * On return, the length used for the response, in bytes. + * @flags: Flags for the command or response. The only flag defined + * at present is %EFX_MCDI_REQUEST_ERROR. If this is set on return, + * the MC reported an error. + * @host_errno: On return, if %EFX_MCDI_REQUEST_ERROR is included in @flags, + * the suggested Linux error code for the error. + * @payload: On entry, the MCDI command parameters. On return, the response. + * + * If the driver detects invalid parameters or a communication failure + * with the MC, the ioctl() call will return -1, errno will be set + * accordingly, and none of the fields will be valid. If the MC reports + * an error, the ioctl() call will return 0 but @flags will include the + * %EFX_MCDI_REQUEST_ERROR flag. The MC error code can then be found in + * @payload (if @outlen was sufficiently large) and a suggested Linux + * error code can be found in @host_errno. + * + * %EFX_MCDI_REQUEST2 fully supports both MCDIv1 and v2. + */ +struct efx_mcdi_request2 { + __u16 cmd; + __u16 inlen; + __u16 outlen; + __u16 flags; + __u32 host_errno; + /* + * The maximum payload length is 0x400 (MCDI_CTL_SDU_LEN_MAX_V2) - 4 bytes + * = 255 x 32 bit words as MCDI_CTL_SDU_LEN_MAX_V2 doesn't take account of + * the space required by the V1 header, which still exists in a V2 command. 
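+	 * (Worked example from the constants above: 0x400 bytes - 4 bytes of
+	 * V1 header = 1020 bytes = 255 32-bit words, hence payload[255].)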
+ */ + __u32 payload[255]; +}; +#define EFX_MCDI_REQUEST_ERROR 0x0001 + +/* Reset selected components, like ETHTOOL_RESET ****************************/ +#define EFX_RESET_FLAGS 0xef0d +struct efx_reset_flags { + __u32 flags; +}; +#ifndef ETH_RESET_SHARED_SHIFT + enum ethtool_reset_flags { + /* These flags represent components dedicated to the interface + * the command is addressed to. Shift any flag left by + * ETH_RESET_SHARED_SHIFT to reset a shared component of the + * same type. + */ + ETH_RESET_MGMT = 1 << 0, /* Management processor */ + ETH_RESET_IRQ = 1 << 1, /* Interrupt requester */ + ETH_RESET_DMA = 1 << 2, /* DMA engine */ + ETH_RESET_FILTER = 1 << 3, /* Filtering/flow direction */ + ETH_RESET_OFFLOAD = 1 << 4, /* Protocol offload */ + ETH_RESET_MAC = 1 << 5, /* Media access controller */ + ETH_RESET_PHY = 1 << 6, /* Transceiver/PHY */ + ETH_RESET_RAM = 1 << 7, /* RAM shared between + * multiple components */ + + ETH_RESET_DEDICATED = 0x0000ffff, /* All components dedicated to + * this interface */ + ETH_RESET_ALL = 0xffffffff, /* All components used by this + * interface, even if shared */ + }; + #define ETH_RESET_SHARED_SHIFT 16 +#endif +#ifndef ETHTOOL_RESET + #define ETHTOOL_RESET 0x00000034 +#endif + +/* Get RX flow hashing capabilities, like ETHTOOL_GRX{RINGS,FH} *************/ +#define EFX_RXNFC 0xef0e +#ifndef ETHTOOL_GRXFH + #define ETHTOOL_GRXFH 0x00000029 +#endif +#ifndef ETHTOOL_GRXRINGS + #define ETHTOOL_GRXRINGS 0x0000002d +#endif +#ifndef ETHTOOL_GRXCLSRULE + struct ethtool_tcpip4_spec { + __be32 ip4src; + __be32 ip4dst; + __be16 psrc; + __be16 pdst; + __u8 tos; + }; + + struct ethtool_usrip4_spec { + __be32 ip4src; + __be32 ip4dst; + __be32 l4_4_bytes; + __u8 tos; + __u8 ip_ver; + __u8 proto; + }; + + #define ETH_RX_NFC_IP4 1 + #define RX_CLS_FLOW_DISC 0xffffffffffffffffULL + #define ETHTOOL_GRXCLSRLCNT 0x0000002e + #define ETHTOOL_GRXCLSRULE 0x0000002f + #define ETHTOOL_GRXCLSRLALL 0x00000030 + #define ETHTOOL_SRXCLSRLDEL 0x00000031 + #define ETHTOOL_SRXCLSRLINS 0x00000032 +#endif +#ifndef EFX_HAVE_EFX_ETHTOOL_RXNFC + union efx_ethtool_flow_union { + struct ethtool_tcpip4_spec tcp_ip4_spec; + struct ethtool_tcpip4_spec udp_ip4_spec; + struct ethtool_tcpip4_spec sctp_ip4_spec; + struct ethtool_usrip4_spec usr_ip4_spec; + struct ethhdr ether_spec; + /* unneeded members omitted... */ + __u8 hdata[60]; + }; + struct efx_ethtool_flow_ext { + __be16 vlan_etype; + __be16 vlan_tci; + __be32 data[2]; + }; + struct efx_ethtool_rx_flow_spec { + __u32 flow_type; + union efx_ethtool_flow_union h_u; + struct efx_ethtool_flow_ext h_ext; + union efx_ethtool_flow_union m_u; + struct efx_ethtool_flow_ext m_ext; + __u64 ring_cookie; + __u32 location; + }; + struct efx_ethtool_rxnfc { + __u32 cmd; + __u32 flow_type; + __u64 data; + struct efx_ethtool_rx_flow_spec fs; + union { + __u32 rule_cnt; + __u32 rss_context; + }; + __u32 rule_locs[0]; + }; + #define EFX_HAVE_EFX_ETHTOOL_RXNFC yes +#endif +#ifndef RX_CLS_LOC_SPECIAL + #define RX_CLS_LOC_SPECIAL 0x80000000 + #define RX_CLS_LOC_ANY 0xffffffff + #define RX_CLS_LOC_FIRST 0xfffffffe + #define RX_CLS_LOC_LAST 0xfffffffd +#endif + +/* Get/set RX flow hash indirection table, like ETHTOOL_{G,S}RXFHINDIR} *****/ +#define EFX_RXFHINDIR 0xef10 +#ifndef ETHTOOL_GRXFHINDIR + struct ethtool_rxfh_indir { + __u32 cmd; + /* On entry, this is the array size of the user buffer. On + * return from ETHTOOL_GRXFHINDIR, this is the array size of + * the hardware indirection table. 
*/
+ __u32 size;
+ __u32 ring_index[0]; /* ring/queue index for each hash value */
+ };
+ #define ETHTOOL_GRXFHINDIR 0x00000038
+ #define ETHTOOL_SRXFHINDIR 0x00000039
+#endif
+struct efx_rxfh_indir {
+ struct ethtool_rxfh_indir head;
+ __u32 table[128];
+};
+
+/* PTP support for NIC time disciplining ************************************/
+
+struct efx_timespec {
+ __s64 tv_sec;
+ __s32 tv_nsec;
+};
+
+/* Set/get hardware timestamp config, like SIOC{S,G}HWTSTAMP ****************/
+#define EFX_TS_INIT 0xef12
+#define EFX_GET_TS_CONFIG 0xef25
+
+#define EFX_TS_INIT_FLAGS_PTP_V2_ENHANCED 0x80000000
+
+#if !defined(__KERNEL__)
+
+enum {
+ SOF_TIMESTAMPING_TX_HARDWARE = (1<<0),
+ SOF_TIMESTAMPING_TX_SOFTWARE = (1<<1),
+ SOF_TIMESTAMPING_RX_HARDWARE = (1<<2),
+ SOF_TIMESTAMPING_RX_SOFTWARE = (1<<3),
+ SOF_TIMESTAMPING_SOFTWARE = (1<<4),
+ SOF_TIMESTAMPING_SYS_HARDWARE = (1<<5),
+ SOF_TIMESTAMPING_RAW_HARDWARE = (1<<6),
+ SOF_TIMESTAMPING_MASK =
+ (SOF_TIMESTAMPING_RAW_HARDWARE - 1) |
+ SOF_TIMESTAMPING_RAW_HARDWARE
+};
+
+enum hwtstamp_tx_types {
+ HWTSTAMP_TX_OFF,
+ HWTSTAMP_TX_ON,
+ HWTSTAMP_TX_ONESTEP_SYNC,
+};
+
+enum hwtstamp_rx_filters {
+ HWTSTAMP_FILTER_NONE,
+ HWTSTAMP_FILTER_ALL,
+ HWTSTAMP_FILTER_SOME,
+ HWTSTAMP_FILTER_PTP_V1_L4_EVENT,
+ HWTSTAMP_FILTER_PTP_V1_L4_SYNC,
+ HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ,
+ HWTSTAMP_FILTER_PTP_V2_L4_EVENT,
+ HWTSTAMP_FILTER_PTP_V2_L4_SYNC,
+ HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ,
+ HWTSTAMP_FILTER_PTP_V2_L2_EVENT,
+ HWTSTAMP_FILTER_PTP_V2_L2_SYNC,
+ HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ,
+ HWTSTAMP_FILTER_PTP_V2_EVENT,
+ HWTSTAMP_FILTER_PTP_V2_SYNC,
+ HWTSTAMP_FILTER_PTP_V2_DELAY_REQ,
+};
+
+struct hwtstamp_config {
+ int flags;
+ int tx_type;
+ int rx_filter;
+};
+
+#endif /* !__KERNEL__ */
+
+/* Set the NIC time clock offset ********************************************/
+#define EFX_TS_SETTIME 0xef14
+struct efx_ts_settime {
+ struct efx_timespec ts; /* In and out */
+ __u32 iswrite; /* 1 == write, 0 == read (only) */
+};
+
+/* Adjust the NIC time frequency ********************************************/
+#define EFX_TS_ADJTIME 0xef15
+struct efx_ts_adjtime {
+ __s64 adjustment; /* Parts per billion, In and out */
+ __u32 iswrite; /* 1 == write, 0 == read (only) */
+};
+
+/* Get the NIC-system time skew *********************************************/
+#define EFX_TS_SYNC 0xef16
+struct efx_ts_sync {
+ struct efx_timespec ts;
+};
+
+/* Set the NIC-system synchronization status ********************************/
+#define EFX_TS_SET_SYNC_STATUS 0xef27
+struct efx_ts_set_sync_status {
+ __u32 in_sync; /* 0 == not in sync, 1 == in sync */
+ __u32 timeout; /* Seconds until no longer in sync */
+};
+
+/* Get the clock/timestamp capabilities, like ETHTOOL_GET_TS_INFO ***********/
+#define EFX_GET_TS_INFO 0xef24
+#ifndef ETHTOOL_GET_TS_INFO
+ struct ethtool_ts_info {
+ __u32 cmd;
+ __u32 so_timestamping;
+ __s32 phc_index;
+ __u32 tx_types;
+ __u32 tx_reserved[3];
+ __u32 rx_filters;
+ __u32 rx_reserved[3];
+ };
+ #define ETHTOOL_GET_TS_INFO 0x00000041 /* Get time stamping and PHC info */
+#endif
+
+/* Get pluggable module eeprom if not available via ethtool *****************/
+#define EFX_MODULEEEPROM 0xef17
+#define EFX_GMODULEINFO 0xef18
+#ifndef ETHTOOL_GMODULEEEPROM
+ struct ethtool_modinfo {
+ __u32 cmd;
+ __u32 type;
+ __u32 eeprom_len;
+ __u32 reserved[8];
+ };
+
+ #define ETH_MODULE_SFF_8079 0x1
+ #define ETH_MODULE_SFF_8079_LEN 256
+ #define ETH_MODULE_SFF_8472 0x2
+ #define ETH_MODULE_SFF_8472_LEN 512
+
+ #define ETHTOOL_GMODULEINFO 0x00000042
+
#define ETHTOOL_GMODULEEEPROM 0x00000043 +#endif +struct efx_get_module_eeprom { + struct ethtool_eeprom ee; +}; + +struct efx_get_module_info { + struct ethtool_modinfo info; +}; + +/* Set the VLAN tags for PTP receive packet filtering ***********************/ +#define EFX_TS_SET_VLAN_FILTER 0xef19 +struct efx_ts_set_vlan_filter { +#define TS_MAX_VLAN_TAGS 3 /* Maximum supported VLAN tags */ + __u32 num_vlan_tags; /* Number of VLAN tags */ + __u16 vlan_tags[TS_MAX_VLAN_TAGS]; /* VLAN tag list */ +}; + +/* Set the UUID for PTP receive packet filtering ****************************/ +#define EFX_TS_SET_UUID_FILTER 0xef1a +struct efx_ts_set_uuid_filter { + __u32 enable; /* 1 == enabled, 0 == disabled */ + __u8 uuid[8]; /* UUID to filter against */ +}; + +/* Set the Domain for PTP receive packet filtering **************************/ +#define EFX_TS_SET_DOMAIN_FILTER 0xef1b +struct efx_ts_set_domain_filter { + __u32 enable; /* 1 == enabled, 0 == disabled */ + __u32 domain; /* Domain number to filter against */ +}; + +/* Return a PPS timestamp ***************************************************/ +#define EFX_TS_GET_PPS 0xef1c +struct efx_ts_get_pps { + __u32 sequence; /* seq. num. of assert event */ + __u32 timeout; + struct efx_timespec sys_assert; /* time of assert in system time */ + struct efx_timespec nic_assert; /* time of assert in nic time */ + struct efx_timespec delta; /* delta between NIC and system time */ +}; + +#define EFX_TS_ENABLE_HW_PPS 0xef1d +struct efx_ts_hw_pps { + __u32 enable; +}; + +/* Reprogram the CPLD on an AOE NIC *****************************************/ +#define EFX_UPDATE_CPLD 0xef1e +struct efx_update_cpld { + __u32 update; +}; + +/* License key operations on AOE or EF10 NIC ********************************/ + +/* Deprecated - only supports AOE */ +#define EFX_LICENSE_UPDATE 0xef1f +struct efx_update_license { + __u32 valid_keys; + __u32 invalid_keys; + __u32 blacklisted_keys; +}; + +#define EFX_LICENSE_UPDATE2 0xef23 +struct efx_update_license2 { + __u32 valid_keys; + __u32 invalid_keys; + __u32 blacklisted_keys; + __u32 unverifiable_keys; + __u32 wrong_node_keys; +}; + +#define EFX_LICENSED_APP_STATE 0xef26 +struct efx_licensed_app_state { + __u32 app_id; + __u32 state; +}; + +/* Reset the AOE application and controller *********************************/ +#define EFX_RESET_AOE 0xef20 +struct efx_aoe_reset { + __u32 flags; +}; + +/* Get device identity ******************************************************/ +#define EFX_GET_DEVICE_IDS 0xef22 +struct efx_device_ids { + __u16 vendor_id, device_id; /* PCI device ID */ + __u16 subsys_vendor_id, subsys_device_id; /* PCI subsystem ID */ + __u32 phy_type; /* PHY type code */ + __u8 port_num; /* port number (0-based) */ + __u8 perm_addr[6]; /* non-volatile MAC address */ +}; + +/* Dump support *************************************************************/ +#define EFX_DUMP 0xef28 +#if !defined (ETHTOOL_SET_DUMP) || defined (EFX_NEED_STRUCT_ETHTOOL_DUMP) + struct ethtool_dump { + __u32 cmd; + __u32 version; + __u32 flag; + __u32 len; + __u8 data[0]; + }; +#elif !defined (__KERNEL__) + struct efx_ethtool_dump { + __u32 cmd; + __u32 version; + __u32 flag; + __u32 len; + __u8 data[0]; + }; + #define ethtool_dump efx_ethtool_dump +#endif +#ifndef ETHTOOL_SET_DUMP + #define ETHTOOL_SET_DUMP 0x0000003e +#endif +#ifndef ETHTOOL_GET_DUMP_FLAG + #define ETHTOOL_GET_DUMP_FLAG 0x0000003f +#endif +#ifndef ETHTOOL_GET_DUMP_DATA + #define ETHTOOL_GET_DUMP_DATA 0x00000040 +#endif + +/* sfctool2 shadow ethtool interface 
****************************************/ +#define EFX_SFCTOOL 0xef29 +struct efx_sfctool { + /* This can be any ethtool command struct from include/linux/ethtool.h. + * The first element will always be __u32 cmd. + */ + void __user *data; +}; + +/* Next available cmd number is 0xef2a */ + +/* Efx private ioctl command structures *************************************/ + +union efx_ioctl_data { + struct efx_mcdi_request mcdi_request; + struct efx_mcdi_request2 mcdi_request2; + struct efx_reset_flags reset_flags; + struct efx_ethtool_rxnfc rxnfc; + struct efx_rxfh_indir rxfh_indir; + struct hwtstamp_config ts_init; + struct efx_ts_settime ts_settime; + struct efx_ts_adjtime ts_adjtime; + struct efx_ts_sync ts_sync; + struct efx_ts_set_sync_status ts_set_sync_status; + struct ethtool_ts_info ts_info; + struct efx_get_module_eeprom eeprom; + struct efx_get_module_info modinfo; + struct efx_ts_set_vlan_filter ts_vlan_filter; + struct efx_ts_set_uuid_filter ts_uuid_filter; + struct efx_ts_set_domain_filter ts_domain_filter; + struct efx_ts_get_pps pps_event; + struct efx_ts_hw_pps pps_enable; + struct efx_update_cpld cpld; + struct efx_update_license key_stats; + struct efx_update_license2 key_stats2; + struct efx_aoe_reset aoe_reset; + struct efx_device_ids device_ids; + struct efx_licensed_app_state app_state; + struct ethtool_dump dump; + struct efx_sfctool sfctool; +}; + +/** + * struct efx_ioctl - Parameters for sfc private ioctl on char device + * @if_name: Name of the net device to control + * @cmd: Command number + * @u: Command-specific parameters + * + * Usage: + * struct efx_ioctl efx; + * + * fd = open("/dev/sfc_control", %O_RDWR); + * + * strncpy(efx.if_name, if_name, %IFNAMSIZ); + * + * efx.cmd = %EFX_FROBNOSTICATE; + * + * efx.u.frobnosticate.magic = 42; + * + * ret = ioctl(fd, %SIOCEFX, & efx); + */ +struct efx_ioctl { + char if_name[IFNAMSIZ]; + /* Command to run */ + __u16 cmd; + /* Parameters */ + union efx_ioctl_data u; +} __attribute__ ((packed)); + +/** + * struct efx_sock_ioctl - Parameters for sfc private ioctl on socket + * @cmd: Command number + * @u: Command-specific parameters + * + * Usage: + * struct ifreq ifr; + * + * struct efx_sock_ioctl efx; + * + * fd = socket(%AF_INET, %SOCK_STREAM, 0); + * + * strncpy(ifr.ifr_name, if_name, %IFNAMSIZ); + * + * ifr.ifr_data = (caddr_t) & efx; + * + * efx.cmd = %EFX_FROBNOSTICATE; + * + * efx.u.frobnosticate.magic = 42; + * + * ret = ioctl(fd, %SIOCEFX, & ifr); + */ +struct efx_sock_ioctl { + /* Command to run */ + __u16 cmd; +/* private: */ + __u16 reserved; +/* public: */ + /* Parameters */ + union efx_ioctl_data u; +} __attribute__ ((packed)); + +#ifdef __KERNEL__ +int efx_private_ioctl(struct efx_nic *efx, u16 cmd, + union efx_ioctl_data __user *data); +int efx_control_init(void); +void efx_control_fini(void); +#endif /* __KERNEL__ */ + +#endif /* EFX_IOCTL_H */ diff --git a/src/drivers/net/ethernet/sfc/efx_linux_types.h b/src/drivers/net/ethernet/sfc/efx_linux_types.h new file mode 100644 index 0000000..461da98 --- /dev/null +++ b/src/drivers/net/ethernet/sfc/efx_linux_types.h @@ -0,0 +1,48 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2019 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
+ */
+
+/* Wrapper for <linux/types.h> */
+
+#ifndef EFX_LINUX_TYPES_H
+#define EFX_LINUX_TYPES_H
+
+#include <linux/types.h>
+#include <linux/version.h>
+
+/* Although we don't support kernel versions before 2.6.9, the kernel
+ * headers for userland may come from a rather older version (as they
+ * do in RHEL 4).
+ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9)
+typedef __u16 __be16;
+typedef __u32 __be32;
+typedef __u64 __be64;
+#endif
+
+/* Prior to Linux 2.6.18, some kernel headers wrongly used the
+ * in-kernel type names for user API. Also, sfctool really wants
+ * these names.
+ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) || \
+ defined(EFX_WANT_KERNEL_TYPES)
+typedef __u8 u8;
+typedef __u16 u16;
+typedef __u32 u32;
+typedef __s32 s32;
+typedef __u64 u64;
+#endif
+
+/* Empty define of __user, for use in struct efx_sfctool */
+#define __user
+
+#ifndef noinline_for_stack
+#define noinline_for_stack noinline
+#endif
+
+#endif /* !EFX_LINUX_TYPES_H */
diff --git a/src/drivers/net/ethernet/sfc/efx_reflash.c b/src/drivers/net/ethernet/sfc/efx_reflash.c
new file mode 100644
index 0000000..135396d
--- /dev/null
+++ b/src/drivers/net/ethernet/sfc/efx_reflash.c
@@ -0,0 +1,594 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Driver for Xilinx network controllers and boards
+ * Copyright 2021 Xilinx Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/crc32.h>
+
+#include "net_driver.h"
+#include "efx.h"
+#include "nic.h"
+#include "efx_reflash.h"
+#include "efx_devlink.h"
+
+/* Reflash firmware data header and trailer fields (see SF-121352-AN) */
+#define EFX_REFLASH_HEADER_MAGIC_OFST 0
+#define EFX_REFLASH_HEADER_MAGIC_LEN 4
+#define EFX_REFLASH_HEADER_MAGIC_VALUE 0x106F1A5
+
+#define EFX_REFLASH_HEADER_VERSION_OFST 4
+#define EFX_REFLASH_HEADER_VERSION_LEN 4
+#define EFX_REFLASH_HEADER_VERSION_VALUE 4
+
+#define EFX_REFLASH_HEADER_FIRMWARE_TYPE_OFST 8
+#define EFX_REFLASH_HEADER_FIRMWARE_TYPE_LEN 4
+#define EFX_REFLASH_FIRMWARE_TYPE_BOOTROM 0x2
+#define EFX_REFLASH_FIRMWARE_TYPE_BUNDLE 0xd
+
+#define EFX_REFLASH_HEADER_FIRMWARE_SUBTYPE_OFST 12
+#define EFX_REFLASH_HEADER_FIRMWARE_SUBTYPE_LEN 4
+
+#define EFX_REFLASH_HEADER_PAYLOAD_SIZE_OFST 16
+#define EFX_REFLASH_HEADER_PAYLOAD_SIZE_LEN 4
+
+#define EFX_REFLASH_HEADER_LENGTH_OFST 20
+#define EFX_REFLASH_HEADER_LENGTH_LEN 4
+
+#define EFX_REFLASH_HEADER_MINLEN \
+ (EFX_REFLASH_HEADER_LENGTH_OFST + EFX_REFLASH_HEADER_LENGTH_LEN)
+
+#define EFX_REFLASH_TRAILER_CRC_OFST 0
+#define EFX_REFLASH_TRAILER_CRC_LEN 4
+
+#define EFX_REFLASH_TRAILER_LEN \
+ (EFX_REFLASH_TRAILER_CRC_OFST + EFX_REFLASH_TRAILER_CRC_LEN)
+
+static bool efx_reflash_parse_reflash_header(const struct firmware *fw,
+ size_t header_offset, u32 *type,
+ u32 *subtype, const u8 **data,
+ size_t *data_size)
+{
+ u32 magic, version, payload_size, header_len, trailer_offset;
+ const u8 *header, *trailer;
+ u32 expected_crc, crc;
+
+ if (fw->size < header_offset + EFX_REFLASH_HEADER_MINLEN)
+ return false;
+
+ header = fw->data + header_offset;
+ magic = get_unaligned_le32(header + EFX_REFLASH_HEADER_MAGIC_OFST);
+ if (magic != EFX_REFLASH_HEADER_MAGIC_VALUE)
+ return false;
+
+ version = get_unaligned_le32(header + EFX_REFLASH_HEADER_VERSION_OFST);
+ if (version != EFX_REFLASH_HEADER_VERSION_VALUE)
+ return false;
+
+ payload_size = get_unaligned_le32(header + EFX_REFLASH_HEADER_PAYLOAD_SIZE_OFST);
+ header_len = get_unaligned_le32(header +
EFX_REFLASH_HEADER_LENGTH_OFST); + trailer_offset = header_offset + header_len + payload_size; + if (fw->size < trailer_offset + EFX_REFLASH_TRAILER_LEN) + return false; + + trailer = fw->data + trailer_offset; + expected_crc = get_unaligned_le32(trailer + EFX_REFLASH_TRAILER_CRC_OFST); + crc = crc32_le(0, header, header_len + payload_size); + if (crc != expected_crc) + return false; + + *type = get_unaligned_le32(header + EFX_REFLASH_HEADER_FIRMWARE_TYPE_OFST); + *subtype = get_unaligned_le32(header + EFX_REFLASH_HEADER_FIRMWARE_SUBTYPE_OFST); + if (*type == EFX_REFLASH_FIRMWARE_TYPE_BUNDLE) { + /* All the bundle data is written verbatim to NVRAM */ + *data = fw->data; + *data_size = fw->size; + } else { + /* Other payload types strip the reflash header and trailer + * from the data written to NVRAM + */ + *data = header + header_len; + *data_size = payload_size; + } + + return true; +} + +static int efx_reflash_partition_type(u32 type, u32 subtype, + u32 *partition_type, + u32 *partition_subtype) +{ + int rc = 0; + + /* Map from FIRMWARE_TYPE to NVRAM_PARTITION_TYPE */ + switch (type) { + case EFX_REFLASH_FIRMWARE_TYPE_BOOTROM: + *partition_type = NVRAM_PARTITION_TYPE_EXPANSION_ROM; + *partition_subtype = subtype; + break; + case EFX_REFLASH_FIRMWARE_TYPE_BUNDLE: + *partition_type = NVRAM_PARTITION_TYPE_BUNDLE; + *partition_subtype = subtype; + break; + default: + /* Not supported */ + rc = -EINVAL; + } + + return rc; +} + +/* SmartNIC image header fields */ +#define EFX_SNICIMAGE_HEADER_MAGIC_OFST 16 +#define EFX_SNICIMAGE_HEADER_MAGIC_LEN 4 +#define EFX_SNICIMAGE_HEADER_MAGIC_VALUE 0x541C057A + +#define EFX_SNICIMAGE_HEADER_VERSION_OFST 20 +#define EFX_SNICIMAGE_HEADER_VERSION_LEN 4 +#define EFX_SNICIMAGE_HEADER_VERSION_VALUE 1 + +#define EFX_SNICIMAGE_HEADER_LENGTH_OFST 24 +#define EFX_SNICIMAGE_HEADER_LENGTH_LEN 4 + +#define EFX_SNICIMAGE_HEADER_PARTITION_TYPE_OFST 36 +#define EFX_SNICIMAGE_HEADER_PARTITION_TYPE_LEN 4 + +#define EFX_SNICIMAGE_HEADER_PARTITION_SUBTYPE_OFST 40 +#define EFX_SNICIMAGE_HEADER_PARTITION_SUBTYPE_LEN 4 + +#define EFX_SNICIMAGE_HEADER_PAYLOAD_SIZE_OFST 60 +#define EFX_SNICIMAGE_HEADER_PAYLOAD_SIZE_LEN 4 + +#define EFX_SNICIMAGE_HEADER_CRC_OFST 64 +#define EFX_SNICIMAGE_HEADER_CRC_LEN 4 + +#define EFX_SNICIMAGE_HEADER_MINLEN 256 + +static bool efx_reflash_parse_snic_header(const struct firmware *fw, + size_t header_offset, + u32 *partition_type, + u32 *partition_subtype, + const u8 **data, size_t *data_size) +{ + u32 magic, version, payload_size, header_len, expected_crc, crc; + const u8 *header; + + if (fw->size < header_offset + EFX_SNICIMAGE_HEADER_MINLEN) + return false; + + header = fw->data + header_offset; + magic = get_unaligned_le32(header + EFX_SNICIMAGE_HEADER_MAGIC_OFST); + if (magic != EFX_SNICIMAGE_HEADER_MAGIC_VALUE) + return false; + + version = get_unaligned_le32(header + EFX_SNICIMAGE_HEADER_VERSION_OFST); + if (version != EFX_SNICIMAGE_HEADER_VERSION_VALUE) + return false; + + header_len = get_unaligned_le32(header + EFX_SNICIMAGE_HEADER_LENGTH_OFST); + payload_size = get_unaligned_le32(header + EFX_SNICIMAGE_HEADER_PAYLOAD_SIZE_OFST); + if (fw->size < header_len + payload_size) + return false; + + expected_crc = get_unaligned_le32(header + EFX_SNICIMAGE_HEADER_CRC_OFST); + + /* Calculate CRC omitting the expected CRC field itself */ + crc = crc32_le(~0, header, EFX_SNICIMAGE_HEADER_CRC_OFST); + crc = ~crc32_le(crc, + header + EFX_SNICIMAGE_HEADER_CRC_OFST + + EFX_SNICIMAGE_HEADER_CRC_LEN, + header_len + payload_size - 
EFX_SNICIMAGE_HEADER_CRC_OFST -
+ EFX_SNICIMAGE_HEADER_CRC_LEN);
+ if (crc != expected_crc)
+ return false;
+
+ *partition_type =
+ get_unaligned_le32(header + EFX_SNICIMAGE_HEADER_PARTITION_TYPE_OFST);
+ *partition_subtype =
+ get_unaligned_le32(header + EFX_SNICIMAGE_HEADER_PARTITION_SUBTYPE_OFST);
+ *data = fw->data;
+ *data_size = fw->size;
+ return true;
+}
+
+/* SmartNIC bundle header fields (see SF-122606-TC) */
+#define EFX_SNICBUNDLE_HEADER_MAGIC_OFST 0
+#define EFX_SNICBUNDLE_HEADER_MAGIC_LEN 4
+#define EFX_SNICBUNDLE_HEADER_MAGIC_VALUE 0xB1001001
+
+#define EFX_SNICBUNDLE_HEADER_VERSION_OFST 4
+#define EFX_SNICBUNDLE_HEADER_VERSION_LEN 4
+#define EFX_SNICBUNDLE_HEADER_VERSION_VALUE 1
+
+#define EFX_SNICBUNDLE_HEADER_BUNDLE_TYPE_OFST 8
+#define EFX_SNICBUNDLE_HEADER_BUNDLE_TYPE_LEN 4
+
+#define EFX_SNICBUNDLE_HEADER_BUNDLE_SUBTYPE_OFST 12
+#define EFX_SNICBUNDLE_HEADER_BUNDLE_SUBTYPE_LEN 4
+
+#define EFX_SNICBUNDLE_HEADER_LENGTH_OFST 20
+#define EFX_SNICBUNDLE_HEADER_LENGTH_LEN 4
+
+#define EFX_SNICBUNDLE_HEADER_CRC_OFST 224
+#define EFX_SNICBUNDLE_HEADER_CRC_LEN 4
+
+#define EFX_SNICBUNDLE_HEADER_LEN \
+ (EFX_SNICBUNDLE_HEADER_CRC_OFST + EFX_SNICBUNDLE_HEADER_CRC_LEN)
+
+static bool efx_reflash_parse_snic_bundle_header(const struct firmware *fw,
+ size_t header_offset,
+ u32 *partition_type,
+ u32 *partition_subtype,
+ const u8 **data,
+ size_t *data_size)
+{
+ u32 magic, version, bundle_type, header_len, expected_crc, crc;
+ const u8 *header;
+
+ if (fw->size < header_offset + EFX_SNICBUNDLE_HEADER_LEN)
+ return false;
+
+ header = fw->data + header_offset;
+ magic = get_unaligned_le32(header + EFX_SNICBUNDLE_HEADER_MAGIC_OFST);
+ if (magic != EFX_SNICBUNDLE_HEADER_MAGIC_VALUE)
+ return false;
+
+ version = get_unaligned_le32(header + EFX_SNICBUNDLE_HEADER_VERSION_OFST);
+ if (version != EFX_SNICBUNDLE_HEADER_VERSION_VALUE)
+ return false;
+
+ bundle_type = get_unaligned_le32(header + EFX_SNICBUNDLE_HEADER_BUNDLE_TYPE_OFST);
+ if (bundle_type != NVRAM_PARTITION_TYPE_BUNDLE)
+ return false;
+
+ header_len = get_unaligned_le32(header + EFX_SNICBUNDLE_HEADER_LENGTH_OFST);
+ if (header_len != EFX_SNICBUNDLE_HEADER_LEN)
+ return false;
+
+ expected_crc = get_unaligned_le32(header + EFX_SNICBUNDLE_HEADER_CRC_OFST);
+ crc = ~crc32_le(~0, header, EFX_SNICBUNDLE_HEADER_CRC_OFST);
+ if (crc != expected_crc)
+ return false;
+
+ *partition_type = NVRAM_PARTITION_TYPE_BUNDLE;
+ *partition_subtype = get_unaligned_le32(header + EFX_SNICBUNDLE_HEADER_BUNDLE_SUBTYPE_OFST);
+ *data = fw->data;
+ *data_size = fw->size;
+ return true;
+}
+
+static int efx_reflash_parse_firmware_data(const struct firmware *fw,
+ u32 *partition_type,
+ u32 *partition_subtype,
+ const u8 **data, size_t *data_size)
+{
+ size_t header_offset;
+ u32 type, subtype;
+
+ /* Try to find a valid firmware payload in the firmware data. Some
+ * packaging formats (such as CMS/PKCS#7 signed images) prepend a
+ * header for which finding the size is a non-trivial task.
+ *
+ * The checks are intended to reject firmware data that is clearly not
+ * in the expected format. They do not need to be exhaustive as the
+ * running firmware will perform its own comprehensive validity and
+ * compatibility checks during the update procedure.
+ *
+ * Firmware packages may contain multiple reflash images, e.g. a
+ * bundle containing one or more other images. Only check the
+ * outermost container by stopping after the first candidate image
+ * found, even if it is for an unsupported partition type.
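+ * (Illustration of the scan below: for a CMS/PKCS#7 signed image the
+ * real bundle, SNIC or reflash header simply starts at some non-zero
+ * offset, which the byte-by-byte search still finds.)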
+ */
+ for (header_offset = 0; header_offset < fw->size; header_offset++) {
+ if (efx_reflash_parse_snic_bundle_header(fw, header_offset,
+ partition_type,
+ partition_subtype,
+ data, data_size))
+ return 0;
+
+ if (efx_reflash_parse_snic_header(fw, header_offset,
+ partition_type,
+ partition_subtype, data,
+ data_size))
+ return 0;
+
+ if (efx_reflash_parse_reflash_header(fw, header_offset, &type,
+ &subtype, data, data_size))
+ return efx_reflash_partition_type(type, subtype,
+ partition_type,
+ partition_subtype);
+ }
+
+ return -EINVAL;
+}
+
+/* Limit the number of status updates during the erase or write phases */
+#define EFX_DEVLINK_STATUS_UPDATE_COUNT 50
+
+/* Expected timeout for efx_mcdi_nvram_update_finish_polled() */
+#define EFX_DEVLINK_UPDATE_FINISH_TIMEOUT 900
+
+/* Ideal erase chunk size. This is a balance between minimising the number of
+ * MCDI requests to erase an entire partition whilst avoiding tripping the MCDI
+ * RPC timeout.
+ */
+#define EFX_NVRAM_ERASE_IDEAL_CHUNK_SIZE (64 * 1024)
+
+static int efx_reflash_erase_partition(struct efx_nic *efx,
+ struct devlink *devlink, u32 type,
+ size_t partition_size,
+ size_t align)
+{
+ size_t chunk, offset, next_update;
+ int rc;
+
+ /* Partitions that cannot be erased or do not require erase before
+ * write are advertised with an erase alignment/sector size of zero.
+ */
+ if (align == 0)
+ /* Nothing to do */
+ return 0;
+
+ if (partition_size % align)
+ return -EINVAL;
+
+#ifdef EFX_NOT_UPSTREAM
+ netif_info(efx, hw, efx->net_dev, "Erasing NVRAM partition %#x\n", type);
+#endif
+
+ /* Erase the entire NVRAM partition a chunk at a time to avoid
+ * potentially tripping the MCDI RPC timeout.
+ */
+ if (align >= EFX_NVRAM_ERASE_IDEAL_CHUNK_SIZE)
+ chunk = align;
+ else
+ chunk = rounddown(EFX_NVRAM_ERASE_IDEAL_CHUNK_SIZE, align);
+
+ for (offset = 0, next_update = 0; offset < partition_size; offset += chunk) {
+ if (offset >= next_update) {
+ devlink_flash_update_status_notify(devlink, "Erasing",
+ NULL, offset,
+ partition_size);
+ next_update += partition_size / EFX_DEVLINK_STATUS_UPDATE_COUNT;
+ }
+
+ chunk = min_t(size_t, partition_size - offset, chunk);
+ rc = efx_mcdi_nvram_erase(efx, type, offset, chunk);
+ if (rc) {
+ netif_err(efx, hw, efx->net_dev,
+ "Erase failed for NVRAM partition %#x at %#zx-%#zx with error %d\n",
+ type, offset, offset + chunk - 1, rc);
+ return rc;
+ }
+ }
+
+ devlink_flash_update_status_notify(devlink, "Erasing", NULL,
+ partition_size, partition_size);
+
+ return 0;
+}
+
+static int efx_reflash_write_partition(struct efx_nic *efx,
+ struct devlink *devlink, u32 type,
+ const u8 *data, size_t data_size,
+ size_t align)
+{
+ size_t write_max, chunk, offset, next_update;
+ int rc;
+
+ if (align == 0)
+ return -EINVAL;
+
+#ifdef EFX_NOT_UPSTREAM
+ netif_info(efx, drv, efx->net_dev,
+ "Writing firmware image to NVRAM partition %#x\n", type);
+#endif
+
+ /* Write the NVRAM partition in chunks that are the largest multiple
+ * of the partition's required write alignment that will fit into the
+ * MCDI NVRAM_WRITE RPC payload.
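+ * (Illustration with assumed numbers: if write_max worked out at 1020
+ * bytes and the partition required 128-byte write alignment, chunk
+ * would be rounddown(1020, 128) = 896 bytes per MCDI write.)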
+ */
+ if (efx->type->mcdi_max_ver < 2)
+ write_max = MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_LEN *
+ MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MAXNUM;
+ else
+ write_max = MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_LEN *
+ MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MAXNUM_MCDI2;
+ chunk = rounddown(write_max, align);
+
+ for (offset = 0, next_update = 0; offset + chunk <= data_size; offset += chunk) {
+ if (offset >= next_update) {
+ devlink_flash_update_status_notify(devlink, "Writing",
+ NULL, offset,
+ data_size);
+ next_update += data_size / EFX_DEVLINK_STATUS_UPDATE_COUNT;
+ }
+
+ rc = efx_mcdi_nvram_write(efx, type, offset, data + offset, chunk);
+ if (rc) {
+ netif_err(efx, hw, efx->net_dev,
+ "Write failed for NVRAM partition %#x at %#zx-%#zx with error %d\n",
+ type, offset, offset + chunk - 1, rc);
+ return rc;
+ }
+ }
+
+ /* Round up left-over data to satisfy write alignment */
+ if (offset < data_size) {
+ size_t remaining = data_size - offset;
+ u8 *buf;
+
+ if (offset >= next_update)
+ devlink_flash_update_status_notify(devlink, "Writing",
+ NULL, offset,
+ data_size);
+
+ chunk = roundup(remaining, align);
+ buf = kmalloc(chunk, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ memcpy(buf, data + offset, remaining);
+ memset(buf + remaining, 0xFF, chunk - remaining);
+ rc = efx_mcdi_nvram_write(efx, type, offset, buf, chunk);
+ kfree(buf);
+ if (rc) {
+ netif_err(efx, hw, efx->net_dev,
+ "Write failed for NVRAM partition %#x at %#zx-%#zx with error %d\n",
+ type, offset, offset + chunk - 1, rc);
+ return rc;
+ }
+ }
+
+ devlink_flash_update_status_notify(devlink, "Writing", NULL, data_size,
+ data_size);
+
+ return 0;
+}
+
+int efx_reflash_flash_firmware(struct efx_nic *efx, const struct firmware *fw)
+{
+ size_t data_size, size, erase_align, write_align;
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_USE_DEVLINK)
+ struct devlink *devlink = efx->devlink;
+#else
+ struct devlink *devlink = NULL; /* devlink not available */
+#endif
+ u32 type, data_subtype, subtype;
+ const u8 *data;
+ bool protected;
+ int rc;
+
+ if (!efx_has_cap(efx, BUNDLE_UPDATE)) {
+ netif_err(efx, hw, efx->net_dev,
+ "NVRAM bundle updates are not supported by the firmware\n");
+ return -EOPNOTSUPP;
+ }
+
+ mutex_lock(&efx->reflash_mutex);
+#if defined(EFX_USE_KCOMPAT) && defined(EFX_USE_DEVLINK) && defined(EFX_HAVE_DEVLINK_FLASH_UPDATE_BEGIN_NOTIFY)
+ devlink_flash_update_begin_notify(devlink);
+#endif
+
+ devlink_flash_update_status_notify(devlink, "Checking update", NULL, 0, 0);
+
+ rc = efx_reflash_parse_firmware_data(fw, &type, &data_subtype, &data,
+ &data_size);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev,
+ "Firmware image validation check failed with error %d\n",
+ rc);
+ goto out;
+ }
+
+ rc = efx_mcdi_nvram_metadata(efx, type, &subtype, NULL, NULL, 0);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev,
+ "Metadata query for NVRAM partition %#x failed with error %d\n",
+ type, rc);
+ goto out;
+ }
+
+ if (subtype != data_subtype) {
+ netif_err(efx, drv, efx->net_dev,
+ "Firmware image is not appropriate for this adapter\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ rc = efx_mcdi_nvram_info(efx, type, &size, &erase_align, &write_align,
+ &protected);
+ if (rc) {
+ netif_err(efx, hw, efx->net_dev,
+ "Info query for NVRAM partition %#x failed with error %d\n",
+ type, rc);
+ goto out;
+ }
+
+ if (protected) {
+ netif_err(efx, drv, efx->net_dev,
+ "NVRAM partition %#x is protected\n", type);
+ rc = -EPERM;
+ goto out;
+ }
+
+ if (write_align == 0) {
+ netif_err(efx, drv, efx->net_dev,
+ "NVRAM partition %#x is not writable\n", type);
+ rc = -EACCES;
+ goto out;
+ }
+
+ if (erase_align != 0 && size % erase_align) {
+ netif_err(efx, drv, efx->net_dev,
+ "NVRAM partition %#x has a bad partition table entry and therefore is not erasable\n", type);
+ rc = -EACCES;
+ goto out;
+ }
+
+ if (data_size > size) {
+ netif_err(efx, drv, efx->net_dev,
+ "Firmware image is too big for NVRAM partition %#x\n",
+ type);
+ rc = -EFBIG;
+ goto out;
+ }
+
+ devlink_flash_update_status_notify(devlink, "Starting update", NULL, 0, 0);
+
+ rc = efx_mcdi_nvram_update_start(efx, type);
+ if (rc) {
+ netif_err(efx, hw, efx->net_dev,
+ "Update start request for NVRAM partition %#x failed with error %d\n",
+ type, rc);
+ goto out;
+ }
+
+ rc = efx_reflash_erase_partition(efx, devlink, type, size, erase_align);
+ if (rc)
+ goto out_update_finish;
+
+ rc = efx_reflash_write_partition(efx, devlink, type, data, data_size,
+ write_align);
+ if (rc)
+ goto out_update_finish;
+
+#ifdef EFX_NOT_UPSTREAM
+ netif_info(efx, drv, efx->net_dev,
+ "Finalizing and validating NVRAM partition %#x\n", type);
+#endif
+
+ devlink_flash_update_timeout_notify(devlink, "Finishing update", NULL,
+ EFX_DEVLINK_UPDATE_FINISH_TIMEOUT);
+
+out_update_finish:
+ if (rc)
+ /* Don't obscure the return code from an earlier failure */
+ (void) efx_mcdi_nvram_update_finish(efx, type,
+ EFX_UPDATE_FINISH_ABORT);
+ else
+ rc = efx_mcdi_nvram_update_finish_polled(efx, type);
+
+out:
+ if (!rc) {
+#ifdef EFX_NOT_UPSTREAM
+ netif_info(efx, hw, efx->net_dev,
+ "NVRAM partition %#x update complete\n", type);
+#endif
+ devlink_flash_update_status_notify(devlink, "Update complete",
+ NULL, 0, 0);
+ } else {
+ devlink_flash_update_status_notify(devlink, "Update failed",
+ NULL, 0, 0);
+ }
+
+#if defined(EFX_USE_KCOMPAT) && defined(EFX_USE_DEVLINK) && defined(EFX_HAVE_DEVLINK_FLASH_UPDATE_BEGIN_NOTIFY)
+ devlink_flash_update_end_notify(devlink);
+#endif
+ mutex_unlock(&efx->reflash_mutex);
+
+ return rc;
+}
diff --git a/src/drivers/net/ethernet/sfc/efx_reflash.h b/src/drivers/net/ethernet/sfc/efx_reflash.h
new file mode 100644
index 0000000..0fe6ccc
--- /dev/null
+++ b/src/drivers/net/ethernet/sfc/efx_reflash.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Driver for Xilinx network controllers and boards
+ * Copyright 2021 Xilinx Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef _EFX_REFLASH_H
+#define _EFX_REFLASH_H
+
+#include "net_driver.h"
+#include <linux/firmware.h>
+
+int efx_reflash_flash_firmware(struct efx_nic *efx, const struct firmware *fw);
+
+#endif /* _EFX_REFLASH_H */
diff --git a/src/drivers/net/ethernet/sfc/enum.h b/src/drivers/net/ethernet/sfc/enum.h
new file mode 100644
index 0000000..a1a1c2f
--- /dev/null
+++ b/src/drivers/net/ethernet/sfc/enum.h
@@ -0,0 +1,179 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2007-2017 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */ + +#ifndef EFX_ENUM_H +#define EFX_ENUM_H + +/** + * enum efx_loopback_mode - loopback modes + * @LOOPBACK_NONE: no loopback + * @LOOPBACK_DATA: data path loopback + * @LOOPBACK_GMAC: loopback within GMAC + * @LOOPBACK_XGMII: loopback after XMAC + * @LOOPBACK_XGXS: loopback within BPX after XGXS + * @LOOPBACK_XAUI: loopback within BPX before XAUI serdes + * @LOOPBACK_GMII: loopback within BPX after GMAC + * @LOOPBACK_SGMII: loopback within BPX within SGMII + * @LOOPBACK_XGBR: loopback within BPX within XGBR + * @LOOPBACK_XFI: loopback within BPX before XFI serdes + * @LOOPBACK_XAUI_FAR: loopback within BPX after XAUI serdes + * @LOOPBACK_GMII_FAR: loopback within BPX before SGMII + * @LOOPBACK_SGMII_FAR: loopback within BPX after SGMII + * @LOOPBACK_XFI_FAR: loopback after XFI serdes + * @LOOPBACK_GPHY: loopback within 1G PHY at unspecified level + * @LOOPBACK_PHYXS: loopback within 10G PHY at PHYXS level + * @LOOPBACK_PCS: loopback within 10G PHY at PCS level + * @LOOPBACK_PMAPMD: loopback within 10G PHY at PMAPMD level + * @LOOPBACK_XPORT: cross port loopback + * @LOOPBACK_XGMII_WS: wireside loopback excluding XMAC + * @LOOPBACK_XAUI_WS: wireside loopback within BPX within XAUI serdes + * @LOOPBACK_XAUI_WS_FAR: wireside loopback within BPX including XAUI serdes + * @LOOPBACK_XAUI_WS_NEAR: wireside loopback within BPX excluding XAUI serdes + * @LOOPBACK_GMII_WS: wireside loopback excluding GMAC + * @LOOPBACK_XFI_WS: wireside loopback excluding XFI serdes + * @LOOPBACK_XFI_WS_FAR: wireside loopback including XFI serdes + * @LOOPBACK_PHYXS_WS: wireside loopback within 10G PHY at PHYXS level + */ +/* Please keep up-to-date w.r.t the following two #defines */ +enum efx_loopback_mode { + LOOPBACK_NONE = 0, + LOOPBACK_DATA = 1, + LOOPBACK_GMAC = 2, + LOOPBACK_XGMII = 3, + LOOPBACK_XGXS = 4, + LOOPBACK_XAUI = 5, + LOOPBACK_GMII = 6, + LOOPBACK_SGMII = 7, + LOOPBACK_XGBR = 8, + LOOPBACK_XFI = 9, + LOOPBACK_XAUI_FAR = 10, + LOOPBACK_GMII_FAR = 11, + LOOPBACK_SGMII_FAR = 12, + LOOPBACK_XFI_FAR = 13, + LOOPBACK_GPHY = 14, + LOOPBACK_PHYXS = 15, + LOOPBACK_PCS = 16, + LOOPBACK_PMAPMD = 17, + LOOPBACK_XPORT = 18, + LOOPBACK_XGMII_WS = 19, + LOOPBACK_XAUI_WS = 20, + LOOPBACK_XAUI_WS_FAR = 21, + LOOPBACK_XAUI_WS_NEAR = 22, + LOOPBACK_GMII_WS = 23, + LOOPBACK_XFI_WS = 24, + LOOPBACK_XFI_WS_FAR = 25, + LOOPBACK_PHYXS_WS = 26, + /** @LOOPBACK_MAX: Limit of enum values */ + LOOPBACK_MAX +}; +#define LOOPBACK_TEST_MAX LOOPBACK_PMAPMD + +/* These loopbacks occur within the controller */ +#define LOOPBACKS_INTERNAL ((1 << LOOPBACK_DATA) | \ + (1 << LOOPBACK_GMAC) | \ + (1 << LOOPBACK_XGMII)| \ + (1 << LOOPBACK_XGXS) | \ + (1 << LOOPBACK_XAUI) | \ + (1 << LOOPBACK_GMII) | \ + (1 << LOOPBACK_SGMII) | \ + (1 << LOOPBACK_XGBR) | \ + (1 << LOOPBACK_XFI) | \ + (1 << LOOPBACK_XAUI_FAR) | \ + (1 << LOOPBACK_GMII_FAR) | \ + (1 << LOOPBACK_SGMII_FAR) | \ + (1 << LOOPBACK_XFI_FAR) | \ + (1 << LOOPBACK_XGMII_WS) | \ + (1 << LOOPBACK_XAUI_WS) | \ + (1 << LOOPBACK_XAUI_WS_FAR) | \ + (1 << LOOPBACK_XAUI_WS_NEAR) | \ + (1 << LOOPBACK_GMII_WS) | \ + (1 << LOOPBACK_XFI_WS) | \ + (1 << LOOPBACK_XFI_WS_FAR)) + +#define LOOPBACKS_WS ((1 << LOOPBACK_XGMII_WS) | \ + (1 << LOOPBACK_XAUI_WS) | \ + (1 << LOOPBACK_XAUI_WS_FAR) | \ + (1 << LOOPBACK_XAUI_WS_NEAR) | \ + (1 << LOOPBACK_GMII_WS) | \ + (1 << LOOPBACK_XFI_WS) | \ + (1 << LOOPBACK_XFI_WS_FAR) | \ + (1 << LOOPBACK_PHYXS_WS)) + +#define LOOPBACKS_EXTERNAL(_efx) \ + ((_efx)->loopback_modes & ~LOOPBACKS_INTERNAL & \ + ~(1 << LOOPBACK_NONE)) + +#define LOOPBACK_MASK(_efx) \ + 
(1 << (_efx)->loopback_mode) + +#define LOOPBACK_INTERNAL(_efx) \ + (!!(LOOPBACKS_INTERNAL & LOOPBACK_MASK(_efx))) + +#define LOOPBACK_EXTERNAL(_efx) \ + (!!(LOOPBACK_MASK(_efx) & LOOPBACKS_EXTERNAL(_efx))) + +#define LOOPBACK_CHANGED(_from, _to, _mask) \ + (!!((LOOPBACK_MASK(_from) ^ LOOPBACK_MASK(_to)) & (_mask))) + +#define LOOPBACK_OUT_OF(_from, _to, _mask) \ + ((LOOPBACK_MASK(_from) & (_mask)) && !(LOOPBACK_MASK(_to) & (_mask))) + +/*****************************************************************************/ + +/** + * enum reset_type - reset types + * @RESET_TYPE_INVISIBLE: Reset datapath and MAC (Falcon only) + * @RESET_TYPE_RECOVER_OR_ALL: Try to recover. Apply RESET_TYPE_ALL + * if unsuccessful. + * @RESET_TYPE_ALL: Reset datapath, MAC and PHY + * @RESET_TYPE_WORLD: Reset as much as possible + * @RESET_TYPE_DATAPATH: Reset datapath only. + * @RESET_TYPE_MC_BIST: MC entering BIST mode. + * @RESET_TYPE_DISABLE: Reset datapath, MAC and PHY; leave NIC disabled + * @RESET_TYPE_TX_WATCHDOG: reset due to TX watchdog + * @RESET_TYPE_INT_ERROR: reset due to internal error + * @RESET_TYPE_DMA_ERROR: DMA error + * @RESET_TYPE_TX_SKIP: hardware completed empty tx descriptors + * @RESET_TYPE_MC_FAILURE: MC reboot/assertion + * @RESET_TYPE_MCDI_TIMEOUT: MCDI timeout. + * + * %RESET_TYPE_INVISIBLE, %RESET_TYPE_ALL, %RESET_TYPE_WORLD and + * %RESET_TYPE_DISABLE specify the method/scope of the reset. The + * other values specify reasons, which efx_schedule_reset() will choose + * a method for. + * + * Reset methods are numbered in order of increasing scope. + */ +enum reset_type { + RESET_TYPE_INVISIBLE, + RESET_TYPE_RECOVER_OR_ALL, + RESET_TYPE_ALL, + RESET_TYPE_WORLD, + RESET_TYPE_DATAPATH, + RESET_TYPE_MC_BIST, + RESET_TYPE_DISABLE, + /** @RESET_TYPE_MAX_METHOD: Limit of method/scope reset types */ + RESET_TYPE_MAX_METHOD, + RESET_TYPE_TX_WATCHDOG, + RESET_TYPE_INT_ERROR, + RESET_TYPE_DMA_ERROR, + RESET_TYPE_TX_SKIP, + RESET_TYPE_MC_FAILURE, + /* RESET_TYPE_MCDI_TIMEOUT is actually a method, not + * a reason, but it doesn't fit the scope hierarchy (it's not well- + * ordered by inclusion) + * We encode this by having its enum values be greater than + * RESET_TYPE_MAX_METHOD. This also prevents issuing it with + * efx_ioctl_reset */ + RESET_TYPE_MCDI_TIMEOUT, + /** @RESET_TYPE_MAX: Limit of enum values */ + RESET_TYPE_MAX, +}; + +#endif /* EFX_ENUM_H */ diff --git a/src/drivers/net/ethernet/sfc/ethtool.c b/src/drivers/net/ethernet/sfc/ethtool.c new file mode 100644 index 0000000..7ab0578 --- /dev/null +++ b/src/drivers/net/ethernet/sfc/ethtool.c @@ -0,0 +1,594 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2006-2017 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
+ */
+
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/rtnetlink.h>
+#include <linux/in.h>
+#include "mcdi_pcol.h"
+#include "net_driver.h"
+#include "workarounds.h"
+#include "selftest.h"
+#include "efx.h"
+#include "filter.h"
+#include "nic.h"
+#include "efx_common.h"
+#include "mcdi_port_common.h"
+#include "efx_channels.h"
+#include "tx_common.h"
+#include "ethtool_common.h"
+#include "efx_reflash.h"
+#ifdef CONFIG_SFC_DUMP
+#include "dump.h"
+#endif
+
+#ifdef EFX_NOT_UPSTREAM
+#include "sfctool.h"
+#endif
+#include "efx_ethtool.h"
+
+#define EFX_ETHTOOL_EEPROM_MAGIC 0xEFAB
+
+/**************************************************************************
+ *
+ * Ethtool operations
+ *
+ **************************************************************************
+ */
+
+static int efx_ethtool_get_regs_len(struct net_device *net_dev)
+{
+	return efx_nic_get_regs_len(efx_netdev_priv(net_dev));
+}
+
+static void efx_ethtool_get_regs(struct net_device *net_dev,
+				 struct ethtool_regs *regs, void *buf)
+{
+	struct efx_nic *efx = efx_netdev_priv(net_dev);
+
+	regs->version = efx->type->revision;
+	efx_nic_get_regs(efx, buf);
+}
+
+#if defined(EFX_USE_KCOMPAT) && !defined(EFX_USE_ETHTOOL_GET_SSET_COUNT)
+static int efx_ethtool_get_stats_count(struct net_device *net_dev)
+{
+	return efx_ethtool_get_sset_count(net_dev, ETH_SS_STATS);
+}
+static int efx_ethtool_self_test_count(struct net_device *net_dev)
+{
+	return efx_ethtool_get_sset_count(net_dev, ETH_SS_TEST);
+}
+#endif
+
+#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_NDO_SET_FEATURES) && !defined(EFX_HAVE_EXT_NDO_SET_FEATURES)
+
+static int efx_ethtool_set_tso(struct net_device *net_dev, u32 enable)
+{
+	struct efx_nic *efx __attribute__ ((unused)) = efx_netdev_priv(net_dev);
+	u32 features;
+
+	features = NETIF_F_TSO;
+	if (efx->type->offload_features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM))
+		features |= NETIF_F_TSO6;
+
+	if (enable)
+		net_dev->features |= features;
+	else
+		net_dev->features &= ~features;
+
+	return 0;
+}
+
+static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable)
+{
+	struct efx_nic *efx = efx_netdev_priv(net_dev);
+	u32 features = efx->type->offload_features & NETIF_F_CSUM_MASK;
+
+	if (enable)
+		net_dev->features |= features;
+	else
+		net_dev->features &= ~features;
+
+	return 0;
+}
+
+static int efx_ethtool_set_rx_csum(struct net_device *net_dev, u32 enable)
+{
+	struct efx_nic *efx = efx_netdev_priv(net_dev);
+
+	/* No way to stop the hardware doing the checks; we just
+	 * ignore the result.
+ */ + efx->rx_checksum_enabled = !!enable; + + return 0; +} + +static u32 efx_ethtool_get_rx_csum(struct net_device *net_dev) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + + return efx->rx_checksum_enabled; +} + +#if defined(EFX_USE_ETHTOOL_FLAGS) +static int efx_ethtool_set_flags(struct net_device *net_dev, u32 data) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + u32 supported = (efx->type->offload_features & + (ETH_FLAG_RXHASH | ETH_FLAG_NTUPLE)); + int rc; + +#if defined(EFX_NOT_UPSTREAM) && defined(EFX_USE_SFC_LRO) + if (efx->lro_available) + supported |= ETH_FLAG_LRO; +#endif + + if (data & ~supported) + return -EINVAL; + + if (net_dev->features & ~data & ETH_FLAG_NTUPLE) { + rc = efx->type->filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL); + if (rc) + return rc; + } + + net_dev->features = (net_dev->features & ~supported) | data; + return 0; +} +#endif + +#endif /* EFX_HAVE_NDO_SET_FEATURES && EFX_HAVE_EXT_NDO_SET_FEATURES */ + +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_USE_ETHTOOL_OP_GET_LINK) +static u32 efx_ethtool_get_link(struct net_device *net_dev) +{ + return netif_running(net_dev) && netif_carrier_ok(net_dev); +} +#endif + +/* + * Each channel has a single IRQ and moderation timer, started by any + * completion (or other event). Unless the module parameter + * separate_tx_channels is set, IRQs and moderation are therefore + * shared between RX and TX completions. In this case, when RX IRQ + * moderation is explicitly changed then TX IRQ moderation is + * automatically changed too, but otherwise we fail if the two values + * are requested to be different. + * + * The hardware does not support a limit on the number of completions + * before an IRQ, so we do not use the max_frames fields. We should + * report and require that max_frames == (usecs != 0), but this would + * invalidate existing user documentation. + * + * The hardware does not have distinct settings for interrupt + * moderation while the previous IRQ is being handled, so we should + * not use the 'irq' fields. However, an earlier developer + * misunderstood the meaning of the 'irq' fields and the driver did + * not support the standard fields. To avoid invalidating existing + * user documentation, we report and accept changes through either the + * standard or 'irq' fields. If both are changed at the same time, we + * prefer the standard field. + * + * We implement adaptive IRQ moderation, but use a different algorithm + * from that assumed in the definition of struct ethtool_coalesce. + * Therefore we do not use any of the adaptive moderation parameters + * in it. 
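+ *
+ * For example (illustrative): with shared channels, "ethtool -C eth0
+ * rx-usecs 50" moves TX moderation to 50us as well, whereas a request
+ * that sets rx-usecs and tx-usecs to two different values is rejected.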
+ */ + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_ETHTOOL_COALESCE_CQE) +static int efx_ethtool_get_coalesce(struct net_device *net_dev, + struct ethtool_coalesce *coalesce, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +#else +static int efx_ethtool_get_coalesce(struct net_device *net_dev, + struct ethtool_coalesce *coalesce) +#endif +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + unsigned int tx_usecs, rx_usecs; + bool rx_adaptive; + + efx_get_irq_moderation(efx, &tx_usecs, &rx_usecs, &rx_adaptive); + + coalesce->tx_coalesce_usecs = tx_usecs; + coalesce->tx_coalesce_usecs_irq = tx_usecs; + coalesce->rx_coalesce_usecs = rx_usecs; + coalesce->rx_coalesce_usecs_irq = rx_usecs; + coalesce->use_adaptive_rx_coalesce = rx_adaptive; + coalesce->stats_block_coalesce_usecs = efx->stats_period_ms * 1000; + + return 0; +} + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_ETHTOOL_COALESCE_CQE) +static int efx_ethtool_set_coalesce(struct net_device *net_dev, + struct ethtool_coalesce *coalesce, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +#else +static int efx_ethtool_set_coalesce(struct net_device *net_dev, + struct ethtool_coalesce *coalesce) +#endif +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + struct efx_channel *channel; + unsigned int tx_usecs, rx_usecs; + bool adaptive, rx_may_override_tx; + unsigned int stats_usecs; + int rc = 0; + +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_COALESCE_PARAMS) + if (coalesce->use_adaptive_tx_coalesce) + return -EINVAL; +#endif + + efx_for_each_channel(channel, efx) + if (channel->enabled) { + rc = 1; + break; + } + + if (!rc) + return -ENETDOWN; + + efx_get_irq_moderation(efx, &tx_usecs, &rx_usecs, &adaptive); + + if (coalesce->rx_coalesce_usecs != rx_usecs) + rx_usecs = coalesce->rx_coalesce_usecs; + else + rx_usecs = coalesce->rx_coalesce_usecs_irq; + + adaptive = coalesce->use_adaptive_rx_coalesce; + + /* If channels are shared, TX IRQ moderation can be quietly + * overridden unless it is changed from its old value. 
+ */ + rx_may_override_tx = (coalesce->tx_coalesce_usecs == tx_usecs && + coalesce->tx_coalesce_usecs_irq == tx_usecs); + if (coalesce->tx_coalesce_usecs != tx_usecs) + tx_usecs = coalesce->tx_coalesce_usecs; + else + tx_usecs = coalesce->tx_coalesce_usecs_irq; + + rc = efx_init_irq_moderation(efx, tx_usecs, rx_usecs, adaptive, + rx_may_override_tx); + if (rc != 0) + return rc; + + efx_for_each_channel(channel, efx) + if (channel->enabled) + efx->type->push_irq_moderation(channel); + + stats_usecs = coalesce->stats_block_coalesce_usecs; + if (stats_usecs > 0 && stats_usecs < 1000) + stats_usecs = 1000; + + efx->stats_period_ms = stats_usecs / 1000; + if (efx->type->update_stats_period) + efx->type->update_stats_period(efx); + else + efx_mcdi_mac_update_stats_period(efx); + + return 0; +} + +static void efx_ethtool_get_ringparam(struct net_device *net_dev, +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_ETHTOOL_GET_RINGPARAM_EXTACK) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kring, + struct netlink_ext_ack *ext_ack) +#else + struct ethtool_ringparam *ring) +#endif +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + + ring->rx_max_pending = efx_max_dmaq_size(efx); + ring->tx_max_pending = EFX_TXQ_MAX_ENT(efx); + ring->rx_pending = efx->rxq_entries; + ring->tx_pending = efx->txq_entries; +} + +static int efx_ethtool_set_ringparam(struct net_device *net_dev, +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_ETHTOOL_SET_RINGPARAM_EXTACK) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kring, + struct netlink_ext_ack *ext_ack) +#else + struct ethtool_ringparam *ring) +#endif +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + int rc; + + if (ring->rx_mini_pending || ring->rx_jumbo_pending) + return -EINVAL; + + if (ring->rx_pending == efx->rxq_entries && + ring->tx_pending == efx->txq_entries) + /* Nothing to do */ + return 0; + + /* Validate range and rounding of RX queue. */ + rc = efx_check_queue_size(efx, &ring->rx_pending, + EFX_RXQ_MIN_ENT, efx_max_dmaq_size(efx), + false); + if (rc == -ERANGE) + netif_err(efx, drv, efx->net_dev, + "Rx queue length must be between %u and %lu\n", + EFX_RXQ_MIN_ENT, efx_max_dmaq_size(efx)); + else if (rc == -EINVAL) + netif_err(efx, drv, efx->net_dev, + "Rx queue length must be power of two\n"); + + if (rc) + return rc; + + /* Validate range and rounding of TX queue. 
*/ + rc = efx_check_queue_size(efx, &ring->tx_pending, + efx->txq_min_entries, EFX_TXQ_MAX_ENT(efx), + false); + if (rc == -ERANGE) + netif_err(efx, drv, efx->net_dev, + "Tx queue length must be between %u and %lu\n", + efx->txq_min_entries, EFX_TXQ_MAX_ENT(efx)); + else if (rc == -EINVAL) + netif_err(efx, drv, efx->net_dev, + "Tx queue length must be power of two\n"); + if (rc) + return rc; + + /* Apply the new settings */ + efx->rxq_entries = ring->rx_pending; + efx->txq_entries = ring->tx_pending; + + /* Update the datapath with the new settings if the interface is up */ + if (!efx_check_disabled(efx) && netif_running(efx->net_dev)) { + dev_close(net_dev); + rc = dev_open(net_dev, NULL); + } + + return rc; +} + +static void efx_ethtool_get_wol(struct net_device *net_dev, + struct ethtool_wolinfo *wol) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + return efx->type->get_wol(efx, wol); +} + + +static int efx_ethtool_set_wol(struct net_device *net_dev, + struct ethtool_wolinfo *wol) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + return efx->type->set_wol(efx, wol->wolopts); +} + +#ifdef CONFIG_SFC_DUMP +int efx_ethtool_get_dump_flag(struct net_device *net_dev, + struct ethtool_dump *dump) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + + return efx_dump_get_flag(efx, dump); +} + +int efx_ethtool_get_dump_data(struct net_device *net_dev, + struct ethtool_dump *dump, void *buffer) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + + return efx_dump_get_data(efx, dump, buffer); +} + +int efx_ethtool_set_dump(struct net_device *net_dev, struct ethtool_dump *val) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + + return efx_dump_set(efx, val); +} +#endif + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_ETHTOOL_GET_TS_INFO) || defined(EFX_HAVE_ETHTOOL_EXT_GET_TS_INFO) +static +#endif +int efx_ethtool_get_ts_info(struct net_device *net_dev, + struct ethtool_ts_info *ts_info) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + + /* Software capabilities */ + ts_info->so_timestamping = (SOF_TIMESTAMPING_RX_SOFTWARE | +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_SKB_TX_TIMESTAMP) + SOF_TIMESTAMPING_TX_SOFTWARE | +#endif + SOF_TIMESTAMPING_SOFTWARE); + ts_info->phc_index = -1; + + efx_ptp_get_ts_info(efx, ts_info); + return 0; +} + +#if defined(EFX_USE_KCOMPAT) && (!defined(EFX_USE_DEVLINK) || defined(EFX_NEED_ETHTOOL_FLASH_DEVICE)) +int efx_ethtool_flash_device(struct net_device *net_dev, + struct ethtool_flash *flash) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + const struct firmware *fw; + int rc; + + if (flash->region != ETHTOOL_FLASH_ALL_REGIONS) { + netif_err(efx, drv, efx->net_dev, + "Updates to NVRAM region %u are not supported\n", + flash->region); + return -EINVAL; + } + + rc = request_firmware(&fw, flash->data, &efx->pci_dev->dev); + if (rc) + return rc; + + dev_hold(net_dev); + rtnl_unlock(); + + rc = efx_reflash_flash_firmware(efx, fw); + + rtnl_lock(); + dev_put(net_dev); + + release_firmware(fw); + return rc; +} +#endif + +const struct ethtool_ops efx_ethtool_ops = { +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_COALESCE_PARAMS) + .supported_coalesce_params = (ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_USECS_IRQ | + ETHTOOL_COALESCE_STATS_BLOCK_USECS | + ETHTOOL_COALESCE_USE_ADAPTIVE_RX), +#endif +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_ETHTOOL_LINKSETTINGS) || defined(EFX_HAVE_ETHTOOL_LEGACY) + .get_settings = efx_ethtool_get_settings, + .set_settings = efx_ethtool_set_settings, +#endif + .get_drvinfo = 
efx_ethtool_get_drvinfo, + .get_regs_len = efx_ethtool_get_regs_len, + .get_regs = efx_ethtool_get_regs, + .get_msglevel = efx_ethtool_get_msglevel, + .set_msglevel = efx_ethtool_set_msglevel, + .nway_reset = efx_ethtool_nway_reset, +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_USE_ETHTOOL_OP_GET_LINK) + .get_link = ethtool_op_get_link, +#else + .get_link = efx_ethtool_get_link, +#endif + .get_coalesce = efx_ethtool_get_coalesce, + .set_coalesce = efx_ethtool_set_coalesce, + .get_ringparam = efx_ethtool_get_ringparam, + .set_ringparam = efx_ethtool_set_ringparam, + .get_pauseparam = efx_ethtool_get_pauseparam, + .set_pauseparam = efx_ethtool_set_pauseparam, +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_NDO_SET_FEATURES) && !defined(EFX_HAVE_EXT_NDO_SET_FEATURES) + .get_rx_csum = efx_ethtool_get_rx_csum, + .set_rx_csum = efx_ethtool_set_rx_csum, + .get_tx_csum = ethtool_op_get_tx_csum, + /* Need to enable/disable IPv6 too */ + .set_tx_csum = efx_ethtool_set_tx_csum, + .get_sg = ethtool_op_get_sg, + .set_sg = ethtool_op_set_sg, + .get_tso = ethtool_op_get_tso, + /* Need to enable/disable TSO-IPv6 too */ + .set_tso = efx_ethtool_set_tso, +#if defined(EFX_USE_ETHTOOL_FLAGS) + .get_flags = ethtool_op_get_flags, + .set_flags = efx_ethtool_set_flags, +#endif +#endif /* EFX_HAVE_NDO_SET_FEATURES */ +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_USE_ETHTOOL_GET_SSET_COUNT) + .get_sset_count = efx_ethtool_get_sset_count, +#else + .self_test_count = efx_ethtool_self_test_count, + .get_stats_count = efx_ethtool_get_stats_count, +#endif +#if defined(EFX_USE_KCOMPAT) && (!defined(EFX_USE_DEVLINK) || defined(EFX_NEED_ETHTOOL_FLASH_DEVICE)) + .flash_device = efx_ethtool_flash_device, +#endif + .get_priv_flags = efx_ethtool_get_priv_flags, + .set_priv_flags = efx_ethtool_set_priv_flags, + .self_test = efx_ethtool_self_test, + .get_strings = efx_ethtool_get_strings, +#if !defined(EFX_USE_KCOMPAT) || (defined(EFX_HAVE_ETHTOOL_SET_PHYS_ID) && !defined(EFX_USE_ETHTOOL_OPS_EXT)) + .set_phys_id = efx_ethtool_phys_id, +#elif !defined(EFX_USE_ETHTOOL_OPS_EXT) + .phys_id = efx_ethtool_phys_id_loop, +#endif + .get_ethtool_stats = efx_ethtool_get_stats, + .get_wol = efx_ethtool_get_wol, + .set_wol = efx_ethtool_set_wol, +#if !defined(EFX_USE_KCOMPAT) || (defined(EFX_HAVE_ETHTOOL_RESET) && !defined(EFX_USE_ETHTOOL_OPS_EXT)) + .reset = efx_ethtool_reset, +#endif +#if !defined(EFX_USE_KCOMPAT) + .get_rxnfc = efx_ethtool_get_rxnfc, + .set_rxnfc = efx_ethtool_set_rxnfc, +#else + .get_rxnfc = efx_ethtool_get_rxnfc_wrapper, + .set_rxnfc = efx_ethtool_set_rxnfc_wrapper, +#endif +#if defined(EFX_USE_KCOMPAT) && defined(EFX_USE_ETHTOOL_OPS_EXT) +}; +const struct ethtool_ops_ext efx_ethtool_ops_ext = { + .size = sizeof(struct ethtool_ops_ext), + .set_phys_id = efx_ethtool_phys_id, + /* Do not set ethtool_ops_ext::reset due to RH BZ 1008678 (SF bug 39031) */ +#endif + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_ETHTOOL_GET_RXFH_INDIR_SIZE) + .get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size, +#endif +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_ETHTOOL_GET_RXFH_KEY_SIZE) + .get_rxfh_key_size = efx_ethtool_get_rxfh_key_size, +#endif +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_CONFIGURABLE_RSS_HASH) + .get_rxfh = efx_ethtool_get_rxfh, + .set_rxfh = efx_ethtool_set_rxfh, +#elif defined(EFX_HAVE_ETHTOOL_GET_RXFH) + .get_rxfh = efx_ethtool_get_rxfh_no_hfunc, + .set_rxfh = efx_ethtool_set_rxfh_no_hfunc, +#elif defined(EFX_HAVE_ETHTOOL_GET_RXFH_INDIR) +# if defined(EFX_HAVE_OLD_ETHTOOL_RXFH_INDIR) + 
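	/* Kernels with the old get/set_rxfh_indir signature need these
+	 * compatibility wrappers.
+	 */
+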
	.get_rxfh_indir = efx_ethtool_old_get_rxfh_indir,
+	.set_rxfh_indir = efx_ethtool_old_set_rxfh_indir,
+# else
+	.get_rxfh_indir = efx_ethtool_get_rxfh_indir,
+	.set_rxfh_indir = efx_ethtool_set_rxfh_indir,
+# endif
+#endif
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_ETHTOOL_RXFH_CONTEXT)
+	.get_rxfh_context = efx_ethtool_get_rxfh_context,
+	.set_rxfh_context = efx_ethtool_set_rxfh_context,
+#endif
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_ETHTOOL_GET_DUMP_FLAG) || defined(EFX_HAVE_ETHTOOL_GET_DUMP_DATA) || defined(EFX_HAVE_ETHTOOL_SET_DUMP)
+#ifdef CONFIG_SFC_DUMP
+	.get_dump_flag = efx_ethtool_get_dump_flag,
+	.get_dump_data = efx_ethtool_get_dump_data,
+	.set_dump = efx_ethtool_set_dump,
+#endif
+#endif
+
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_ETHTOOL_GET_TS_INFO) || defined(EFX_HAVE_ETHTOOL_EXT_GET_TS_INFO)
+	.get_ts_info = efx_ethtool_get_ts_info,
+#endif
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_ETHTOOL_GMODULEEEPROM)
+	.get_module_info = efx_ethtool_get_module_info,
+	.get_module_eeprom = efx_ethtool_get_module_eeprom,
+#endif
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_ETHTOOL_CHANNELS) || defined(EFX_HAVE_ETHTOOL_EXT_CHANNELS)
+	.get_channels = efx_ethtool_get_channels,
+	.set_channels = efx_ethtool_set_channels,
+#endif
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_ETHTOOL_LINKSETTINGS)
+	.get_link_ksettings = efx_ethtool_get_link_ksettings,
+	.set_link_ksettings = efx_ethtool_set_link_ksettings,
+#endif
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_ETHTOOL_FECSTATS)
+	.get_fec_stats = efx_ethtool_get_fec_stats,
+#endif
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_ETHTOOL_FECPARAM)
+	.get_fecparam = efx_ethtool_get_fecparam,
+	.set_fecparam = efx_ethtool_set_fecparam,
+#endif
+};
diff --git a/src/drivers/net/ethernet/sfc/ethtool_common.c b/src/drivers/net/ethernet/sfc/ethtool_common.c
new file mode 100644
index 0000000..ab33710
--- /dev/null
+++ b/src/drivers/net/ethernet/sfc/ethtool_common.c
@@ -0,0 +1,2056 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include "net_driver.h"
+#include "mcdi.h"
+#include "nic.h"
+#include "selftest.h"
+#include "ethtool_common.h"
+#include "efx_common.h"
+#include "efx_channels.h"
+#include "rx_common.h"
+#include "mcdi_port_common.h"
+#include "mcdi_filters.h"
+#include "tc.h"
+
+struct efx_sw_stat_desc {
+	const char *name;
+	enum {
+		EFX_ETHTOOL_STAT_SOURCE_nic,
+		EFX_ETHTOOL_STAT_SOURCE_channel,
+		EFX_ETHTOOL_STAT_SOURCE_rx_queue,
+		EFX_ETHTOOL_STAT_SOURCE_tx_queue
+	} source;
+	unsigned int offset;
+	u64 (*get_stat)(void *field); /* Reader function */
+};
+
+/* Initialiser for a struct efx_sw_stat_desc with type-checking */
+#define EFX_ETHTOOL_STAT(stat_name, source_name, field, field_type,	\
+			 get_stat_function) {				\
+	.name = #stat_name,						\
+	.source = EFX_ETHTOOL_STAT_SOURCE_##source_name,		\
+	.offset = ((((field_type *) 0) ==				\
+		    &((struct efx_##source_name *)0)->field) ?
\ + offsetof(struct efx_##source_name, field) : \ + offsetof(struct efx_##source_name, field)), \ + .get_stat = get_stat_function, \ +} + +/* MAC address mask including only I/G bit */ +static const u8 mac_addr_ig_mask[ETH_ALEN] __aligned(2) = {0x01, 0, 0, 0, 0, 0}; + +static u64 efx_get_uint_stat(void *field) +{ + return *(unsigned int *)field; +} + +static u64 efx_get_atomic_stat(void *field) +{ + return atomic_read((atomic_t *) field); +} + +#define EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(field) \ + EFX_ETHTOOL_STAT(field, nic, errors.field, \ + atomic_t, efx_get_atomic_stat) + +#define EFX_ETHTOOL_UINT_CHANNEL_STAT(field) \ + EFX_ETHTOOL_STAT(field, channel, n_##field, \ + unsigned int, efx_get_uint_stat) +#define EFX_ETHTOOL_UINT_CHANNEL_STAT_NO_N(field) \ + EFX_ETHTOOL_STAT(field, channel, field, \ + unsigned int, efx_get_uint_stat) + +#define EFX_ETHTOOL_UINT_RXQ_STAT(field) \ + EFX_ETHTOOL_STAT(field, rx_queue, n_##field, \ + unsigned int, efx_get_uint_stat) +#define EFX_ETHTOOL_UINT_TXQ_STAT(field) \ + EFX_ETHTOOL_STAT(tx_##field, tx_queue, field, \ + unsigned int, efx_get_uint_stat) + +static const struct efx_sw_stat_desc efx_sw_stat_desc[] = { + EFX_ETHTOOL_UINT_TXQ_STAT(merge_events), + EFX_ETHTOOL_UINT_TXQ_STAT(tso_bursts), + EFX_ETHTOOL_UINT_TXQ_STAT(tso_long_headers), + EFX_ETHTOOL_UINT_TXQ_STAT(tso_packets), + EFX_ETHTOOL_UINT_TXQ_STAT(tso_fallbacks), + EFX_ETHTOOL_UINT_TXQ_STAT(pushes), + EFX_ETHTOOL_UINT_TXQ_STAT(pio_packets), + EFX_ETHTOOL_UINT_TXQ_STAT(cb_packets), + EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset), + EFX_ETHTOOL_UINT_RXQ_STAT(rx_tobe_disc), + EFX_ETHTOOL_UINT_RXQ_STAT(rx_ip_hdr_chksum_err), + EFX_ETHTOOL_UINT_RXQ_STAT(rx_tcp_udp_chksum_err), + EFX_ETHTOOL_UINT_RXQ_STAT(rx_inner_ip_hdr_chksum_err), + EFX_ETHTOOL_UINT_RXQ_STAT(rx_inner_tcp_udp_chksum_err), + EFX_ETHTOOL_UINT_RXQ_STAT(rx_outer_ip_hdr_chksum_err), + EFX_ETHTOOL_UINT_RXQ_STAT(rx_outer_tcp_udp_chksum_err), + EFX_ETHTOOL_UINT_RXQ_STAT(rx_eth_crc_err), + EFX_ETHTOOL_UINT_RXQ_STAT(rx_mcast_mismatch), + EFX_ETHTOOL_UINT_RXQ_STAT(rx_frm_trunc), + EFX_ETHTOOL_UINT_RXQ_STAT(rx_merge_events), + EFX_ETHTOOL_UINT_RXQ_STAT(rx_merge_packets), +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP) + EFX_ETHTOOL_UINT_RXQ_STAT(rx_xdp_drops), + EFX_ETHTOOL_UINT_RXQ_STAT(rx_xdp_bad_drops), + EFX_ETHTOOL_UINT_RXQ_STAT(rx_xdp_tx), + EFX_ETHTOOL_UINT_RXQ_STAT(rx_xdp_redirect), +#endif + EFX_ETHTOOL_UINT_RXQ_STAT(rx_mport_bad), +#ifdef CONFIG_RFS_ACCEL + EFX_ETHTOOL_UINT_CHANNEL_STAT_NO_N(rfs_filter_count), + EFX_ETHTOOL_UINT_CHANNEL_STAT(rfs_succeeded), + EFX_ETHTOOL_UINT_CHANNEL_STAT(rfs_failed), +#endif +}; + +#define EFX_ETHTOOL_SW_STAT_COUNT ARRAY_SIZE(efx_sw_stat_desc) + +static const char efx_ethtool_priv_flags_strings[][ETH_GSTRING_LEN] = { + "phy-power-follows-link", + "link-down-on-reset", + "xdp-tx", + "log-tc-errors", + "tc-match-ignore-ttl", +}; + +#define EFX_ETHTOOL_PRIV_FLAGS_PHY_POWER BIT(0) +#define EFX_ETHTOOL_PRIV_FLAGS_LINK_DOWN_ON_RESET BIT(1) +#define EFX_ETHTOOL_PRIV_FLAGS_XDP BIT(2) +#define EFX_ETHTOOL_PRIV_FLAGS_LOG_TC_ERRS BIT(3) +#define EFX_ETHTOOL_PRIV_FLAGS_TC_MATCH_IGNORE_TTL BIT(4) + +#define EFX_ETHTOOL_PRIV_FLAGS_COUNT ARRAY_SIZE(efx_ethtool_priv_flags_strings) + +void efx_ethtool_get_common_drvinfo(struct efx_nic *efx, + struct ethtool_drvinfo *info) +{ +#ifdef EFX_NOT_UPSTREAM + /* This is not populated on RHEL 6 */ + if (efx->pci_dev->driver) + strscpy(info->driver, efx->pci_dev->driver->name, + sizeof(info->driver)); + else + strscpy(info->driver, KBUILD_MODNAME, 
sizeof(info->driver)); + strscpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version)); +#else + strscpy(info->driver, efx->pci_dev->driver->name, sizeof(info->driver)); +#endif + strscpy(info->fw_version, "N/A", sizeof(info->fw_version)); + strscpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info)); + info->n_priv_flags = EFX_ETHTOOL_PRIV_FLAGS_COUNT; +} + +void efx_ethtool_get_drvinfo(struct net_device *net_dev, + struct ethtool_drvinfo *info) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + + efx_ethtool_get_common_drvinfo(efx, info); + if (!in_interrupt()) + efx_mcdi_print_fwver(efx, info->fw_version, + sizeof(info->fw_version)); +} + +/* Identify device by flashing LEDs */ +int efx_ethtool_phys_id(struct net_device *net_dev, + enum ethtool_phys_id_state state) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + enum efx_led_mode mode = EFX_LED_DEFAULT; + + switch (state) { + case ETHTOOL_ID_ON: + mode = EFX_LED_ON; + break; + case ETHTOOL_ID_OFF: + mode = EFX_LED_OFF; + break; + case ETHTOOL_ID_INACTIVE: + mode = EFX_LED_DEFAULT; + break; + case ETHTOOL_ID_ACTIVE: + return 1; /* cycle on/off once per second */ + } + + return efx_mcdi_set_id_led(efx, mode); +} + +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_ETHTOOL_SET_PHYS_ID) +int efx_ethtool_phys_id_loop(struct net_device *net_dev, u32 count) +{ + /* Driver expects to be called at twice the frequency in rc */ + int rc = efx_ethtool_phys_id(net_dev, ETHTOOL_ID_ACTIVE); + int n = rc * 2, i, interval = HZ / n; + + /* Count down seconds */ + do { + /* Count down iterations per second */ + i = n; + do { + efx_ethtool_phys_id(net_dev, + (i & 1) ? ETHTOOL_ID_OFF + : ETHTOOL_ID_ON); + schedule_timeout_interruptible(interval); + } while (!signal_pending(current) && --i != 0); + } while (!signal_pending(current) && + (count == 0 || --count != 0)); + + (void)efx_ethtool_phys_id(net_dev, ETHTOOL_ID_INACTIVE); + return 0; +} +#endif + +u32 efx_ethtool_get_msglevel(struct net_device *net_dev) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + return efx->msg_enable; +} + +void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + efx->msg_enable = msg_enable; +#ifdef EFX_NOT_UPSTREAM +#if IS_MODULE(CONFIG_SFC_DRIVERLINK) + efx->dl_nic.msg_enable = efx->msg_enable; +#endif +#endif +} + +void efx_ethtool_self_test(struct net_device *net_dev, + struct ethtool_test *test, u64 *data) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + struct efx_self_tests *efx_tests; + bool already_up; + int rc = -ENOMEM; + + efx_tests = kzalloc(sizeof(*efx_tests), GFP_KERNEL); + if (!efx_tests) + goto fail; + + efx_tests->eventq_dma = kcalloc(efx_channels(efx), + sizeof(*efx_tests->eventq_dma), + GFP_KERNEL); + efx_tests->eventq_int = kcalloc(efx_channels(efx), + sizeof(*efx_tests->eventq_int), + GFP_KERNEL); + + if (!efx_tests->eventq_dma || !efx_tests->eventq_int) { + goto fail; + } + + if (!efx_net_active(efx->state)) { + rc = -EBUSY; + goto out; + } + + /* We need rx buffers and interrupts. */ + already_up = (efx->net_dev->flags & IFF_UP); + if (!already_up && test->flags & ETH_TEST_FL_OFFLINE) { + netif_info(efx, drv, efx->net_dev, + "cannot perform offline selftest while down\n"); + test->flags &= ~ETH_TEST_FL_OFFLINE; + } + + netif_info(efx, drv, efx->net_dev, "starting %sline testing\n", + (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on"); + + /* We need rx buffers and interrupts. 
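+	 * Bring the interface up temporarily if it is down, and close
+	 * it again once the tests have completed.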
*/ + if (!already_up) { + rc = dev_open(efx->net_dev, NULL); + if (rc) { + netif_err(efx, drv, efx->net_dev, + "failed opening device.\n"); + goto out; + } + } + + rc = efx_selftest(efx, efx_tests, test->flags); + + if (!already_up) + dev_close(efx->net_dev); + + netif_info(efx, drv, efx->net_dev, "%s %sline self-tests\n", + rc == 0 ? "passed" : "failed", + (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on"); + +out: + efx_ethtool_fill_self_tests(efx, efx_tests, NULL, data); + +fail: + if (efx_tests) { + kfree(efx_tests->eventq_dma); + kfree(efx_tests->eventq_int); + kfree(efx_tests); + } + + if (rc) + test->flags |= ETH_TEST_FL_FAILED; +} + +/* Restart autonegotiation */ +int efx_ethtool_nway_reset(struct net_device *net_dev) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + u32 flags = efx_get_mcdi_phy_flags(efx); + int rc; + + flags |= (1 << MC_CMD_SET_LINK_IN_POWEROFF_LBN); + + rc = efx_mcdi_set_link(efx, efx_get_mcdi_caps(efx), flags, + efx->loopback_mode, false, + SET_LINK_SEQ_IGNORE); + if (rc) + return rc; + + flags &= ~(1 << MC_CMD_SET_LINK_IN_POWEROFF_LBN); + flags &= ~(1 << MC_CMD_SET_LINK_IN_LOWPOWER_LBN); + + return efx_mcdi_set_link(efx, efx_get_mcdi_caps(efx), flags, + efx->loopback_mode, false, + SET_LINK_SEQ_IGNORE); +} + +void efx_ethtool_get_pauseparam(struct net_device *net_dev, + struct ethtool_pauseparam *pause) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + + pause->rx_pause = !!(efx->wanted_fc & EFX_FC_RX); + pause->tx_pause = !!(efx->wanted_fc & EFX_FC_TX); + pause->autoneg = !!(efx->wanted_fc & EFX_FC_AUTO); +} + +int efx_ethtool_set_pauseparam(struct net_device *net_dev, + struct ethtool_pauseparam *pause) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + u8 wanted_fc, old_fc; + u32 old_adv; + int rc = 0; + + mutex_lock(&efx->mac_lock); + + wanted_fc = ((pause->rx_pause ? EFX_FC_RX : 0) | + (pause->tx_pause ? EFX_FC_TX : 0) | + (pause->autoneg ? EFX_FC_AUTO : 0)); + + if ((wanted_fc & EFX_FC_TX) && !(wanted_fc & EFX_FC_RX)) { + netif_dbg(efx, drv, efx->net_dev, + "Flow control unsupported: tx ON rx OFF\n"); + rc = -EINVAL; + goto out; + } + + if ((wanted_fc & EFX_FC_AUTO) && + !(efx->link_advertising[0] & ADVERTISED_Autoneg)) { + netif_dbg(efx, drv, efx->net_dev, + "Autonegotiation is disabled\n"); + rc = -EINVAL; + goto out; + } + + old_adv = efx->link_advertising[0]; + old_fc = efx->wanted_fc; + efx_link_set_wanted_fc(efx, wanted_fc); + if (efx->link_advertising[0] != old_adv || + (efx->wanted_fc ^ old_fc) & EFX_FC_AUTO) { + rc = efx_mcdi_port_reconfigure(efx); + if (rc) { + netif_err(efx, drv, efx->net_dev, + "Unable to advertise requested flow " + "control setting\n"); + efx->link_advertising[0] = old_adv; + efx->wanted_fc = old_fc; + goto out; + } + } + + /* Reconfigure the MAC. The PHY *may* generate a link state change event + * if the user just changed the advertised capabilities, but there's no + * harm doing this twice */ + (void)efx_mac_reconfigure(efx, false); + +out: + mutex_unlock(&efx->mac_lock); + + return rc; +} + +/** + * efx_fill_test - fill in an individual self-test entry + * @test_index: Index of the test + * @strings: Ethtool strings, or %NULL + * @data: Ethtool test results, or %NULL + * @test: Pointer to test result (used only if data != %NULL) + * @unit_format: Unit name format (e.g. "chan\%d") + * @unit_id: Unit id (e.g. 0 for "chan0") + * @test_format: Test name format (e.g. "loopback.\%s.tx.sent") + * @test_id: Test id (e.g. "PHYXS" for "loopback.PHYXS.tx_sent") + * + * Fill in an individual self-test entry. 
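+ *
+ * For example, a unit_format of "chan%d" with unit_id 2 and a
+ * test_format of "eventq.dma" yields "chan2  eventq.dma", padded to
+ * the fixed column widths used below.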
+ */ +static void efx_fill_test(unsigned int test_index, u8 *strings, u64 *data, + int *test, const char *unit_format, int unit_id, + const char *test_format, const char *test_id) +{ + char unit_str[ETH_GSTRING_LEN], test_str[ETH_GSTRING_LEN]; + + /* Fill data value, if applicable */ + if (data) + data[test_index] = *test; + + /* Fill string, if applicable */ + if (strings) { + if (strchr(unit_format, '%')) + snprintf(unit_str, sizeof(unit_str), + unit_format, unit_id); + else + strcpy(unit_str, unit_format); + snprintf(test_str, sizeof(test_str), test_format, test_id); + snprintf(strings + test_index * ETH_GSTRING_LEN, + ETH_GSTRING_LEN, + "%-6s %-24s", unit_str, test_str); + } +} + +#define EFX_LOOPBACK_NAME(_mode, _counter) \ + "loopback.%s." _counter, STRING_TABLE_LOOKUP(_mode, efx_loopback_mode) + +/** + * efx_fill_loopback_test - fill in a block of loopback self-test entries + * @efx: Efx NIC + * @lb_tests: Efx loopback self-test results structure + * @mode: Loopback test mode + * @test_index: Starting index of the test + * @strings: Ethtool strings, or %NULL + * @data: Ethtool test results, or %NULL + * + * Fill in a block of loopback self-test entries. + * + * Return: new test index. + */ +static int efx_fill_loopback_test(struct efx_nic *efx, + struct efx_loopback_self_tests *lb_tests, + enum efx_loopback_mode mode, + unsigned int test_index, + u8 *strings, u64 *data) +{ + if (efx->tx_channel_offset < efx_channels(efx)) { + struct efx_channel *channel = + efx_get_channel(efx, efx->tx_channel_offset); + struct efx_tx_queue *tx_queue; + + efx_for_each_channel_tx_queue(tx_queue, channel) { + efx_fill_test(test_index++, strings, data, + &lb_tests->tx_sent[tx_queue->queue], + EFX_TX_QUEUE_NAME(tx_queue), + EFX_LOOPBACK_NAME(mode, "tx_sent")); + efx_fill_test(test_index++, strings, data, + &lb_tests->tx_done[tx_queue->queue], + EFX_TX_QUEUE_NAME(tx_queue), + EFX_LOOPBACK_NAME(mode, "tx_done")); + } + } + efx_fill_test(test_index++, strings, data, + &lb_tests->rx_good, + "rx", 0, + EFX_LOOPBACK_NAME(mode, "rx_good")); + efx_fill_test(test_index++, strings, data, + &lb_tests->rx_bad, + "rx", 0, + EFX_LOOPBACK_NAME(mode, "rx_bad")); + + return test_index; +} + +/** + * efx_ethtool_fill_self_tests - get self-test details + * @efx: Efx NIC + * @tests: Efx self-test results structure, or %NULL + * @strings: Ethtool strings, or %NULL + * @data: Ethtool test results, or %NULL + * + * Get self-test number of strings, strings, and/or test results. + * + * The reason for merging these three functions is to make sure that + * they can never be inconsistent. + * + * Return: number of strings (equals number of test results). + */ +int efx_ethtool_fill_self_tests(struct efx_nic *efx, + struct efx_self_tests *tests, + u8 *strings, u64 *data) +{ + struct efx_channel *channel; + unsigned int n = 0, i; + enum efx_loopback_mode mode; + + efx_fill_test(n++, strings, data, &tests->phy_alive, + "phy", 0, "alive", NULL); + efx_fill_test(n++, strings, data, &tests->nvram, + "core", 0, "nvram", NULL); + efx_fill_test(n++, strings, data, &tests->interrupt, + "core", 0, "interrupt", NULL); + + /* Event queues */ + efx_for_each_channel(channel, efx) { + efx_fill_test(n++, strings, data, + tests ? &tests->eventq_dma[channel->channel] : NULL, + EFX_CHANNEL_NAME(channel), + "eventq.dma", NULL); + efx_fill_test(n++, strings, data, + tests ? 
&tests->eventq_int[channel->channel] : NULL, + EFX_CHANNEL_NAME(channel), + "eventq.int", NULL); + } + + efx_fill_test(n++, strings, data, &tests->memory, + "core", 0, "memory", NULL); + efx_fill_test(n++, strings, data, &tests->registers, + "core", 0, "registers", NULL); + + for (i = 0; true; ++i) { + const char *name; + + EFX_WARN_ON_PARANOID(i >= EFX_MAX_PHY_TESTS); + name = efx_mcdi_phy_test_name(efx, i); + if (name == NULL) + break; + + efx_fill_test(n++, strings, data, &tests->phy_ext[i], + "phy", 0, name, NULL); + } + + /* Loopback tests */ + for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) { + if (!(efx->loopback_modes & (1 << mode))) + continue; + n = efx_fill_loopback_test(efx, + &tests->loopback[mode], mode, n, + strings, data); + } + + return n; +} + +static size_t efx_describe_per_queue_stats(struct efx_nic *efx, u8 *strings) +{ + size_t n_stats = 0; + const char *q_name; + struct efx_channel *channel; + + efx_for_each_channel(channel, efx) { + if (efx_channel_has_tx_queues(channel)) { + n_stats++; + if (strings != NULL) { + unsigned int core_txq = channel->channel - + efx->tx_channel_offset; + + if (channel->type->get_queue_name) + q_name = channel->type->get_queue_name(channel, true); + else + q_name = "tx_packets"; + + snprintf(strings, ETH_GSTRING_LEN, + "tx-%u.%s", core_txq, q_name); + strings += ETH_GSTRING_LEN; + } + } + } + efx_for_each_channel(channel, efx) { + if (efx_channel_has_rx_queue(channel)) { + n_stats++; + if (strings != NULL) { + if (channel->type->get_queue_name) + q_name = channel->type->get_queue_name(channel, false); + else + q_name = "rx_packets"; + + snprintf(strings, ETH_GSTRING_LEN, + "rx-%d.%s", channel->channel, q_name); + strings += ETH_GSTRING_LEN; + } + } + } + if (efx->xdp_tx_queue_count && efx->xdp_tx_queues) { + unsigned short xdp; + + for (xdp = 0; xdp < efx->xdp_tx_queue_count; xdp++) { + if (efx->xdp_tx_queues[xdp]) { + n_stats++; + if (strings != NULL) { + snprintf(strings, ETH_GSTRING_LEN, + "tx-xdp-cpu-%hu.tx_packets", + xdp); + strings += ETH_GSTRING_LEN; + } + } + } + } +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_SOCK) +#if defined(CONFIG_XDP_SOCKETS) + efx_for_each_channel(channel, efx) { + unsigned int core_txq = channel->channel - + efx->tx_channel_offset; + n_stats++; + if (strings) { + snprintf(strings, ETH_GSTRING_LEN, + "tx-xsk-%u.tx_packets", + core_txq); + strings += ETH_GSTRING_LEN; + } + } +#endif +#endif + + return n_stats; +} + +int efx_ethtool_get_sset_count(struct net_device *net_dev, int string_set) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + + switch (string_set) { + case ETH_SS_STATS: + return efx->type->describe_stats(efx, NULL) + + EFX_ETHTOOL_SW_STAT_COUNT + + efx_describe_per_queue_stats(efx, NULL) + + efx_ptp_describe_stats(efx, NULL); + case ETH_SS_TEST: + return efx_ethtool_fill_self_tests(efx, NULL, NULL, NULL); + case ETH_SS_PRIV_FLAGS: + return EFX_ETHTOOL_PRIV_FLAGS_COUNT; + default: + return -EINVAL; + } +} + +void efx_ethtool_get_strings(struct net_device *net_dev, u32 string_set, + u8 *strings) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + int i; + + switch (string_set) { + case ETH_SS_STATS: + strings += (efx->type->describe_stats(efx, strings) * + ETH_GSTRING_LEN); + for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++) + strscpy(strings + i * ETH_GSTRING_LEN, + efx_sw_stat_desc[i].name, ETH_GSTRING_LEN); + strings += EFX_ETHTOOL_SW_STAT_COUNT * ETH_GSTRING_LEN; + strings += (efx_describe_per_queue_stats(efx, strings) * + ETH_GSTRING_LEN); + 
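		/* PTP strings come last, in the same order that
+		 * efx_ethtool_get_stats() emits the corresponding values.
+		 */
+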
efx_ptp_describe_stats(efx, strings); + break; + case ETH_SS_TEST: + efx_ethtool_fill_self_tests(efx, NULL, strings, NULL); + break; + case ETH_SS_PRIV_FLAGS: + for (i = 0; i < EFX_ETHTOOL_PRIV_FLAGS_COUNT; i++) + strscpy(strings + i * ETH_GSTRING_LEN, + efx_ethtool_priv_flags_strings[i], + ETH_GSTRING_LEN); + break; + default: + /* No other string sets */ + break; + } +} + +u32 efx_ethtool_get_priv_flags(struct net_device *net_dev) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + u32 ret_flags = 0; + + if (efx->phy_power_follows_link) + ret_flags |= EFX_ETHTOOL_PRIV_FLAGS_PHY_POWER; + + if (efx->link_down_on_reset) + ret_flags |= EFX_ETHTOOL_PRIV_FLAGS_LINK_DOWN_ON_RESET; + + if (efx->xdp_tx) + ret_flags |= EFX_ETHTOOL_PRIV_FLAGS_XDP; + + if (efx->log_tc_errs) + ret_flags |= EFX_ETHTOOL_PRIV_FLAGS_LOG_TC_ERRS; + + if (efx->tc_match_ignore_ttl) + ret_flags |= EFX_ETHTOOL_PRIV_FLAGS_TC_MATCH_IGNORE_TTL; + + return ret_flags; +} + +int efx_ethtool_set_priv_flags(struct net_device *net_dev, u32 flags) +{ + u32 prev_flags = efx_ethtool_get_priv_flags(net_dev); + struct efx_nic *efx = efx_netdev_priv(net_dev); + bool is_up = !efx_check_disabled(efx) && netif_running(efx->net_dev); + bool xdp_change = + (flags & EFX_ETHTOOL_PRIV_FLAGS_XDP) != + (prev_flags & EFX_ETHTOOL_PRIV_FLAGS_XDP); + +#ifdef EFX_NOT_UPSTREAM +#if IS_MODULE(CONFIG_SFC_DRIVERLINK) + if (xdp_change && efx->open_count > is_up) { + netif_err(efx, drv, efx->net_dev, + "unable to set XDP. device in use by driverlink stack\n"); + return -EBUSY; + } +#endif +#endif + + /* can't change XDP state when interface is up */ + if (is_up && xdp_change) + dev_close(net_dev); + + efx->phy_power_follows_link = + !!(flags & EFX_ETHTOOL_PRIV_FLAGS_PHY_POWER); + efx->link_down_on_reset = + !!(flags & EFX_ETHTOOL_PRIV_FLAGS_LINK_DOWN_ON_RESET); + efx->xdp_tx = + !!(flags & EFX_ETHTOOL_PRIV_FLAGS_XDP); + efx->log_tc_errs = + !!(flags & EFX_ETHTOOL_PRIV_FLAGS_LOG_TC_ERRS); + efx->tc_match_ignore_ttl = + !!(flags & EFX_ETHTOOL_PRIV_FLAGS_TC_MATCH_IGNORE_TTL); + + if (is_up && xdp_change) + return dev_open(net_dev, NULL); + + return 0; +} + +void efx_ethtool_get_stats(struct net_device *net_dev, + struct ethtool_stats *stats __attribute__ ((unused)), + u64 *data) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + const struct efx_sw_stat_desc *stat; + struct efx_channel *channel; + struct efx_tx_queue *tx_queue; + struct efx_rx_queue *rx_queue; + int i; + + /* Get NIC statistics */ + data += efx->type->update_stats(efx, data, NULL); + /* efx->stats is obtained in update_stats and held */ + + /* Get software statistics */ + for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++) { + stat = &efx_sw_stat_desc[i]; + switch (stat->source) { + case EFX_ETHTOOL_STAT_SOURCE_nic: + data[i] = stat->get_stat((void *)efx + stat->offset); + break; + case EFX_ETHTOOL_STAT_SOURCE_channel: + data[i] = 0; + efx_for_each_channel(channel, efx) + data[i] += stat->get_stat((void *)channel + + stat->offset); + break; + case EFX_ETHTOOL_STAT_SOURCE_rx_queue: + data[i] = 0; + efx_for_each_channel(channel, efx) { + efx_for_each_channel_rx_queue(rx_queue, channel) + data[i] += + stat->get_stat((void *)rx_queue + + stat->offset); + } + break; + case EFX_ETHTOOL_STAT_SOURCE_tx_queue: + data[i] = 0; + efx_for_each_channel(channel, efx) { + efx_for_each_channel_tx_queue(tx_queue, channel) + data[i] += + stat->get_stat((void *)tx_queue + + stat->offset); + } + break; + } + } + data += EFX_ETHTOOL_SW_STAT_COUNT; + + /* release stats_lock obtained in update_stats */ + 
spin_unlock_bh(&efx->stats_lock); + + efx_for_each_channel(channel, efx) { + if (efx_channel_has_tx_queues(channel)) { + data[0] = 0; + efx_for_each_channel_tx_queue(tx_queue, channel) { +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_SOCK) +#if defined(CONFIG_XDP_SOCKETS) + if (!efx_is_xsk_tx_queue(tx_queue)) +#endif + data[0] += tx_queue->tx_packets; +#else + data[0] += tx_queue->tx_packets; +#endif + } + data++; + } + } + efx_for_each_channel(channel, efx) { + if (efx_channel_has_rx_queue(channel)) { + data[0] = 0; + efx_for_each_channel_rx_queue(rx_queue, channel) { + data[0] += rx_queue->rx_packets; + } + data++; + } + } + if (efx->xdp_tx_queue_count && efx->xdp_tx_queues) { + int xdp; + + for (xdp = 0; xdp < efx->xdp_tx_queue_count; xdp++) { + if (efx->xdp_tx_queues[xdp]) { + data[0] = efx->xdp_tx_queues[xdp]->tx_packets; + data++; + } + } +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_SOCK) +#if defined(CONFIG_XDP_SOCKETS) + efx_for_each_channel(channel, efx) { + tx_queue = efx_channel_get_xsk_tx_queue(channel); + if (tx_queue) + data[0] = tx_queue->tx_packets; + data++; + } +#endif +#endif + } + + efx_ptp_update_stats(efx, data); +} + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_ETHTOOL_CHANNELS) || defined(EFX_HAVE_ETHTOOL_EXT_CHANNELS) +void efx_ethtool_get_channels(struct net_device *net_dev, + struct ethtool_channels *channels) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + + channels->combined_count = efx->n_combined_channels; + channels->rx_count = efx->n_rx_only_channels; + channels->tx_count = efx->n_tx_only_channels; + + /* count up 'other' channels */ + channels->max_other = efx_xdp_channels(efx) + efx->n_extra_channels; + channels->other_count = channels->max_other; + + if (efx->n_tx_only_channels && efx->n_rx_only_channels) { + channels->max_combined = 0; + channels->max_rx = efx->n_rx_only_channels; + channels->max_tx = efx->n_tx_only_channels; + } else { + channels->max_combined = efx->max_tx_channels - + channels->max_other; + channels->max_rx = 0; + channels->max_tx = 0; + } +} + +int efx_ethtool_set_channels(struct net_device *net_dev, + struct ethtool_channels *channels) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + bool is_up = !efx_check_disabled(efx) && netif_running(efx->net_dev); + int rc, rc2 = 0; + + /* Cannot change special channels yet */ + if (channels->other_count != channels->max_other) + return -EINVAL; + + /* If we're in a separate TX channels config then reject any changes. + * If we're not then reject an attempt to make these non-zero. + */ + if (channels->rx_count != channels->max_rx || + channels->tx_count != channels->max_tx) + return -EINVAL; + +#ifdef EFX_NOT_UPSTREAM +#if IS_MODULE(CONFIG_SFC_DRIVERLINK) + if (efx->open_count > is_up) { + netif_err(efx, drv, efx->net_dev, + "unable to set channels. device in use by driverlink stack\n"); + return -EBUSY; + } +#endif +#endif + + if (is_up) + dev_close(net_dev); + + efx->n_combined_channels = channels->combined_count; + efx->n_tx_only_channels = 0; + efx->n_rx_only_channels = 0; + + /* Update the default RSS spread shown by ethtool -x */ + rc = efx_mcdi_push_default_indir_table(efx, + efx->n_combined_channels); + + /* changing the queue setup invalidates ntuple filters */ + if (!rc) + efx_filter_clear_ntuple(efx); + + /* Update the datapath with the new settings */ + if (is_up) + rc2 = dev_open(net_dev, NULL); + return (rc ? 
rc : rc2); +} +#endif + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_ETHTOOL_LINKSETTINGS) +int efx_ethtool_get_link_ksettings(struct net_device *net_dev, + struct ethtool_link_ksettings *out) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + + mutex_lock(&efx->mac_lock); + efx_mcdi_phy_get_ksettings(efx, out); + mutex_unlock(&efx->mac_lock); + + return 0; +} + +int efx_ethtool_set_link_ksettings(struct net_device *net_dev, + const struct ethtool_link_ksettings *settings) +{ + __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising); + struct efx_nic *efx = efx_netdev_priv(net_dev); + int rc; + + mutex_lock(&efx->mac_lock); + rc = efx_mcdi_phy_set_ksettings(efx, settings, advertising); + if (rc > 0) { + efx_link_set_advertising(efx, advertising); + rc = 0; + } + mutex_unlock(&efx->mac_lock); + + return rc; +} +#endif + +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_ETHTOOL_LINKSETTINGS) || defined(EFX_HAVE_ETHTOOL_LEGACY) +/* This must be called with rtnl_lock held. */ +int efx_ethtool_get_settings(struct net_device *net_dev, + struct ethtool_cmd *ecmd) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + struct efx_link_state *link_state = &efx->link_state; + + mutex_lock(&efx->mac_lock); + efx_mcdi_phy_get_settings(efx, ecmd); + mutex_unlock(&efx->mac_lock); + + /* Both MACs support pause frames (bidirectional and respond-only) */ + ecmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; + + if (LOOPBACK_INTERNAL(efx)) { + ethtool_cmd_speed_set(ecmd, link_state->speed); + ecmd->duplex = link_state->fd ? DUPLEX_FULL : DUPLEX_HALF; + } + + return 0; +} + +/* This must be called with rtnl_lock held. */ +int efx_ethtool_set_settings(struct net_device *net_dev, + struct ethtool_cmd *ecmd) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + __ETHTOOL_DECLARE_LINK_MODE_MASK(new_adv); + int rc; + + /* GMAC does not support 1000Mbps HD */ + if ((ethtool_cmd_speed(ecmd) == SPEED_1000) && + (ecmd->duplex != DUPLEX_FULL)) { + netif_dbg(efx, drv, efx->net_dev, + "rejecting unsupported 1000Mbps HD setting\n"); + return -EINVAL; + } + + mutex_lock(&efx->mac_lock); + rc = efx_mcdi_phy_set_settings(efx, ecmd, new_adv); + if (rc > 0) { + efx_link_set_advertising(efx, new_adv); + rc = 0; + } + mutex_unlock(&efx->mac_lock); + return rc; +} +#endif + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_ETHTOOL_FECSTATS) +void efx_ethtool_get_fec_stats(struct net_device *net_dev, + struct ethtool_fec_stats *fec_stats) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + + if (efx->type->get_fec_stats) + efx->type->get_fec_stats(efx, fec_stats); +} +#endif + +int efx_ethtool_get_fecparam(struct net_device *net_dev, + struct ethtool_fecparam *fecparam) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + int rc; + + mutex_lock(&efx->mac_lock); + rc = efx_mcdi_phy_get_fecparam(efx, fecparam); + mutex_unlock(&efx->mac_lock); + + return rc; +} + +int efx_ethtool_set_fecparam(struct net_device *net_dev, + struct ethtool_fecparam *fecparam) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + int rc; + + mutex_lock(&efx->mac_lock); + rc = efx_mcdi_phy_set_fecparam(efx, fecparam); + mutex_unlock(&efx->mac_lock); + return rc; +} + +/* Convert old-style _EN RSS flags into new style _MODE flags */ +static u32 efx_ethtool_convert_old_rss_flags(u32 old_flags) +{ + u32 flags = 0; + + if (old_flags & (1 << MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_LBN)) + flags |= RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN |\ + RSS_MODE_HASH_ADDRS << 
MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_LBN; + if (old_flags & (1 << MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_LBN)) + flags |= RSS_MODE_HASH_4TUPLE << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_LBN; + if (old_flags & (1 << MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_LBN)) + flags |= RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN |\ + RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_LBN; + if (old_flags & (1 << MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_LBN)) + flags |= RSS_MODE_HASH_4TUPLE << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_LBN; + return flags; +} + +static int efx_ethtool_set_rss_flags(struct efx_nic *efx, +#ifdef EFX_USE_KCOMPAT + struct efx_ethtool_rxnfc *info) +#else + struct ethtool_rxnfc *info) +#endif +{ + struct efx_rss_context *ctx = &efx->rss_context; + u32 flags, mode = 0; + int shift, rc = 0; + + if (!efx->type->rx_set_rss_flags) + return -EOPNOTSUPP; + if (!efx->type->rx_get_rss_flags) + return -EOPNOTSUPP; + mutex_lock(&efx->rss_lock); + if (info->flow_type & FLOW_RSS && info->rss_context) { + ctx = efx_find_rss_context_entry(efx, info->rss_context); + if (!ctx) { + rc = -ENOENT; + goto out_unlock; + } + } + efx->type->rx_get_rss_flags(efx, ctx); + flags = ctx->flags; + if (!(flags & RSS_CONTEXT_FLAGS_ADDITIONAL_MASK)) + flags = efx_ethtool_convert_old_rss_flags(flags); + /* In case we end up clearing all additional flags (meaning we + * want no RSS), make sure the old-style flags are cleared too. + */ + flags &= RSS_CONTEXT_FLAGS_ADDITIONAL_MASK; + + switch (info->flow_type & ~FLOW_RSS) { + case TCP_V4_FLOW: + shift = MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_LBN; + break; + case UDP_V4_FLOW: + shift = MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN; + break; + case SCTP_V4_FLOW: + case AH_ESP_V4_FLOW: + /* Can't configure independently of other-IPv4 */ + rc = -EOPNOTSUPP; + goto out_unlock; + case IPV4_FLOW: + shift = MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_LBN; + break; + case TCP_V6_FLOW: + shift = MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_LBN; + break; + case UDP_V6_FLOW: + shift = MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN; + break; + case SCTP_V6_FLOW: + case AH_ESP_V6_FLOW: + /* Can't configure independently of other-IPv6 */ + rc = -EOPNOTSUPP; + goto out_unlock; + case IPV6_FLOW: + shift = MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_LBN; + break; + default: + rc = -EOPNOTSUPP; + goto out_unlock; + } + + /* Clear the old flags for this flow_type */ + BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_WIDTH != 4); + flags &= ~(0xf << shift); + /* Construct new flags */ + if (info->data & RXH_IP_SRC) + mode |= 1 << RSS_MODE_HASH_SRC_ADDR_LBN; + if (info->data & RXH_IP_DST) + mode |= 1 << RSS_MODE_HASH_DST_ADDR_LBN; + if (info->data & RXH_L4_B_0_1) + mode |= 1 << RSS_MODE_HASH_SRC_PORT_LBN; + if (info->data & RXH_L4_B_2_3) + mode |= 1 << RSS_MODE_HASH_DST_PORT_LBN; + flags |= mode << shift; + rc = efx->type->rx_set_rss_flags(efx, ctx, flags); +out_unlock: + mutex_unlock(&efx->rss_lock); + return rc; +} + +static int efx_ethtool_get_class_rule(struct efx_nic *efx, +#ifdef EFX_USE_KCOMPAT + struct efx_ethtool_rx_flow_spec *rule, +#else + struct ethtool_rx_flow_spec *rule, +#endif + u32 *rss_context) +{ + struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec; + struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec; + struct ethtool_usrip4_spec *uip_entry = 
&rule->h_u.usr_ip4_spec; + struct ethtool_usrip4_spec *uip_mask = &rule->m_u.usr_ip4_spec; +#if defined(EFX_USE_KCOMPAT) && defined(EFX_NEED_IPV6_NFC) + struct ethtool_tcpip6_spec *ip6_entry = (void *)&rule->h_u; + struct ethtool_tcpip6_spec *ip6_mask = (void *)&rule->m_u; + struct ethtool_usrip6_spec *uip6_entry = (void *)&rule->h_u; + struct ethtool_usrip6_spec *uip6_mask = (void *)&rule->m_u; +#else + struct ethtool_tcpip6_spec *ip6_entry = &rule->h_u.tcp_ip6_spec; + struct ethtool_tcpip6_spec *ip6_mask = &rule->m_u.tcp_ip6_spec; + struct ethtool_usrip6_spec *uip6_entry = &rule->h_u.usr_ip6_spec; + struct ethtool_usrip6_spec *uip6_mask = &rule->m_u.usr_ip6_spec; +#endif + struct ethhdr *mac_entry = &rule->h_u.ether_spec; + struct ethhdr *mac_mask = &rule->m_u.ether_spec; + struct efx_filter_spec spec; + int rc; + + rc = efx_filter_ntuple_get(efx, rule->location, &spec); + if (rc) + return rc; + + if (spec.dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP) + rule->ring_cookie = RX_CLS_FLOW_DISC; + else + rule->ring_cookie = spec.dmaq_id; + + if ((spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) && + spec.ether_type == htons(ETH_P_IP) && + (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) && + (spec.ip_proto == IPPROTO_TCP || spec.ip_proto == IPPROTO_UDP) && + !(spec.match_flags & + ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID | + EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST | + EFX_FILTER_MATCH_IP_PROTO | + EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_REM_PORT))) { + rule->flow_type = ((spec.ip_proto == IPPROTO_TCP) ? + TCP_V4_FLOW : UDP_V4_FLOW); + if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) { + ip_entry->ip4dst = spec.loc_host[0]; + ip_mask->ip4dst = IP4_ADDR_FULL_MASK; + } + if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) { + ip_entry->ip4src = spec.rem_host[0]; + ip_mask->ip4src = IP4_ADDR_FULL_MASK; + } + if (spec.match_flags & EFX_FILTER_MATCH_LOC_PORT) { + ip_entry->pdst = spec.loc_port; + ip_mask->pdst = PORT_FULL_MASK; + } + if (spec.match_flags & EFX_FILTER_MATCH_REM_PORT) { + ip_entry->psrc = spec.rem_port; + ip_mask->psrc = PORT_FULL_MASK; + } + } else if ((spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) && + spec.ether_type == htons(ETH_P_IPV6) && + (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) && + (spec.ip_proto == IPPROTO_TCP || spec.ip_proto == IPPROTO_UDP) && + !(spec.match_flags & + ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID | + EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST | + EFX_FILTER_MATCH_IP_PROTO | + EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_REM_PORT))) { + rule->flow_type = ((spec.ip_proto == IPPROTO_TCP) ? 
+ TCP_V6_FLOW : UDP_V6_FLOW); + if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) { + memcpy(ip6_entry->ip6dst, spec.loc_host, + sizeof(ip6_entry->ip6dst)); + ip6_fill_mask(ip6_mask->ip6dst); + } + if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) { + memcpy(ip6_entry->ip6src, spec.rem_host, + sizeof(ip6_entry->ip6src)); + ip6_fill_mask(ip6_mask->ip6src); + } + if (spec.match_flags & EFX_FILTER_MATCH_LOC_PORT) { + ip6_entry->pdst = spec.loc_port; + ip6_mask->pdst = PORT_FULL_MASK; + } + if (spec.match_flags & EFX_FILTER_MATCH_REM_PORT) { + ip6_entry->psrc = spec.rem_port; + ip6_mask->psrc = PORT_FULL_MASK; + } + } else if (!(spec.match_flags & + ~(EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG | + EFX_FILTER_MATCH_REM_MAC | EFX_FILTER_MATCH_ETHER_TYPE | + EFX_FILTER_MATCH_OUTER_VID))) { + rule->flow_type = ETHER_FLOW; + if (spec.match_flags & + (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG)) { + ether_addr_copy(mac_entry->h_dest, spec.loc_mac); + if (spec.match_flags & EFX_FILTER_MATCH_LOC_MAC) + eth_broadcast_addr(mac_mask->h_dest); + else + ether_addr_copy(mac_mask->h_dest, + mac_addr_ig_mask); + } + if (spec.match_flags & EFX_FILTER_MATCH_REM_MAC) { + ether_addr_copy(mac_entry->h_source, spec.rem_mac); + eth_broadcast_addr(mac_mask->h_source); + } + if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) { + mac_entry->h_proto = spec.ether_type; + mac_mask->h_proto = ETHER_TYPE_FULL_MASK; + } + } else if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE && + spec.ether_type == htons(ETH_P_IP)) { + rule->flow_type = IP_USER_FLOW; + uip_entry->ip_ver = ETH_RX_NFC_IP4; + if (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) { + uip_mask->proto = IP_PROTO_FULL_MASK; + uip_entry->proto = spec.ip_proto; + } + if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) { + uip_entry->ip4dst = spec.loc_host[0]; + uip_mask->ip4dst = IP4_ADDR_FULL_MASK; + } + if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) { + uip_entry->ip4src = spec.rem_host[0]; + uip_mask->ip4src = IP4_ADDR_FULL_MASK; + } + } else if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE && + spec.ether_type == htons(ETH_P_IPV6)) { + rule->flow_type = IPV6_USER_FLOW; + if (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) { + uip6_mask->l4_proto = IP_PROTO_FULL_MASK; + uip6_entry->l4_proto = spec.ip_proto; + } + if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) { + memcpy(uip6_entry->ip6dst, spec.loc_host, + sizeof(uip6_entry->ip6dst)); + ip6_fill_mask(uip6_mask->ip6dst); + } + if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) { + memcpy(uip6_entry->ip6src, spec.rem_host, + sizeof(uip6_entry->ip6src)); + ip6_fill_mask(uip6_mask->ip6src); + } + } else { + /* The above should handle all filters that we insert */ + WARN_ON(1); + return -EINVAL; + } + + if (spec.match_flags & EFX_FILTER_MATCH_OUTER_VID) { + rule->flow_type |= FLOW_EXT; + rule->h_ext.vlan_tci = spec.outer_vid; + rule->m_ext.vlan_tci = htons(0xfff); + } + + if (spec.flags & EFX_FILTER_FLAG_RX_RSS) { + rule->flow_type |= FLOW_RSS; + *rss_context = spec.rss_context; + } + + return rc; +} + +int efx_ethtool_get_rxnfc(struct net_device *net_dev, +#ifdef EFX_USE_KCOMPAT + struct efx_ethtool_rxnfc *info, +#else + struct ethtool_rxnfc *info, +#endif + u32 *rule_locs) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + u32 rss_context = 0; + s32 rc = 0; + + switch (info->cmd) { + case ETHTOOL_GRXRINGS: + info->data = efx->n_rss_channels; + if (!info->data) + return -ENOENT; + return 0; + + case ETHTOOL_GRXFH: { + struct efx_rss_context *ctx = &efx->rss_context; + + 
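+		/* Hash flags live in an RSS context: default to the main
+		 * context, but a FLOW_RSS request may name a custom one.
+		 */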
mutex_lock(&efx->rss_lock); + if (info->flow_type & FLOW_RSS && info->rss_context) { + ctx = efx_find_rss_context_entry(efx, + info->rss_context); + if (!ctx) { + rc = -ENOENT; + goto out_unlock; + } + } + info->data = 0; + if (!efx_rss_active(ctx)) /* No RSS */ + goto out_unlock; + if (efx->type->rx_get_rss_flags) { + int rc; + + rc = efx->type->rx_get_rss_flags(efx, ctx); + if (rc) + goto out_unlock; + } + if (ctx->flags & RSS_CONTEXT_FLAGS_ADDITIONAL_MASK) { + int shift; + u8 mode; + + switch (info->flow_type & ~FLOW_RSS) { + case TCP_V4_FLOW: + shift = MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_LBN; + break; + case UDP_V4_FLOW: + shift = MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN; + break; + case SCTP_V4_FLOW: + case AH_ESP_V4_FLOW: + case IPV4_FLOW: + shift = MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_LBN; + break; + case TCP_V6_FLOW: + shift = MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_LBN; + break; + case UDP_V6_FLOW: + shift = MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN; + break; + case SCTP_V6_FLOW: + case AH_ESP_V6_FLOW: + case IPV6_FLOW: + shift = MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_LBN; + break; + default: + goto out_unlock; + } + mode = ctx->flags >> shift; + if (mode & (1 << RSS_MODE_HASH_SRC_ADDR_LBN)) + info->data |= RXH_IP_SRC; + if (mode & (1 << RSS_MODE_HASH_DST_ADDR_LBN)) + info->data |= RXH_IP_DST; + if (mode & (1 << RSS_MODE_HASH_SRC_PORT_LBN)) + info->data |= RXH_L4_B_0_1; + if (mode & (1 << RSS_MODE_HASH_DST_PORT_LBN)) + info->data |= RXH_L4_B_2_3; + } else { + switch (info->flow_type & ~FLOW_RSS) { + case TCP_V4_FLOW: + info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case UDP_V4_FLOW: + case SCTP_V4_FLOW: + case AH_ESP_V4_FLOW: + case IPV4_FLOW: + info->data |= RXH_IP_SRC | RXH_IP_DST; + break; + case TCP_V6_FLOW: + info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case UDP_V6_FLOW: + case SCTP_V6_FLOW: + case AH_ESP_V6_FLOW: + case IPV6_FLOW: + info->data |= RXH_IP_SRC | RXH_IP_DST; + break; + default: + break; + } + } +out_unlock: + mutex_unlock(&efx->rss_lock); + return rc; + } + + case ETHTOOL_GRXCLSRLCNT: + info->data = efx_filter_get_rx_id_limit(efx); + if (info->data == 0) + return -EOPNOTSUPP; + info->data |= RX_CLS_LOC_SPECIAL; + info->rule_cnt = efx_filter_count_ntuple(efx); + return 0; + + case ETHTOOL_GRXCLSRULE: + if (efx_filter_get_rx_id_limit(efx) == 0) + return -EOPNOTSUPP; + rc = efx_ethtool_get_class_rule(efx, &info->fs, &rss_context); + if (rc < 0) + return rc; + if (info->fs.flow_type & FLOW_RSS) + info->rss_context = rss_context; + return 0; + + case ETHTOOL_GRXCLSRLALL: + info->data = efx_filter_get_rx_id_limit(efx); + if (info->data == 0) + return -EOPNOTSUPP; + info->rule_cnt = efx_filter_count_ntuple(efx); + efx_filter_get_ntuple_ids(efx, rule_locs, info->rule_cnt); + return 0; + + default: + return -EOPNOTSUPP; + } +} + +static inline bool ip6_mask_is_full(__be32 mask[4]) +{ + return !~(mask[0] & mask[1] & mask[2] & mask[3]); +} + +static inline bool ip6_mask_is_empty(__be32 mask[4]) +{ + return !(mask[0] | mask[1] | mask[2] | mask[3]); +} + +#ifdef EFX_USE_KCOMPAT +static int efx_ethtool_set_class_rule(struct efx_nic *efx, + struct efx_ethtool_rx_flow_spec *rule, + u32 rss_context) +#else +static int efx_ethtool_set_class_rule(struct efx_nic *efx, + struct ethtool_rx_flow_spec *rule, + u32 rss_context) +#endif +{ + struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec; + struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec; + struct 
ethtool_usrip4_spec *uip_entry = &rule->h_u.usr_ip4_spec; + struct ethtool_usrip4_spec *uip_mask = &rule->m_u.usr_ip4_spec; +#if defined(EFX_USE_KCOMPAT) && defined(EFX_NEED_IPV6_NFC) + struct ethtool_tcpip6_spec *ip6_entry = (void *)&rule->h_u; + struct ethtool_tcpip6_spec *ip6_mask = (void *)&rule->m_u; + struct ethtool_usrip6_spec *uip6_entry = (void *)&rule->h_u; + struct ethtool_usrip6_spec *uip6_mask = (void *)&rule->m_u; +#else + struct ethtool_tcpip6_spec *ip6_entry = &rule->h_u.tcp_ip6_spec; + struct ethtool_tcpip6_spec *ip6_mask = &rule->m_u.tcp_ip6_spec; + struct ethtool_usrip6_spec *uip6_entry = &rule->h_u.usr_ip6_spec; + struct ethtool_usrip6_spec *uip6_mask = &rule->m_u.usr_ip6_spec; +#endif + u32 flow_type = rule->flow_type & ~(FLOW_EXT | FLOW_RSS); + struct ethhdr *mac_entry = &rule->h_u.ether_spec; + struct ethhdr *mac_mask = &rule->m_u.ether_spec; + enum efx_filter_flags flags = 0; + struct efx_filter_spec spec; + int rc; + + /* Check that user wants us to choose the location */ + if (rule->location != RX_CLS_LOC_ANY) + return -EINVAL; + + /* Range-check ring_cookie */ + if (rule->ring_cookie >= efx_rx_channels(efx) && + rule->ring_cookie != RX_CLS_FLOW_DISC) + return -EINVAL; + + /* Check for unsupported extensions */ + if ((rule->flow_type & FLOW_EXT) && + (rule->m_ext.vlan_etype || rule->m_ext.data[0] || + rule->m_ext.data[1])) + return -EINVAL; + + if (efx->rx_scatter) + flags |= EFX_FILTER_FLAG_RX_SCATTER; + if (rule->flow_type & FLOW_RSS) + flags |= EFX_FILTER_FLAG_RX_RSS; + + efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, flags, + (rule->ring_cookie == RX_CLS_FLOW_DISC) ? + EFX_FILTER_RX_DMAQ_ID_DROP : rule->ring_cookie); + + if (rule->flow_type & FLOW_RSS) + spec.rss_context = rss_context; + + switch (flow_type) { + case TCP_V4_FLOW: + case UDP_V4_FLOW: + spec.match_flags = (EFX_FILTER_MATCH_ETHER_TYPE | + EFX_FILTER_MATCH_IP_PROTO); + spec.ether_type = htons(ETH_P_IP); + spec.ip_proto = flow_type == TCP_V4_FLOW ? IPPROTO_TCP + : IPPROTO_UDP; + if (ip_mask->ip4dst) { + if (ip_mask->ip4dst != IP4_ADDR_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST; + spec.loc_host[0] = ip_entry->ip4dst; + } + if (ip_mask->ip4src) { + if (ip_mask->ip4src != IP4_ADDR_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_REM_HOST; + spec.rem_host[0] = ip_entry->ip4src; + } + if (ip_mask->pdst) { + if (ip_mask->pdst != PORT_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_LOC_PORT; + spec.loc_port = ip_entry->pdst; + } + if (ip_mask->psrc) { + if (ip_mask->psrc != PORT_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_REM_PORT; + spec.rem_port = ip_entry->psrc; + } + if (ip_mask->tos) + return -EINVAL; + break; + + case TCP_V6_FLOW: + case UDP_V6_FLOW: + spec.match_flags = (EFX_FILTER_MATCH_ETHER_TYPE | + EFX_FILTER_MATCH_IP_PROTO); + spec.ether_type = htons(ETH_P_IPV6); + spec.ip_proto = flow_type == TCP_V6_FLOW ? 
IPPROTO_TCP + : IPPROTO_UDP; + if (!ip6_mask_is_empty(ip6_mask->ip6dst)) { + if (!ip6_mask_is_full(ip6_mask->ip6dst)) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST; + memcpy(spec.loc_host, ip6_entry->ip6dst, + sizeof(spec.loc_host)); + } + if (!ip6_mask_is_empty(ip6_mask->ip6src)) { + if (!ip6_mask_is_full(ip6_mask->ip6src)) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_REM_HOST; + memcpy(spec.rem_host, ip6_entry->ip6src, + sizeof(spec.rem_host)); + } + if (ip6_mask->pdst) { + if (ip6_mask->pdst != PORT_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_LOC_PORT; + spec.loc_port = ip6_entry->pdst; + } + if (ip6_mask->psrc) { + if (ip6_mask->psrc != PORT_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_REM_PORT; + spec.rem_port = ip6_entry->psrc; + } + if (ip6_mask->tclass) + return -EINVAL; + break; + + case IP_USER_FLOW: + if (uip_mask->l4_4_bytes || uip_mask->tos || uip_mask->ip_ver || + uip_entry->ip_ver != ETH_RX_NFC_IP4) + return -EINVAL; + spec.match_flags = EFX_FILTER_MATCH_ETHER_TYPE; + spec.ether_type = htons(ETH_P_IP); + if (uip_mask->ip4dst) { + if (uip_mask->ip4dst != IP4_ADDR_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST; + spec.loc_host[0] = uip_entry->ip4dst; + } + if (uip_mask->ip4src) { + if (uip_mask->ip4src != IP4_ADDR_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_REM_HOST; + spec.rem_host[0] = uip_entry->ip4src; + } + if (uip_mask->proto) { + if (uip_mask->proto != IP_PROTO_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_IP_PROTO; + spec.ip_proto = uip_entry->proto; + } + break; + + case IPV6_USER_FLOW: + if (uip6_mask->l4_4_bytes || uip6_mask->tclass) + return -EINVAL; + spec.match_flags = EFX_FILTER_MATCH_ETHER_TYPE; + spec.ether_type = htons(ETH_P_IPV6); + if (!ip6_mask_is_empty(uip6_mask->ip6dst)) { + if (!ip6_mask_is_full(uip6_mask->ip6dst)) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST; + memcpy(spec.loc_host, uip6_entry->ip6dst, + sizeof(spec.loc_host)); + } + if (!ip6_mask_is_empty(uip6_mask->ip6src)) { + if (!ip6_mask_is_full(uip6_mask->ip6src)) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_REM_HOST; + memcpy(spec.rem_host, uip6_entry->ip6src, + sizeof(spec.rem_host)); + } + if (uip6_mask->l4_proto) { + if (uip6_mask->l4_proto != IP_PROTO_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_IP_PROTO; + spec.ip_proto = uip6_entry->l4_proto; + } + break; + + case ETHER_FLOW: + if (!is_zero_ether_addr(mac_mask->h_dest)) { + if (ether_addr_equal(mac_mask->h_dest, + mac_addr_ig_mask)) + spec.match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG; + else if (is_broadcast_ether_addr(mac_mask->h_dest)) + spec.match_flags |= EFX_FILTER_MATCH_LOC_MAC; + else + return -EINVAL; + ether_addr_copy(spec.loc_mac, mac_entry->h_dest); + } + if (!is_zero_ether_addr(mac_mask->h_source)) { + if (!is_broadcast_ether_addr(mac_mask->h_source)) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_REM_MAC; + ether_addr_copy(spec.rem_mac, mac_entry->h_source); + } + if (mac_mask->h_proto) { + if (mac_mask->h_proto != ETHER_TYPE_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_ETHER_TYPE; + spec.ether_type = mac_entry->h_proto; + } + break; + + default: + return -EINVAL; + } + + if ((rule->flow_type & FLOW_EXT) && rule->m_ext.vlan_tci) { + if (rule->m_ext.vlan_tci != htons(0xfff)) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_OUTER_VID; + spec.outer_vid = rule->h_ext.vlan_tci; + } + + 
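+	/* The filter table chooses the location: a successful insert
+	 * returns the new rule's ID, which is reported back to ethtool
+	 * as the rule location below.
+	 */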
rc = efx_filter_ntuple_insert(efx, &spec); + if (rc < 0) + return rc; + + rule->location = rc; + return 0; +} + +int efx_ethtool_set_rxnfc(struct net_device *net_dev, +#ifdef EFX_USE_KCOMPAT + struct efx_ethtool_rxnfc *info) +#else + struct ethtool_rxnfc *info) +#endif +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + + if (efx_filter_get_rx_id_limit(efx) == 0) + return -EOPNOTSUPP; + + switch (info->cmd) { + case ETHTOOL_SRXCLSRLINS: + return efx_ethtool_set_class_rule(efx, &info->fs, + info->rss_context); + + case ETHTOOL_SRXCLSRLDEL: + return efx_filter_ntuple_remove(efx, info->fs.location); + + case ETHTOOL_SRXFH: + return efx_ethtool_set_rss_flags(efx, info); + + default: + return -EOPNOTSUPP; + } +} + +#ifdef EFX_USE_KCOMPAT +int efx_ethtool_get_rxnfc_wrapper(struct net_device *net_dev, + struct ethtool_rxnfc *info, +#ifdef EFX_HAVE_OLD_ETHTOOL_GET_RXNFC + void *rules) +#else + u32 *rules) +#endif +{ + return efx_ethtool_get_rxnfc(net_dev, (struct efx_ethtool_rxnfc *)info, + rules); +} + +int efx_ethtool_set_rxnfc_wrapper(struct net_device *net_dev, + struct ethtool_rxnfc *info) +{ + return efx_ethtool_set_rxnfc(net_dev, (struct efx_ethtool_rxnfc *)info); +} +#endif + +u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + + return ARRAY_SIZE(efx->rss_context.rx_indir_table); +} + + +u32 efx_ethtool_get_rxfh_key_size(struct net_device *net_dev) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + + return efx->type->rx_hash_key_size; +} + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_ETHTOOL_GET_RXFH) || defined(EFX_HAVE_ETHTOOL_GET_RXFH_INDIR) || !defined(EFX_HAVE_ETHTOOL_RXFH_INDIR) +int efx_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); +#else +int efx_sfctool_get_rxfh(struct efx_nic *efx, u32 *indir, u8 *key, + u8 *hfunc) +{ +#endif + int rc; + + if (!efx->type->rx_pull_rss_config) + return -EOPNOTSUPP; + + rc = efx->type->rx_pull_rss_config(efx); + if (rc) + return rc; + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + if (indir) + memcpy(indir, efx->rss_context.rx_indir_table, + sizeof(efx->rss_context.rx_indir_table)); + if (key) + memcpy(key, efx->rss_context.rx_hash_key, + efx->type->rx_hash_key_size); + return 0; +} + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_ETHTOOL_GET_RXFH) || defined(EFX_HAVE_ETHTOOL_GET_RXFH_INDIR) || !defined(EFX_HAVE_ETHTOOL_RXFH_INDIR) +int efx_ethtool_set_rxfh(struct net_device *net_dev, + const u32 *indir, const u8 *key, const u8 hfunc) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + +#else +int efx_sfctool_set_rxfh(struct efx_nic *efx, + const u32 *indir, const u8 *key, const u8 hfunc) +{ +#endif + if (!efx->type->rx_push_rss_config) + return -EOPNOTSUPP; + + /* We do not allow change in unsupported parameters */ + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) + return -EOPNOTSUPP; + if (!indir && !key) + return 0; + + if (!key) + key = efx->rss_context.rx_hash_key; + if (!indir) + indir = efx->rss_context.rx_indir_table; + + return efx->type->rx_push_rss_config(efx, true, indir, key); +} + + +#if defined(EFX_HAVE_ETHTOOL_GET_RXFH) && !defined(EFX_HAVE_CONFIGURABLE_RSS_HASH) +/* Wrappers without hash function getting and setting. 
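+ * These forward to the full get/set implementations, reading with a NULL
+ * hfunc and writing with ETH_RSS_HASH_NO_CHANGE, since the hash function
+ * is fixed as Toeplitz on this hardware.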
*/ +int efx_ethtool_get_rxfh_no_hfunc(struct net_device *net_dev, + u32 *indir, u8 *key) +{ + return efx_ethtool_get_rxfh(net_dev, indir, key, NULL); +} + +# if defined(EFX_HAVE_ETHTOOL_SET_RXFH_NOCONST) +/* RH backported version doesn't have const for arguments. */ +int efx_ethtool_set_rxfh_no_hfunc(struct net_device *net_dev, + u32 *indir, u8 *key) +{ + return efx_ethtool_set_rxfh(net_dev, indir, key, + ETH_RSS_HASH_NO_CHANGE); +} +# else +int efx_ethtool_set_rxfh_no_hfunc(struct net_device *net_dev, + const u32 *indir, const u8 *key) +{ + return efx_ethtool_set_rxfh(net_dev, indir, key, + ETH_RSS_HASH_NO_CHANGE); +} +# endif +#endif + +#if defined(EFX_HAVE_OLD_ETHTOOL_RXFH_INDIR) || !defined(EFX_HAVE_ETHTOOL_RXFH_INDIR) +int efx_ethtool_old_get_rxfh_indir(struct net_device *net_dev, + struct ethtool_rxfh_indir *indir) +{ + u32 user_size = indir->size, dev_size; + + dev_size = efx_ethtool_get_rxfh_indir_size(net_dev); + if (dev_size == 0) + return -EOPNOTSUPP; + + if (user_size < dev_size) { + indir->size = dev_size; + return user_size == 0 ? 0 : -EINVAL; + } + + return efx_ethtool_get_rxfh(net_dev, indir->ring_index, NULL, NULL); +} + +int efx_ethtool_old_set_rxfh_indir(struct net_device *net_dev, + const struct ethtool_rxfh_indir *indir) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + u32 user_size = indir->size, dev_size, i; + + dev_size = efx_ethtool_get_rxfh_indir_size(net_dev); + if (dev_size == 0) + return -EOPNOTSUPP; + + if (user_size != dev_size) + return -EINVAL; + + /* Validate ring indices */ + for (i = 0; i < dev_size; i++) + if (indir->ring_index[i] >= efx_rx_channels(efx)) + return -EINVAL; + + return efx_ethtool_set_rxfh(net_dev, indir->ring_index, NULL, + ETH_RSS_HASH_NO_CHANGE); +} +#endif + +#if defined(EFX_USE_KCOMPAT) +#if defined(EFX_HAVE_ETHTOOL_GET_RXFH_INDIR) && !defined(EFX_HAVE_ETHTOOL_GET_RXFH) && !defined(EFX_HAVE_OLD_ETHTOOL_RXFH_INDIR) +/* Wrappers that only set the indirection table, not the key. 
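+ * Passing a NULL key through to the full implementations leaves the
+ * current hash key in place.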
*/ +int efx_ethtool_get_rxfh_indir(struct net_device *net_dev, u32 *indir) +{ + return efx_ethtool_get_rxfh(net_dev, indir, NULL, NULL); +} + +int efx_ethtool_set_rxfh_indir(struct net_device *net_dev, const u32 *indir) +{ + return efx_ethtool_set_rxfh(net_dev, indir, NULL, + ETH_RSS_HASH_NO_CHANGE); +} +#endif +#endif + +/* sfctool + */ +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_ETHTOOL_RXFH_CONTEXT) +int efx_sfctool_get_rxnfc(struct efx_nic *efx, + struct efx_ethtool_rxnfc *info, u32 *rule_locs) +{ + return efx_ethtool_get_rxnfc(efx->net_dev, info, rule_locs); +} +#endif + +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_ETHTOOL_RXFH_CONTEXT) +u32 efx_sfctool_get_rxfh_indir_size(struct efx_nic *efx) +{ + return efx_ethtool_get_rxfh_indir_size(efx->net_dev); +} + +u32 efx_sfctool_get_rxfh_key_size(struct efx_nic *efx) +{ + return efx_ethtool_get_rxfh_key_size(efx->net_dev); +} +#endif + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_ETHTOOL_RXFH_CONTEXT) +int efx_ethtool_get_rxfh_context(struct net_device *net_dev, u32 *indir, + u8 *key, u8 *hfunc, u32 rss_context) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); +#else +int efx_sfctool_get_rxfh_context(struct efx_nic *efx, u32 *indir, + u8 *key, u8 *hfunc, u32 rss_context) +{ +#endif + struct efx_rss_context *ctx; + int rc = 0; + + if (!efx->type->rx_pull_rss_context_config) + return -EOPNOTSUPP; + + mutex_lock(&efx->rss_lock); + ctx = efx_find_rss_context_entry(efx, rss_context); + if (!ctx) { + rc = -ENOENT; + goto out_unlock; + } + rc = efx->type->rx_pull_rss_context_config(efx, ctx); + if (rc) + goto out_unlock; + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + if (indir) + memcpy(indir, ctx->rx_indir_table, sizeof(ctx->rx_indir_table)); + if (key) + memcpy(key, ctx->rx_hash_key, efx->type->rx_hash_key_size); +out_unlock: + mutex_unlock(&efx->rss_lock); + return rc; +} + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_ETHTOOL_RXFH_CONTEXT) +int efx_ethtool_set_rxfh_context(struct net_device *net_dev, + const u32 *indir, const u8 *key, + const u8 hfunc, u32 *rss_context, + bool delete) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); +#else +int efx_sfctool_set_rxfh_context(struct efx_nic *efx, + const u32 *indir, const u8 *key, + const u8 hfunc, u32 *rss_context, + bool delete) +{ +#endif + struct efx_rss_context *ctx; + bool allocated = false; + int rc; + + if (!efx->type->rx_push_rss_context_config) + return -EOPNOTSUPP; + /* Hash function is Toeplitz, cannot be changed */ + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) + return -EOPNOTSUPP; + + mutex_lock(&efx->rss_lock); + + if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) { + if (delete) { + /* alloc + delete == Nothing to do */ + rc = -EINVAL; + goto out_unlock; + } + ctx = efx_alloc_rss_context_entry(efx); + if (!ctx) { + rc = -ENOMEM; + goto out_unlock; + } + ctx->context_id = EFX_MCDI_RSS_CONTEXT_INVALID; + /* Initialise indir table and key to defaults */ + efx_set_default_rx_indir_table(efx, ctx); + netdev_rss_key_fill(ctx->rx_hash_key, sizeof(ctx->rx_hash_key)); + allocated = true; + } else { + ctx = efx_find_rss_context_entry(efx, *rss_context); + if (!ctx) { + rc = -ENOENT; + goto out_unlock; + } + } + + if (delete) { + /* delete this context */ + rc = efx->type->rx_push_rss_context_config(efx, ctx, NULL, NULL); + if (!rc) + efx_free_rss_context_entry(ctx); + goto out_unlock; + } + + if (!key) + key = ctx->rx_hash_key; + if (!indir) + indir = ctx->rx_indir_table; + + rc = efx->type->rx_push_rss_context_config(efx, ctx, indir, key); + 
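+	/* If the push failed, free a context allocated in this call rather
+	 * than leaking it; otherwise report the user-visible context ID
+	 * back to the caller.
+	 */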
if (rc && allocated) + efx_free_rss_context_entry(ctx); + else + *rss_context = ctx->user_id; +out_unlock: + mutex_unlock(&efx->rss_lock); + return rc; +} + +int efx_ethtool_reset(struct net_device *net_dev, u32 *flags) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + u32 reset_flags = *flags; + int rc; + + rc = efx->type->map_reset_flags(&reset_flags); + if (rc >= 0) { + rc = efx_reset(efx, rc); + /* Manual resets can be done as often as you like */ + efx->reset_count = 0; + /* update *flags if reset succeeded */ + if (!rc) + *flags = reset_flags; + } + + if (*flags & ETH_RESET_MAC) { + netif_info(efx, drv, efx->net_dev, + "Resetting statistics.\n"); + efx->stats_initialised = false; + efx->type->pull_stats(efx); + *flags &= ~ETH_RESET_MAC; + rc = 0; + } + + return rc; +} + +int efx_ethtool_get_module_eeprom(struct net_device *net_dev, + struct ethtool_eeprom *ee, + u8 *data) +{ + return efx_mcdi_phy_get_module_eeprom(efx_netdev_priv(net_dev), ee, data); +} + +int efx_ethtool_get_module_info(struct net_device *net_dev, + struct ethtool_modinfo *modinfo) +{ + return efx_mcdi_phy_get_module_info(efx_netdev_priv(net_dev), modinfo); +} diff --git a/src/drivers/net/ethernet/sfc/ethtool_common.h b/src/drivers/net/ethernet/sfc/ethtool_common.h new file mode 100644 index 0000000..1401600 --- /dev/null +++ b/src/drivers/net/ethernet/sfc/ethtool_common.h @@ -0,0 +1,171 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2018 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
+ */ + +#ifndef EFX_ETHTOOL_COMMON_H +#define EFX_ETHTOOL_COMMON_H + +int efx_ethtool_phys_id(struct net_device *net_dev, + enum ethtool_phys_id_state state); +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_ETHTOOL_SET_PHYS_ID) +int efx_ethtool_phys_id_loop(struct net_device *net_dev, u32 count); +#endif + +void efx_ethtool_get_common_drvinfo(struct efx_nic *efx, + struct ethtool_drvinfo *info); +void efx_ethtool_get_drvinfo(struct net_device *net_dev, + struct ethtool_drvinfo *info); +u32 efx_ethtool_get_msglevel(struct net_device *net_dev); +void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable); +void efx_ethtool_self_test(struct net_device *net_dev, + struct ethtool_test *test, u64 *data); +int efx_ethtool_nway_reset(struct net_device *net_dev); +void efx_ethtool_get_pauseparam(struct net_device *net_dev, + struct ethtool_pauseparam *pause); +int efx_ethtool_set_pauseparam(struct net_device *net_dev, + struct ethtool_pauseparam *pause); +int efx_ethtool_fill_self_tests(struct efx_nic *efx, + struct efx_self_tests *tests, + u8 *strings, u64 *data); +int efx_ethtool_get_sset_count(struct net_device *net_dev, int string_set); +void efx_ethtool_get_strings(struct net_device *net_dev, u32 string_set, + u8 *strings); +u32 efx_ethtool_get_priv_flags(struct net_device *net_dev); +int efx_ethtool_set_priv_flags(struct net_device *net_dev, u32 flags); +void efx_ethtool_get_stats(struct net_device *net_dev, + struct ethtool_stats *stats __attribute__ ((unused)), + u64 *data); +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_ETHTOOL_CHANNELS) || defined(EFX_HAVE_ETHTOOL_EXT_CHANNELS) +void efx_ethtool_get_channels(struct net_device *net_dev, + struct ethtool_channels *channels); +int efx_ethtool_set_channels(struct net_device *net_dev, + struct ethtool_channels *channels); +#endif + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_ETHTOOL_LINKSETTINGS) +int efx_ethtool_get_link_ksettings(struct net_device *net_dev, + struct ethtool_link_ksettings *out); +int efx_ethtool_set_link_ksettings(struct net_device *net_dev, + const struct ethtool_link_ksettings *settings); +#endif +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_ETHTOOL_LINKSETTINGS) || defined(EFX_HAVE_ETHTOOL_LEGACY) +int efx_ethtool_get_settings(struct net_device *net_dev, + struct ethtool_cmd *ecmd); +int efx_ethtool_set_settings(struct net_device *net_dev, + struct ethtool_cmd *ecmd); +#endif +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_ETHTOOL_FECSTATS) +void efx_ethtool_get_fec_stats(struct net_device *net_dev, + struct ethtool_fec_stats *fec_stats); +#endif +int efx_ethtool_get_fecparam(struct net_device *net_dev, + struct ethtool_fecparam *fecparam); +int efx_ethtool_set_fecparam(struct net_device *net_dev, + struct ethtool_fecparam *fecparam); + +#ifdef EFX_USE_KCOMPAT +int efx_ethtool_get_rxnfc(struct net_device *net_dev, + struct efx_ethtool_rxnfc *info, u32 *rule_locs); +#else +int efx_ethtool_get_rxnfc(struct net_device *net_dev, + struct ethtool_rxnfc *info, u32 *rule_locs); +#endif +#ifdef EFX_USE_KCOMPAT +int efx_ethtool_get_rxnfc_wrapper(struct net_device *net_dev, + struct ethtool_rxnfc *info, +#ifdef EFX_HAVE_OLD_ETHTOOL_GET_RXNFC + void *rules); +#else + u32 *rules); +#endif +int efx_ethtool_set_rxnfc_wrapper(struct net_device *net_dev, + struct ethtool_rxnfc *info); +#endif +int efx_ethtool_reset(struct net_device *net_dev, u32 *flags); + +#define IP4_ADDR_FULL_MASK ((__force __be32)~0) +#define IP_PROTO_FULL_MASK 0xFF +#define PORT_FULL_MASK ((__force __be16)~0) +#define 
ETHER_TYPE_FULL_MASK ((__force __be16)~0)
+
+static inline void ip6_fill_mask(__be32 *mask)
+{
+	mask[0] = mask[1] = mask[2] = mask[3] = ~(__be32)0;
+}
+
+u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev);
+u32 efx_ethtool_get_rxfh_key_size(struct net_device *net_dev);
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_ETHTOOL_GET_RXFH) || defined(EFX_HAVE_ETHTOOL_GET_RXFH_INDIR) || !defined(EFX_HAVE_ETHTOOL_RXFH_INDIR)
+int efx_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key,
+			 u8 *hfunc);
+int efx_ethtool_set_rxfh(struct net_device *net_dev,
+			 const u32 *indir, const u8 *key, const u8 hfunc);
+#else
+int efx_sfctool_get_rxfh(struct efx_nic *efx, u32 *indir, u8 *key,
+			 u8 *hfunc);
+int efx_sfctool_set_rxfh(struct efx_nic *efx,
+			 const u32 *indir, const u8 *key, const u8 hfunc);
+#endif
+
+
+#if defined(EFX_USE_KCOMPAT)
+#if defined(EFX_HAVE_ETHTOOL_GET_RXFH_INDIR) && !defined(EFX_HAVE_ETHTOOL_GET_RXFH) && !defined(EFX_HAVE_OLD_ETHTOOL_RXFH_INDIR)
+/* Wrappers that only set the indirection table, not the key. */
+int efx_ethtool_get_rxfh_indir(struct net_device *net_dev, u32 *indir);
+int efx_ethtool_set_rxfh_indir(struct net_device *net_dev, const u32 *indir);
+#endif
+#endif
+#if defined(EFX_HAVE_OLD_ETHTOOL_RXFH_INDIR) || !defined(EFX_HAVE_ETHTOOL_RXFH_INDIR)
+int efx_ethtool_old_get_rxfh_indir(struct net_device *net_dev,
+				   struct ethtool_rxfh_indir *indir);
+int efx_ethtool_old_set_rxfh_indir(struct net_device *net_dev,
+				   const struct ethtool_rxfh_indir *indir);
+#endif
+
+#if defined(EFX_HAVE_ETHTOOL_GET_RXFH) && !defined(EFX_HAVE_CONFIGURABLE_RSS_HASH)
+/* Wrappers without hash function getting and setting. */
+int efx_ethtool_get_rxfh_no_hfunc(struct net_device *net_dev,
+				  u32 *indir, u8 *key);
+# if defined(EFX_HAVE_ETHTOOL_SET_RXFH_NOCONST)
+/* RH backported version doesn't have const for arguments. */
+int efx_ethtool_set_rxfh_no_hfunc(struct net_device *net_dev,
+				  u32 *indir, u8 *key);
+# else
+int efx_ethtool_set_rxfh_no_hfunc(struct net_device *net_dev,
+				  const u32 *indir, const u8 *key);
+# endif
+#endif
+
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_ETHTOOL_RXFH_CONTEXT)
+int efx_ethtool_get_rxfh_context(struct net_device *net_dev, u32 *indir,
+				 u8 *key, u8 *hfunc, u32 rss_context);
+int efx_ethtool_set_rxfh_context(struct net_device *net_dev,
+				 const u32 *indir, const u8 *key,
+				 const u8 hfunc, u32 *rss_context,
+				 bool delete);
+#else
+int efx_sfctool_get_rxfh_context(struct efx_nic *efx, u32 *indir,
+				 u8 *key, u8 *hfunc, u32 rss_context);
+int efx_sfctool_set_rxfh_context(struct efx_nic *efx,
+				 const u32 *indir, const u8 *key,
+				 const u8 hfunc, u32 *rss_context,
+				 bool delete);
+#endif
+
+int efx_ethtool_get_module_eeprom(struct net_device *net_dev,
+				  struct ethtool_eeprom *ee,
+				  u8 *data);
+int efx_ethtool_get_module_info(struct net_device *net_dev,
+				struct ethtool_modinfo *modinfo);
+
+#if defined(EFX_USE_KCOMPAT) && (!defined(EFX_USE_DEVLINK) || defined(EFX_NEED_ETHTOOL_FLASH_DEVICE))
+int efx_ethtool_flash_device(struct net_device *net_dev,
+			     struct ethtool_flash *flash);
+#endif
+
+#endif /* EFX_ETHTOOL_COMMON_H */
diff --git a/src/drivers/net/ethernet/sfc/filter.h b/src/drivers/net/ethernet/sfc/filter.h
new file mode 100644
index 0000000..a9f2d91
--- /dev/null
+++ b/src/drivers/net/ethernet/sfc/filter.h
@@ -0,0 +1,518 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2017 Solarflare Communications Inc.
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#ifndef EFX_FILTER_H +#define EFX_FILTER_H + +#include +#include +#if !defined(EFX_NOT_UPSTREAM) || defined(__KERNEL__) +#include +#include +#else +/* Use userland definition of struct in6_addr */ +#include +#endif +#include + +#if !defined(EFX_USE_KCOMPAT) && defined(EFX_NOT_UPSTREAM) && defined(EFX_NEED_ETHER_ADDR_COPY) +/* Standalone KCOMPAT for driverlink headers */ +static inline void efx_ether_addr_copy(u8 *dst, const u8 *src) +{ + u16 *a = (u16 *)dst; + const u16 *b = (const u16 *)src; + + a[0] = b[0]; + a[1] = b[1]; + a[2] = b[2]; +} +#define ether_addr_copy efx_ether_addr_copy +#endif + +#if defined(EFX_USE_KCOMPAT) || !defined(struct_group) +/* Standalone KCOMPAT for driverlink headers */ +#define __struct_group(TAG, NAME, ATTRS, MEMBERS...) \ + union { \ + struct { MEMBERS } ATTRS; \ + struct TAG { MEMBERS } ATTRS NAME; \ + } +#define struct_group(NAME, MEMBERS...) \ + __struct_group(/* no tag */, NAME, /* no attrs */, MEMBERS) +#endif + +/** + * enum efx_filter_match_flags - Flags for hardware filter match type + * @EFX_FILTER_MATCH_REM_HOST: Match by remote IP host address + * @EFX_FILTER_MATCH_LOC_HOST: Match by local IP host address + * @EFX_FILTER_MATCH_REM_MAC: Match by remote MAC address + * @EFX_FILTER_MATCH_REM_PORT: Match by remote TCP/UDP port + * @EFX_FILTER_MATCH_LOC_MAC: Match by local MAC address + * @EFX_FILTER_MATCH_LOC_PORT: Match by local TCP/UDP port + * @EFX_FILTER_MATCH_ETHER_TYPE: Match by Ether-type + * @EFX_FILTER_MATCH_INNER_VID: Match by inner VLAN ID + * @EFX_FILTER_MATCH_OUTER_VID: Match by outer VLAN ID + * @EFX_FILTER_MATCH_IP_PROTO: Match by IP transport protocol + * @EFX_FILTER_MATCH_LOC_MAC_IG: Match by local MAC address I/G bit. + * Used for RX default unicast and multicast/broadcast filters. + * @EFX_FILTER_MATCH_ENCAP_TYPE: Match by encapsulation type. + * @EFX_FILTER_MATCH_ENCAP_TNI: Match by Tenant Network ID. + * Only applicable for filters with an encapsulation type set. + * @EFX_FILTER_MATCH_OUTER_LOC_MAC: Match by outer MAC for encapsulated packets + * Only applicable for filters with an encapsulation type set. 
+ *
+ * Only some combinations are supported, depending on NIC type:
+ *
+ * - Falcon supports RX filters matching by {TCP,UDP}/IPv4 4-tuple or
+ *   local 2-tuple (only implemented for Falcon B0)
+ *
+ * - Siena supports RX and TX filters matching by {TCP,UDP}/IPv4 4-tuple
+ *   or local 2-tuple, or local MAC with or without outer VID, and RX
+ *   default filters
+ *
+ * - Huntington supports filter matching controlled by firmware, potentially
+ *   using {TCP,UDP}/IPv{4,6} 4-tuple or local 2-tuple, local MAC or I/G bit,
+ *   with or without outer and inner VID
+ */
+enum efx_filter_match_flags {
+	EFX_FILTER_MATCH_REM_HOST = 0x0001,
+	EFX_FILTER_MATCH_LOC_HOST = 0x0002,
+	EFX_FILTER_MATCH_REM_MAC = 0x0004,
+	EFX_FILTER_MATCH_REM_PORT = 0x0008,
+	EFX_FILTER_MATCH_LOC_MAC = 0x0010,
+	EFX_FILTER_MATCH_LOC_PORT = 0x0020,
+	EFX_FILTER_MATCH_ETHER_TYPE = 0x0040,
+	EFX_FILTER_MATCH_INNER_VID = 0x0080,
+	EFX_FILTER_MATCH_OUTER_VID = 0x0100,
+	EFX_FILTER_MATCH_IP_PROTO = 0x0200,
+	EFX_FILTER_MATCH_LOC_MAC_IG = 0x0400,
+	EFX_FILTER_MATCH_ENCAP_TYPE = 0x0800,
+	EFX_FILTER_MATCH_ENCAP_TNI = 0x1000,
+	EFX_FILTER_MATCH_OUTER_LOC_MAC = 0x2000,
+};
+
+#define EFX_FILTER_MATCH_FLAGS_RFS (EFX_FILTER_MATCH_ETHER_TYPE | \
+				    EFX_FILTER_MATCH_IP_PROTO | \
+				    EFX_FILTER_MATCH_LOC_HOST | \
+				    EFX_FILTER_MATCH_LOC_PORT | \
+				    EFX_FILTER_MATCH_REM_HOST | \
+				    EFX_FILTER_MATCH_REM_PORT)
+
+#define EFX_FILTER_MATCH_FLAGS_RFS_DEST_ONLY (EFX_FILTER_MATCH_ETHER_TYPE | \
+					      EFX_FILTER_MATCH_IP_PROTO | \
+					      EFX_FILTER_MATCH_LOC_HOST | \
+					      EFX_FILTER_MATCH_LOC_PORT)
+
+/**
+ * enum efx_filter_priority - priority of a hardware filter specification
+ * @EFX_FILTER_PRI_HINT: Performance hint
+ * @EFX_FILTER_PRI_AUTO: Automatic filter based on device address list
+ *	or hardware requirements. This may only be used by the filter
+ *	implementation for each NIC type.
+ * @EFX_FILTER_PRI_MANUAL: Manually configured filter
+ * @EFX_FILTER_PRI_REQUIRED: Required for correct behaviour (user-level
+ *	networking and SR-IOV)
+ */
+enum efx_filter_priority {
+	EFX_FILTER_PRI_HINT = 1,
+	EFX_FILTER_PRI_AUTO,
+	EFX_FILTER_PRI_MANUAL,
+	EFX_FILTER_PRI_REQUIRED,
+};
+
+/**
+ * enum efx_filter_flags - flags for hardware filter specifications
+ * @EFX_FILTER_FLAG_RX_RSS: Use RSS to spread across multiple queues.
+ *	By default, matching packets will be delivered only to the
+ *	specified queue. If this flag is set, they will be delivered
+ *	to a range of queues offset from the specified queue number
+ *	according to the indirection table.
+ * @EFX_FILTER_FLAG_RX_SCATTER: Enable DMA scatter on the receiving
+ *	queue. Note that this cannot be enabled independently for
+ *	unicast and multicast default filters; it will only be enabled
+ *	if both have this flag set.
+ * @EFX_FILTER_FLAG_RX_OVER_AUTO: Indicates a filter that is
+ *	overriding an automatic filter (priority
+ *	%EFX_FILTER_PRI_AUTO). This may only be set by the filter
+ *	implementation for each type. A removal request will restore
+ *	the automatic filter in its place.
+ * @EFX_FILTER_FLAG_RX: Filter is for RX
+ * @EFX_FILTER_FLAG_TX: Filter is for TX
+ * @EFX_FILTER_FLAG_STACK_ID: Stack ID value for self loopback suppression.
+ * @EFX_FILTER_FLAG_VPORT_ID: Virtual port ID for adapter switching.
+ * @EFX_FILTER_FLAG_LOOPBACK: Filter is for loopback testing.
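+ *
+ * For example, an RX steering filter built with efx_filter_init_rx()
+ * below carries %EFX_FILTER_FLAG_RX plus whatever the caller passes,
+ * e.g. %EFX_FILTER_FLAG_RX_RSS to spread matches across the indirection
+ * table instead of delivering to a single queue.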
+ */
+enum efx_filter_flags {
+	EFX_FILTER_FLAG_RX_RSS = 0x01,
+	EFX_FILTER_FLAG_RX_SCATTER = 0x02,
+	EFX_FILTER_FLAG_RX_OVER_AUTO = 0x04,
+	EFX_FILTER_FLAG_RX = 0x08,
+	EFX_FILTER_FLAG_TX = 0x10,
+	EFX_FILTER_FLAG_STACK_ID = 0x20,
+	EFX_FILTER_FLAG_VPORT_ID = 0x40,
+	EFX_FILTER_FLAG_LOOPBACK = 0x80,
+};
+
+/* user_id of the driver-default RSS context */
+#define EFX_FILTER_RSS_CONTEXT_DEFAULT 0
+
+/**
+ * enum efx_encap_type - types of encapsulation
+ * @EFX_ENCAP_TYPE_NONE: no encapsulation
+ * @EFX_ENCAP_TYPE_VXLAN: VXLAN encapsulation
+ * @EFX_ENCAP_TYPE_NVGRE: NVGRE encapsulation
+ * @EFX_ENCAP_TYPE_GENEVE: GENEVE encapsulation
+ * @EFX_ENCAP_FLAG_IPV6: indicates IPv6 outer frame
+ *
+ * Contains both enumerated types and flags.
+ * To get just the type, AND with @EFX_ENCAP_TYPES_MASK.
+ */
+enum efx_encap_type {
+	EFX_ENCAP_TYPE_NONE = 0,
+	EFX_ENCAP_TYPE_VXLAN = 1,
+	EFX_ENCAP_TYPE_NVGRE = 2,
+	EFX_ENCAP_TYPE_GENEVE = 3,
+
+	EFX_ENCAP_TYPES_MASK = 7,
+	EFX_ENCAP_FLAG_IPV6 = 8,
+};
+
+/**
+ * struct efx_filter_spec - specification for a hardware filter
+ * @match_flags: Match type flags, from &enum efx_filter_match_flags
+ * @priority: Priority of the filter, from &enum efx_filter_priority
+ * @flags: Miscellaneous flags, from &enum efx_filter_flags
+ * @rss_context: RSS context to use, if %EFX_FILTER_FLAG_RX_RSS is set. This
+ *	is a user_id (with %EFX_FILTER_RSS_CONTEXT_DEFAULT meaning the
+ *	driver/default RSS context), not an MCFW context_id.
+ * @dmaq_id: Source/target queue index, or %EFX_FILTER_RX_DMAQ_ID_DROP for
+ *	an RX drop filter
+ * @stack_id: Stack id associated with RX queue, used for
+ *	multicast loopback suppression
+ * @vport_id: Virtual port user_id associated with RX queue, for adapter
+ *	switching. This is a user_id, with 0 meaning the main driver vport, not
+ *	an MCFW vport_id.
+ * @outer_vid: Outer VLAN ID to match, if %EFX_FILTER_MATCH_OUTER_VID is set
+ * @inner_vid: Inner VLAN ID to match, if %EFX_FILTER_MATCH_INNER_VID is set
+ * @loc_mac: Local MAC address to match, if %EFX_FILTER_MATCH_LOC_MAC or
+ *	%EFX_FILTER_MATCH_LOC_MAC_IG is set
+ * @rem_mac: Remote MAC address to match, if %EFX_FILTER_MATCH_REM_MAC is set
+ * @ether_type: Ether-type to match, if %EFX_FILTER_MATCH_ETHER_TYPE is set
+ * @ip_proto: IP transport protocol to match, if %EFX_FILTER_MATCH_IP_PROTO
+ *	is set
+ * @loc_host: Local IP host to match, if %EFX_FILTER_MATCH_LOC_HOST is set
+ * @rem_host: Remote IP host to match, if %EFX_FILTER_MATCH_REM_HOST is set
+ * @loc_port: Local TCP/UDP port to match, if %EFX_FILTER_MATCH_LOC_PORT is set
+ * @rem_port: Remote TCP/UDP port to match, if %EFX_FILTER_MATCH_REM_PORT is set
+ * @tni: VXLAN Tenant Network ID.
+ * @encap_type: Type of encapsulation, see efx_encap_type above.
+ * @outer_loc_mac: Outer local MAC address to match, if
+ *	%EFX_FILTER_MATCH_OUTER_LOC_MAC is set
+ *
+ * The efx_filter_init_rx() or efx_filter_init_tx() function *must* be
+ * used to initialise the structure. The efx_filter_set_*() functions
+ * may then be used to set @rss_context, @match_flags and related
+ * fields.
+ *
+ * The @priority field is used by software to determine whether a new
+ * filter may replace an old one. The hardware priority of a filter
+ * depends on which fields are matched.
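+ *
+ * A minimal usage sketch (illustrative only; rxq_index stands in for a
+ * valid RX queue index) that steers TCP traffic for local address
+ * 192.168.1.1 port 80 to one queue:
+ *
+ *	struct efx_filter_spec spec;
+ *
+ *	efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0, rxq_index);
+ *	efx_filter_set_ipv4_local(&spec, IPPROTO_TCP,
+ *				  htonl(0xc0a80101), htons(80));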
+ */ +struct efx_filter_spec { + u32 match_flags:16; + u32 priority:8; + u32 flags:8; + u32 dmaq_id:16; + u32 stack_id:16; + u32 rss_context; + struct_group(match_key, + u32 vport_id; + __be16 outer_vid; + __be16 inner_vid; + u8 loc_mac[ETH_ALEN]; + u8 rem_mac[ETH_ALEN]; + __be16 ether_type; + u8 ip_proto; + __be32 loc_host[4]; + __be32 rem_host[4]; + __be16 loc_port; + __be16 rem_port; + u32 tni:24; + u32 encap_type:4; + u8 outer_loc_mac[ETH_ALEN]; + ); + /* total 82 bytes */ +}; + +enum { + EFX_FILTER_RX_DMAQ_ID_DROP = 0xfff +}; + +static inline void efx_filter_init_rx(struct efx_filter_spec *spec, + enum efx_filter_priority priority, + enum efx_filter_flags flags, + unsigned int rxq_id) +{ + memset(spec, 0, sizeof(*spec)); + spec->priority = priority; + spec->flags = EFX_FILTER_FLAG_RX | flags; + spec->rss_context = 0; + spec->dmaq_id = rxq_id; +} + +static inline void efx_filter_init_tx(struct efx_filter_spec *spec, + unsigned int txq_id) +{ + memset(spec, 0, sizeof(*spec)); + spec->priority = EFX_FILTER_PRI_REQUIRED; + spec->flags = EFX_FILTER_FLAG_TX; + spec->dmaq_id = txq_id; +} + +/** + * efx_filter_set_ipv4_local - specify IPv4 host, transport protocol and port + * @spec: Specification to initialise + * @proto: Transport layer protocol number + * @host: Local host address (network byte order) + * @port: Local port (network byte order) + * + * Return: a negative error code or 0 on success. + */ +static inline int +efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto, + __be32 host, __be16 port) +{ + spec->match_flags |= + EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | + EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT; + spec->ether_type = htons(ETH_P_IP); + spec->ip_proto = proto; + spec->loc_host[0] = host; + spec->loc_port = port; + return 0; +} + +/** + * efx_filter_set_ipv4_full - specify IPv4 hosts, transport protocol and ports + * @spec: Specification to initialise + * @proto: Transport layer protocol number + * @lhost: Local host address (network byte order) + * @lport: Local port (network byte order) + * @rhost: Remote host address (network byte order) + * @rport: Remote port (network byte order) + * + * Return: a negative error code or 0 on success. + */ +static inline int +efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto, + __be32 lhost, __be16 lport, + __be32 rhost, __be16 rport) +{ + spec->match_flags |= + EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | + EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT | + EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT; + spec->ether_type = htons(ETH_P_IP); + spec->ip_proto = proto; + spec->loc_host[0] = lhost; + spec->loc_port = lport; + spec->rem_host[0] = rhost; + spec->rem_port = rport; + return 0; +} + +/** + * efx_filter_set_ipv6_local - specify IPv6 host, transport protocol and port + * @spec: Specification to initialise + * @proto: Transport layer protocol number + * @host: Local host address (network byte order) + * @port: Local port (network byte order) + * + * Return: a negative error code or 0 on success. 
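+ *
+ * Note that all 128 bits of @host are copied into the specification, so
+ * @host must point to a complete IPv6 address.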
+ */ +static inline int +efx_filter_set_ipv6_local(struct efx_filter_spec *spec, u8 proto, + const struct in6_addr *host, __be16 port) +{ + spec->match_flags |= + EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | + EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT; + spec->ether_type = htons(ETH_P_IPV6); + spec->ip_proto = proto; + memcpy(spec->loc_host, host->s6_addr32, sizeof(spec->loc_host)); + spec->loc_port = port; + return 0; +} + +/** + * efx_filter_set_ipv6_full - specify IPv6 hosts, transport protocol and ports + * @spec: Specification to initialise + * @proto: Transport layer protocol number + * @lhost: Local host address (network byte order) + * @lport: Local port (network byte order) + * @rhost: Remote host address (network byte order) + * @rport: Remote port (network byte order) + * + * Return: a negative error code or 0 on success. + */ +static inline int +efx_filter_set_ipv6_full(struct efx_filter_spec *spec, u8 proto, + struct in6_addr lhost, __be16 lport, + struct in6_addr rhost, __be16 rport) +{ + spec->match_flags |= + EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | + EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT | + EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT; + spec->ether_type = htons(ETH_P_IPV6); + spec->ip_proto = proto; + memcpy(spec->loc_host, lhost.s6_addr32, sizeof(spec->loc_host)); + spec->loc_port = lport; + memcpy(spec->rem_host, rhost.s6_addr32, sizeof(spec->rem_host)); + spec->rem_port = rport; + return 0; +} + +enum { + EFX_FILTER_VID_UNSPEC = 0xffff, +}; + +/** + * efx_filter_set_eth_local - specify local Ethernet address and/or VID + * @spec: Specification to initialise + * @vid: Outer VLAN ID to match, or %EFX_FILTER_VID_UNSPEC + * @addr: Local Ethernet MAC address, or %NULL + * + * Return: a negative error code or 0 on success. + */ +static inline int efx_filter_set_eth_local(struct efx_filter_spec *spec, + u16 vid, const u8 *addr) +{ + if (vid == EFX_FILTER_VID_UNSPEC && addr == NULL) + return -EINVAL; + + if (vid != EFX_FILTER_VID_UNSPEC) { + spec->match_flags |= EFX_FILTER_MATCH_OUTER_VID; + spec->outer_vid = htons(vid); + } + if (addr != NULL) { + spec->match_flags |= EFX_FILTER_MATCH_LOC_MAC; + ether_addr_copy(spec->loc_mac, addr); + } + return 0; +} + +/** + * efx_filter_set_uc_def - specify matching otherwise-unmatched unicast + * @spec: Specification to initialise + * + * Return: a negative error code or 0 on success. + */ +static inline int efx_filter_set_uc_def(struct efx_filter_spec *spec) +{ + spec->match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG; + return 0; +} + +/** + * efx_filter_set_mc_def - specify matching otherwise-unmatched multicast + * @spec: Specification to initialise + * + * Return: a negative error code or 0 on success. + */ +static inline int efx_filter_set_mc_def(struct efx_filter_spec *spec) +{ + spec->match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG; + spec->loc_mac[0] = 1; + return 0; +} + +/** + * efx_filter_set_stack_id - set stack id relating to filter + * @spec: Specification to initialise + * @stack_id: ID of the stack used to suppress stack's own traffic on loopback. 
+ */
+static inline void efx_filter_set_stack_id(struct efx_filter_spec *spec,
+					   unsigned int stack_id)
+{
+	spec->flags |= EFX_FILTER_FLAG_STACK_ID;
+	spec->stack_id = stack_id;
+}
+
+/**
+ * efx_filter_set_vport_id - override virtual port user_id relating to filter
+ * @spec: Specification to initialise
+ * @vport_id: user_id of the virtual port
+ */
+static inline void efx_filter_set_vport_id(struct efx_filter_spec *spec,
+					   unsigned int vport_id)
+{
+	spec->flags |= EFX_FILTER_FLAG_VPORT_ID;
+	spec->vport_id = vport_id;
+}
+
+/**
+ * efx_filter_set_ethertype - add or override ethertype relating to filter
+ * @spec: Specification to set
+ * @ether_type: Ethernet protocol ID to match
+ */
+static inline void efx_filter_set_ethertype(struct efx_filter_spec *spec,
+					    u16 ether_type)
+{
+	spec->match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
+	spec->ether_type = htons(ether_type);
+}
+
+/**
+ * efx_filter_set_ipproto - add or override ip protocol relating to filter
+ * @spec: Specification to set
+ * @ip_proto: IP protocol ID to match
+ */
+static inline void efx_filter_set_ipproto(struct efx_filter_spec *spec,
+					  u8 ip_proto)
+{
+	spec->match_flags |= EFX_FILTER_MATCH_IP_PROTO;
+	spec->ip_proto = ip_proto;
+}
+
+static inline void efx_filter_set_encap_type(struct efx_filter_spec *spec,
+					     enum efx_encap_type encap_type)
+{
+	spec->match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
+	spec->encap_type = encap_type;
+}
+
+static inline enum efx_encap_type efx_filter_get_encap_type(
+		const struct efx_filter_spec *spec)
+{
+	if (spec->match_flags & EFX_FILTER_MATCH_ENCAP_TYPE)
+		return spec->encap_type;
+	return EFX_ENCAP_TYPE_NONE;
+}
+
+static inline void efx_filter_set_encap_tni(struct efx_filter_spec *spec,
+					    u32 tni)
+{
+	spec->match_flags |= EFX_FILTER_MATCH_ENCAP_TNI;
+	spec->tni = tni;
+}
+
+/**
+ * efx_filter_set_encap_outer_loc_mac - specify outer Ethernet address for an
+ *	encapsulated filter
+ * @spec: Specification to initialise
+ * @addr: Outer local Ethernet MAC address to match
+ */
+static inline
+void efx_filter_set_encap_outer_loc_mac(struct efx_filter_spec *spec,
+					const u8 *addr)
+{
+	spec->match_flags |= EFX_FILTER_MATCH_OUTER_LOC_MAC;
+	ether_addr_copy(spec->outer_loc_mac, addr);
+}
+
+#endif /* EFX_FILTER_H */
diff --git a/src/drivers/net/ethernet/sfc/io.h b/src/drivers/net/ethernet/sfc/io.h
new file mode 100644
index 0000000..41497d4
--- /dev/null
+++ b/src/drivers/net/ethernet/sfc/io.h
@@ -0,0 +1,270 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2017 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_IO_H
+#define EFX_IO_H
+
+#include <linux/io.h>
+#include <linux/spinlock.h>
+
+/**************************************************************************
+ *
+ * NIC register I/O
+ *
+ **************************************************************************
+ *
+ * The EF10 architecture exposes very few registers to the host and
+ * most of them are only 32 bits wide. The only exceptions are the MC
+ * doorbell register pair, which has its own latching, and
+ * TX_DESC_UPD.
+ *
+ * The TX_DESC_UPD DMA descriptor pointer is 128-bits but is a special
+ * case in the BIU to avoid the need for locking in the host:
+ *
+ * - It is write-only.
+ * - The semantics of writing to this register are such that
+ *   replacing the low 96 bits with zero does not affect functionality.
+ * - If the host writes to the last dword address of the register
+ *   (i.e. the high 32 bits) the underlying register will always be
+ *   written. If the collector and the current write together do not
+ *   provide values for all 128 bits of the register, the low 96 bits
+ *   will be written as zero.
+ */
+
+#if BITS_PER_LONG == 64
+#define EFX_USE_QWORD_IO 1
+#endif
+
+/* A hardware issue requires that only 64-bit naturally aligned writes
+ * are seen by hardware. It's not strictly necessary to restrict this to
+ * the x86_64 arch, but it is done for safety since unusual write-combining
+ * behaviour can break PIO.
+ */
+#ifdef CONFIG_X86_64
+/* PIO is a win only if write-combining is possible */
+#ifdef ARCH_HAS_IOREMAP_WC
+#define EFX_USE_PIO 1
+#endif
+#endif
+
+static inline void __iomem *efx_mem(struct efx_nic *efx, unsigned int addr)
+{
+	return efx->membase + addr;
+}
+
+static inline u32 efx_reg(struct efx_nic *efx, unsigned int reg)
+{
+	return efx->reg_base + reg;
+}
+
+#ifdef EFX_USE_QWORD_IO
+static inline void _efx_writeq(struct efx_nic *efx, __le64 value,
+			       unsigned int reg)
+{
+	__raw_writeq((__force u64)value, efx_mem(efx, reg));
+}
+static inline __le64 _efx_readq(struct efx_nic *efx, unsigned int reg)
+{
+	return (__force __le64)__raw_readq(efx_mem(efx, reg));
+}
+#endif
+
+static inline void _efx_writed(struct efx_nic *efx, __le32 value,
+			       unsigned int reg)
+{
+	__raw_writel((__force u32)value, efx_mem(efx, reg));
+}
+static inline __le32 _efx_readd(struct efx_nic *efx, unsigned int reg)
+{
+	return (__force __le32)__raw_readl(efx_mem(efx, reg));
+}
+
+/* Write a normal 128-bit CSR, locking as appropriate. */
+static inline void efx_writeo(struct efx_nic *efx, const efx_oword_t *value,
+			      unsigned int reg)
+{
+	unsigned long flags __attribute__ ((unused));
+
+	netif_vdbg(efx, hw, efx->net_dev,
+		   "writing register %x with " EFX_OWORD_FMT "\n", reg,
+		   EFX_OWORD_VAL(*value));
+
+	spin_lock_irqsave(&efx->biu_lock, flags);
+#ifdef EFX_USE_QWORD_IO
+	_efx_writeq(efx, value->u64[0], reg + 0);
+	_efx_writeq(efx, value->u64[1], reg + 8);
+#else
+	_efx_writed(efx, value->u32[0], reg + 0);
+	_efx_writed(efx, value->u32[1], reg + 4);
+	_efx_writed(efx, value->u32[2], reg + 8);
+	_efx_writed(efx, value->u32[3], reg + 12);
+#endif
+#if defined(EFX_USE_KCOMPAT) && defined(EFX_HAVE_MMIOWB)
+	/* Kernels from 5.2 onwards no longer have mmiowb(), because it is now
+	 * implied by spin_unlock() on architectures that require it. Hence we
+	 * can simply ifdef it out for kernels where it doesn't exist. We do
+	 * this, rather than defining it to empty in kernel_compat.h, to make
+	 * the unifdef'd driver match upstream.
+ */ + mmiowb(); +#endif + spin_unlock_irqrestore(&efx->biu_lock, flags); +} + +static inline void efx_writeq(struct efx_nic *efx, const efx_qword_t *value, + unsigned int reg) +{ + unsigned long flags __attribute__ ((unused)); + + netif_vdbg(efx, hw, efx->net_dev, + "writing register %x with "EFX_QWORD_FMT"\n", + reg, EFX_QWORD_VAL(*value)); + + spin_lock_irqsave(&efx->biu_lock, flags); +#ifdef EFX_USE_QWORD_IO + _efx_writeq(efx, value->u64[0], reg + 0); +#else + _efx_writed(efx, value->u32[0], reg + 0); + _efx_writed(efx, value->u32[1], reg + 4); +#endif +#if defined(EFX_USE_KCOMPAT) && defined(EFX_HAVE_MMIOWB) + mmiowb(); +#endif + spin_unlock_irqrestore(&efx->biu_lock, flags); +} + +/* Write a 32-bit CSR or the last dword of a special 128-bit CSR */ +static inline void efx_writed(struct efx_nic *efx, const efx_dword_t *value, + unsigned int reg) +{ + netif_vdbg(efx, hw, efx->net_dev, + "writing register %x with "EFX_DWORD_FMT"\n", + reg, EFX_DWORD_VAL(*value)); + + /* No lock required */ + _efx_writed(efx, value->u32[0], reg); +} + +/* Read a 128-bit CSR, locking as appropriate. */ +static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value, + unsigned int reg) +{ + unsigned long flags __attribute__ ((unused)); + + spin_lock_irqsave(&efx->biu_lock, flags); + value->u32[0] = _efx_readd(efx, reg + 0); + value->u32[1] = _efx_readd(efx, reg + 4); + value->u32[2] = _efx_readd(efx, reg + 8); + value->u32[3] = _efx_readd(efx, reg + 12); + spin_unlock_irqrestore(&efx->biu_lock, flags); + + netif_vdbg(efx, hw, efx->net_dev, + "read from register %x, got " EFX_OWORD_FMT "\n", reg, + EFX_OWORD_VAL(*value)); +} + +/* Read a 32-bit CSR or SRAM */ +static inline void efx_readd(struct efx_nic *efx, efx_dword_t *value, + unsigned int reg) +{ + value->u32[0] = _efx_readd(efx, reg); + netif_vdbg(efx, hw, efx->net_dev, + "read from register %x, got "EFX_DWORD_FMT"\n", + reg, EFX_DWORD_VAL(*value)); +} + +/* Write a 128-bit CSR forming part of a table */ +static inline void +efx_writeo_table(struct efx_nic *efx, const efx_oword_t *value, + unsigned int reg, unsigned int index) +{ + efx_writeo(efx, value, reg + index * sizeof(efx_oword_t)); +} + +/* Read a 128-bit CSR forming part of a table */ +static inline void efx_reado_table(struct efx_nic *efx, efx_oword_t *value, + unsigned int reg, unsigned int index) +{ + efx_reado(efx, value, reg + index * sizeof(efx_oword_t)); +} + +/* default VI stride (step between per-VI registers) is 8K on EF10 and + * 64K on EF100 + */ +#define EFX_DEFAULT_VI_STRIDE 0x2000 +#define EF100_DEFAULT_VI_STRIDE 0x10000 + +/* Calculate offset to page-mapped register */ +static inline unsigned int efx_paged_reg(struct efx_nic *efx, unsigned int page, + unsigned int reg) +{ + return page * efx->vi_stride + reg; +} + +/* Write the whole of RX_DESC_UPD or TX_DESC_UPD */ +static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value, + unsigned int reg, unsigned int page) +{ + efx_writeo(efx, value, efx_paged_reg(efx, page, reg)); +} +#define efx_writeo_page(efx, value, reg, page) \ + _efx_writeo_page(efx, value, \ + reg + \ + BUILD_BUG_ON_ZERO((reg) != 0x830 && (reg) != 0xa10), \ + page) + +/* Write a page-mapped 32-bit CSR (EVQ_RPTR, EVQ_TMR (EF10 and EF100), the + * RX/TX_RING_DOORBELL (EF100), or the high bits of RX_DESC_UPD or + * TX_DESC_UPD (EF10). 
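+ * The efx_writed_page() wrapper below uses BUILD_BUG_ON_ZERO() so that a
+ * call with a register offset outside this known set fails to compile
+ * rather than silently writing to the wrong page-mapped location.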
+ */ +static inline void +_efx_writed_page(struct efx_nic *efx, const efx_dword_t *value, + unsigned int reg, unsigned int page) +{ + efx_writed(efx, value, efx_paged_reg(efx, page, reg)); +} +#define efx_writed_page(efx, value, reg, page) \ + _efx_writed_page(efx, value, \ + reg + \ + BUILD_BUG_ON_ZERO((reg) != 0x180 && \ + (reg) != 0x200 && \ + (reg) != 0x400 && \ + (reg) != 0x420 && \ + (reg) != 0x830 && \ + (reg) != 0x83c && \ + (reg) != 0xa18 && \ + (reg) != 0xa1c), \ + page) + +/* Write TIMER_COMMAND. This is a page-mapped 32-bit CSR, but a bug + * in the BIU means that writes to TIMER_COMMAND[0] invalidate the + * collector register. + */ +static inline void _efx_writed_page_locked(struct efx_nic *efx, + const efx_dword_t *value, + unsigned int reg, + unsigned int page) +{ + unsigned long flags __attribute__ ((unused)); + + if (page == 0) { + spin_lock_irqsave(&efx->biu_lock, flags); + efx_writed(efx, value, efx_paged_reg(efx, page, reg)); + spin_unlock_irqrestore(&efx->biu_lock, flags); + } else { + efx_writed(efx, value, efx_paged_reg(efx, page, reg)); + } +} +#define efx_writed_page_locked(efx, value, reg, page) \ + _efx_writed_page_locked(efx, value, \ + reg + BUILD_BUG_ON_ZERO((reg) != 0x420), \ + page) + +#endif /* EFX_IO_H */ diff --git a/src/drivers/net/ethernet/sfc/ioctl.c b/src/drivers/net/ethernet/sfc/ioctl.c new file mode 100644 index 0000000..14e6c12 --- /dev/null +++ b/src/drivers/net/ethernet/sfc/ioctl.c @@ -0,0 +1,346 @@ +/**************************************************************************** + * Driver for Solarflare network controllers + * (including support for SFE4001 10GBT NIC) + * + * Copyright 2005-2006: Fen Systems Ltd. + * Copyright 2005-2012: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Initially developed by Michael Brown + * Maintained by Solarflare Communications + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
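The efx_writed_page()/efx_writed_page_locked() wrappers at the end of io.h
above use BUILD_BUG_ON_ZERO() to whitelist register offsets at compile time:
passing an offset outside the known page-mapped set breaks the build instead
of silently writing through the wrong mapping. A minimal standalone
illustration of the idiom (the offsets and example_write() are arbitrary
stand-ins, not sfc definitions):

#include <linux/build_bug.h>	/* BUILD_BUG_ON_ZERO; older kernels: linux/bug.h */

/* Only offsets 0x180 and 0x200 are accepted; any other compile-time
 * constant makes BUILD_BUG_ON_ZERO() a build error rather than a
 * runtime surprise. example_write() stands in for the real helper.
 */
#define EXAMPLE_CHECKED_WRITE(val, reg, page)				\
	example_write((val),						\
		      (reg) +						\
		      BUILD_BUG_ON_ZERO((reg) != 0x180 &&		\
					(reg) != 0x200),		\
		      (page))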
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ +#include "net_driver.h" +#include "efx_ioctl.h" +#include "nic.h" +#include "efx_common.h" +#include "ioctl_common.h" +#include "mcdi.h" +#include "mcdi_port_common.h" +#include "mcdi_pcol.h" +#include "ethtool_common.h" + +#include +#include +#include +#include +#include + +static int efx_ioctl_do_mcdi_old(struct efx_nic *efx, union efx_ioctl_data *data) +{ + struct efx_mcdi_request *req = &data->mcdi_request; + efx_dword_t *inbuf; + size_t inbuf_len, outlen; + int rc; + + if (req->len > sizeof(req->payload)) { + netif_err(efx, drv, efx->net_dev, "inlen is too long"); + return -EINVAL; + } + + inbuf_len = ALIGN(req->len, 4); + inbuf = kmalloc(inbuf_len, GFP_KERNEL); + if (!inbuf) + return -ENOMEM; + /* Ensure zero-padding if req->len not a multiple of 4 */ + if (req->len % 4) + inbuf[req->len / 4].u32[0] = 0; + + memcpy(inbuf, req->payload, req->len); + + rc = efx_mcdi_rpc_quiet(efx, req->cmd, inbuf, inbuf_len, + (efx_dword_t *)req->payload, + sizeof(req->payload), &outlen); + efx_ioctl_mcdi_complete_reset(efx, req->cmd, rc); + + req->rc = -rc; + req->len = (__u8)outlen; + + kfree(inbuf); + return 0; +} + +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_ETHTOOL_RESET) + +static int +efx_ioctl_reset_flags(struct efx_nic *efx, union efx_ioctl_data *data) +{ + return efx_ethtool_reset(efx->net_dev, &data->reset_flags.flags); +} + +#endif + +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_ETHTOOL_RXFH_INDIR) + +static int +efx_ioctl_rxfh_indir(struct efx_nic *efx, union efx_ioctl_data *data) +{ + BUILD_BUG_ON(ARRAY_SIZE(data->rxfh_indir.table) != + ARRAY_SIZE(efx->rss_context.rx_indir_table)); + + switch (data->rxfh_indir.head.cmd) { + case ETHTOOL_GRXFHINDIR: + return efx_ethtool_old_get_rxfh_indir(efx->net_dev, + &data->rxfh_indir.head); + case ETHTOOL_SRXFHINDIR: + return efx_ethtool_old_set_rxfh_indir(efx->net_dev, + &data->rxfh_indir.head); + default: + return -EOPNOTSUPP; + } +} + +#endif + +static int +efx_ioctl_get_mod_eeprom(struct efx_nic *efx, + union efx_ioctl_data __user *useraddr) +{ + struct ethtool_eeprom eeprom; + struct ethtool_modinfo modinfo; + void __user *userbuf = + ((void __user *)&useraddr->eeprom.ee) + sizeof(eeprom); + void __user *userbufptr = userbuf; + u32 bytes_remaining; + u32 total_len; + u8 *data; + int ret = 0; + + if (efx_mcdi_phy_get_module_info(efx, &modinfo)) + return -EINVAL; + + total_len = modinfo.eeprom_len; + + if (copy_from_user(&eeprom, &useraddr->eeprom.ee, sizeof(eeprom))) + return -EFAULT; + + /* Check for wrap and zero */ + if (eeprom.offset + eeprom.len <= eeprom.offset) + return -EINVAL; + + /* Check for exceeding total eeprom len */ + if (eeprom.offset + eeprom.len > total_len) + return -EINVAL; + + data = kmalloc(PAGE_SIZE, GFP_USER); + if (!data) + return -ENOMEM; + + bytes_remaining = eeprom.len; + while (bytes_remaining > 0) { + eeprom.len = min(bytes_remaining, (u32)PAGE_SIZE); + + ret = efx_mcdi_phy_get_module_eeprom(efx, &eeprom, data); + if (ret) + break; + if (copy_to_user(userbuf, data, eeprom.len)) { + ret = -EFAULT; + break; + } + userbuf += eeprom.len; + eeprom.offset += eeprom.len; + bytes_remaining -= eeprom.len; + } + + eeprom.len = userbuf - userbufptr; + eeprom.offset -= eeprom.len; + if (copy_to_user(&useraddr->eeprom.ee, 
&eeprom, sizeof(eeprom))) + ret = -EFAULT; + + kfree(data); + return ret; +} + +static int +efx_ioctl_get_mod_info(struct efx_nic *efx, union efx_ioctl_data *data) +{ + return efx_mcdi_phy_get_module_info(efx, &data->modinfo.info); +} + +#ifdef EFX_NOT_UPSTREAM +static int +efx_ioctl_update_license(struct efx_nic *efx, union efx_ioctl_data *data) +{ + struct efx_update_license2 *stats = &data->key_stats2; + int rc; + + if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) { + rc = efx_ef10_update_keys(efx, stats); + /* return directly since SFA7942Q(Sorrento) only + * uses EF10 based licensing + */ + return rc; + } + + memset(stats, 0, sizeof(*stats)); + return 0; +} +#endif + +#ifdef EFX_NOT_UPSTREAM +static int +efx_ioctl_licensed_app_state(struct efx_nic *efx, union efx_ioctl_data *data) +{ + int rc; + + if (efx_nic_rev(efx) < EFX_REV_HUNT_A0) + return -EOPNOTSUPP; + rc = efx_ef10_licensed_app_state(efx, &data->app_state); + return rc; +} +#endif + +#ifdef CONFIG_SFC_DUMP +static int +efx_ioctl_dump(struct efx_nic *efx, union efx_ioctl_data __user *useraddr) +{ + struct ethtool_dump dump; + void __user *userbuf = + ((void __user *)&useraddr->dump) + sizeof(dump); + void *buffer; + int ret; + + if (copy_from_user(&dump, useraddr, sizeof(dump))) + return -EFAULT; + + switch (dump.cmd) { + case ETHTOOL_SET_DUMP: + ret = efx_ethtool_set_dump(efx->net_dev, &dump); + if (ret < 0) + return ret; + break; + case ETHTOOL_GET_DUMP_FLAG: + ret = efx_ethtool_get_dump_flag(efx->net_dev, &dump); + if (ret < 0) + return ret; + break; + case ETHTOOL_GET_DUMP_DATA: + ret = efx_ethtool_get_dump_flag(efx->net_dev, &dump); + if (ret < 0) + return ret; + + if (dump.len == 0) + return -EFAULT; + buffer = vzalloc(dump.len); + if (!buffer) + return -ENOMEM; + + ret = efx_ethtool_get_dump_data(efx->net_dev, &dump, + buffer); + if (ret == 0) { + if (copy_to_user(userbuf, buffer, dump.len)) + ret = -EFAULT; + } + vfree(buffer); + if (ret < 0) + return ret; + break; + default: + return -EOPNOTSUPP; + } + + if (copy_to_user(useraddr, &dump, sizeof(dump))) + return -EFAULT; + + return 0; +} +#endif + +/*****************************************************************************/ + +int efx_private_ioctl(struct efx_nic *efx, u16 cmd, + union efx_ioctl_data __user *user_data) +{ + int (*op)(struct efx_nic *, union efx_ioctl_data *); + union efx_ioctl_data *data = NULL; + size_t size; + int rc; + + rc = efx_private_ioctl_common(efx, cmd, user_data); + if (rc != -EOPNOTSUPP) + return rc; + + switch (cmd) { + case EFX_MCDI_REQUEST: + size = sizeof(data->mcdi_request); + op = efx_ioctl_do_mcdi_old; + break; +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_ETHTOOL_RESET) + case EFX_RESET_FLAGS: + size = sizeof(data->reset_flags); + op = efx_ioctl_reset_flags; + break; +#endif +#ifdef EFX_USE_KCOMPAT + case EFX_RXNFC: + /* This command has variable length */ + return efx_ioctl_rxnfc(efx, &user_data->rxnfc); +#endif +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_ETHTOOL_RXFH_INDIR) + case EFX_RXFHINDIR: + size = sizeof(data->rxfh_indir); + op = efx_ioctl_rxfh_indir; + break; +#endif + case EFX_MODULEEEPROM: + return efx_ioctl_get_mod_eeprom(efx, user_data); + + case EFX_GMODULEINFO: + size = sizeof(data->modinfo); + op = efx_ioctl_get_mod_info; + break; +#ifdef EFX_NOT_UPSTREAM + case EFX_LICENSE_UPDATE2: + size = sizeof(data->key_stats2); + op = efx_ioctl_update_license; + break; +#endif +#ifdef EFX_NOT_UPSTREAM + case EFX_LICENSED_APP_STATE: + size = sizeof(data->app_state); + op = efx_ioctl_licensed_app_state; + break; +#endif 
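Each fixed-size command in this switch only has to supply its payload size
and handler; the shared tail of efx_private_ioctl() (after the remaining
cases below) then performs the allocation, copy_from_user() and
copy_to_user() exactly once. Adding a new fixed-size command is therefore a
short case entry, sketched here with a hypothetical EFX_EXAMPLE_CMD,
data->example member and efx_ioctl_example() handler, none of which exist in
this patch:

	case EFX_EXAMPLE_CMD:		/* hypothetical command */
		size = sizeof(data->example);
		op = efx_ioctl_example;	/* int (*)(struct efx_nic *,
						 * union efx_ioctl_data *) */
		break;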
+#ifdef CONFIG_SFC_DUMP + case EFX_DUMP: + return efx_ioctl_dump(efx, user_data); +#endif + default: + netif_err(efx, drv, efx->net_dev, + "unknown private ioctl cmd %x\n", cmd); + return -EOPNOTSUPP; + } + + data = kmalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + if (copy_from_user(data, user_data, size)) { + kfree(data); + return -EFAULT; + } + + rc = op(efx, data); + if (!rc) { + if (copy_to_user(user_data, data, size)) + rc = -EFAULT; + } + + kfree(data); + return rc; +} + diff --git a/src/drivers/net/ethernet/sfc/ioctl_common.c b/src/drivers/net/ethernet/sfc/ioctl_common.c new file mode 100644 index 0000000..90e6804 --- /dev/null +++ b/src/drivers/net/ethernet/sfc/ioctl_common.c @@ -0,0 +1,425 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2019 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#include "net_driver.h" +#include "efx.h" +#include "efx_ioctl.h" +#include "nic.h" +#include "efx_common.h" +#include "mcdi.h" +#include "mcdi_pcol.h" +#include "sfctool.h" +#include "ioctl_common.h" + +#include +#include +#include +#include +#include + +void efx_ioctl_mcdi_complete_reset(struct efx_nic *efx, unsigned int cmd, + int rc) +{ + /* efx_mcdi_rpc() will not schedule a reset if MC_CMD_REBOOT causes + * a reboot. But from the user's POV, they're triggering a reboot + * 'externally', and want both ports to recover. So schedule the + * reset here. + */ + if (cmd == MC_CMD_REBOOT && rc == -EIO) { + netif_warn(efx, drv, efx->net_dev, "Expected MC rebooted\n"); + efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE); + } +} + +static int efx_ioctl_do_mcdi(struct efx_nic *efx, + struct efx_mcdi_request2 __user *user_req) +{ + struct efx_mcdi_request2 *req; + size_t inbuf_len, req_outlen, outlen_actual; + efx_dword_t *inbuf = NULL; + efx_dword_t *outbuf = NULL; + int rc; + + req = kmalloc(sizeof(*req), GFP_KERNEL); + if (!req) + return -ENOMEM; + + if (copy_from_user(req, user_req, sizeof(*req))) { + rc = -EFAULT; + goto out_free; + } + + /* No input flags are defined yet */ + if (req->flags != 0) { + rc = -EINVAL; + goto out_free; + } + + /* efx_mcdi_rpc() will check the length anyway, but this avoids + * trying to allocate an extreme amount of memory. + */ + if (req->inlen > MCDI_CTL_SDU_LEN_MAX_V2 || + req->outlen > MCDI_CTL_SDU_LEN_MAX_V2) { + rc = -EINVAL; + goto out_free; + } + + inbuf_len = ALIGN(req->inlen, 4); + inbuf = kmalloc(inbuf_len, GFP_USER); + if (!inbuf) { + rc = -ENOMEM; + goto out_free; + } + /* Ensure zero-padding if req.inlen not a multiple of 4 */ + if (req->inlen % 4) + inbuf[req->inlen / 4].u32[0] = 0; + + outbuf = kmalloc(ALIGN(req->outlen, 4), GFP_USER); + if (!outbuf) { + rc = -ENOMEM; + goto out_free; + } + + if (copy_from_user(inbuf, &user_req->payload, req->inlen)) { + rc = -EFAULT; + goto out_free; + } + + /* We use inbuf_len as an inlen not divisible by 4 annoys mcdi-logging. + * It doesn't care about outlen however. 
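efx_ioctl_do_mcdi() above is essentially a user-space proxy around
efx_mcdi_rpc_quiet(). For comparison, a direct in-kernel caller of the same
entry point looks like the sketch below; MCDI_DECLARE_BUF, MCDI_DWORD and
the MC_CMD_GET_VERSION constants are assumed from mcdi.h and mcdi_pcol.h,
which this hunk does not show.

/* Hedged sketch: query firmware version over MCDI (assumed helpers). */
static int example_mcdi_get_version(struct efx_nic *efx, u32 *version)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_VERSION_OUT_LEN);
	size_t outlen;
	int rc;

	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_VERSION, NULL, 0,
				outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_GET_VERSION_OUT_LEN)
		return -EIO;
	*version = MCDI_DWORD(outbuf, GET_VERSION_OUT_FIRMWARE);
	return 0;
}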
+ */
+	rc = efx_mcdi_rpc_quiet(efx, req->cmd, inbuf, inbuf_len,
+				outbuf, req->outlen, &outlen_actual);
+	efx_ioctl_mcdi_complete_reset(efx, req->cmd, rc);
+
+	if (rc) {
+		if (outlen_actual) {
+			/* Error was reported by the MC */
+			req->flags |= EFX_MCDI_REQUEST_ERROR;
+			req->host_errno = -rc;
+			rc = 0;
+		} else {
+			/* Communication failure */
+			goto out_free;
+		}
+	}
+	req_outlen = req->outlen;
+	req->outlen = outlen_actual;
+
+	if (copy_to_user(user_req, req, sizeof(*req)) ||
+	    copy_to_user(&user_req->payload, outbuf,
+			 min(outlen_actual, req_outlen)))
+		rc = -EFAULT;
+
+out_free:
+	kfree(outbuf);
+	kfree(inbuf);
+	kfree(req);
+	return rc;
+}
+
+#ifdef CONFIG_SFC_PTP
+#ifdef EFX_NOT_UPSTREAM
+static int efx_ioctl_get_ts_config(struct efx_nic *efx,
+				   union efx_ioctl_data __user *user_data)
+{
+	struct ifreq ifr;
+
+	/* ifr_data is declared as __user */
+	ifr.ifr_data = &user_data->ts_init;
+	return efx_ptp_get_ts_config(efx, &ifr);
+}
+
+static int efx_ioctl_ts_settime(struct efx_nic *efx, union efx_ioctl_data *data)
+{
+	return efx_ptp_ts_settime(efx, &data->ts_settime);
+}
+
+static int efx_ioctl_ts_adjtime(struct efx_nic *efx, union efx_ioctl_data *data)
+{
+	return efx_ptp_ts_adjtime(efx, &data->ts_adjtime);
+}
+
+static int efx_ioctl_ts_sync(struct efx_nic *efx, union efx_ioctl_data *data)
+{
+	return efx_ptp_ts_sync(efx, &data->ts_sync);
+}
+
+static int efx_ioctl_ts_set_sync_status(struct efx_nic *efx,
+					union efx_ioctl_data *data)
+{
+	return efx_ptp_ts_set_sync_status(efx, &data->ts_set_sync_status);
+}
+
+#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_ETHTOOL_GET_TS_INFO) && !defined(EFX_HAVE_ETHTOOL_EXT_GET_TS_INFO)
+static int efx_ioctl_get_ts_info(struct efx_nic *efx,
+				 union efx_ioctl_data *data)
+{
+	memset(&data->ts_info, 0, sizeof(data->ts_info));
+	data->ts_info.cmd = ETHTOOL_GET_TS_INFO;
+	return efx_ethtool_get_ts_info(efx->net_dev, &data->ts_info);
+}
+#endif
+#endif
+#endif
+
+static int efx_ioctl_get_pps_event(struct efx_nic *efx,
+				   union efx_ioctl_data *data)
+{
+	return efx_ptp_pps_get_event(efx, &data->pps_event);
+}
+
+static int
+efx_ioctl_hw_pps_enable(struct efx_nic *efx, union efx_ioctl_data *data)
+{
+	return efx_ptp_hw_pps_enable(efx, data->pps_enable.enable != 0);
+}
+
+static int efx_ioctl_get_device_ids(struct efx_nic *efx,
+				    union efx_ioctl_data *data)
+{
+	struct efx_device_ids *ids = &data->device_ids;
+
+	ids->vendor_id = efx->pci_dev->vendor;
+	ids->device_id = efx->pci_dev->device;
+	ids->subsys_vendor_id = efx->pci_dev->subsystem_vendor;
+	ids->subsys_device_id = efx->pci_dev->subsystem_device;
+	ids->phy_type = efx->phy_type;
+	ids->port_num = efx_port_num(efx);
+	/* ids->perm_addr isn't __aligned(2), so we can't use ether_addr_copy
+	 * (and we can't change it because it's an ioctl argument)
+	 */
+	memcpy(ids->perm_addr, efx->net_dev->perm_addr, ETH_ALEN);
+
+	return 0;
+}
+
+#ifdef EFX_USE_KCOMPAT
+
+#ifdef CONFIG_COMPAT
+/* struct ethtool_rxnfc has extra padding on 64-bit architectures.
+ * And we have to follow this stupidity in order to use the same
+ * underlying implementation for both SIOCEFX and SIOCETHTOOL
+ * operations.
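The extra padding comes from u64 alignment rules: a native u64 member is
8-byte aligned on 64-bit kernels, while compat_u64 carries the 32-bit ABI's
4-byte alignment, which shifts ring_cookie and every field after it. Hence
the piecewise copies in efx_ioctl_rxnfc() below instead of a single
copy_from_user(). A self-contained illustration (assuming an x86-64 kernel
servicing a 32-bit task):

#include <linux/compat.h>
#include <linux/types.h>

/* offsetof(b) is 8 with a native u64 (padding inserted after a), but 4
 * with compat_u64, so the two layouts disagree from 'b' onwards.
 */
struct example_native_layout {
	u32 a;
	u64 b;		/* 8-byte aligned on x86-64 */
};

struct example_compat_layout {
	u32 a;
	compat_u64 b;	/* 4-byte aligned, matching 32-bit userland */
};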
+ */ +struct efx_compat_ethtool_rx_flow_spec { + u32 flow_type; + union efx_ethtool_flow_union h_u; + struct efx_ethtool_flow_ext h_ext; + union efx_ethtool_flow_union m_u; + struct efx_ethtool_flow_ext m_ext; + compat_u64 ring_cookie; + u32 location; +}; +struct efx_compat_ethtool_rxnfc { + u32 cmd; + u32 flow_type; + compat_u64 data; + struct efx_compat_ethtool_rx_flow_spec fs; + u32 rule_cnt; + u32 rule_locs[0]; +}; +#endif + +int efx_ioctl_rxnfc(struct efx_nic *efx, void __user *useraddr) +{ +#ifdef CONFIG_COMPAT + struct efx_compat_ethtool_rxnfc __user *compat_rxnfc = useraddr; +#endif + struct efx_ethtool_rxnfc info; + int ret; + void *rule_buf = NULL; + +#ifdef CONFIG_COMPAT + if (is_compat_task()) { + if (copy_from_user(&info, compat_rxnfc, + (void *)(&info.fs.m_ext + 1) - + (void *)&info) || + copy_from_user(&info.fs.ring_cookie, + &compat_rxnfc->fs.ring_cookie, + (void *)(&info.fs.location + 1) - + (void *)&info.fs.ring_cookie) || + copy_from_user(&info.rule_cnt, &compat_rxnfc->rule_cnt, + sizeof(info.rule_cnt))) + return -EFAULT; + } else +#endif + if (copy_from_user(&info, useraddr, sizeof(info))) + return -EFAULT; + + switch (info.cmd) { + case ETHTOOL_GRXCLSRLALL: + if (info.rule_cnt > 0) { + /* No more than 1 MB of rule indices - way + * more than we could possibly have! */ + if (info.rule_cnt <= (1 << 18)) + rule_buf = kcalloc(info.rule_cnt, sizeof(u32), + GFP_USER); + if (!rule_buf) + return -ENOMEM; + } + fallthrough; + case ETHTOOL_GRXFH: + case ETHTOOL_GRXRINGS: + case ETHTOOL_GRXCLSRLCNT: + case ETHTOOL_GRXCLSRULE: + ret = efx_ethtool_get_rxnfc(efx->net_dev, &info, rule_buf); + break; + case ETHTOOL_SRXCLSRLINS: + case ETHTOOL_SRXCLSRLDEL: + ret = efx_ethtool_set_rxnfc(efx->net_dev, &info); + break; + default: + return -EOPNOTSUPP; + } + + if (ret < 0) + goto err_out; + + ret = -EFAULT; +#ifdef CONFIG_COMPAT + if (is_compat_task()) { + if (copy_to_user(compat_rxnfc, &info, + (const void *)(&info.fs.m_ext + 1) - + (const void *)&info) || + copy_to_user(&compat_rxnfc->fs.ring_cookie, + &info.fs.ring_cookie, + (const void *)(&info.fs.location + 1) - + (const void *)&info.fs.ring_cookie) || + copy_to_user(&compat_rxnfc->rule_cnt, &info.rule_cnt, + sizeof(info.rule_cnt))) + goto err_out; + } else +#endif + if (copy_to_user(useraddr, &info, sizeof(info))) + goto err_out; + + if (rule_buf) { +#ifdef CONFIG_COMPAT + if (is_compat_task()) + useraddr += offsetof(struct efx_compat_ethtool_rxnfc, + rule_locs); + else +#endif + useraddr += offsetof(struct efx_ethtool_rxnfc, + rule_locs); + if (copy_to_user(useraddr, rule_buf, + info.rule_cnt * sizeof(u32))) + goto err_out; + } + ret = 0; + +err_out: + kfree(rule_buf); + + return ret; +} +#endif + +#ifdef EFX_NOT_UPSTREAM +static int efx_ioctl_sfctool(struct efx_nic *efx, + union efx_ioctl_data __user *useraddr) +{ + struct efx_sfctool sfctool; + u32 ethcmd; + + if (copy_from_user(&sfctool, useraddr, sizeof(sfctool))) + return -EFAULT; + + if (copy_from_user(ðcmd, sfctool.data, sizeof(ethcmd))) + return -EFAULT; + + return efx_sfctool(efx, ethcmd, sfctool.data); +} +#endif + +/*****************************************************************************/ + +int efx_private_ioctl_common(struct efx_nic *efx, u16 cmd, + union efx_ioctl_data __user *user_data) +{ + int (*op)(struct efx_nic *, union efx_ioctl_data *); + union efx_ioctl_data *data = NULL; + size_t size; + int rc; + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + switch (cmd) { + case EFX_MCDI_REQUEST2: + /* This command has variable length */ + return 
efx_ioctl_do_mcdi(efx, &user_data->mcdi_request2); +#ifdef CONFIG_SFC_PTP + case EFX_TS_INIT: + return -EOPNOTSUPP; +#if defined(EFX_NOT_UPSTREAM) + case EFX_GET_TS_CONFIG: + return efx_ioctl_get_ts_config(efx, user_data); + + case EFX_TS_SETTIME: + size = sizeof(data->ts_settime); + op = efx_ioctl_ts_settime; + break; + case EFX_TS_ADJTIME: + size = sizeof(data->ts_adjtime); + op = efx_ioctl_ts_adjtime; + break; + case EFX_TS_SYNC: + size = sizeof(data->ts_sync); + op = efx_ioctl_ts_sync; + break; + case EFX_TS_SET_SYNC_STATUS: + size = sizeof(data->ts_set_sync_status); + op = efx_ioctl_ts_set_sync_status; + break; +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_ETHTOOL_GET_TS_INFO) && !defined(EFX_HAVE_ETHTOOL_EXT_GET_TS_INFO) + case EFX_GET_TS_INFO: + size = sizeof(data->ts_info); + op = efx_ioctl_get_ts_info; + break; +#endif +#endif +#endif + case EFX_SFCTOOL: + return efx_ioctl_sfctool(efx, user_data); + case EFX_TS_GET_PPS: + size = sizeof(data->pps_event); + op = efx_ioctl_get_pps_event; + break; + case EFX_TS_ENABLE_HW_PPS: + size = sizeof(data->pps_enable); + op = efx_ioctl_hw_pps_enable; + break; + case EFX_GET_DEVICE_IDS: + size = sizeof(data->device_ids); + op = efx_ioctl_get_device_ids; + break; + default: + return -EOPNOTSUPP; + } + + data = kmalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + if (copy_from_user(data, user_data, size)) { + kfree(data); + return -EFAULT; + } + + rc = op(efx, data); + if (!rc) { + if (copy_to_user(user_data, data, size)) + rc = -EFAULT; + } + + kfree(data); + return rc; +} diff --git a/src/drivers/net/ethernet/sfc/ioctl_common.h b/src/drivers/net/ethernet/sfc/ioctl_common.h new file mode 100644 index 0000000..2ecced3 --- /dev/null +++ b/src/drivers/net/ethernet/sfc/ioctl_common.h @@ -0,0 +1,18 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2019 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +void efx_ioctl_mcdi_complete_reset(struct efx_nic *efx, unsigned int cmd, + int rc); +#ifndef EFX_FOR_UPSTREAM +/* The API below is used by sfctool */ +int efx_ioctl_rxnfc(struct efx_nic *efx, void __user *useraddr); +#endif + +int efx_private_ioctl_common(struct efx_nic *efx, u16 cmd, + union efx_ioctl_data __user *user_data); diff --git a/src/drivers/net/ethernet/sfc/kabi_compat.h b/src/drivers/net/ethernet/sfc/kabi_compat.h new file mode 100644 index 0000000..660e3f8 --- /dev/null +++ b/src/drivers/net/ethernet/sfc/kabi_compat.h @@ -0,0 +1,15 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2019 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
+ */ + +#undef EFX_HAVE_ROUND_JIFFIES_UP +#undef EFX_NEED_NS_TO_TIMESPEC +#define EFX_NEED_NS_TO_TIMESPEC +#undef EFX_HAVE_XEN_START_INFO +#undef EFX_NEED_SET_NORMALIZED_TIMESPEC +#define EFX_NEED_SET_NORMALIZED_TIMESPEC diff --git a/src/drivers/net/ethernet/sfc/kernel_compat.c b/src/drivers/net/ethernet/sfc/kernel_compat.c new file mode 100644 index 0000000..15a159f --- /dev/null +++ b/src/drivers/net/ethernet/sfc/kernel_compat.c @@ -0,0 +1,758 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2006-2017 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#define EFX_IN_KCOMPAT_C 1 + +#include "efx.h" +#include "kernel_compat.h" +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef EFX_HAVE_STRUCT_SIZE +#include +#endif +#include +#include +#ifdef EFX_HAVE_LINUX_EXPORT_H +#include +#endif +#if defined(EFX_NEED_HWMON_DEVICE_REGISTER_WITH_INFO) +#include +#endif +#ifdef EFX_TC_OFFLOAD +#ifndef EFX_HAVE_TC_FLOW_OFFLOAD +#include +#include +#include +#include +#include +#include +#include +#endif +#endif + +/* + * Kernel backwards compatibility + * + * This file provides functionality missing from earlier kernels. + */ + +#ifdef EFX_NEED_USLEEP_RANGE + +void usleep_range(unsigned long min, unsigned long max) +{ + unsigned long delay = DIV_ROUND_UP(min, 1000) ? : 1; + msleep(delay); +} + +#endif + +#if defined(EFX_NEED_SAVE_MSIX_MESSAGES) + +#undef pci_save_state +#undef pci_restore_state + +#include + +#define PCI_MSIX_TABLE 4 +#define PCI_MSIX_PBA 8 +#define PCI_MSIX_BIR 0x7 + +#ifndef PCI_MSIX_ENTRY_SIZE +#define PCI_MSIX_ENTRY_SIZE 16 +#define PCI_MSIX_ENTRY_LOWER_ADDR 0 +#define PCI_MSIX_ENTRY_UPPER_ADDR 4 +#define PCI_MSIX_ENTRY_DATA 8 +#define PCI_MSIX_ENTRY_VECTOR_CTRL 12 +#endif + +static void +efx_for_each_msix_vector(struct efx_nic *efx, + void (*fn)(struct efx_channel *, void __iomem *)) +{ + struct pci_dev *pci_dev = efx->pci_dev; + resource_size_t membase_phys; + void __iomem *membase; + int msix, offset, bar, length, i; + u32 dword; + + if (efx->interrupt_mode != EFX_INT_MODE_MSIX) + return; + + /* Find the location (bar, offset) of the MSI-X table */ + msix = pci_find_capability(pci_dev, PCI_CAP_ID_MSIX); + if (!msix) + return; + pci_read_config_dword(pci_dev, msix + PCI_MSIX_TABLE, &dword); + bar = dword & PCI_MSIX_BIR; + offset = dword & ~PCI_MSIX_BIR; + + /* Map enough of the table for all our interrupts */ + membase_phys = pci_resource_start(pci_dev, bar); + length = efx_channels(efx) * 0x10; +#if defined(EFX_USE_KCOMPAT) + membase = efx_ioremap(membase_phys + offset, length); +#else + membase = ioremap(membase_phys + offset, length); +#endif + if (!membase) { + dev_dbg(&pci_dev->dev, "failed to remap MSI-X table\n"); + return; + } + + for (i = 0; i < efx_channels(efx); i++) + fn(efx_get_channel(efx, i), membase + i * PCI_MSIX_ENTRY_SIZE); + + /* Release the mapping */ + iounmap(membase); +} + +static void +efx_save_msix_state(struct efx_channel *channel, void __iomem *entry) +{ +#ifdef EFX_NEED_SAVE_MSIX_MESSAGES + channel->msix_msg.address_lo = readl(entry + PCI_MSIX_ENTRY_LOWER_ADDR); + channel->msix_msg.address_hi = readl(entry + PCI_MSIX_ENTRY_UPPER_ADDR); + channel->msix_msg.data = 
readl(entry + PCI_MSIX_ENTRY_DATA); +#endif +} + +int efx_pci_save_state(struct pci_dev *pci_dev) +{ + efx_for_each_msix_vector(pci_get_drvdata(pci_dev), efx_save_msix_state); + return pci_save_state(pci_dev); +} + +static void +efx_restore_msix_state(struct efx_channel *channel, void __iomem *entry) +{ +#ifdef EFX_NEED_SAVE_MSIX_MESSAGES + writel(channel->msix_msg.address_lo, entry + PCI_MSIX_ENTRY_LOWER_ADDR); + writel(channel->msix_msg.address_hi, entry + PCI_MSIX_ENTRY_UPPER_ADDR); + writel(channel->msix_msg.data, entry + PCI_MSIX_ENTRY_DATA); +#endif +} + +void efx_pci_restore_state(struct pci_dev *pci_dev) +{ + pci_restore_state(pci_dev); + efx_for_each_msix_vector(pci_get_drvdata(pci_dev), + efx_restore_msix_state); +} + +#endif /* EFX_NEED_SAVE_MSIX_MESSAGES */ + +#ifdef EFX_NEED_NS_TO_TIMESPEC +#ifndef EFX_HAVE_TIMESPEC64 + +#include + +struct timespec ns_to_timespec(const s64 nsec) +{ + struct timespec ts; + s32 rem; + + if (!nsec) + return (struct timespec) {0, 0}; + + ts.tv_sec = div_s64_rem(nsec, NSEC_PER_SEC, &rem); + if (unlikely(rem < 0)) { + ts.tv_sec--; + rem += NSEC_PER_SEC; + } + ts.tv_nsec = rem; + + return ts; +} + +#endif /* EFX_HAVE_TIMESPEC64 */ +#endif /* EFX_NEED_NS_TO_TIMESPEC */ + +#ifdef EFX_HAVE_PARAM_BOOL_INT + +int efx_param_set_bool(const char *val, struct kernel_param *kp) +{ + bool v; + + if (!val) { + /* No equals means "set"... */ + v = true; + } else { + /* One of =[yYnN01] */ + switch (val[0]) { + case 'y': + case 'Y': + case '1': + v = true; + break; + case 'n': + case 'N': + case '0': + v = false; + break; + default: + return -EINVAL; + } + } + + *(bool *)kp->arg = v; + return 0; +} + +int efx_param_get_bool(char *buffer, struct kernel_param *kp) +{ + /* Y and N chosen as being relatively non-coder friendly */ + return sprintf(buffer, "%c", *(bool *)kp->arg ? 'Y' : 'N'); +} + +#endif /* EFX_HAVE_PARAM_BOOL_INT */ + +#ifndef EFX_HAVE_PCI_VFS_ASSIGNED +int pci_vfs_assigned(struct pci_dev *dev) +{ +#if defined(CONFIG_PCI_ATS) && defined(EFX_HAVE_PCI_DEV_FLAGS_ASSIGNED) + struct pci_dev *vfdev; + int pos; + unsigned int vfs_assigned = 0; + unsigned short dev_id; + + /* only search if we are a PF */ + if (!dev->is_physfn) + return 0; + + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV); + if (!pos) + return 0; + + /* + * determine the device ID for the VFs, the vendor ID will be the + * same as the PF so there is no need to check for that one + */ + pci_read_config_word(dev, pos + PCI_SRIOV_VF_DID, &dev_id); + + /* loop through all the VFs to see if we own any that are assigned */ + vfdev = pci_get_device(dev->vendor, dev_id, NULL); + while (vfdev) { + /* + * It is considered assigned if it is a virtual function with + * our dev as the physical function and the assigned bit is set + */ + if (vfdev->is_virtfn && (vfdev->physfn == dev) && + (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)) + vfs_assigned++; + + vfdev = pci_get_device(dev->vendor, dev_id, vfdev); + } + + return vfs_assigned; +#else + return 0; +#endif +} +#endif +#ifdef EFX_HAVE_MSIX_CAP +#ifdef EFX_NEED_PCI_MSIX_VEC_COUNT +#ifndef msix_table_size +#define msix_table_size(flags) ((flags & PCI_MSIX_FLAGS_QSIZE) + 1) +#endif + +/** + * pci_msix_vec_count - return the number of device's MSI-X table entries + * @dev: pointer to the pci_dev data structure of MSI-X device function + * This function returns the number of device's MSI-X table entries and + * therefore the number of MSI-X vectors device is capable of sending. 
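A worked check of the negative-input normalisation in the ns_to_timespec()
fallback above: for nsec = -1, div_s64_rem() yields tv_sec = 0 and rem = -1;
the rem < 0 branch then rebalances to tv_sec = -1, tv_nsec = 999999999, i.e.
one nanosecond before the epoch with the nanosecond field kept non-negative.
A hypothetical self-test (not part of the patch) makes the invariant
explicit:

/* Sketch only: tv_nsec must always land in [0, NSEC_PER_SEC). */
static void example_check_ns_to_timespec(void)
{
	struct timespec ts = ns_to_timespec(-1);

	WARN_ON(ts.tv_sec != -1 || ts.tv_nsec != NSEC_PER_SEC - 1);
}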
+ * It returns a negative errno if the device is not capable of sending MSI-X + * interrupts. + **/ +int pci_msix_vec_count(struct pci_dev *dev) +{ + u16 control; + + if (!dev->msix_cap) + return -EINVAL; + + pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control); + return msix_table_size(control); +} +#endif +#endif + +#if defined(EFX_NEED_CPUMASK_LOCAL_SPREAD) && (NR_CPUS != 1) +unsigned int cpumask_local_spread(unsigned int i, int node) +{ + int cpu; + + /* Wrap: we always want a cpu. */ + i %= num_online_cpus(); + + if (node == -1) { + for_each_cpu(cpu, cpu_online_mask) + if (i-- == 0) + return cpu; + } else { + /* NUMA first. */ + for_each_cpu(cpu, cpu_online_mask) { + if (!cpumask_test_cpu(cpu, cpumask_of_node(node))) + continue; + if (i-- == 0) + return cpu; + } + + for_each_cpu(cpu, cpu_online_mask) { + /* Skip NUMA nodes, done above. */ + if (cpumask_test_cpu(cpu, cpumask_of_node(node))) + continue; + + if (i-- == 0) + return cpu; + } + } + BUG(); + /* silence compiler warning */ + return -EIO; +} +#endif + +#ifdef EFX_NEED_D_HASH_AND_LOOKUP +struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name) +{ + /* The real in-kernel function checks for an FS specific hash. + * We only use this in debugfs handling, where there is no such + * hash. + */ + name->hash = full_name_hash(name->name, name->len); + return d_lookup(dir, name); +} +#endif + +#if defined(EFX_NEED_HWMON_DEVICE_REGISTER_WITH_INFO) && defined(CONFIG_SFC_MCDI_MON) +struct device *hwmon_device_register_with_info( + struct device *dev, + const char *name __always_unused, + void *drvdata __always_unused, + const struct hwmon_chip_info *info __always_unused, + const struct attribute_group **extra_groups __always_unused) +{ + return hwmon_device_register(dev); +} +#endif + +#ifdef EFX_TC_OFFLOAD +#ifndef EFX_HAVE_TC_FLOW_OFFLOAD +struct flow_rule *flow_rule_alloc(unsigned int num_actions) +{ + struct flow_rule *rule; + + rule = kzalloc(struct_size(rule, action.entries, num_actions), + GFP_KERNEL); + if (!rule) + return NULL; + + rule->action.num_entries = num_actions; + + return rule; +} + +#define FLOW_DISSECTOR_MATCH(__rule, __type, __out) \ + const struct flow_match *__m = &(__rule)->match; \ + struct flow_dissector *__d = (__m)->dissector; \ + \ + (__out)->key = skb_flow_dissector_target(__d, __type, (__m)->key); \ + (__out)->mask = skb_flow_dissector_target(__d, __type, (__m)->mask); \ + +void flow_rule_match_basic(const struct flow_rule *rule, + struct flow_match_basic *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_BASIC, out); +} + +void flow_rule_match_control(const struct flow_rule *rule, + struct flow_match_control *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CONTROL, out); +} + +void flow_rule_match_eth_addrs(const struct flow_rule *rule, + struct flow_match_eth_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS, out); +} + +void flow_rule_match_vlan(const struct flow_rule *rule, + struct flow_match_vlan *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_VLAN, out); +} + +#ifdef EFX_HAVE_FLOW_DISSECTOR_KEY_CVLAN +void flow_rule_match_cvlan(const struct flow_rule *rule, + struct flow_match_vlan *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CVLAN, out); +} +#endif + +void flow_rule_match_ipv4_addrs(const struct flow_rule *rule, + struct flow_match_ipv4_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS, out); +} + +void flow_rule_match_ipv6_addrs(const struct flow_rule *rule, + struct flow_match_ipv6_addrs 
*out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS, out); +} + +void flow_rule_match_ip(const struct flow_rule *rule, + struct flow_match_ip *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IP, out); +} + +void flow_rule_match_ports(const struct flow_rule *rule, + struct flow_match_ports *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PORTS, out); +} + +void flow_rule_match_tcp(const struct flow_rule *rule, + struct flow_match_tcp *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_TCP, out); +} + +void flow_rule_match_icmp(const struct flow_rule *rule, + struct flow_match_icmp *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ICMP, out); +} + +void flow_rule_match_mpls(const struct flow_rule *rule, + struct flow_match_mpls *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_MPLS, out); +} + +void flow_rule_match_enc_control(const struct flow_rule *rule, + struct flow_match_control *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL, out); +} + +void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule, + struct flow_match_ipv4_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, out); +} + +void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule, + struct flow_match_ipv6_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, out); +} + +#ifdef EFX_HAVE_FLOW_DISSECTOR_KEY_ENC_IP +void flow_rule_match_enc_ip(const struct flow_rule *rule, + struct flow_match_ip *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IP, out); +} +#endif + +void flow_rule_match_enc_ports(const struct flow_rule *rule, + struct flow_match_ports *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_PORTS, out); +} + +void flow_rule_match_enc_keyid(const struct flow_rule *rule, + struct flow_match_enc_keyid *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_KEYID, out); +} + +#ifndef tcf_exts_for_each_action +#ifdef CONFIG_NET_CLS_ACT +#define tcf_exts_for_each_action(i, a, exts) \ + for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = (exts)->actions[i]); i++) +#else +#define tcf_exts_for_each_action(i, a, exts) \ + for (; 0; (void)(i), (void)(a), (void)(exts)) +#endif +#endif + +#ifdef EFX_NEED_TCF_MIRRED_DEV +static inline struct net_device *tcf_mirred_dev(const struct tc_action *a) +{ + return rtnl_dereference(to_mirred(a)->tcfm_dev); +} +#endif + +unsigned int tcf_exts_num_actions(struct tcf_exts *exts) +{ + unsigned int num_acts = 0; + struct tc_action *act; + int i; + + tcf_exts_for_each_action(i, act, exts) { + if (is_tcf_pedit(act)) + num_acts += tcf_pedit_nkeys(act); + else + num_acts++; + } + return num_acts; +} + +int tc_setup_flow_action(struct flow_action *flow_action, + const struct tcf_exts *exts) +{ + const struct tc_action *act; + int i, j, k; + + if (!exts) + return 0; + + j = 0; + tcf_exts_for_each_action(i, act, exts) { + struct flow_action_entry *entry; + + entry = &flow_action->entries[j]; + if (is_tcf_gact_ok(act)) { + entry->id = FLOW_ACTION_ACCEPT; + } else if (is_tcf_gact_shot(act)) { + entry->id = FLOW_ACTION_DROP; + } else if (is_tcf_gact_trap(act)) { + entry->id = FLOW_ACTION_TRAP; + } else if (is_tcf_gact_goto_chain(act)) { + entry->id = FLOW_ACTION_GOTO; + entry->chain_index = tcf_gact_goto_chain_index(act); + } else if (is_tcf_mirred_egress_redirect(act)) { + entry->id = FLOW_ACTION_REDIRECT; + entry->dev = tcf_mirred_dev(act); + } else if (is_tcf_mirred_egress_mirror(act)) { + entry->id = FLOW_ACTION_MIRRED; + entry->dev = 
tcf_mirred_dev(act); + } else if (is_tcf_vlan(act)) { + switch (tcf_vlan_action(act)) { + case TCA_VLAN_ACT_PUSH: + entry->id = FLOW_ACTION_VLAN_PUSH; + entry->vlan.vid = tcf_vlan_push_vid(act); + entry->vlan.proto = tcf_vlan_push_proto(act); + entry->vlan.prio = tcf_vlan_push_prio(act); + break; + case TCA_VLAN_ACT_POP: + entry->id = FLOW_ACTION_VLAN_POP; + break; + case TCA_VLAN_ACT_MODIFY: + entry->id = FLOW_ACTION_VLAN_MANGLE; + entry->vlan.vid = tcf_vlan_push_vid(act); + entry->vlan.proto = tcf_vlan_push_proto(act); + entry->vlan.prio = tcf_vlan_push_prio(act); + break; + default: + goto err_out; + } + } else if (is_tcf_tunnel_set(act)) { + entry->id = FLOW_ACTION_TUNNEL_ENCAP; + entry->tunnel = tcf_tunnel_info(act); + } else if (is_tcf_tunnel_release(act)) { + entry->id = FLOW_ACTION_TUNNEL_DECAP; + } else if (is_tcf_pedit(act)) { + for (k = 0; k < tcf_pedit_nkeys(act); k++) { + switch (tcf_pedit_cmd(act, k)) { + case TCA_PEDIT_KEY_EX_CMD_SET: + entry->id = FLOW_ACTION_MANGLE; + break; + case TCA_PEDIT_KEY_EX_CMD_ADD: + entry->id = FLOW_ACTION_ADD; + break; + default: + goto err_out; + } + entry->mangle.htype = tcf_pedit_htype(act, k); + entry->mangle.mask = tcf_pedit_mask(act, k); + entry->mangle.val = tcf_pedit_val(act, k); + entry->mangle.offset = tcf_pedit_offset(act, k); + entry = &flow_action->entries[++j]; + } + } else if (is_tcf_csum(act)) { + entry->id = FLOW_ACTION_CSUM; + entry->csum_flags = tcf_csum_update_flags(act); + } else if (is_tcf_skbedit_mark(act)) { + entry->id = FLOW_ACTION_MARK; + entry->mark = tcf_skbedit_mark(act); + } else { + goto err_out; + } + + if (!is_tcf_pedit(act)) + j++; + } + return 0; +err_out: + return -EOPNOTSUPP; +} + +struct flow_rule *efx_compat_flow_rule_build(struct tc_cls_flower_offload *tc) +{ + struct flow_rule *rule = flow_rule_alloc(tcf_exts_num_actions(tc->exts)); + int err; + + if (!rule) + return ERR_PTR(-ENOMEM); + rule->match.dissector = tc->dissector; + rule->match.mask = tc->mask; + rule->match.key = tc->key; + err = tc_setup_flow_action(&rule->action, tc->exts); + if (err) { + kfree(rule); + return ERR_PTR(err); + } + return rule; +} +#endif /* EFX_HAVE_TC_FLOW_OFFLOAD */ +#endif /* EFX_TC_OFFLOAD */ + +#if !defined(EFX_HAVE_PCI_FIND_NEXT_EXT_CAPABILITY) + +#define PCI_CFG_SPACE_SIZE 256 +#define PCI_CFG_SPACE_EXP_SIZE 4096 + +int pci_find_next_ext_capability(struct pci_dev *dev, int start, int cap) +{ + u32 header; + int ttl; + int pos = PCI_CFG_SPACE_SIZE; + + /* minimum 8 bytes per capability */ + ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8; + + if (dev->cfg_size <= PCI_CFG_SPACE_SIZE) + return 0; + + if (start) + pos = start; + + if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL) + return 0; + + /* + * If we have no capabilities, this is indicated by cap ID, + * cap version and next pointer all being 0. 
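efx_compat_flow_rule_build() above lets the rest of the driver consume the
modern flow_rule API even on kernels that only expose tcf_exts. Below is a
hedged sketch of how a flower-offload handler might use it; struct
tc_cls_flower_offload is assumed from those older kernels' pkt_cls.h, and
only fields referenced above are touched.

static int example_parse_flower(struct tc_cls_flower_offload *tc)
{
	struct flow_rule *rule = efx_compat_flow_rule_build(tc);
	struct flow_match_basic match;

	if (IS_ERR(rule))
		return PTR_ERR(rule);

	/* From here on the code is identical to a native flow_rule user */
	flow_rule_match_basic(rule, &match);
	pr_debug("n_proto %#06x/%#06x\n",
		 ntohs(match.key->n_proto), ntohs(match.mask->n_proto));

	/* flow_rule_alloc() above used kzalloc(), so a plain kfree() */
	kfree(rule);
	return 0;
}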
+ */ + if (header == 0) + return 0; + + while (ttl-- > 0) { + if (PCI_EXT_CAP_ID(header) == cap && pos != start) + return pos; + + pos = PCI_EXT_CAP_NEXT(header); + if (pos < PCI_CFG_SPACE_SIZE) + break; + + if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL) + break; + } + + return 0; +} + +#endif + +#ifdef EFX_USE_DEVLINK + +#ifdef EFX_NEED_DEVLINK_FLASH_UPDATE_TIMEOUT_NOTIFY +void devlink_flash_update_timeout_notify(struct devlink *devlink, + const char *status_msg, + const char *component, + unsigned long timeout) +{ + /* Treat as a status notifcation with no timeout indicated */ + devlink_flash_update_status_notify(devlink, status_msg, component, 0, 0); +} +#endif + +#else + +static int efx_devlink_info_add(struct devlink_info_req *req, + const char *prefix, const char *name, + const char *value) +{ + int offset = strlen(req->buf); + + scnprintf(&req->buf[offset], req->bufsize - offset, "%s%s: %s\n", + prefix, name, value); + return 0; +} + +int devlink_info_serial_number_put(struct devlink_info_req *req, const char *sn) +{ + return efx_devlink_info_add(req, "", "serial_number", sn); +} + +int devlink_info_driver_name_put(struct devlink_info_req *req, const char *name) +{ + return efx_devlink_info_add(req, "", "driver", name); +} + +int devlink_info_board_serial_number_put(struct devlink_info_req *req, + const char *bsn) +{ + return efx_devlink_info_add(req, "board.", "serial_number", bsn); +} + +int devlink_info_version_fixed_put(struct devlink_info_req *req, + const char *version_name, + const char *version_value) +{ + return efx_devlink_info_add(req, "fixed.", version_name, version_value); +} + +int devlink_info_version_stored_put(struct devlink_info_req *req, + const char *version_name, + const char *version_value) +{ + return efx_devlink_info_add(req, "stored.", version_name, + version_value); +} + +int devlink_info_version_running_put(struct devlink_info_req *req, + const char *version_name, + const char *version_value) +{ + return efx_devlink_info_add(req, "running.", version_name, + version_value); +} + +#endif /* !EFX_USE_DEVLINK */ + +#ifdef CONFIG_DEBUG_FS +void debugfs_lookup_and_remove(const char *name, struct dentry *dir) +{ + struct qstr child_name = QSTR_INIT(name, strlen(name)); + struct dentry *child; + + child = d_hash_and_lookup(dir, &child_name); + if (!IS_ERR_OR_NULL(child)) { + /* If it's a "regular" file, free its parameter binding */ + if (S_ISREG(child->d_inode->i_mode)) + kfree(child->d_inode->i_private); + debugfs_remove(child); + dput(child); + } +} +#endif /* CONFIG_DEBUGFS */ diff --git a/src/drivers/net/ethernet/sfc/kernel_compat.h b/src/drivers/net/ethernet/sfc/kernel_compat.h new file mode 100644 index 0000000..8dbb1b4 --- /dev/null +++ b/src/drivers/net/ethernet/sfc/kernel_compat.h @@ -0,0 +1,2737 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2006-2017 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
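On kernels without devlink, the devlink_info_*_put() replacements above
simply render "prefix.name: value" lines into req->buf, so unchanged
reporting code produces a flat text blob instead of netlink attributes. For
example (assuming a req whose bufsize is large enough):

/* Hedged sketch: output of the !EFX_USE_DEVLINK fallback above. */
static void example_fill_info(struct devlink_info_req *req)
{
	devlink_info_serial_number_put(req, "123456");
	devlink_info_version_running_put(req, "fw.mgmt", "1.2.3");
	/* req->buf now ends with:
	 *   serial_number: 123456
	 *   running.fw.mgmt: 1.2.3
	 */
}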
+ */ + +#ifndef EFX_KERNEL_COMPAT_H +#define EFX_KERNEL_COMPAT_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_SFC_MTD +/* This is conditional because it's fairly disgusting */ +#include "linux_mtd_mtd.h" +#endif +#include +#include + +/************************************************************************** + * + * Autoconf compatability + * + **************************************************************************/ + +#include "autocompat.h" + +#ifdef KMP_RELEASE +#include "kabi_compat.h" +#endif + +/************************************************************************** + * + * Resolve conflicts between feature switches and compatibility flags + * + **************************************************************************/ + +#ifndef EFX_HAVE_GRO + #undef EFX_USE_GRO +#endif + +/************************************************************************** + * + * Version/config/architecture compatability. + * + ************************************************************************** + * + * The preferred kernel compatability mechanism is through the autoconf + * layer above. The following definitions are all deprecated + */ + + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) + #error "This kernel version is now unsupported" +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) + /* Fixing MSI-X masking introduced another bug, fixed in 2.6.36 */ + #define EFX_NEED_SAVE_MSIX_MESSAGES yes +#endif + +/************************************************************************** + * + * Definitions of missing constants, types, functions and macros + * + ************************************************************************** + * + */ +#ifndef READ_ONCE +#define READ_ONCE(x) ACCESS_ONCE((x)) +#endif + +#ifndef WRITE_ONCE +#define WRITE_ONCE(x, v) (ACCESS_ONCE((x)) = (v)) +#endif + +#ifndef fallthrough +#ifdef __has_attribute +#ifndef __GCC4_has_attribute___fallthrough__ +#define __GCC4_has_attribute___fallthrough__ 0 +#endif +#if __has_attribute(__fallthrough__) +# define fallthrough __attribute__((__fallthrough__)) +#else +# define fallthrough do {} while (0) /* fallthrough */ +#endif +#else +# define fallthrough do {} while (0) /* fallthrough */ +#endif +#endif + +#ifndef NETIF_F_CSUM_MASK + #define NETIF_F_CSUM_MASK \ + (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM) +#endif + +#ifdef NETIF_F_RXHASH + #define EFX_HAVE_RXHASH_SUPPORT yes +#else + /* This reduces the need for #ifdefs */ + #define NETIF_F_RXHASH 0 + #define ETH_FLAG_RXHASH 0 +#endif + +/* Older kernel versions assume that a device with the NETIF_F_NTUPLE + * feature implements ethtool_ops::set_rx_ntuple, which is not the + * case in this driver. If we enable this feature on those kernel + * versions, 'ethtool -U' will crash. Therefore we prevent the + * feature from being set even if it is defined, unless this is a safe + * version: Linux 3.0+ or RHEL 6 with backported RX NFC and ARFS + * support. 
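The fallthrough definition earlier in this header degrades gracefully: with
compilers that support __attribute__((__fallthrough__)) it keeps the
implicit-fallthrough warning useful, and elsewhere it compiles to an empty
statement, so call sites are written identically either way. Illustration
only:

/* Both arms compile on old and new toolchains alike. */
static int example_classify(int code)
{
	int severity = 0;

	switch (code) {
	case 2:
		severity++;
		fallthrough;	/* deliberate: 2 implies everything 1 does */
	case 1:
		severity++;
		break;
	default:
		break;
	}
	return severity;
}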
+ */ +#if !defined(NETIF_F_NTUPLE) || (LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) && !(defined(RHEL_MAJOR) && RHEL_MAJOR == 6)) + #undef NETIF_F_NTUPLE + #define NETIF_F_NTUPLE 0 + #undef ETH_FLAG_NTUPLE + #define ETH_FLAG_NTUPLE 0 +#endif + +#ifndef NETIF_F_RXCSUM + /* This reduces the need for #ifdefs */ + #define NETIF_F_RXCSUM 0 +#endif + +/* This reduces the need for #ifdefs */ +#ifndef NETIF_F_ALL_TSO + #define NETIF_F_ALL_TSO (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN) +#endif + +#ifndef NETIF_F_GSO_GRE + #define NETIF_F_GSO_GRE 0 +#endif +#ifndef NETIF_F_GSO_GRE_CSUM + #define NETIF_F_GSO_GRE_CSUM 0 +#endif +#ifndef NETIF_F_GSO_UDP_TUNNEL + #define NETIF_F_GSO_UDP_TUNNEL 0 +#endif +#ifndef NETIF_F_GSO_UDP_TUNNEL_CSUM + #define NETIF_F_GSO_UDP_TUNNEL_CSUM 0 +#endif +#ifndef EFX_HAVE_GSO_UDP_TUNNEL + #define SKB_GSO_UDP_TUNNEL 0 +#endif +#ifndef EFX_HAVE_GSO_UDP_TUNNEL_CSUM + #define SKB_GSO_UDP_TUNNEL_CSUM 0 +#endif + +#ifndef NETIF_F_RXFCS + #define NETIF_F_RXFCS 0 +#endif +#ifndef NETIF_F_RXALL + #define NETIF_F_RXALL 0 +#endif + +/* RHEL 6.2 introduced XPS support but didn't add it under CONFIG_XPS. + * Instead the code was simply included directly, so it's enabled in all + * configurations. We check for the presence of CONFIG_XPS in other code. + */ +#if (LINUX_VERSION_CODE == KERNEL_VERSION(2,6,32)) && \ + defined(RHEL_MAJOR) && (RHEL_MAJOR == 6) && \ + defined(RHEL_MINOR) && (RHEL_MINOR >= 2) +# define CONFIG_XPS +#endif + +#ifndef PCI_EXP_LNKCAP_SLS_5_0GB + #define PCI_EXP_LNKCAP_SLS_5_0GB 0x00000002 /* LNKCAP2 SLS bit 1 */ +#endif +#ifndef PCI_EXP_LNKCAP2 + #define PCI_EXP_LNKCAP2 44 /* Link Capabilities 2 */ +#endif +#ifndef PCI_EXP_LNKCAP2_SLS_8_0GB + #define PCI_EXP_LNKCAP2_SLS_8_0GB 0x00000008 /* Supported 8.0GT/s */ +#endif + +#ifndef PCI_VENDOR_ID_SOLARFLARE + #define PCI_VENDOR_ID_SOLARFLARE 0x1924 + #define PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0 0x0703 + #define PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1 0x6703 + #define PCI_DEVICE_ID_SOLARFLARE_SFC4000B 0x0710 +#endif + +#ifndef PCI_EXT_CAP_ID_VNDR + #define PCI_EXT_CAP_ID_VNDR 0x0B +#endif + +#if defined(__GNUC__) && !defined(inline) + #define inline inline __attribute__ ((always_inline)) +#endif + +#if !defined(round_up) && !defined(round_down) && !defined(__round_mask) +/* + * This looks more complex than it should be. But we need to + * get the type for the ~ right in round_down (it needs to be + * as wide as the result!), and we want to evaluate the macro + * arguments just once each. + */ +#define __round_mask(x, y) ((__typeof__(x))((y)-1)) +#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1) +#define round_down(x, y) ((x) & ~__round_mask(x, y)) +#endif + +#ifndef DEVICE_ATTR_RO +#define DEVICE_ATTR_RO(_name) DEVICE_ATTR(_name, 0444, _name##_show, NULL) +#endif +#ifndef DEVICE_ATTR_RW +#define DEVICE_ATTR_RW(_name) DEVICE_ATTR(_name, 0644, _name##_show, \ + _name##_store) +#endif + +#ifndef sysfs_attr_init + #define sysfs_attr_init(attr) do {} while (0) +#endif + +#if defined(CONFIG_X86) && !defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) + #define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +#endif + +#ifndef VLAN_PRIO_MASK + #define VLAN_PRIO_MASK 0xe000 +#endif +#ifndef VLAN_PRIO_SHIFT + #define VLAN_PRIO_SHIFT 13 +#endif + +#ifndef SIOCGHWTSTAMP + #define SIOCGHWTSTAMP 0x89b1 +#endif + +#ifdef EFX_NEED_IPV6_NFC + /** + * struct ethtool_tcpip6_spec - flow specification for TCP/IPv6 etc. 
+ * @ip6src: Source host + * @ip6dst: Destination host + * @psrc: Source port + * @pdst: Destination port + * @tclass: Traffic Class + * + * This can be used to specify a TCP/IPv6, UDP/IPv6 or SCTP/IPv6 flow. + */ + struct ethtool_tcpip6_spec { + __be32 ip6src[4]; + __be32 ip6dst[4]; + __be16 psrc; + __be16 pdst; + __u8 tclass; + }; + + /** + * struct ethtool_ah_espip6_spec - flow specification for IPsec/IPv6 + * @ip6src: Source host + * @ip6dst: Destination host + * @spi: Security parameters index + * @tclass: Traffic Class + * + * This can be used to specify an IPsec transport or tunnel over IPv6. + */ + struct ethtool_ah_espip6_spec { + __be32 ip6src[4]; + __be32 ip6dst[4]; + __be32 spi; + __u8 tclass; + }; + + /** + * struct ethtool_usrip6_spec - general flow specification for IPv6 + * @ip6src: Source host + * @ip6dst: Destination host + * @l4_4_bytes: First 4 bytes of transport (layer 4) header + * @tclass: Traffic Class + * @l4_proto: Transport protocol number (nexthdr after any Extension Headers) + */ + struct ethtool_usrip6_spec { + __be32 ip6src[4]; + __be32 ip6dst[4]; + __be32 l4_4_bytes; + __u8 tclass; + __u8 l4_proto; + }; + + #ifndef IPV6_USER_FLOW + #define IPV6_USER_FLOW 0x0e /* spec only (usr_ip6_spec; nfc only) */ + #endif +#endif + +/**************************************************************************/ + +#ifndef ETH_RESET_SHARED_SHIFT + enum ethtool_reset_flags { + /* These flags represent components dedicated to the interface + * the command is addressed to. Shift any flag left by + * ETH_RESET_SHARED_SHIFT to reset a shared component of the + * same type. + */ + ETH_RESET_MGMT = 1 << 0, /* Management processor */ + ETH_RESET_IRQ = 1 << 1, /* Interrupt requester */ + ETH_RESET_DMA = 1 << 2, /* DMA engine */ + ETH_RESET_FILTER = 1 << 3, /* Filtering/flow direction */ + ETH_RESET_OFFLOAD = 1 << 4, /* Protocol offload */ + ETH_RESET_MAC = 1 << 5, /* Media access controller */ + ETH_RESET_PHY = 1 << 6, /* Transceiver/PHY */ + ETH_RESET_RAM = 1 << 7, /* RAM shared between + * multiple components */ + + ETH_RESET_DEDICATED = 0x0000ffff, /* All components dedicated to + * this interface */ + ETH_RESET_ALL = 0xffffffff, /* All components used by this + * interface, even if shared */ + }; + #define ETH_RESET_SHARED_SHIFT 16 + #define ETHTOOL_RESET 0x00000034 +#endif + +#ifndef AH_V4_FLOW + #define AH_V4_FLOW 0x09 + #define ESP_V4_FLOW 0x0a + #define AH_V6_FLOW 0x0b + #define ESP_V6_FLOW 0x0c + #define IP_USER_FLOW 0x0d +#endif +#ifndef IPV4_FLOW + #define IPV4_FLOW 0x10 + #define IPV6_FLOW 0x11 +#endif +#ifndef ETHER_FLOW + #define ETHER_FLOW 0x12 +#endif +#ifndef FLOW_EXT + #define FLOW_EXT 0x80000000 +#endif +#ifndef FLOW_RSS + #define FLOW_RSS 0x20000000 +#endif + +/* We want to use the latest definition of ethtool_rxnfc, even if the + * kernel headers don't define all the fields in it. Use our own name + * and cast as necessary. + */ +#ifndef EFX_HAVE_EFX_ETHTOOL_RXNFC + union efx_ethtool_flow_union { + struct ethtool_tcpip4_spec tcp_ip4_spec; + struct ethtool_tcpip4_spec udp_ip4_spec; + struct ethtool_tcpip4_spec sctp_ip4_spec; + struct ethtool_usrip4_spec usr_ip4_spec; + struct ethtool_tcpip6_spec tcp_ip6_spec; + struct ethtool_tcpip6_spec udp_ip6_spec; + struct ethtool_tcpip6_spec sctp_ip6_spec; + struct ethtool_usrip6_spec usr_ip6_spec; + struct ethhdr ether_spec; + /* unneeded members omitted... 
*/ + __u8 hdata[60]; + }; + struct efx_ethtool_flow_ext { + __be16 vlan_etype; + __be16 vlan_tci; + __be32 data[2]; + }; + struct efx_ethtool_rx_flow_spec { + __u32 flow_type; + union efx_ethtool_flow_union h_u; + struct efx_ethtool_flow_ext h_ext; + union efx_ethtool_flow_union m_u; + struct efx_ethtool_flow_ext m_ext; + __u64 ring_cookie; + __u32 location; + }; + struct efx_ethtool_rxnfc { + __u32 cmd; + __u32 flow_type; + __u64 data; + struct efx_ethtool_rx_flow_spec fs; + union { + __u32 rule_cnt; + __u32 rss_context; + }; + __u32 rule_locs[0]; + }; + #define EFX_HAVE_EFX_ETHTOOL_RXNFC yes +#endif + +#ifndef RX_CLS_LOC_SPECIAL + #define RX_CLS_LOC_SPECIAL 0x80000000 + #define RX_CLS_LOC_ANY 0xffffffff + #define RX_CLS_LOC_FIRST 0xfffffffe + #define RX_CLS_LOC_LAST 0xfffffffd +#endif + +#ifdef ETHTOOL_GRXFHINDIR + #define EFX_HAVE_ETHTOOL_RXFH_INDIR yes +#else + struct ethtool_rxfh_indir { + __u32 cmd; + /* On entry, this is the array size of the user buffer. On + * return from ETHTOOL_GRXFHINDIR, this is the array size of + * the hardware indirection table. */ + __u32 size; + __u32 ring_index[0]; /* ring/queue index for each hash value */ + }; + #define ETHTOOL_GRXFHINDIR 0x00000038 + #define ETHTOOL_SRXFHINDIR 0x00000039 +#endif + +#ifdef EFX_NEED_ETHTOOL_RXFH_INDIR_DEFAULT + static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings) + { + return index % n_rx_rings; + } +#endif + +#ifndef EFX_HAVE_ETHTOOL_SET_PHYS_ID + enum ethtool_phys_id_state { + ETHTOOL_ID_INACTIVE, + ETHTOOL_ID_ACTIVE, + ETHTOOL_ID_ON, + ETHTOOL_ID_OFF + }; +#endif + +#ifdef ETHTOOL_GMODULEEEPROM + #define EFX_HAVE_ETHTOOL_GMODULEEEPROM yes + #ifndef ETH_MODULE_SFF_8436 + #define ETH_MODULE_SFF_8436 0x3 + #define ETH_MODULE_SFF_8436_LEN 256 + #endif + #ifndef ETH_MODULE_SFF_8436_MAX_LEN + #define ETH_MODULE_SFF_8436_MAX_LEN 640 + #endif +#else + struct ethtool_modinfo { + __u32 cmd; + __u32 type; + __u32 eeprom_len; + __u32 reserved[8]; + }; + + #define ETH_MODULE_SFF_8079 0x1 + #define ETH_MODULE_SFF_8079_LEN 256 + #define ETH_MODULE_SFF_8472 0x2 + #define ETH_MODULE_SFF_8472_LEN 512 + #define ETH_MODULE_SFF_8436 0x3 + #define ETH_MODULE_SFF_8436_LEN 256 + #define ETH_MODULE_SFF_8436_MAX_LEN 640 + + #define ETHTOOL_GMODULEINFO 0x00000042 + #define ETHTOOL_GMODULEEEPROM 0x00000043 +#endif + +#ifndef ETHTOOL_GET_TS_INFO + struct ethtool_ts_info { + __u32 cmd; + __u32 so_timestamping; + __s32 phc_index; + __u32 tx_types; + __u32 tx_reserved[3]; + __u32 rx_filters; + __u32 rx_reserved[3]; + }; + #define ETHTOOL_GET_TS_INFO 0x00000041 /* Get time stamping and PHC info */ +#endif + +#ifndef PORT_DA + #define PORT_DA 0x05 +#endif + +#ifndef SUPPORTED_40000baseKR4_Full + #define SUPPORTED_40000baseKR4_Full (1 << 23) + #define SUPPORTED_40000baseCR4_Full (1 << 24) +#endif + +#ifdef EFX_NEED_VZALLOC + static inline void *vzalloc(unsigned long size) + { + void *buf = vmalloc(size); + if (buf) + memset(buf, 0, size); + return buf; + } +#endif + +#ifndef NETIF_F_GSO + #define efx_gso_size tso_size + #undef gso_size + #define gso_size efx_gso_size + #define efx_gso_segs tso_segs + #undef gso_segs + #define gso_segs efx_gso_segs +#endif + +#ifndef GSO_LEGACY_MAX_SIZE + #define GSO_LEGACY_MAX_SIZE 65536u +#endif + +#ifndef netdev_for_each_uc_addr + #define netdev_for_each_uc_addr(uclist, dev) \ + list_for_each_entry(uclist, &(dev)->uc.list, list) + #define netdev_uc_count(dev) (dev)->uc.count +#endif + +#ifndef netdev_for_each_mc_addr + #define netdev_for_each_mc_addr(mclist, dev) \ + for (mclist = (dev)->mc_list; 
mclist; mclist = mclist->next) +#endif + +#ifndef netdev_mc_count + #define netdev_mc_count(dev) (dev)->mc_count +#endif + +#ifdef EFX_NEED_NETIF_SET_REAL_NUM_TX_QUEUES + static inline void + netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) + { + dev->real_num_tx_queues = txq; + } +#endif + +#ifdef EFX_NEED_NETIF_SET_REAL_NUM_RX_QUEUES + static inline void + netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq) + { +#ifdef CONFIG_RPS + dev->num_rx_queues = rxq; +#endif + } +#endif + +#ifdef EFX_NEED_DMA_SET_COHERENT_MASK + static inline int dma_set_coherent_mask(struct device *dev, u64 mask) + { + return pci_set_consistent_dma_mask(to_pci_dev(dev), mask); + } +#endif + +#ifdef EFX_NEED_DMA_SET_MASK_AND_COHERENT + static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask) + { + int rc; + + /* + * Truncate the mask to the actually supported dma_addr_t + * width to avoid generating unsupportable addresses. + */ + mask = (dma_addr_t)mask; + + rc = dma_set_mask(dev, mask); + if (rc == 0) + dma_set_coherent_mask(dev, mask); + return rc; + } +#endif + +/* + * Recent mainline kernels can be configured so that the resulting + * image will run both on 'bare metal' and in a Xen domU. + * xen_domain() or xen_start_info tells us which is the case at + * run-time. If neither is defined, assume that CONFIG_XEN tells us + * at compile-time. + */ +#if defined(EFX_HAVE_XEN_XEN_H) + #include +#elif defined(CONFIG_XEN) && defined(EFX_HAVE_XEN_START_INFO) + /* We should be able to include but that + * is broken (#includes other headers that are not installed) in + * Fedora 10. */ + extern struct start_info *xen_start_info; + #define xen_domain() (xen_start_info ? 1 : 0) +#endif +#ifndef xen_domain + #ifdef CONFIG_XEN + #define xen_domain() 1 + #else + #define xen_domain() 0 + #endif +#endif + +#ifndef netif_printk + + static inline const char *netdev_name(const struct net_device *dev) + { + if (dev->reg_state != NETREG_REGISTERED) + return "(unregistered net_device)"; + return dev->name; + } + + #define netdev_printk(level, netdev, format, args...) \ + dev_printk(level, (netdev)->dev.parent, \ + "%s: " format, \ + netdev_name(netdev), ##args) + + #define netif_printk(priv, type, level, dev, fmt, args...) \ + do { \ + if (netif_msg_##type(priv)) \ + netdev_printk(level, (dev), fmt, ##args); \ + } while (0) + + #define netif_emerg(priv, type, dev, fmt, args...) \ + netif_printk(priv, type, KERN_EMERG, dev, fmt, ##args) + #define netif_alert(priv, type, dev, fmt, args...) \ + netif_printk(priv, type, KERN_ALERT, dev, fmt, ##args) + #define netif_crit(priv, type, dev, fmt, args...) \ + netif_printk(priv, type, KERN_CRIT, dev, fmt, ##args) + #define netif_err(priv, type, dev, fmt, args...) \ + netif_printk(priv, type, KERN_ERR, dev, fmt, ##args) + #define netif_warn(priv, type, dev, fmt, args...) \ + netif_printk(priv, type, KERN_WARNING, dev, fmt, ##args) + #define netif_notice(priv, type, dev, fmt, args...) \ + netif_printk(priv, type, KERN_NOTICE, dev, fmt, ##args) + #define netif_info(priv, type, dev, fmt, args...) \ + netif_printk(priv, type, KERN_INFO, (dev), fmt, ##args) + + #if defined(DEBUG) + #define netif_dbg(priv, type, dev, format, args...) \ + netif_printk(priv, type, KERN_DEBUG, dev, format, ##args) + #elif defined(CONFIG_DYNAMIC_DEBUG) + #define netif_dbg(priv, type, netdev, format, args...) 
\ + do { \ + if (netif_msg_##type(priv)) \ + dynamic_dev_dbg((netdev)->dev.parent, \ + "%s: " format, \ + netdev_name(netdev), ##args); \ + } while (0) + #else + #define netif_dbg(priv, type, dev, format, args...) \ + ({ \ + if (0) \ + netif_printk(priv, type, KERN_DEBUG, dev, \ + format, ##args); \ + 0; \ + }) + #endif + +#endif + +/* netif_vdbg may be defined wrongly */ +#undef netif_vdbg +#if defined(VERBOSE_DEBUG) +#define netif_vdbg netif_dbg +#else +#define netif_vdbg(priv, type, dev, format, args...) \ +({ \ + if (0) \ + netif_printk(priv, type, KERN_DEBUG, dev, \ + format, ##args); \ + 0; \ +}) +#endif + +/* netdev_WARN may be defined wrongly (with a trailing semi-colon) */ +#undef netdev_WARN +#define netdev_WARN(dev, format, args...) \ + WARN(1, "netdevice: %s\n" format, netdev_name(dev), ##args) + +#ifndef netif_cond_dbg +/* if @cond then downgrade to debug, else print at @level */ +#define netif_cond_dbg(priv, type, netdev, cond, level, fmt, args...) \ + do { \ + if (cond) \ + netif_dbg(priv, type, netdev, fmt, ##args); \ + else \ + netif_ ## level(priv, type, netdev, fmt, ##args); \ + } while (0) +#endif + +/* __maybe_unused may be defined wrongly */ +#undef __maybe_unused +#define __maybe_unused __attribute__((unused)) + +#ifndef __always_unused + #define __always_unused __attribute__((unused)) +#endif + +#ifdef EFX_NEED_ETHER_ADDR_COPY + static inline void ether_addr_copy(u8 *dst, const u8 *src) + { + u16 *a = (u16 *)dst; + const u16 *b = (const u16 *)src; + + a[0] = b[0]; + a[1] = b[1]; + a[2] = b[2]; + } +#endif + +#ifdef EFX_NEED_ETHER_ADDR_EQUAL + static inline bool ether_addr_equal(const u8 *addr1, const u8 *addr2) + { + return !compare_ether_addr(addr1, addr2); + } +#endif + +#ifdef EFX_NEED_ETH_ZERO_ADDR + static inline void eth_zero_addr(u8 *addr) + { + memset(addr, 0x00, ETH_ALEN); + } +#endif + +#ifdef EFX_NEED_ETH_BROADCAST_ADDR + static inline void eth_broadcast_addr(u8 *addr) + { + memset(addr, 0xff, ETH_ALEN); + } +#endif + +#ifdef EFX_NEED_ETH_RANDOM_ADDR +/* random_ether_addr was renamed in: + * 0a4dd594982a ("etherdevice: Rename random_ether_addr to eth_random_addr") + */ +#define eth_random_addr random_ether_addr +#endif + +#ifdef EFX_NEED_ETH_HW_ADDR_SET + static inline void eth_hw_addr_set(struct net_device *dev, const u8 *addr) + { + ether_addr_copy(dev->dev_addr, addr); + } +#endif + +#ifdef EFX_NEED_MAC_PTON + #ifndef EFX_HAVE_HEX_TO_BIN + static inline int hex_to_bin(char ch) + { + if ((ch >= '0') && (ch <= '9')) + return ch - '0'; + ch = tolower(ch); + if ((ch >= 'a') && (ch <= 'f')) + return ch - 'a' + 10; + return -1; + } + #endif + + static inline int mac_pton(const char *s, u8 *mac) + { + int i; + if (strlen(s) < 3 * ETH_ALEN - 1) + return 0; + for (i = 0; i < ETH_ALEN; i++) { + if (!isxdigit(s[i * 3]) || !isxdigit(s[i * 3 + 1])) + return 0; + if (i != ETH_ALEN - 1 && s[i * 3 + 2] != ':') + return 0; + } + for (i = 0; i < ETH_ALEN; i++) + mac[i] = (hex_to_bin(s[i * 3]) << 4) | + hex_to_bin(s[i * 3 + 1]); + return 1; + } +#endif + +#ifdef EFX_NEED_IP_IS_FRAGMENT + static inline bool ip_is_fragment(const struct iphdr *iph) + { + return (iph->frag_off & htons(IP_MF | IP_OFFSET)) != 0; + } +#endif + +#ifdef EFX_NEED_NETDEV_FEATURES_T + typedef u32 netdev_features_t; +#endif + +#ifdef EFX_NEED_NETDEV_NOTIFIER_INFO_TO_DEV +static inline struct net_device * +netdev_notifier_info_to_dev(const void *info) +{ + return (struct net_device *) info; +} +#endif + +#ifdef EFX_NEED_SKB_FRAG_DMA_MAP + static inline dma_addr_t skb_frag_dma_map(struct device *dev, + 
const skb_frag_t *frag, + size_t offset, size_t size, + enum dma_data_direction dir) + { + return dma_map_page(dev, frag->page, + frag->page_offset + offset, size, dir); + } +#endif + +#ifdef EFX_NEED_SKB_FRAG_ADDRESS + static inline void *skb_frag_address(const skb_frag_t *frag) + { + return page_address(frag->page) + frag->page_offset; + } +#endif + +#ifdef EFX_NEED_SKB_FRAG_SIZE + static inline unsigned int skb_frag_size(const skb_frag_t *frag) + { + return frag->size; + } +#endif + +#ifdef EFX_NEED_SKB_FRAG_PAGE + static inline struct page *skb_frag_page(const skb_frag_t *frag) + { + return frag->page; + } +#endif + +#ifdef EFX_NEED_SKB_FRAG_OFF +/** + * skb_frag_off() - Returns the offset of a skb fragment + * @frag: the paged fragment + */ +static inline unsigned int skb_frag_off(const skb_frag_t *frag) +{ + /* This later got renamed bv_offset (because skb_frag_t is now really + * a struct bio_vec), but the page_offset name should work in any + * kernel that doesn't already have skb_frag_off defined. + */ + return frag->page_offset; +} +#endif + +#if defined(CONFIG_COMPAT) && defined(EFX_NEED_COMPAT_U64) + #if defined(CONFIG_X86_64) || defined(CONFIG_IA64) + typedef u64 __attribute__((aligned(4))) compat_u64; + #else + typedef u64 compat_u64; + #endif +#endif + +#ifdef EFX_NEED_BYTE_QUEUE_LIMITS +static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue, + unsigned int bytes) +{} +static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue, + unsigned int pkts, + unsigned int bytes) +{} +static inline void netdev_tx_reset_queue(struct netdev_queue *q) {} +#endif + +#ifdef EFX_NEED___BQL +/* Variant of netdev_tx_sent_queue() for drivers that are aware + * that they should not test BQL status themselves. + * We do want to change __QUEUE_STATE_STACK_XOFF only for the last + * skb of a batch. + * Returns true if the doorbell must be used to kick the NIC. 
+ */ +static inline bool __netdev_tx_sent_queue(struct netdev_queue *dev_queue, + unsigned int bytes, + bool xmit_more) +{ + if (xmit_more) { +#ifdef CONFIG_BQL + dql_queued(&dev_queue->dql, bytes); +#endif + return netif_tx_queue_stopped(dev_queue); + } + netdev_tx_sent_queue(dev_queue, bytes); + return true; +} +#endif + +#ifdef EFX_NEED_IS_COMPAT_TASK + static inline int is_compat_task(void) + { + #if !defined(CONFIG_COMPAT) + return 0; + #elif defined(CONFIG_X86_64) + #if defined(EFX_HAVE_TIF_ADDR32) + return test_thread_flag(TIF_ADDR32); + #else + return test_thread_flag(TIF_IA32); + #endif + #elif defined(CONFIG_PPC64) + return test_thread_flag(TIF_32BIT); + #else + #error "cannot define is_compat_task() for this architecture" + #endif + } +#endif + +#ifdef EFX_NEED_SKB_CHECKSUM_NONE_ASSERT +static inline void skb_checksum_none_assert(const struct sk_buff *skb) +{ +#ifdef DEBUG + BUG_ON(skb->ip_summed != CHECKSUM_NONE); +#endif +} +#endif + +#ifndef NETIF_F_TSO_MANGLEID + #define NETIF_F_TSO_MANGLEID 0 +#endif +#ifndef SKB_GSO_TCP_FIXEDID + #define SKB_GSO_TCP_FIXEDID 0 +#endif + +#ifndef __read_mostly + #define __read_mostly +#endif + +#ifndef NETIF_F_HW_VLAN_CTAG_TX + #define NETIF_F_HW_VLAN_CTAG_TX NETIF_F_HW_VLAN_TX +#endif + +#ifndef NETIF_F_HW_VLAN_CTAG_RX + #define NETIF_F_HW_VLAN_CTAG_RX NETIF_F_HW_VLAN_RX +#endif + +#ifdef EFX_HAVE_OLD___VLAN_HWACCEL_PUT_TAG +static inline struct sk_buff * + efx___vlan_hwaccel_put_tag(struct sk_buff *skb, __be16 vlan_proto, + u16 vlan_tci) + { + WARN_ON(vlan_proto != htons(ETH_P_8021Q)); + return __vlan_hwaccel_put_tag(skb, vlan_tci); + } + #define __vlan_hwaccel_put_tag efx___vlan_hwaccel_put_tag +#endif +#ifndef NETIF_F_HW_VLAN_CTAG_FILTER + #define NETIF_F_HW_VLAN_CTAG_FILTER NETIF_F_HW_VLAN_FILTER +#endif + +#ifdef EFX_NEED___SET_BIT_LE + /* Depending on kernel version, BITOP_LE_SWIZZLE may be + * defined the way we want or unconditionally as the + * big-endian value (or not at all). Use our own name. + */ + #if defined(__LITTLE_ENDIAN) + #define EFX_BITOP_LE_SWIZZLE 0 + #elif defined(__BIG_ENDIAN) + #define EFX_BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7) + #endif + + /* __set_bit_le() and __clear_bit_le() may already be defined + * as macros with the wrong effective parameter type (volatile + * unsigned long *), so use brute force to replace them. 
+ */ + + static inline void efx__set_bit_le(int nr, void *addr) + { + __set_bit(nr ^ EFX_BITOP_LE_SWIZZLE, addr); + } + #undef __set_bit_le + #define __set_bit_le efx__set_bit_le + + static inline void efx__clear_bit_le(int nr, void *addr) + { + __clear_bit(nr ^ EFX_BITOP_LE_SWIZZLE, addr); + } + #undef __clear_bit_le + #define __clear_bit_le efx__clear_bit_le +#endif + +#ifdef CONFIG_SFC_MTD +#ifdef EFX_NEED_MTD_DEVICE_REGISTER + struct mtd_partition; + + static inline int + mtd_device_register(struct mtd_info *master, + const struct mtd_partition *parts, + int nr_parts) + { + BUG_ON(parts); + return add_mtd_device(master); + } + + static inline int mtd_device_unregister(struct mtd_info *master) + { + return del_mtd_device(master); + } +#endif +#endif + +#ifndef for_each_set_bit + #define for_each_set_bit(bit, addr, size) \ + for ((bit) = find_first_bit((addr), (size)); \ + (bit) < (size); \ + (bit) = find_next_bit((addr), (size), (bit) + 1)) +#endif + +#ifdef EFX_NEED_BITMAP_ZALLOC +#define bitmap_zalloc(count, gfp) kzalloc(BITS_TO_LONGS(count), gfp) +#define bitmap_free(ptr) kfree(ptr) +#endif + +#ifndef EFX_HAVE_IOREMAP_WC + /* This should never be called */ + static inline void * + ioremap_wc(resource_size_t offset, resource_size_t size) + { + WARN_ON(1); + return NULL; + } +#endif + +#ifdef EFX_HAVE_IOREMAP_NOCACHE + /* On old kernels ioremap_nocache() differs from ioremap() */ + #define efx_ioremap(phys,size) ioremap_nocache(phys,size) +#else + #define efx_ioremap(phys,size) ioremap(phys,size) +#endif + +#ifdef EFX_NEED_SKB_TRANSPORT_HEADER_WAS_SET + #define skb_transport_header_was_set(skb) \ + (!!(skb)->transport_header) +#endif + +#ifndef EFX_HAVE_NAPI_STRUCT +/* We use a napi_struct pointer as context in some compat functions even if the + * kernel doesn't use this structure at all. 
+ */
+struct efx_napi_dummy {};
+#define napi_struct efx_napi_dummy
+#endif
+
+#ifndef NAPI_POLL_WEIGHT
+#define NAPI_POLL_WEIGHT 64
+#endif
+
+#ifdef EFX_HAVE_RXHASH_SUPPORT
+#ifdef EFX_NEED_SKB_SET_HASH
+enum pkt_hash_types {
+	PKT_HASH_TYPE_NONE,
+	PKT_HASH_TYPE_L2,
+	PKT_HASH_TYPE_L3,
+	PKT_HASH_TYPE_L4,
+};
+
+static inline void skb_set_hash(struct sk_buff *skb, __u32 hash,
+				enum pkt_hash_types type)
+{
+#ifdef EFX_HAVE_SKB_L4HASH
+	skb->l4_rxhash = (type == PKT_HASH_TYPE_L4);
+#endif
+	skb->rxhash = hash;
+}
+#endif
+#endif
+
+#ifndef EFX_HAVE_BUSY_POLL
+static inline void skb_mark_napi_id(struct sk_buff *skb,
+				    struct napi_struct *napi) {}
+#endif
+
+#ifdef EFX_NEED_USLEEP_RANGE
+void usleep_range(unsigned long min, unsigned long max);
+#endif
+
+#ifdef EFX_NEED_SKB_VLAN_TAG_GET
+#define skb_vlan_tag_get vlan_tx_tag_get
+#define skb_vlan_tag_present vlan_tx_tag_present
+#endif
+
+#ifdef EFX_NEED_PAGE_REF_ADD
+static inline void page_ref_add(struct page *page, int nr)
+{
+	atomic_add(nr, &page->_count);
+}
+#endif
+
+#ifdef EFX_HAVE_SKB_ENCAPSULATION
+#ifdef EFX_SKB_HAS_INNER_NETWORK_HEADER
+#define EFX_CAN_SUPPORT_ENCAP_TSO
+#ifndef EFX_HAVE_SKB_INNER_NETWORK_HEADER
+static inline unsigned char *skb_inner_network_header(const struct sk_buff *skb)
+{
+	return skb->head + skb->inner_network_header;
+}
+#endif
+
+#ifndef EFX_HAVE_INNER_IP_HDR
+static inline struct iphdr *inner_ip_hdr(const struct sk_buff *skb)
+{
+	return (struct iphdr *)skb_inner_network_header(skb);
+}
+#endif
+#endif /* EFX_SKB_HAS_INNER_NETWORK_HEADER */
+
+#ifdef EFX_SKB_HAS_INNER_TRANSPORT_HEADER
+#ifndef EFX_HAVE_SKB_INNER_TRANSPORT_HEADER
+static inline unsigned char *skb_inner_transport_header(const struct sk_buff *skb)
+{
+	return skb->head + skb->inner_transport_header;
+}
+#endif
+
+#ifndef EFX_HAVE_INNER_TCP_HDR
+static inline struct tcphdr *inner_tcp_hdr(const struct sk_buff *skb)
+{
+	return (struct tcphdr *)skb_inner_transport_header(skb);
+}
+#endif
+#else /* !EFX_SKB_HAS_INNER_TRANSPORT_HEADER */
+#undef EFX_CAN_SUPPORT_ENCAP_TSO
+#endif /* EFX_SKB_HAS_INNER_TRANSPORT_HEADER */
+#ifndef NETIF_F_GSO_GRE
+#undef EFX_CAN_SUPPORT_ENCAP_TSO
+#endif /* !NETIF_F_GSO_GRE */
+#endif /* EFX_HAVE_SKB_ENCAPSULATION */
+
+#ifndef EFX_HAVE_INDIRECT_CALL_WRAPPERS
+#ifdef CONFIG_RETPOLINE
+
+/*
+ * INDIRECT_CALL_$NR - wrapper for indirect calls with $NR known builtins
+ * @f: function pointer
+ * @f$NR: builtin function names, up to $NR of them
+ * @__VA_ARGS__: arguments for @f
+ *
+ * Avoid retpoline overhead for known builtins, checking @f vs each of them
+ * and eventually invoking the builtin function directly. The functions are
+ * checked in the given order. Fall back to the indirect call.
+ */
+#define INDIRECT_CALL_1(f, f1, ...) \
+	({ \
+		likely(f == f1) ? f1(__VA_ARGS__) : f(__VA_ARGS__); \
+	})
+#define INDIRECT_CALL_2(f, f2, f1, ...) \
+	({ \
+		likely(f == f2) ? f2(__VA_ARGS__) : \
+				  INDIRECT_CALL_1(f, f1, __VA_ARGS__); \
+	})
+
+#else
+#define INDIRECT_CALL_1(f, f1, ...) f(__VA_ARGS__)
+#define INDIRECT_CALL_2(f, f2, f1, ...) f(__VA_ARGS__)
+#endif
+#endif /* EFX_HAVE_INDIRECT_CALL_WRAPPERS */
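For context, a minimal usage sketch of the INDIRECT_CALL wrappers above (illustrative only, not part of the patch; the handler names are hypothetical):

    static int my_ip_rcv(struct sk_buff *skb);   /* hypothetical known builtin */

    static int deliver(int (*handler)(struct sk_buff *), struct sk_buff *skb)
    {
            /* If handler == my_ip_rcv this compiles to a direct call under
             * CONFIG_RETPOLINE; otherwise it falls back to the indirect call. */
            return INDIRECT_CALL_1(handler, my_ip_rcv, skb);
    }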
+#ifndef EFX_HAVE_ETHTOOL_LINKSETTINGS
+/* We use an array of size 1 so that legacy code using index [0] will
+ * work with both this and a real link_mode_mask.
+ */
+#define __ETHTOOL_DECLARE_LINK_MODE_MASK(name) unsigned long name[1]
+#endif
+
+#ifdef EFX_HAVE_XDP_TX
+#ifndef EFX_HAVE_XDP_FRAME_API
+#define xdp_frame xdp_buff
+#endif
+#endif
+
+#if !defined(EFX_USE_NETDEV_STATS64)
+#define rtnl_link_stats64 net_device_stats
+#endif
+
+#ifdef EFX_NEED_MOD_DELAYED_WORK
+static inline bool mod_delayed_work(struct workqueue_struct *wq,
+				    struct delayed_work *dwork,
+				    unsigned long delay)
+{
+	cancel_delayed_work(dwork);
+	queue_delayed_work(wq, dwork, delay);
+	return true;
+}
+#endif
+
+#ifdef EFX_NEED_PCI_AER_CLEAR_NONFATAL_STATUS
+static inline int pci_aer_clear_nonfatal_status(struct pci_dev *dev)
+{
+	return pci_cleanup_aer_uncorrect_error_status(dev);
+}
+#endif
+
+#ifndef pci_info
+#define pci_info(pdev, fmt, arg...) dev_info(&(pdev)->dev, fmt, ##arg)
+#endif
+#ifndef pci_warn
+#define pci_warn(pdev, fmt, arg...) dev_warn(&(pdev)->dev, fmt, ##arg)
+#endif
+#ifndef pci_err
+#define pci_err(pdev, fmt, arg...) dev_err(&(pdev)->dev, fmt, ##arg)
+#endif
+#ifndef pci_dbg
+#define pci_dbg(pdev, fmt, arg...) dev_dbg(&(pdev)->dev, fmt, ##arg)
+#endif
+#ifndef pci_notice
+#define pci_notice(pdev, fmt, arg...) dev_notice(&(pdev)->dev, fmt, ##arg)
+#endif
+
+#if defined(EFX_HAVE_RHASHTABLE) && defined(EFX_HAVE_RHASHTABLE_LOOKUP_FAST)
+#ifdef EFX_NEED_RHASHTABLE_WALK_ENTER
+#include <linux/rhashtable.h>
+
+static inline void rhashtable_walk_enter(struct rhashtable *ht,
+					 struct rhashtable_iter *iter)
+{
+	int rc;
+
+	rc = rhashtable_walk_init(ht, iter
+#ifdef EFX_HAVE_RHASHTABLE_WALK_INIT_GFP
+				  , GFP_KERNEL
+#endif
+				  );
+	WARN_ON_ONCE(rc);
+}
+#endif
+#endif
+
+#ifdef EFX_NEED_STRSCPY
+#define strscpy(dest, src, count) strlcpy(dest, src, count)
+#endif
+
+#ifdef EFX_NEED_ARRAY_SIZE
+/**
+ * array_size() - Calculate size of 2-dimensional array.
+ *
+ * @a: dimension one
+ * @b: dimension two
+ *
+ * Calculates size of 2-dimensional array: @a * @b.
+ *
+ * Returns: number of bytes needed to represent the array.
+ */
+static inline __must_check size_t array_size(size_t a, size_t b)
+{
+	return a * b;
+}
+#else
+/* On RHEL7.6 nothing includes this yet */
+#include <linux/overflow.h>
+#endif
+
+#ifdef EFX_NEED_KREALLOC_ARRAY
+#ifndef SIZE_MAX
+#define SIZE_MAX (~(size_t)0)
+#endif
+
+static __must_check inline void *
+krealloc_array(void *p, size_t new_n, size_t new_size, gfp_t flags)
+{
+	size_t bytes = array_size(new_n, new_size);
+
+	return (bytes == SIZE_MAX ? NULL : krealloc(p, bytes, flags));
+}
+#endif
+
+#ifndef struct_group
+#define __struct_group(TAG, NAME, ATTRS, MEMBERS...) \
+	union { \
+		struct { MEMBERS } ATTRS; \
+		struct TAG { MEMBERS } ATTRS NAME; \
+	}
+#define struct_group(NAME, MEMBERS...) \
+	__struct_group(/* no tag */, NAME, /* no attrs */, MEMBERS)
+#endif
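As an aside, a small sketch of what the struct_group() fallback above provides (illustrative only; the struct and field names are hypothetical):

    /* 'addrs' can be copied as one unit while dst/src stay individually
     * addressable, e.g. memcpy(&copy, &hdr.addrs, sizeof(hdr.addrs)); */
    struct frame_hdr {
            struct_group(addrs,
                    u8 dst[ETH_ALEN];
                    u8 src[ETH_ALEN];
            );
            __be16 ethertype;
    };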
+#ifdef EFX_NEED_NETDEV_HOLD
+#ifdef EFX_HAVE_DEV_HOLD_TRACK
+/* Commit d62607c3fe45 ("net: rename reference+tracking helpers")
+ * renamed these.
+ */
+#define netdev_hold(_n, _t, _g) dev_hold_track(_n, _t, _g)
+#define netdev_put(_n, _t) dev_put_track(_n, _t)
+#else
+/* This was introduced in the same commit that adds dev_hold_track */
+typedef struct {} netdevice_tracker;
+
+static inline void netdev_hold(struct net_device *dev,
+			       netdevice_tracker *tracker __always_unused,
+			       gfp_t gfp __always_unused)
+{
+	dev_hold(dev);
+}
+
+static inline void netdev_put(struct net_device *dev,
+			      netdevice_tracker *tracker __always_unused)
+{
+	dev_put(dev);
+}
+#endif
+#endif
+
+#ifdef EFX_NEED_DEBUGFS_LOOKUP_AND_REMOVE
+#ifdef CONFIG_DEBUG_FS
+void debugfs_lookup_and_remove(const char *name, struct dentry *parent);
+#else
+static inline void debugfs_lookup_and_remove(const char *name,
+					     struct dentry *parent)
+{ }
+#endif
+#endif
+
+/**************************************************************************
+ *
+ * Missing functions provided by kernel_compat.c
+ *
+ **************************************************************************
+ *
+ */
+
+#if defined(EFX_NEED_SAVE_MSIX_MESSAGES)
+
+	#include <linux/pci.h>
+
+	int efx_pci_save_state(struct pci_dev *dev);
+	#define pci_save_state efx_pci_save_state
+
+	void efx_pci_restore_state(struct pci_dev *dev);
+	#define pci_restore_state efx_pci_restore_state
+
+#endif
+
+#if !defined(topology_sibling_cpumask) && defined(topology_thread_cpumask)
+	#define topology_sibling_cpumask topology_thread_cpumask
+#endif
+
+#ifdef EFX_NEED_CPUMASK_LOCAL_SPREAD
+#if NR_CPUS == 1
+static inline unsigned int cpumask_local_spread(unsigned int i, int node)
+{
+	return 0;
+}
+#else
+unsigned int cpumask_local_spread(unsigned int i, int node);
+#endif
+#endif
+
+/**************************************************************************
+ *
+ * Wrappers to fix bugs and parameter changes
+ *
+ **************************************************************************
+ *
+ */
+
+#ifdef EFX_NEED_WQ_SYSFS
+	#define WQ_SYSFS 0
+#endif
+#ifndef WQ_MEM_RECLAIM
+	#define WQ_MEM_RECLAIM 0
+#endif
+
+#ifdef EFX_HAVE_ALLOC_WORKQUEUE
+#ifndef EFX_HAVE_NEW_ALLOC_WORKQUEUE
+	#define efx_alloc_workqueue(_fmt, _flags, _max, _name) \
+		alloc_workqueue(_name, _flags, _max)
+#else
+	#define efx_alloc_workqueue(_fmt, _flags, _max, _name) \
+		alloc_workqueue(_fmt, _flags, _max, _name)
+#endif
+#else
+	#define efx_alloc_workqueue(_fmt, _flags, _max, _name) \
+		create_singlethread_workqueue(_name)
+#endif
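For reference, a hedged sketch (not from the patch; the names are hypothetical) of how the efx_alloc_workqueue() wrapper above is meant to be called:

    static struct workqueue_struct *reset_wq;

    static int example_init(void)
    {
            /* Maps to alloc_workqueue("example_%s", ...) on recent kernels,
             * alloc_workqueue("reset", ...) on pre-format-string kernels, and
             * create_singlethread_workqueue("reset") on the oldest ones. */
            reset_wq = efx_alloc_workqueue("example_%s", WQ_MEM_RECLAIM, 0,
                                           "reset");
            return reset_wq ? 0 : -ENOMEM;
    }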
+#if defined(EFX_USE_GRO) && defined(EFX_HAVE_NAPI_GRO_RECEIVE_GR)
+	/* Red Hat backports of functions returning gro_result_t */
+	#define napi_gro_frags napi_gro_frags_gr
+	#define napi_gro_receive napi_gro_receive_gr
+#elif defined(EFX_USE_GRO) && defined(EFX_NEED_GRO_RESULT_T)
+	typedef int gro_result_t;
+
+	#define napi_gro_frags(_napi) \
+		({ napi_gro_frags(_napi); \
+		   GRO_MERGED; })
+	#define napi_gro_receive(_napi, _skb) \
+		({ napi_gro_receive(_napi, _skb); \
+		   GRO_MERGED; })
+#endif
+#if defined(EFX_USE_GRO) && (defined(EFX_HAVE_NAPI_GRO_RECEIVE_GR) || defined(EFX_NEED_GRO_RESULT_T))
+	/* vlan_gro_{frags,receive} won't return gro_result_t in
+	 * either of the above cases.
+	 */
+	#define vlan_gro_frags(_napi, _group, _tag) \
+		({ vlan_gro_frags(_napi, _group, _tag); \
+		   GRO_MERGED; })
+	#define vlan_gro_receive(_napi, _group, _tag, _skb) \
+		({ vlan_gro_receive(_napi, _group, _tag, _skb); \
+		   GRO_MERGED; })
+#endif
+
+#ifndef list_first_entry_or_null
+	#define list_first_entry_or_null(ptr, type, member) \
+		(!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL)
+#endif
+
+#ifdef EFX_NEED_NS_TO_TIMESPEC
+	struct timespec ns_to_timespec(const s64 nsec);
+#endif
+
+#ifdef EFX_NEED_RTC_TIME64_TO_TM
+	#include <linux/rtc.h>
+	static inline void rtc_time64_to_tm(unsigned long time, struct rtc_time *tm)
+	{
+		rtc_time_to_tm(time, tm);
+	}
+#endif
+
+#ifdef EFX_NEED_KTIME_COMPARE
+	static inline int ktime_compare(const ktime_t cmp1, const ktime_t cmp2)
+	{
+		if (cmp1.tv64 < cmp2.tv64)
+			return -1;
+		if (cmp1.tv64 > cmp2.tv64)
+			return 1;
+		return 0;
+	}
+#endif
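A brief illustrative use (not part of the patch) of the ktime_compare() fallback above, which orders two ktime_t values the way memcmp() orders buffers:

    /* Hypothetical helper: true once 'now' has reached 'deadline'. */
    static bool deadline_reached(ktime_t now, ktime_t deadline)
    {
            return ktime_compare(now, deadline) >= 0;
    }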
+#include <linux/time.h>
+
+#ifndef EFX_HAVE_TIMESPEC64
+#ifdef EFX_NEED_SET_NORMALIZED_TIMESPEC
+	/* set_normalized_timespec() might be defined by the kernel
+	 * but not exported.  Define it under our own name.
+	 */
+	static inline void
+	efx_set_normalized_timespec(struct timespec *ts, time_t sec, long nsec)
+	{
+		while (nsec >= NSEC_PER_SEC) {
+			nsec -= NSEC_PER_SEC;
+			++sec;
+		}
+		while (nsec < 0) {
+			nsec += NSEC_PER_SEC;
+			--sec;
+		}
+		ts->tv_sec = sec;
+		ts->tv_nsec = nsec;
+	}
+	#define set_normalized_timespec efx_set_normalized_timespec
+#endif
+
+#ifdef EFX_NEED_SET_NORMALIZED_TIMESPEC
+	/* timespec_sub() may need to be redefined because of
+	 * set_normalized_timespec() not being exported.  Define it
+	 * under our own name.
+	 */
+	static inline struct timespec efx_timespec_sub(struct timespec lhs,
+						       struct timespec rhs)
+	{
+		struct timespec ts_delta;
+		set_normalized_timespec(&ts_delta, lhs.tv_sec - rhs.tv_sec,
+					lhs.tv_nsec - rhs.tv_nsec);
+		return ts_delta;
+	}
+	#define timespec_sub efx_timespec_sub
+#endif
+
+	#define timespec64 timespec
+	#define timespec64_compare timespec_compare
+	#define timespec64_add_ns timespec_add_ns
+	#define timespec64_sub timespec_sub
+	#define ns_to_timespec64 ns_to_timespec
+	#define timespec64_to_ns timespec_to_ns
+	#define ktime_to_timespec64 ktime_to_timespec
+	#define timespec64_to_ktime timespec_to_ktime
+	#define timespec_to_timespec64(t) (t)
+	#define timespec64_to_timespec(t) (t)
+#else
+/* RHEL 9.0 has commit cb47755725da ("time: Prevent undefined behaviour
+ * in timespec64_to_ns()") but not commit 39ff83f2f6cc ("time: Handle
+ * negative seconds correctly in timespec64_to_ns()") which fixes it.
+ * This pulls in that patch.
+ */
+#ifdef EFX_NEED_TIMESPEC64_TO_NS_SIGNED
+	#define KTIME_MIN (-KTIME_MAX - 1)
+	#define KTIME_SEC_MIN (KTIME_MIN / NSEC_PER_SEC)
+
+	static inline s64 efx_timespec64_to_ns(const struct timespec64 *ts)
+	{
+		/* Prevent multiplication overflow / underflow */
+		if (ts->tv_sec >= KTIME_SEC_MAX)
+			return KTIME_MAX;
+
+		if (ts->tv_sec <= KTIME_SEC_MIN)
+			return KTIME_MIN;
+
+		return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec;
+	}
+	#define timespec64_to_ns efx_timespec64_to_ns
+#endif
+#endif // EFX_HAVE_TIMESPEC64
+#ifdef EFX_NEED_KTIME_GET_REAL_TS64
+	static inline void efx_ktime_get_real_ts64(struct timespec64 *ts64)
+	{
+		struct timespec ts;
+		ktime_get_real_ts(&ts);
+		*ts64 = timespec_to_timespec64(ts);
+	}
+
+	#define ktime_get_real_ts64 efx_ktime_get_real_ts64
+#endif // EFX_NEED_KTIME_GET_REAL_TS64
+
+#if defined(CONFIG_X86) && NET_IP_ALIGN != 0
+	#undef NET_IP_ALIGN
+	#define NET_IP_ALIGN 0
+#endif
+
+#if defined(EFX_HAVE_OLD___VLAN_PUT_TAG)
+	static inline struct sk_buff *
+	efx___vlan_put_tag(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
+	{
+		WARN_ON(vlan_proto != htons(ETH_P_8021Q));
+		return __vlan_put_tag(skb, vlan_tci);
+	}
+	#define vlan_insert_tag_set_proto efx___vlan_put_tag
+#elif !defined(EFX_HAVE_VLAN_INSERT_TAG_SET_PROTO)
+	static inline struct sk_buff *
+	vlan_insert_tag_set_proto(struct sk_buff *skb, __be16 vlan_proto,
+				  u16 vlan_tci)
+	{
+		return __vlan_put_tag(skb, vlan_proto, vlan_tci);
+	}
+#endif
+
+#ifdef EFX_HAVE_ASM_SYSTEM_H
+#include <asm/system.h>
+#endif
+
+#include <linux/pps_kernel.h>
+
+#ifdef EFX_NEED_PPS_EVENT_TIME
+	struct pps_event_time {
+	#ifdef CONFIG_NTP_PPS
+		struct timespec ts_raw;
+	#endif /* CONFIG_NTP_PPS */
+		struct timespec ts_real;
+	};
+#endif
+
+#ifdef EFX_NEED_PPS_GET_TS
+#ifdef CONFIG_NTP_PPS
+	static inline void pps_get_ts(struct pps_event_time *ts)
+	{
+		getnstime_raw_and_real(&ts->ts_raw, &ts->ts_real);
+	}
+#else /* CONFIG_NTP_PPS */
+	static inline void pps_get_ts(struct pps_event_time *ts)
+	{
+		getnstimeofday(&ts->ts_real);
+	}
+#endif /* CONFIG_NTP_PPS */
+#endif
+
+#ifdef EFX_NEED_PPS_SUB_TS
+	static inline void pps_sub_ts(struct pps_event_time *ts, struct timespec delta)
+	{
+		ts->ts_real = timespec_sub(ts->ts_real, delta);
+	#ifdef CONFIG_NTP_PPS
+		ts->ts_raw = timespec_sub(ts->ts_raw, delta);
+	#endif
+	}
+#endif
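To show how these PPS fallbacks fit together, a small sketch (illustrative only; the propagation delay value is hypothetical):

    static void record_pps_event(struct timespec propagation_delay)
    {
            struct pps_event_time evt;

            pps_get_ts(&evt);                    /* capture the current time(s) */
            pps_sub_ts(&evt, propagation_delay); /* back-date by a known delay */
            /* evt.ts_real (and evt.ts_raw under CONFIG_NTP_PPS) now hold the
             * corrected event timestamp. */
    }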
+#ifdef EFX_NEED_KTIME_GET_SNAPSHOT
+/* Simplified structure for systems which don't have a kernel definition.
+ * We only need a couple of fields, and the layout doesn't matter for this
+ * usage. */
+struct system_time_snapshot {
+	ktime_t real;
+	ktime_t raw;
+};
+
+static inline void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot)
+{
+	struct timespec64 ts_real;
+	struct timespec64 ts_raw = {};
+
+#ifdef CONFIG_NTP_PPS
+	getnstime_raw_and_real(&ts_raw, &ts_real);
+#else
+	getnstimeofday(&ts_real);
+#endif
+
+	systime_snapshot->real = timespec64_to_ktime(ts_real);
+	systime_snapshot->raw = timespec64_to_ktime(ts_raw);
+}
+#else
+#include <linux/timekeeping.h>
+#endif
+
+#ifndef EFX_HAVE_PHC_SUPPORT
+	struct ptp_clock_time {
+		__s64 sec;
+		__u32 nsec;
+		__u32 reserved;
+	};
+
+	struct ptp_extts_request {
+		unsigned int index;
+		unsigned int flags;
+		unsigned int rsv[2];
+	};
+
+	struct ptp_perout_request {
+		struct ptp_clock_time start;
+		struct ptp_clock_time period;
+		unsigned int index;
+		unsigned int flags;
+		unsigned int rsv[4];
+	};
+
+	struct ptp_clock_request {
+		enum {
+			PTP_CLK_REQ_EXTTS,
+			PTP_CLK_REQ_PEROUT,
+			PTP_CLK_REQ_PPS,
+		} type;
+		union {
+			struct ptp_extts_request extts;
+			struct ptp_perout_request perout;
+		};
+	};
+
+	struct ptp_clock_info {
+	};
+#else
+#include <linux/ptp_clock_kernel.h>
+#endif
+
+#ifdef EFX_NEED_PTP_CLOCK_PPSUSR
+#define PTP_CLOCK_PPSUSR (PTP_CLOCK_PPS + 1)
+#endif
+
+#ifdef EFX_NEED_SCALED_PPM_TO_PPB
+/**
+ * scaled_ppm_to_ppb() - convert scaled ppm to ppb
+ *
+ * @ppm: Parts per million, but with a 16 bit binary fractional field
+ */
+static inline long scaled_ppm_to_ppb(long ppm)
+{
+	/*
+	 * The 'freq' field in the 'struct timex' is in parts per
+	 * million, but with a 16 bit binary fractional field.
+	 *
+	 * We want to calculate
+	 *
+	 *    ppb = scaled_ppm * 1000 / 2^16
+	 *
+	 * which simplifies to
+	 *
+	 *    ppb = scaled_ppm * 125 / 2^13
+	 */
+	s64 ppb = 1 + ppm;
+
+	ppb *= 125;
+	ppb >>= 13;
+	return (long)ppb;
+}
+#endif
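A worked example (illustrative only) of the conversion performed by scaled_ppm_to_ppb() above; scaled_ppm is 16.16 fixed point, so 65536 represents exactly 1 ppm:

    /* (1 + 65536) * 125 >> 13 == 1000, i.e. 1 ppm == 1000 ppb */
    long ppb = scaled_ppm_to_ppb(65536);

A PHC driver's frequency-adjustment callback would typically pass its scaled_ppm argument through this helper before programming the hardware.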
+#ifdef EFX_NEED_RCU_ACCESS_POINTER
+#define rcu_access_pointer rcu_dereference
+#endif
+
+#ifndef EFX_HAVE_PCI_VFS_ASSIGNED
+int pci_vfs_assigned(struct pci_dev *dev);
+#endif
+
+#ifdef EFX_NEED_PCI_ENABLE_MSIX_RANGE
+/**
+ * pci_enable_msix_range - configure device's MSI-X capability structure
+ * @dev: pointer to the pci_dev data structure of MSI-X device function
+ * @entries: pointer to an array of MSI-X entries
+ * @minvec: minimum number of MSI-X irqs requested
+ * @maxvec: maximum number of MSI-X irqs requested
+ *
+ * Set up the MSI-X capability structure of the device function with a
+ * number of interrupts in the range between @minvec and @maxvec when the
+ * driver requests MSI-X mode to be enabled on its hardware device function.
+ * It returns a negative errno if an error occurs.
+ * If it succeeds, it returns the actual number of interrupts allocated and
+ * indicates the successful configuration of the MSI-X capability structure
+ * with the newly allocated MSI-X interrupts.
+ *
+ * NOTE: This is implemented inline here since it is also used by onload.
+ **/
+static inline int pci_enable_msix_range(struct pci_dev *dev,
+					struct msix_entry *entries,
+					int minvec, int maxvec)
+{
+	int nvec = maxvec;
+	int rc;
+
+	if (maxvec < minvec)
+		return -ERANGE;
+
+	do {
+		rc = pci_enable_msix(dev, entries, nvec);
+		if (rc < 0) {
+			return rc;
+		} else if (rc > 0) {
+			if (rc < minvec)
+				return -ENOSPC;
+			nvec = rc;
+		}
+	} while (rc);
+
+	return nvec;
+}
+#endif
+#ifdef EFX_NEED_PCI_MSIX_VEC_COUNT
+int pci_msix_vec_count(struct pci_dev *dev);
+#endif
+
+#ifdef EFX_NEED_KMALLOC_ARRAY
+#define kmalloc_array(n,s,f) kcalloc(n,s,f)
+#endif
+
+/* 3.19 renamed netdev_phys_port_id to netdev_phys_item_id */
+#ifndef MAX_PHYS_ITEM_ID_LEN
+#define MAX_PHYS_ITEM_ID_LEN MAX_PHYS_PORT_ID_LEN
+#define netdev_phys_item_id netdev_phys_port_id
+#endif
+
+#ifdef ETH_RSS_HASH_TOP
+#define EFX_HAVE_CONFIGURABLE_RSS_HASH
+#else
+#define ETH_RSS_HASH_NO_CHANGE 0
+#define ETH_RSS_HASH_TOP 1
+#endif
+
+/* Some functions appear either in net_device_ops or in net_device_ops_ext.
+ * The latter is used in RHEL for backported features. To simplify
+ * conditionals elsewhere, we merge them here.
+ */
+#if defined(EFX_HAVE_NDO_GET_PHYS_PORT_ID) || defined(EFX_HAVE_NET_DEVICE_OPS_EXT_GET_PHYS_PORT_ID)
+#define EFX_NEED_GET_PHYS_PORT_ID
+#endif
+
+#ifdef EFX_NEED_SKB_GSO_TCPV6
+#define SKB_GSO_TCPV6 0
+#endif
+
+#ifdef EFX_NEED_SKB_IS_GSO_TCP
+static inline bool skb_is_gso_tcp(const struct sk_buff *skb)
+{
+	return skb_is_gso(skb) &&
+	       skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6);
+}
+#endif
+
+#ifdef EFX_NEED_SET_TSO_MAX_SIZE
+static inline void netif_set_tso_max_size(struct net_device *dev,
+					  unsigned int size)
+{
+	/* dev->gso_max_size is read locklessly from sk_setup_caps() */
+	WRITE_ONCE(dev->gso_max_size, size);
+}
+#endif
+
+#ifdef EFX_NEED_SET_TSO_MAX_SEGS
+static inline void netif_set_tso_max_segs(struct net_device *dev,
+					  unsigned int segs)
+{
+#ifdef EFX_HAVE_GSO_MAX_SEGS
+	/* dev->gso_max_segs is read locklessly from sk_setup_caps() */
+	WRITE_ONCE(dev->gso_max_segs, segs);
+#endif
+}
+#endif
+
+#ifdef EFX_NEED_IS_ERR_OR_NULL
+static inline bool __must_check IS_ERR_OR_NULL(__force const void *ptr)
+{
+	return !ptr || IS_ERR_VALUE((unsigned long)ptr);
+}
+#endif
+
+#ifdef EFX_NEED_NETDEV_RSS_KEY_FILL
+#define netdev_rss_key_fill get_random_bytes
+#endif
+
+#ifndef smp_mb__before_atomic
+#ifdef CONFIG_X86
+/* As in arch/x86/include/asm/barrier.h */
+#define smp_mb__before_atomic() barrier()
+#else
+/* As in include/asm-generic/barrier.h and arch/powerpc/include/asm/barrier.h */
+#define smp_mb__before_atomic() smp_mb()
+#endif
+#endif
+
+#ifndef NUMA_NO_NODE
+#define NUMA_NO_NODE (-1)
+#endif
+
+#if !defined(CONFIG_HAVE_MEMORYLESS_NODES) && !defined(cpu_to_mem)
+#define cpu_to_mem(cpu) cpu_to_node(cpu)
+#endif
+
+/* As in include/linux/kconfig.h */
+#ifndef __take_second_arg
+#define __take_second_arg(__ignored, val, ...) val
+#endif
+#ifndef __and
+#define __ARG_PLACEHOLDER_1 0,
+
+#define __and(x, y) ___and(x, y)
+#define ___and(x, y) ____and(__ARG_PLACEHOLDER_##x, y)
+#define ____and(arg1_or_junk, y) __take_second_arg(arg1_or_junk y, 0)
+
+#define __or(x, y) ___or(x, y)
+#define ___or(x, y) ____or(__ARG_PLACEHOLDER_##x, y)
+#define ____or(arg1_or_junk, y) __take_second_arg(arg1_or_junk 1, y)
+#endif
+
+#if !defined(IS_ENABLED) || LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0)
+#undef IS_ENABLED
+#undef IS_BUILTIN
+#undef IS_MODULE
+#undef IS_REACHABLE
+
+#define __is_defined(x) ___is_defined(x)
+#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
+#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
+
+#define IS_BUILTIN(option) __is_defined(option)
+#define IS_MODULE(option) __is_defined(option##_MODULE)
+#define IS_REACHABLE(option) __or(IS_BUILTIN(option), \
+				__and(IS_MODULE(option), __is_defined(MODULE)))
+#define IS_ENABLED(option) __or(IS_BUILTIN(option), IS_MODULE(option))
+#endif
+
+#ifndef EFX_HAVE_NETIF_XMIT_STOPPED
+#define netif_xmit_stopped netif_tx_queue_stopped
+#endif
+
+#ifdef EFX_HAVE_HW_ENC_FEATURES
+#ifdef EFX_NEED_SKB_INNER_TRANSPORT_OFFSET
+static inline int skb_inner_transport_offset(const struct sk_buff *skb)
+{
+	return skb_inner_transport_header(skb) - skb->data;
+}
+#endif
+#endif
+
+#ifndef QSTR_INIT
+#define QSTR_INIT(n,l) { .len = l, .name = n }
+#endif
+
+#ifdef EFX_HAVE_NETDEV_REGISTER_RH
+/* The _rh versions of these appear in RHEL7.3.
+ * Wrap them to make the calling code simpler.
+ */ +static inline int efx_register_netdevice_notifier(struct notifier_block *b) +{ + return register_netdevice_notifier_rh(b); +} + +static inline int efx_unregister_netdevice_notifier(struct notifier_block *b) +{ + return unregister_netdevice_notifier_rh(b); +} + +#define register_netdevice_notifier efx_register_netdevice_notifier +#define unregister_netdevice_notifier efx_unregister_netdevice_notifier +#endif + +#ifdef EFX_NEED_NETIF_NAPI_ADD_WEIGHT +static inline void netif_napi_add_weight(struct net_device *dev, + struct napi_struct *napi, + int (*poll)(struct napi_struct *, int), + int weight) +{ + netif_napi_add(dev, napi, poll, weight); +} +#endif + +#ifdef EFX_HAVE_NAPI_HASH_ADD +/* napi_hash_add appeared in 3.11 and is no longer exported as of 4.10. + * + * Although newer versions of netif_napi_add call napi_hash_add we can still + * call napi_hash_add here regardless, since there is a state bit to avoid + * double adds. + */ +static inline void efx_netif_napi_add(struct net_device *dev, + struct napi_struct *napi, + int (*poll)(struct napi_struct *, int), + int weight) +{ + netif_napi_add(dev, napi, poll, weight); + napi_hash_add(napi); +} +#ifdef netif_napi_add +/* RHEL7.3 defines netif_napi_add as _netif_napi_add for KABI compat. */ +#define _netif_napi_add efx_netif_napi_add +#else +#define netif_napi_add efx_netif_napi_add +#endif + +/* napi_hash_del still exists as of 4.10, even though napi_hash_add has gone. + * This is because it returns whether an RCU grace period is needed, allowing + * drivers to coalesce them. We don't do this. + * + * Although newer versions of netif_napi_del() call napi_hash_del() already + * this is safe - it uses a state bit to determine if it needs deleting. + */ +static inline void efx_netif_napi_del(struct napi_struct *napi) +{ + might_sleep(); +#ifndef EFX_HAVE_NAPI_HASH_DEL_RETURN + napi_hash_del(napi); + if (1) /* Always call synchronize_net */ +#else + if (napi_hash_del(napi)) +#endif + synchronize_net(); + netif_napi_del(napi); +} +#define netif_napi_del efx_netif_napi_del +#endif + +#ifndef tcp_flag_byte +#define tcp_flag_byte(th) (((u_int8_t *)th)[13]) +#endif + +#ifndef TCPHDR_FIN +#define TCPHDR_FIN 0x01 +#define TCPHDR_SYN 0x02 +#define TCPHDR_RST 0x04 +#define TCPHDR_PSH 0x08 +#define TCPHDR_ACK 0x10 +#define TCPHDR_URG 0x20 +#define TCPHDR_ECE 0x40 +#define TCPHDR_CWR 0x80 +#endif + +#if defined(EFX_HAVE_NDO_BUSY_POLL) || defined(EFX_HAVE_NDO_EXT_BUSY_POLL) +/* Combined define for driver-based busy poll. Later kernels (4.11+) implement + * busy polling in the core. 
+ */ +#define EFX_WANT_DRIVER_BUSY_POLL +#endif + +#if defined(EFX_NEED_BOOL_NAPI_COMPLETE_DONE) +static inline bool efx_napi_complete_done(struct napi_struct *napi, + int spent __always_unused) +{ + napi_complete(napi); + return true; +} +#define napi_complete_done efx_napi_complete_done +#endif + +#if defined(EFX_NEED_HWMON_DEVICE_REGISTER_WITH_INFO) +struct hwmon_chip_info; +struct attribute_group; + +enum hwmon_sensor_types { + hwmon_chip, + hwmon_temp, + hwmon_in, + hwmon_curr, + hwmon_power, + hwmon_energy, + hwmon_humidity, + hwmon_fan, + hwmon_pwm, +}; + +struct device *hwmon_device_register_with_info( + struct device *dev, + const char *name __always_unused, + void *drvdata __always_unused, + const struct hwmon_chip_info *info __always_unused, + const struct attribute_group **extra_groups __always_unused); +#else +#if defined(EFX_NEED_HWMON_T_ALARM) +#define HWMON_T_ALARM BIT(hwmon_temp_alarm) +#endif +#endif +/* For RHEL 6 and 7 the above takes care of hwmon_device_register_with_info(), + * but they are missing the read_string() API in struct hwmon_ops. + */ +#if defined(HWMON_T_MIN) && (defined(EFX_HAVE_HWMON_READ_STRING) || \ + defined(EFX_HAVE_HWMON_READ_STRING_CONST)) +#define EFX_HAVE_HWMON_DEVICE_REGISTER_WITH_INFO +#endif + +#if defined(EFX_HAVE_XDP_EXT) +/* RHEL 7 adds the XDP related NDOs in the net_device_ops_extended area. + * The XDP interfaces in RHEL 7 are merely stubs intended to make Red Hat's + * backporting easier, and XDP is not functional. So we can just not build + * XDP in this case. + * It's unlikely that anybody else will have a net_device_ops_extended in + * this way. + */ +#undef EFX_HAVE_XDP +#undef EFX_HAVE_XDP_HEAD +#undef EFX_HAVE_XDP_OLD +#undef EFX_HAVE_XDP_REDIR +#undef EFX_HAVE_XDP_TX +#endif + +#ifdef EFX_HAVE_XDP_TX +#ifndef EFX_HAVE_XDP_FRAME_API +#define xdp_frame xdp_buff +#endif +#endif + +#if defined(EFX_HAVE_XDP_OLD) +/* ndo_xdp and netdev_xdp were renamed in 4.15 */ +#define ndo_bpf ndo_xdp +#define netdev_bpf netdev_xdp +#define EFX_HAVE_XDP +#endif + +#if defined(EFX_HAVE_XDP) && !defined(EFX_HAVE_XDP_TRACE) +#define trace_xdp_exception(dev, prog, act) +#endif + +#ifndef EFX_HAVE_XDP_RXQ_INFO_NAPI_ID +#define xdp_rxq_info_reg(_i,_d,_q,_n) xdp_rxq_info_reg(_i,_d,_q) +#endif + +#if defined(EFX_HAVE_XDP) && defined(EFX_NEED_XDP_INIT_BUFF) +static __always_inline void +xdp_init_buff(struct xdp_buff *xdp, u32 frame_sz +#ifdef EFX_HAVE_XDP_RXQ_INFO + , struct xdp_rxq_info *rxq +#endif + ) +{ +#ifdef EFX_HAVE_XDP_FRAME_SZ + xdp->frame_sz = frame_sz; +#endif +#ifdef EFX_HAVE_XDP_RXQ_INFO + xdp->rxq = rxq; +#endif +} +#endif + +#if defined(EFX_HAVE_XDP) && defined(EFX_NEED_XDP_PREPARE_BUFF) +static __always_inline void +xdp_prepare_buff(struct xdp_buff *xdp, unsigned char *hard_start, + int headroom, int data_len, const bool meta_valid) +{ + unsigned char *data = hard_start + headroom; + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_HEAD) + xdp->data_hard_start = hard_start; +#endif + xdp->data = data; + xdp->data_end = data + data_len; +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_DATA_META) + xdp->data_meta = meta_valid ? 
data : data + 1;
+#endif
+}
+#endif
+
+#ifdef EFX_NEED_VOID_SKB_PUT
+static inline void *efx_skb_put(struct sk_buff *skb, unsigned int len)
+{
+	return skb_put(skb, len);
+}
+#define skb_put efx_skb_put
+#endif
+
+#if defined(EFX_NEED_PAGE_FRAG_FREE)
+#if defined(EFX_HAVE_FREE_PAGE_FRAG)
+/* Renamed in v4.10 */
+#define page_frag_free __free_page_frag
+#else
+static inline void page_frag_free(void *p)
+{
+	put_page(virt_to_head_page(p));
+}
+#endif
+#endif
+
+#ifndef BIT_ULL
+#define BIT_ULL(nr) (1ULL << (nr))
+#endif
+
+#ifdef EFX_NEED_PCI_DEV_TO_EEH_DEV
+#define pci_dev_to_eeh_dev(pci_dev) \
+	of_node_to_eeh_dev(pci_device_to_OF_node((pci_dev)))
+#endif
+
+#ifndef USER_TICK_USEC
+#define USER_TICK_USEC TICK_USEC
+#endif
+
+#ifdef EFX_HAVE_NETIF_SET_XPS_QUEUE_NON_CONST
+static inline int efx_netif_set_xps_queue(struct net_device *netdev,
+					  const struct cpumask *mask, u16 index)
+{
+	/* De-constify the mask */
+	return netif_set_xps_queue(netdev, (struct cpumask *)mask, index);
+}
+#define netif_set_xps_queue efx_netif_set_xps_queue
+#endif
+
+#ifdef EFX_HAVE_OLD_DEV_OPEN
+static inline int efx_dev_open(struct net_device *netdev,
+			       void *unused __always_unused)
+{
+	return dev_open(netdev);
+}
+#define dev_open efx_dev_open
+#endif
+
+#ifdef EFX_NEED_CONSUME_SKB_ANY
+#define dev_consume_skb_any(skb) dev_kfree_skb_any(skb)
+#endif
+
+#ifdef EFX_NEED_SKB_CHECKSUM_START_OFFSET
+static inline int skb_checksum_start_offset(const struct sk_buff *skb)
+{
+	return skb->csum_start - skb_headroom(skb);
+}
+#endif
+
+#ifdef EFX_NEED_CSUM16_SUB
+static inline __sum16 csum16_add(__sum16 csum, __be16 addend)
+{
+	u16 res = (__force u16)csum;
+
+	res += (__force u16)addend;
+	return (__force __sum16)(res + (res < (__force u16)addend));
+}
+
+static inline __sum16 csum16_sub(__sum16 csum, __be16 addend)
+{
+	return csum16_add(csum, ~addend);
+}
+#endif
+
+#ifdef EFX_NEED_CSUM_REPLACE_BY_DIFF
+static inline void csum_replace_by_diff(__sum16 *sum, __wsum diff)
+{
+	*sum = csum_fold(csum_add(diff, ~csum_unfold(*sum)));
+}
+#endif
+
+#ifndef EFX_HAVE_STRUCT_SIZE
+/* The upstream version of this checks for overflow, but that's too much
+ * machinery to replicate for older kernels.
+ */
+#define struct_size(p, member, n) (sizeof(*(p)) + (n) * sizeof(*(p)->member))
+#endif
+
+/* Several kernel features are needed for TC offload to work. Only enable it
+ * if we have all of them.
+ */
+#ifdef EFX_HAVE_NEW_NDO_SETUP_TC
+#if defined(EFX_HAVE_TC_BLOCK_OFFLOAD) || defined(EFX_HAVE_FLOW_BLOCK_OFFLOAD)
+#if !defined(EFX_HAVE_FLOW_INDR_DEV_REGISTER) || defined(EFX_HAVE_FLOW_INDR_BLOCK_CB_ALLOC)
+#if defined(CONFIG_NET_CLS_ACT) || defined(EFX_HAVE_TC_FLOW_OFFLOAD)
+#define EFX_TC_OFFLOAD yes
+/* Further features needed for conntrack offload */
+#if defined(EFX_HAVE_NF_FLOW_TABLE_OFFLOAD) && defined(EFX_HAVE_TC_ACT_CT)
+#define EFX_CONNTRACK_OFFLOAD yes
+#endif
+#endif
+#endif
+#endif
+#endif
+
+/* We only need netif_is_{vxlan,geneve} & flow_rule_match_cvlan if we have
+ * TC offload support
+ */
+#ifdef EFX_TC_OFFLOAD
+#ifndef EFX_HAVE_FLOW_BLOCK_OFFLOAD
+/* Old names of structs... */
+#define flow_block_offload tc_block_offload
+#define flow_block tcf_block
+#define flow_cls_offload tc_cls_flower_offload
+/* ... accessors... */
+#define flow_cls_offload_flow_rule tc_cls_flower_offload_flow_rule
+/* ... and enumerators */
+#define FLOW_CLS_REPLACE TC_CLSFLOWER_REPLACE
+#define FLOW_CLS_DESTROY TC_CLSFLOWER_DESTROY
+#define FLOW_CLS_STATS TC_CLSFLOWER_STATS
+#endif
+#ifndef EFX_HAVE_TC_CAN_EXTACK
+#include <net/pkt_cls.h>
+static inline bool tc_can_offload_extack(const struct net_device *dev,
+					 struct netlink_ext_ack *extack)
+{
+	bool can = tc_can_offload(dev);
+
+	if (!can)
+		NL_SET_ERR_MSG(extack, "TC offload is disabled on net device");
+
+	return can;
+}
+#endif
+#ifdef EFX_HAVE_TC_INDR_BLOCK_CB_REGISTER
+#define __flow_indr_block_cb_register __tc_indr_block_cb_register
+#define __flow_indr_block_cb_unregister __tc_indr_block_cb_unregister
+#define EFX_HAVE_FLOW_INDR_BLOCK_CB_REGISTER yes
+#endif
+#ifndef EFX_HAVE_NETIF_IS_VXLAN
+static inline bool netif_is_vxlan(const struct net_device *dev)
+{
+	return dev->rtnl_link_ops &&
+	       !strcmp(dev->rtnl_link_ops->kind, "vxlan");
+}
+#endif
+#ifndef EFX_HAVE_NETIF_IS_GENEVE
+static inline bool netif_is_geneve(const struct net_device *dev)
+{
+	return dev->rtnl_link_ops &&
+	       !strcmp(dev->rtnl_link_ops->kind, "geneve");
+}
+#endif
+#ifdef EFX_NEED_IDA_ALLOC_RANGE
+#define ida_alloc_range ida_simple_get
+#define ida_free ida_simple_remove
+#endif
+#ifdef EFX_HAVE_TC_FLOW_OFFLOAD
+#ifdef EFX_NEED_FLOW_RULE_MATCH_CVLAN
+#include <net/flow_offload.h>
+static inline void flow_rule_match_cvlan(const struct flow_rule *rule,
+					 struct flow_match_vlan *out)
+{
+	const struct flow_match *m = &rule->match;
+	struct flow_dissector *d = m->dissector;
+
+	out->key = skb_flow_dissector_target(d, FLOW_DISSECTOR_KEY_CVLAN, m->key);
+	out->mask = skb_flow_dissector_target(d, FLOW_DISSECTOR_KEY_CVLAN, m->mask);
+}
+#endif
+#if defined(EFX_NEED_FLOW_RULE_MATCH_CT) && defined(EFX_CONNTRACK_OFFLOAD)
+#include <net/flow_offload.h>
+struct flow_match_ct {
+	struct flow_dissector_key_ct *key, *mask;
+};
+static inline void flow_rule_match_ct(const struct flow_rule *rule,
+				      struct flow_match_ct *out)
+{
+	const struct flow_match *m = &rule->match;
+	struct flow_dissector *d = m->dissector;
+
+	out->key = skb_flow_dissector_target(d, FLOW_DISSECTOR_KEY_CT, m->key);
+	out->mask = skb_flow_dissector_target(d, FLOW_DISSECTOR_KEY_CT, m->mask);
+}
+#endif
+#else /* EFX_HAVE_TC_FLOW_OFFLOAD */
+#include <net/flow_dissector.h>
+#include <net/pkt_cls.h>
+
+struct flow_match {
+	struct flow_dissector *dissector;
+	void *mask;
+	void *key;
+};
+
+struct flow_match_basic {
+	struct flow_dissector_key_basic *key, *mask;
+};
+
+struct flow_match_control {
+	struct flow_dissector_key_control *key, *mask;
+};
+
+struct flow_match_eth_addrs {
+	struct flow_dissector_key_eth_addrs *key, *mask;
+};
+
+struct flow_match_vlan {
+	struct flow_dissector_key_vlan *key, *mask;
+};
+
+struct flow_match_ipv4_addrs {
+	struct flow_dissector_key_ipv4_addrs *key, *mask;
+};
+
+struct flow_match_ipv6_addrs {
+	struct flow_dissector_key_ipv6_addrs *key, *mask;
+};
+
+struct flow_match_ip {
+	struct flow_dissector_key_ip *key, *mask;
+};
+
+struct flow_match_ports {
+	struct flow_dissector_key_ports *key, *mask;
+};
+
+struct flow_match_icmp {
+	struct flow_dissector_key_icmp *key, *mask;
+};
+
+struct flow_match_tcp {
+	struct flow_dissector_key_tcp *key, *mask;
+};
+
+struct flow_match_mpls {
+	struct flow_dissector_key_mpls *key, *mask;
+};
+
+struct flow_match_enc_keyid {
+	struct flow_dissector_key_keyid *key, *mask;
+};
+
+struct flow_rule;
+
+void flow_rule_match_basic(const struct flow_rule *rule,
+			   struct flow_match_basic *out);
+void flow_rule_match_control(const struct flow_rule *rule,
+			     struct flow_match_control *out);
+void flow_rule_match_eth_addrs(const struct flow_rule *rule,
struct flow_match_eth_addrs *out); +void flow_rule_match_vlan(const struct flow_rule *rule, + struct flow_match_vlan *out); +#ifdef EFX_HAVE_FLOW_DISSECTOR_KEY_CVLAN +void flow_rule_match_cvlan(const struct flow_rule *rule, + struct flow_match_vlan *out); +#endif +void flow_rule_match_ipv4_addrs(const struct flow_rule *rule, + struct flow_match_ipv4_addrs *out); +void flow_rule_match_ipv6_addrs(const struct flow_rule *rule, + struct flow_match_ipv6_addrs *out); +void flow_rule_match_ip(const struct flow_rule *rule, + struct flow_match_ip *out); +void flow_rule_match_ports(const struct flow_rule *rule, + struct flow_match_ports *out); +void flow_rule_match_tcp(const struct flow_rule *rule, + struct flow_match_tcp *out); +void flow_rule_match_icmp(const struct flow_rule *rule, + struct flow_match_icmp *out); +void flow_rule_match_mpls(const struct flow_rule *rule, + struct flow_match_mpls *out); +void flow_rule_match_enc_control(const struct flow_rule *rule, + struct flow_match_control *out); +void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule, + struct flow_match_ipv4_addrs *out); +void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule, + struct flow_match_ipv6_addrs *out); +#ifdef EFX_HAVE_FLOW_DISSECTOR_KEY_ENC_IP +void flow_rule_match_enc_ip(const struct flow_rule *rule, + struct flow_match_ip *out); +#endif +void flow_rule_match_enc_ports(const struct flow_rule *rule, + struct flow_match_ports *out); +void flow_rule_match_enc_keyid(const struct flow_rule *rule, + struct flow_match_enc_keyid *out); + +enum flow_action_id { + FLOW_ACTION_ACCEPT = 0, + FLOW_ACTION_DROP, + FLOW_ACTION_TRAP, + FLOW_ACTION_GOTO, + FLOW_ACTION_REDIRECT, + FLOW_ACTION_MIRRED, + FLOW_ACTION_VLAN_PUSH, + FLOW_ACTION_VLAN_POP, + FLOW_ACTION_VLAN_MANGLE, + FLOW_ACTION_TUNNEL_ENCAP, + FLOW_ACTION_TUNNEL_DECAP, + FLOW_ACTION_MANGLE, + FLOW_ACTION_ADD, + FLOW_ACTION_CSUM, + FLOW_ACTION_MARK, + FLOW_ACTION_WAKE, + FLOW_ACTION_QUEUE, + FLOW_ACTION_SAMPLE, + FLOW_ACTION_POLICE, +}; + +/* This is mirroring enum pedit_header_type definition for easy mapping between + * tc pedit action. Legacy TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK is mapped to + * FLOW_ACT_MANGLE_UNSPEC, which is supported by no driver. 
+ */ +enum flow_action_mangle_base { + FLOW_ACT_MANGLE_UNSPEC = 0, + FLOW_ACT_MANGLE_HDR_TYPE_ETH, + FLOW_ACT_MANGLE_HDR_TYPE_IP4, + FLOW_ACT_MANGLE_HDR_TYPE_IP6, + FLOW_ACT_MANGLE_HDR_TYPE_TCP, + FLOW_ACT_MANGLE_HDR_TYPE_UDP, +}; + +struct flow_action_entry { + enum flow_action_id id; + union { + u32 chain_index; /* FLOW_ACTION_GOTO */ + struct net_device *dev; /* FLOW_ACTION_REDIRECT */ + struct { /* FLOW_ACTION_VLAN */ + u16 vid; + __be16 proto; + u8 prio; + } vlan; + struct { /* FLOW_ACTION_PACKET_EDIT */ + enum flow_action_mangle_base htype; + u32 offset; + u32 mask; + u32 val; + } mangle; + const struct ip_tunnel_info *tunnel; /* FLOW_ACTION_TUNNEL_ENCAP */ + u32 csum_flags; /* FLOW_ACTION_CSUM */ + u32 mark; /* FLOW_ACTION_MARK */ + struct { /* FLOW_ACTION_QUEUE */ + u32 ctx; + u32 index; + u8 vf; + } queue; + struct { /* FLOW_ACTION_SAMPLE */ + struct psample_group *psample_group; + u32 rate; + u32 trunc_size; + bool truncate; + } sample; + struct { /* FLOW_ACTION_POLICE */ + s64 burst; + u64 rate_bytes_ps; + } police; + }; +}; + +struct flow_action { + unsigned int num_entries; + struct flow_action_entry entries[0]; +}; + +#define flow_action_for_each(__i, __act, __actions) \ + for (__i = 0, __act = &(__actions)->entries[0]; __i < (__actions)->num_entries; __act = &(__actions)->entries[++__i]) + +struct flow_rule { + struct flow_match match; + struct flow_action action; +}; + +static inline bool flow_rule_match_key(const struct flow_rule *rule, + enum flow_dissector_key_id key) +{ + return dissector_uses_key(rule->match.dissector, key); +} + +struct flow_rule *efx_compat_flow_rule_build(struct tc_cls_flower_offload *tc); +#endif /* EFX_HAVE_TC_FLOW_OFFLOAD */ +#endif /* EFX_TC_OFFLOAD */ + +#ifndef EFX_HAVE_NETDEV_XMIT_MORE +#ifdef EFX_HAVE_SKB_XMIT_MORE +/* This relies on places that use netdev_xmit_more having an SKB structure + * called skb. + */ +#define netdev_xmit_more() (skb->xmit_more) +#else +#define netdev_xmit_more() (0) +#endif +#endif + +#ifndef EFX_HAVE_RECEIVE_SKB_LIST +#ifdef EFX_HAVE_SKB__LIST +#ifdef EFX_NEED_SKB_LIST_DEL_INIT +static inline void skb_list_del_init(struct sk_buff *skb) +{ + __list_del_entry(&skb->list); + skb->next = NULL; +} +#endif + +static inline void netif_receive_skb_list(struct list_head *head) +{ + struct sk_buff *skb, *next; + + list_for_each_entry_safe(skb, next, head, list) { + skb_list_del_init(skb); + netif_receive_skb(skb); + } +} +#else +static inline void netif_receive_skb_list(struct sk_buff_head *head) +{ + struct sk_buff *skb; + + while ((skb = __skb_dequeue(head))) + netif_receive_skb(skb); +} +#endif +#endif + +#ifdef EFX_NEED_SKB_MARK_NOT_ON_LIST +static inline void skb_mark_not_on_list(struct sk_buff *skb) +{ + skb->next = NULL; +} +#endif + +#ifndef skb_list_walk_safe +/* Iterate through singly-linked GSO fragments of an skb. */ +#define skb_list_walk_safe(first, skb, next_skb) \ + for ((skb) = (first), (next_skb) = (skb) ? (skb)->next : NULL; (skb); \ + (skb) = (next_skb), (next_skb) = (skb) ? 
(skb)->next : NULL)
+#endif
+
+#if !defined(EFX_HAVE_PCI_FIND_NEXT_EXT_CAPABILITY)
+int pci_find_next_ext_capability(struct pci_dev *dev, int pos, int cap);
+#endif
+
+/* XDP_SOCK check on latest kernels */
+#if defined(EFX_HAVE_XSK_POOL)
+#define EFX_HAVE_XDP_SOCK_DRV yes
+#define EFX_HAVE_XSK_NEED_WAKEUP yes
+#endif
+#if defined(EFX_HAVE_XDP_SOCK_DRV)
+#undef EFX_HAVE_XDP_SOCK_DRV
+#define EFX_USE_XSK_BUFFER_ALLOC yes
+#ifndef EFX_HAVE_XDP_SOCK
+#define EFX_HAVE_XDP_SOCK
+#endif
+#ifndef EFX_HAVE_XSK_UMEM_CONS_TX_2PARAM
+#define EFX_HAVE_XSK_UMEM_CONS_TX_2PARAM yes
+#endif
+#include <net/xdp_sock_drv.h>
+#elif defined(EFX_HAVE_XDP_SOCK)
+#include <net/xdp_sock.h>
+#endif /* EFX_HAVE_XDP_SOCK_DRV */
+
+#if !defined(EFX_HAVE_XDP_HEAD) && !defined(XDP_PACKET_HEADROOM)
+#define XDP_PACKET_HEADROOM 0
+#endif
+
+#if defined(VIRTIO_F_IOMMU_PLATFORM) && !defined(VIRTIO_F_ACCESS_PLATFORM)
+#define VIRTIO_F_ACCESS_PLATFORM VIRTIO_F_IOMMU_PLATFORM
+#endif
+
+#ifndef VIRTIO_NET_F_HASH_REPORT
+#define VIRTIO_NET_F_HASH_REPORT 57
+#endif
+
+#ifndef VIRTIO_NET_F_RSS
+#define VIRTIO_NET_F_RSS 60
+#endif
+
+#ifndef VIRTIO_NET_F_RSC_EXT
+#define VIRTIO_NET_F_RSC_EXT 61
+#endif
+
+#ifndef VIRTIO_F_IN_ORDER
+/* Virtio feature bit number 35 is not defined in
+ * include/uapi/linux/virtio_config.h for kernel versions < 5.18.
+ * So make it available for the out-of-tree builds. VIRTIO_F_IN_ORDER
+ * is defined in section 6 (Reserved Feature Bits) of the VirtIO v1.1 spec.
+ */
+#define VIRTIO_F_IN_ORDER 35
+#endif
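As a quick illustration (not part of the patch) of using the feature-bit fallbacks above together with the BIT_ULL() compat define from earlier in this header; the helper name is hypothetical:

    /* 'features' is the negotiated virtio feature bitmap. */
    static bool device_is_in_order(u64 features)
    {
            return features & BIT_ULL(VIRTIO_F_IN_ORDER);
    }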
+#if defined(EFX_HAVE_NET_DEVLINK_H) && defined(EFX_HAVE_DEVLINK_INFO) && defined(CONFIG_NET_DEVLINK)
+/* Minimum requirements met to use the kernel's devlink support */
+#include <net/devlink.h>
+
+/* devlink is available, augment the provided support with wrappers and stubs
+ * for newer APIs as appropriate.
+ */
+#define EFX_USE_DEVLINK
+
+#ifdef EFX_NEED_DEVLINK_INFO_BOARD_SERIAL_NUMBER_PUT
+static inline int devlink_info_board_serial_number_put(struct devlink_info_req *req,
+						       const char *bsn)
+{
+	/* Do nothing */
+	return 0;
+}
+#endif
+#ifdef EFX_NEED_DEVLINK_FLASH_UPDATE_STATUS_NOTIFY
+static inline void devlink_flash_update_status_notify(struct devlink *devlink,
+						      const char *status_msg,
+						      const char *component,
+						      unsigned long done,
+						      unsigned long total)
+{
+	/* Do nothing */
+}
+#endif
+#ifdef EFX_NEED_DEVLINK_FLASH_UPDATE_TIMEOUT_NOTIFY
+void devlink_flash_update_timeout_notify(struct devlink *devlink,
+					 const char *status_msg,
+					 const char *component,
+					 unsigned long timeout);
+#endif
+#else
+
+/* devlink is not available, provide a 'fake' devlink info request structure
+ * and functions to expose the version information via a file in sysfs.
+ */
+
+struct devlink_info_req {
+	char *buf;
+	size_t bufsize;
+};
+
+int devlink_info_serial_number_put(struct devlink_info_req *req, const char *sn);
+int devlink_info_driver_name_put(struct devlink_info_req *req, const char *name);
+int devlink_info_board_serial_number_put(struct devlink_info_req *req,
+					 const char *bsn);
+int devlink_info_version_fixed_put(struct devlink_info_req *req,
+				   const char *version_name,
+				   const char *version_value);
+int devlink_info_version_stored_put(struct devlink_info_req *req,
+				    const char *version_name,
+				    const char *version_value);
+int devlink_info_version_running_put(struct devlink_info_req *req,
+				     const char *version_name,
+				     const char *version_value);
+
+/* Provide do-nothing stubs for the flash update status notifications */
+
+struct devlink;
+
+static inline void devlink_flash_update_status_notify(struct devlink *devlink,
+						      const char *status_msg,
+						      const char *component,
+						      unsigned long done,
+						      unsigned long total)
+{
+	/* Do nothing */
+}
+
+static inline void devlink_flash_update_timeout_notify(struct devlink *devlink,
+						       const char *status_msg,
+						       const char *component,
+						       unsigned long timeout)
+{
+	/* Do nothing */
+}
+
+#endif /* EFX_HAVE_NET_DEVLINK_H && EFX_HAVE_DEVLINK_INFO && CONFIG_NET_DEVLINK */
+
+/* Irrespective of whether devlink is available, use the generic devlink info
+ * version object names where possible. Many of these definitions were added
+ * to net/devlink.h over time so augment whatever is provided.
+ */
+#ifndef DEVLINK_INFO_VERSION_GENERIC_BOARD_ID
+#define DEVLINK_INFO_VERSION_GENERIC_BOARD_ID "board.id"
+#endif
+#ifndef DEVLINK_INFO_VERSION_GENERIC_BOARD_REV
+#define DEVLINK_INFO_VERSION_GENERIC_BOARD_REV "board.rev"
+#endif
+#ifndef DEVLINK_INFO_VERSION_GENERIC_BOARD_MANUFACTURE
+#define DEVLINK_INFO_VERSION_GENERIC_BOARD_MANUFACTURE "board.manufacture"
+#endif
+#ifndef DEVLINK_INFO_VERSION_GENERIC_ASIC_ID
+#define DEVLINK_INFO_VERSION_GENERIC_ASIC_ID "asic.id"
+#endif
+#ifndef DEVLINK_INFO_VERSION_GENERIC_ASIC_REV
+#define DEVLINK_INFO_VERSION_GENERIC_ASIC_REV "asic.rev"
+#endif
+#ifndef DEVLINK_INFO_VERSION_GENERIC_FW
+#define DEVLINK_INFO_VERSION_GENERIC_FW "fw"
+#endif
+#ifndef DEVLINK_INFO_VERSION_GENERIC_FW_MGMT
+#define DEVLINK_INFO_VERSION_GENERIC_FW_MGMT "fw.mgmt"
+#endif
+#ifndef DEVLINK_INFO_VERSION_GENERIC_FW_MGMT_API
+#define DEVLINK_INFO_VERSION_GENERIC_FW_MGMT_API "fw.mgmt.api"
+#endif
+#ifndef DEVLINK_INFO_VERSION_GENERIC_FW_APP
+#define DEVLINK_INFO_VERSION_GENERIC_FW_APP "fw.app"
+#endif
+#ifndef DEVLINK_INFO_VERSION_GENERIC_FW_UNDI
+#define DEVLINK_INFO_VERSION_GENERIC_FW_UNDI "fw.undi"
+#endif
+#ifndef DEVLINK_INFO_VERSION_GENERIC_FW_NCSI
+#define DEVLINK_INFO_VERSION_GENERIC_FW_NCSI "fw.ncsi"
+#endif
+#ifndef DEVLINK_INFO_VERSION_GENERIC_FW_PSID
+#define DEVLINK_INFO_VERSION_GENERIC_FW_PSID "fw.psid"
+#endif
+#ifndef DEVLINK_INFO_VERSION_GENERIC_FW_ROCE
+#define DEVLINK_INFO_VERSION_GENERIC_FW_ROCE "fw.roce"
+#endif
+#ifndef DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID
+#define DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID "fw.bundle_id"
+#endif
+
+#ifdef EFX_NEED_REFCOUNT_T
+typedef atomic_t refcount_t;
+
+#define refcount_set atomic_set
+#define refcount_inc_not_zero atomic_inc_not_zero
+#define refcount_dec_and_test atomic_dec_and_test
+#endif
+
+#ifdef EFX_NEED_DEVICE_IOMMU_CAPABLE
+#ifndef EFX_HAVE_IOMMU_CAPABLE
+enum iommu_cap { IOMMU_CAP_DUMMY, };
+#endif
+
+static inline bool device_iommu_capable(struct device *dev, enum iommu_cap cap)
+{
+#ifdef EFX_HAVE_IOMMU_CAPABLE
+	return
iommu_capable(dev->bus, cap); +#else + return false; +#endif +} +#endif + +#ifndef EFX_HAVE_IOMMU_MAP_GFP_PARAM +static inline int efx_iommu_map(struct iommu_domain *domain, + unsigned long iova, phys_addr_t paddr, + size_t size, int prot, gfp_t gfp) +{ + return iommu_map(domain, iova, paddr, size, prot); +} +#undef iommu_map +#define iommu_map efx_iommu_map +#endif + +#ifndef EFX_HAVE_SK_BUFF_LIST +static inline void efx__skb_queue_tail(struct sk_buff_head *list, + struct sk_buff *newsk) +{ + // This calls __skb_queue_before(list, (struct sk_buff *)list, newsk) + __skb_insert(newsk, list->prev, (struct sk_buff *)list, list); +} +#undef __skb_queue_tail +#define __skb_queue_tail efx__skb_queue_tail +#endif + +/* xdp_do_flush_map has been renamed xdp_do_flush */ +#ifdef EFX_NEED_XDP_DO_FLUSH +#define xdp_do_flush xdp_do_flush_map +#endif + +#endif /* EFX_KERNEL_COMPAT_H */ diff --git a/src/drivers/net/ethernet/sfc/kernel_compat.sh b/src/drivers/net/ethernet/sfc/kernel_compat.sh new file mode 100755 index 0000000..d9f9efd --- /dev/null +++ b/src/drivers/net/ethernet/sfc/kernel_compat.sh @@ -0,0 +1,583 @@ +#!/bin/bash -eu + +###################################################################### +# +# Driver for Solarflare network controllers and boards +# Copyright 2019 Solarflare Communications Inc. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 as published +# by the Free Software Foundation, incorporated herein by reference. +# +###################################################################### + +me=$(basename "$0") + +###################################################################### +# Symbol definition map + +function generate_kompat_symbols() { + echo " +EFX_HAVE_MTD_TABLE kver < 2.6.35 +EFX_HAVE_VMALLOC_REG_DUMP_BUF kver >= 2.6.37 +EFX_USE_ETHTOOL_OP_GET_LINK kver >= 2.6.38 +EFX_WANT_NDO_POLL_CONTROLLER kver < 4.19 +EFX_HAVE_GRO custom +EFX_NEED_GRO_RESULT_T nsymbol gro_result_t include/linux/netdevice.h +EFX_HAVE_NAPI_GRO_RECEIVE_GR symbol napi_gro_receive_gr include/linux/netdevice.h +EFX_HAVE_NET_GRO_H file include/net/gro.h +EFX_NEED_VZALLOC nsymbol vzalloc include/linux/vmalloc.h +EFX_NEED_MTD_DEVICE_REGISTER nsymbol mtd_device_register include/linux/mtd/mtd.h +EFX_HAVE_MTD_DIRECT_ACCESS custom +EFX_NEED_NETIF_SET_REAL_NUM_TX_QUEUES nsymbol netif_set_real_num_tx_queues include/linux/netdevice.h +EFX_NEED_NETIF_SET_REAL_NUM_RX_QUEUES nsymbol netif_set_real_num_rx_queues include/linux/netdevice.h +EFX_HAVE_ROUND_JIFFIES_UP symbol round_jiffies_up include/linux/timer.h +EFX_NEED_SKB_CHECKSUM_START_OFFSET nsymbol skb_checksum_start_offset include/linux/skbuff.h +EFX_HAVE_CSUM_LEVEL symbol csum_level include/linux/skbuff.h +EFX_HAVE_SKBTX_HW_TSTAMP symbol SKBTX_HW_TSTAMP include/linux/skbuff.h +EFX_HAVE_SKB_SYSTSTAMP member struct_skb_shared_hwtstamps syststamp include/linux/skbuff.h +EFX_HAVE_SKB_TX_TIMESTAMP symbol skb_tx_timestamp include/linux/skbuff.h +EFX_HAVE_HWTSTAMP_FLAGS symbol hwtstamp_flags include/uapi/linux/net_tstamp.h +EFX_NEED_WQ_SYSFS nsymbol WQ_SYSFS include/linux/workqueue.h +EFX_HAVE_ALLOC_WORKQUEUE symbol alloc_workqueue include/linux/workqueue.h +EFX_HAVE_NEW_ALLOC_WORKQUEUE custom +EFX_NEED_MOD_DELAYED_WORK nsymbol mod_delayed_work include/linux/workqueue.h +EFX_USE_ETHTOOL_FLAGS symbol get_flags include/linux/ethtool.h +EFX_USE_ETHTOOL_LP_ADVERTISING symbol lp_advertising include/linux/ethtool.h +EFX_USE_ETHTOOL_MDIO_SUPPORT symbol mdio_support 
include/linux/ethtool.h +EFX_USE_MTD_WRITESIZE symbol writesize include/linux/mtd/mtd.h +EFX_HAVE_MTD_USECOUNT member struct_mtd_info usecount include/linux/mtd/mtd.h +EFX_USE_NETDEV_STATS custom +EFX_USE_NETDEV_STATS64 member struct_net_device_ops ndo_get_stats64 include/linux/netdevice.h +EFX_HAVE_NETDEV_STATS64_VOID memtype struct_net_device_ops ndo_get_stats64 include/linux/netdevice.h void(*)(struct net_device *, struct rtnl_link_stats64 *) +EFX_HAVE_NET_DEVICE_MC memtype struct_net_device mc include/linux/netdevice.h struct netdev_hw_addr_list +EFX_NEED_HWMON_T_ALARM nsymbol HWMON_T_ALARM include/linux/hwmon.h +EFX_HAVE_HWMON_READ_STRING member struct_hwmon_ops read_string include/linux/hwmon.h +EFX_HAVE_HWMON_READ_STRING_CONST memtype struct_hwmon_ops read_string include/linux/hwmon.h int(*)(struct device *, enum hwmon_sensor_types, u32, int, const char **) +EFX_USE_ETHTOOL_GET_SSET_COUNT symbol get_sset_count include/linux/ethtool.h +# Do not use struct ethtool_ops_ext due to RH BZ 1008678 (SF bug 39031) +EFX_HAVE_ETHTOOL_RESET member struct_ethtool_ops reset include/linux/ethtool.h +EFX_HAVE_ETHTOOL_SET_PHYS_ID symbol set_phys_id include/linux/ethtool.h +EFX_HAVE_ETHTOOL_GMODULEEEPROM symbol get_module_eeprom include/linux/ethtool.h +EFX_NEED_DMA_SET_COHERENT_MASK nsymbol dma_set_coherent_mask include/linux/dma-mapping.h +EFX_NEED_DMA_SET_MASK_AND_COHERENT nsymbol dma_set_mask_and_coherent include/linux/dma-mapping.h +EFX_NEED_BITMAP_ZALLOC nsymbol bitmap_zalloc include/linux/bitmap.h +EFX_USE_PM_EXT_OPS symbol pm_ext_ops include/linux/pm.h +EFX_USE_DEV_PM_OPS symbol dev_pm_ops include/linux/pm.h +EFX_HAVE_XEN_XEN_H file include/xen/xen.h +EFX_HAVE_ASM_SYSTEM_H file asm/system.h +EFX_HAVE_XEN_START_INFO custom +EFX_HAVE_EXPORTED_CPU_SIBLING_MAP export (per_cpu__)?cpu_sibling_map include/asm/smp.h arch/$SRCARCH/include/asm/smp.h arch/$SRCARCH/kernel/smpboot.c drivers/xen/core/smpboot.c +EFX_HAVE_PCI_NUM_VF export pci_num_vf include/linux/pci.h drivers/pci/iov.c +EFX_HAVE_SRIOV_CONFIGURE member struct_pci_driver sriov_configure include/linux/pci.h +EFX_HAVE_PCI_DRIVER_RH member struct_pci_driver_rh sriov_configure include/linux/pci.h +EFX_HAVE_NDO_SIZE member struct_net_device_ops ndo_size include/linux/netdevice.h +EFX_HAVE_NDO_SIZE_RH member struct_net_device_ops ndo_size_rh include/linux/netdevice.h +EFX_HAVE_NDO_SET_VF_MAC member struct_net_device_ops ndo_set_vf_mac include/linux/netdevice.h +EFX_HAVE_NDO_SET_VF_VLAN_PROTO memtype struct_net_device_ops ndo_set_vf_vlan include/linux/netdevice.h int (*)(struct net_device *, int, u16, u8, __be16) +EFX_HAVE_NDO_EXT_SET_VF_VLAN_PROTO memtype struct_net_device_ops_extended ndo_set_vf_vlan include/linux/netdevice.h int (*)(struct net_device *, int, u16, u8, __be16) +EFX_HAVE_NDO_SET_VF_SPOOFCHK member struct_net_device_ops ndo_set_vf_spoofchk include/linux/netdevice.h +EFX_HAVE_NDO_SET_FEATURES member struct_net_device_ops ndo_set_features include/linux/netdevice.h +EFX_HAVE_NDO_FEATURES_CHECK member struct_net_device_ops ndo_features_check include/linux/netdevice.h +EFX_HAVE_EXT_NDO_SET_FEATURES member struct_net_device_ops_ext ndo_set_features include/linux/netdevice.h +EFX_HAVE_VF_LINK_STATE member struct_net_device_ops ndo_set_vf_link_state include/linux/netdevice.h +EFX_HAVE_NDO_SET_MULTICAST_LIST member struct_net_device_ops ndo_set_multicast_list include/linux/netdevice.h +EFX_HAVE_NDO_BUSY_POLL member struct_net_device_ops ndo_busy_poll include/linux/netdevice.h +EFX_HAVE_NDO_GET_PHYS_PORT_ID member struct_net_device_ops 
ndo_get_phys_port_id include/linux/netdevice.h +EFX_HAVE_NDO_GET_PHYS_PORT_NAME member struct_net_device_ops ndo_get_phys_port_name include/linux/netdevice.h +EFX_HAVE_NDO_GET_PORT_PARENT_ID member struct_net_device_ops ndo_get_port_parent_id include/linux/netdevice.h +EFX_HAVE_NDO_VLAN_RX_ADD_VID_PROTO memtype struct_net_device_ops ndo_vlan_rx_add_vid include/linux/netdevice.h int (*)(struct net_device *, __be16, u16) +EFX_HAVE_NDO_VLAN_RX_ADD_VID_RC memtype struct_net_device_ops ndo_vlan_rx_add_vid include/linux/netdevice.h int (*)(struct net_device *, u16) +EFX_NEED_ETHER_ADDR_COPY nsymbol ether_addr_copy include/linux/etherdevice.h +EFX_NEED_ETHER_ADDR_EQUAL nsymbol ether_addr_equal include/linux/etherdevice.h +EFX_NEED_ETH_ZERO_ADDR nsymbol eth_zero_addr include/linux/etherdevice.h +EFX_NEED_ETH_BROADCAST_ADDR nsymbol eth_broadcast_addr include/linux/etherdevice.h +EFX_NEED_ETH_RANDOM_ADDR nsymbol eth_random_addr include/linux/etherdevice.h +EFX_NEED_ETH_HW_ADDR_SET nsymbol eth_hw_addr_set include/linux/etherdevice.h +EFX_NEED_MAC_PTON nsymbol mac_pton include/linux/kernel.h include/linux/hex.h include/linux/if_ether.h +EFX_HAVE_HEX_TO_BIN symbol hex_to_bin include/linux/kernel.h include/linux/hex.h +EFX_NEED_NS_TO_TIMESPEC nexport ns_to_timespec include/linux/time.h kernel/time.c +EFX_HAVE_TIMESPEC64 symbol timespec64 include/linux/time64.h include/linux/time.h +EFX_NEED_KTIME_GET_REAL_TS64 nsymbol ktime_get_real_ts64 include/linux/timekeeping.h include/linux/ktime.h +EFX_NEED_KTIME_COMPARE nsymbol ktime_compare include/linux/ktime.h +EFX_NEED_RTC_TIME64_TO_TM nsymbol rtc_time64_to_tm include/linux/rtc.h +EFX_NEED_SET_NORMALIZED_TIMESPEC custom +EFX_HAVE_VLAN_RX_PATH symbol vlan_hwaccel_receive_skb include/linux/if_vlan.h +EFX_HAVE_OLD_ETHTOOL_GET_RXNFC memtype struct_ethtool_ops get_rxnfc include/linux/ethtool.h int (*)(struct net_device *, struct ethtool_rxnfc *, void *) +EFX_HAVE_CPU_RMAP file include/linux/cpu_rmap.h +EFX_HAVE_PTP_PF_NONE symbol PTP_PF_NONE include/linux/ptp_clock.h include/uapi/linux/ptp_clock.h +EFX_NEED_PTP_CLOCK_PPSUSR custom +EFX_USE_64BIT_PHC member struct_ptp_clock_info gettime64 include/linux/ptp_clock_kernel.h +EFX_HAVE_PTP_GETCROSSTSTAMP member struct_ptp_clock_info getcrosststamp include/linux/ptp_clock_kernel.h +EFX_NEED_KTIME_GET_SNAPSHOT nsymbol ktime_get_snapshot include/linux/timekeeping.h +EFX_HAVE_PHC_SUPPORT custom +EFX_HAVE_PTP_CLOCK_INFO_ADJFINE member struct_ptp_clock_info adjfine include/linux/ptp_clock_kernel.h +EFX_HAVE_PTP_CLOCK_INFO_ADJFREQ member struct_ptp_clock_info adjfreq include/linux/ptp_clock_kernel.h +EFX_HAVE_PTP_CLOCK_GETTIMEX64 member struct_ptp_clock_info gettimex64 include/linux/ptp_clock_kernel.h +EFX_NEED_SCALED_PPM_TO_PPB nsymbol scaled_ppm_to_ppb include/linux/ptp_clock_kernel.h +EFX_NEED_PPS_SUB_TS nsymbol pps_sub_ts include/linux/pps_kernel.h +EFX_NEED_PPS_EVENT_TIME nsymbol pps_event_time include/linux/pps_kernel.h +EFX_HAVE_PPS_EVENT_TIME_TIMESPEC nmemtype struct_pps_event_time ts_real include/linux/pps_kernel.h struct timespec64 +EFX_NEED_PPS_GET_TS nsymbol pps_get_ts include/linux/pps_kernel.h +EFX_NEED_IP_IS_FRAGMENT nsymbol ip_is_fragment include/net/ip.h +EFX_NEED_NETDEV_FEATURES_T nsymbol netdev_features_t include/linux/netdevice.h +EFX_NEED_SKB_FRAG_DMA_MAP nsymbol skb_frag_dma_map include/linux/skbuff.h +EFX_NEED_SKB_FRAG_ADDRESS nsymbol skb_frag_address include/linux/skbuff.h +EFX_NEED_SKB_FRAG_SIZE nsymbol skb_frag_size include/linux/skbuff.h +EFX_NEED_SKB_FRAG_PAGE nsymbol skb_frag_page 
include/linux/skbuff.h +EFX_NEED_SKB_FRAG_OFF nsymbol skb_frag_off include/linux/skbuff.h +EFX_HAVE_ETHTOOL_GET_RXFH_INDIR symbol get_rxfh_indir include/linux/ethtool.h +EFX_HAVE_ETHTOOL_GET_RXFH_INDIR_SIZE symbol get_rxfh_indir_size include/linux/ethtool.h +EFX_HAVE_ETHTOOL_GET_RXFH symbol get_rxfh include/linux/ethtool.h +EFX_HAVE_ETHTOOL_GET_RXFH_KEY_SIZE symbol get_rxfh_key_size include/linux/ethtool.h +EFX_HAVE_ETHTOOL_SET_RXFH_NOCONST custom +EFX_HAVE_OLD_ETHTOOL_RXFH_INDIR custom +EFX_NEED_ETHTOOL_RXFH_INDIR_DEFAULT nsymbol ethtool_rxfh_indir_default include/linux/ethtool.h +EFX_NEED_IS_COMPAT_TASK custom +EFX_HAVE_TIF_ADDR32 symbol TIF_ADDR32 arch/x86/include/asm/thread_info.h +EFX_NEED_COMPAT_U64 nsymbol compat_u64 include/asm/compat.h arch/$SRCARCH/include/asm/compat.h include/asm-$SRCARCH/compat.h +EFX_HAVE_IRQ_NOTIFIERS symbol irq_affinity_notify include/linux/interrupt.h +EFX_HAVE_GSO_MAX_SEGS member struct_net_device gso_max_segs include/linux/netdevice.h +EFX_NEED_SET_TSO_MAX_SIZE nsymbol netif_set_tso_max_size include/linux/netdevice.h +EFX_NEED_SET_TSO_MAX_SEGS nsymbol netif_set_tso_max_segs include/linux/netdevice.h +EFX_NEED_BYTE_QUEUE_LIMITS nsymbol netdev_tx_sent_queue include/linux/netdevice.h +EFX_NEED___BQL nsymbol __netdev_tx_sent_queue include/linux/netdevice.h +EFX_NEED_SKB_CHECKSUM_NONE_ASSERT nsymbol skb_checksum_none_assert include/linux/skbuff.h +EFX_HAVE_NON_CONST_KERNEL_PARAM symtype param_set_uint include/linux/moduleparam.h int (const char *, struct kernel_param *) +EFX_HAVE_KERNEL_PARAM_OPS symbol kernel_param_ops include/linux/moduleparam.h +EFX_NEED___SET_BIT_LE nsymtype __set_bit_le include/asm-generic/bitops/le.h void (int, void *) +EFX_USE_ETHTOOL_OPS_EXT symbol ethtool_ops_ext include/linux/ethtool.h +EFX_HAVE_ETHTOOL_GET_DUMP_FLAG member struct_ethtool_ops get_dump_flag include/linux/ethtool.h +EFX_HAVE_ETHTOOL_GET_DUMP_DATA member struct_ethtool_ops get_dump_data include/linux/ethtool.h +EFX_HAVE_ETHTOOL_SET_DUMP member struct_ethtool_ops set_dump include/linux/ethtool.h +EFX_HAVE_ETHTOOL_GET_TS_INFO member struct_ethtool_ops get_ts_info include/linux/ethtool.h +EFX_HAVE_ETHTOOL_EXT_GET_TS_INFO member struct_ethtool_ops_ext get_ts_info include/linux/ethtool.h +EFX_HAVE_OLD___VLAN_PUT_TAG symtype __vlan_put_tag include/linux/if_vlan.h struct sk_buff *(struct sk_buff *, u16) +EFX_HAVE_VLAN_INSERT_TAG_SET_PROTO symbol vlan_insert_tag_set_proto include/linux/if_vlan.h +EFX_NEED_NETDEV_NOTIFIER_INFO_TO_DEV nsymbol netdev_notifier_info_to_dev include/linux/netdevice.h +EFX_HAVE_NETDEV_REGISTER_RH symbol register_netdevice_notifier_rh include/linux/netdevice.h +EFX_HAVE_NETDEV_RFS_INFO symbol netdev_rfs_info include/linux/netdevice.h +EFX_NEED_PCI_AER_CLEAR_NONFATAL_STATUS nsymbol pci_aer_clear_nonfatal_status include/linux/aer.h +EFX_HAVE_PCI_ENABLE_PCIE_ERROR_REPORTING symbol pci_enable_pcie_error_reporting include/linux/aer.h +EFX_HAVE_EEH_DEV_CHECK_FAILURE symbol eeh_dev_check_failure arch/powerpc/include/asm/eeh.h +EFX_NEED_PCI_DEV_TO_EEH_DEV nsymbol pci_dev_to_eeh_dev include/linux/pci.h +EFX_HAVE_IOREMAP_WC symbol ioremap_wc arch/$SRCARCH/include/asm/io.h include/asm-$SRCARCH/io.h include/asm-generic/io.h +EFX_HAVE_IOREMAP_NOCACHE symbol ioremap_nocache include/asm-generic/io.h +EFX_NEED_SKB_TRANSPORT_HEADER_WAS_SET nsymbol skb_transport_header_was_set include/linux/skbuff.h +EFX_HAVE_OLD_KMAP_ATOMIC custom +EFX_HAVE_NAPI_STRUCT symbol napi_struct include/linux/netdevice.h +EFX_HAVE_NAPI_STRUCT_NAPI_ID member struct_napi_struct napi_id 
include/linux/netdevice.h +EFX_HAVE_NAPI_HASH_ADD symbol napi_hash_add include/linux/netdevice.h +EFX_HAVE_NAPI_HASH_DEL_RETURN symtype napi_hash_del include/linux/netdevice.h int (struct napi_struct *) +EFX_NEED_NETIF_NAPI_ADD_WEIGHT nsymbol netif_napi_add_weight include/linux/netdevice.h +EFX_HAVE_OLD_NETIF_NAPI_ADD symtype netif_napi_add include/linux/netdevice.h void (struct net_device *, struct napi_struct *, int (*)(struct napi_struct *, int), int) +EFX_NEED_SKB_SET_HASH nsymbol skb_set_hash include/linux/skbuff.h +EFX_HAVE_SKB_L4HASH member struct_sk_buff l4_rxhash include/linux/skbuff.h +EFX_HAVE_SKB_VLANTCI member struct_sk_buff vlan_tci include/linux/skbuff.h +EFX_HAVE_BUSY_POLL file include/net/busy_poll.h +EFX_NEED_USLEEP_RANGE nsymbol usleep_range include/linux/delay.h +EFX_HAVE_SRIOV_GET_TOTALVFS symbol pci_sriov_get_totalvfs include/linux/pci.h +EFX_NEED_SKB_VLAN_TAG_GET nsymbol skb_vlan_tag_get include/linux/if_vlan.h +EFX_HAVE_OLD___VLAN_HWACCEL_PUT_TAG symtype __vlan_hwaccel_put_tag include/linux/if_vlan.h struct sk_buff *(struct sk_buff *, u16) +EFX_HAVE_ETHTOOL_CHANNELS member struct_ethtool_ops get_channels include/linux/ethtool.h +EFX_HAVE_ETHTOOL_EXT_CHANNELS member struct_ethtool_ops_ext get_channels include/linux/ethtool.h +EFX_NEED_IPV6_NFC nsymbol ethtool_tcpip6_spec include/uapi/linux/ethtool.h +EFX_HAVE_SKB_HASH member struct_sk_buff hash include/linux/skbuff.h +EFX_HAVE_SKB_INNER_NETWORK_HEADER symbol skb_inner_network_header include/linux/skbuff.h +EFX_SKB_HAS_INNER_NETWORK_HEADER member struct_sk_buff inner_network_header include/linux/skbuff.h +EFX_HAVE_SKB_INNER_TRANSPORT_HEADER symbol skb_inner_transport_header include/linux/skbuff.h +EFX_SKB_HAS_INNER_TRANSPORT_HEADER member struct_sk_buff inner_transport_header include/linux/skbuff.h +EFX_HAVE_SKB_FRAG_TRUESIZE symtype skb_add_rx_frag include/linux/skbuff.h void (struct sk_buff *, int, struct page *, int, int, unsigned int) +EFX_HAVE_INNER_IP_HDR symbol inner_ip_hdr include/linux/ip.h +EFX_HAVE_INNER_TCP_HDR symbol inner_tcp_hdr include/linux/tcp.h +EFX_HAVE_INDIRECT_CALL_WRAPPERS file include/linux/indirect_call_wrapper.h + +# Stuff needed in code other than the linux net driver +EFX_HAVE_NEW_KFIFO symbol kfifo_out include/linux/kfifo.h +EFX_HAVE_NETFILTER_INDIRECT_SKB memtype struct_nf_hook_ops hook include/linux/netfilter.h unsigned int(*)(unsigned int, struct sk_buff **, const struct net_device *, const struct net_device *, int (*)(struct sk_buff *)) +EFX_HAVE_NFPROTO_CONSTANTS symbol NFPROTO_NUMPROTO include/linux/netfilter.h +EFX_HAVE___REGISTER_CHRDEV symbol __register_chrdev include/linux/fs.h +EFX_HAVE_MSIX_CAP symbol msix_cap include/linux/pci.h +EFX_NEED_PCI_ENABLE_MSIX_RANGE nsymbol pci_enable_msix_range include/linux/pci.h +EFX_NEED_PCI_MSIX_VEC_COUNT nsymbol pci_msix_vec_count include/linux/pci.h +EFX_HAVE_SKB_OOO_OKAY member struct_sk_buff ooo_okay include/linux/skbuff.h +EFX_HAVE_SKB_TX_HASH symbol skb_tx_hash include/linux/netdevice.h include/linux/skbuff.h +EFX_HAVE_SK_SET_TX_QUEUE symbol sk_tx_queue_set include/net/sock.h +EFX_HAVE_SKB_GET_RX_QUEUE symbol skb_get_rx_queue include/linux/skbuff.h +EFX_NEED_RCU_ACCESS_POINTER nsymbol rcu_access_pointer include/linux/rcupdate.h +EFX_HAVE_VF_INFO_MIN_TX_RATE member struct_ifla_vf_info min_tx_rate include/linux/if_link.h +EFX_HAVE_NETDEV_HW_FEATURES member struct_net_device hw_features include/linux/netdevice.h +EFX_HAVE_NETDEV_EXTENDED_HW_FEATURES member struct_net_device_extended hw_features include/linux/netdevice.h 
+EFX_HAVE_NETDEV_FEATURES_CHANGE symbol netdev_features_change include/linux/netdevice.h +EFX_HAVE_PCI_DEV_FLAGS_ASSIGNED symbol PCI_DEV_FLAGS_ASSIGNED include/linux/pci.h +EFX_HAVE_PCI_VFS_ASSIGNED symbol pci_vfs_assigned include/linux/pci.h +EFX_HAVE_LINUX_EXPORT_H file include/linux/export.h +EFX_NEED_KMALLOC_ARRAY nsymbol kmalloc_array include/linux/slab.h +EFX_HAVE_VOID_DYNAMIC_NETDEV_DBG symtype __dynamic_netdev_dbg include/linux/dynamic_debug.h void (struct _ddebug *, const struct net_device *, const char *, ...) +EFX_HAVE_NDO_EXT_BUSY_POLL member struct_net_device_extended ndo_busy_poll include/linux/netdevice.h +EFX_HAVE_NET_DEVICE_OPS_EXT member struct_net_device_extended netdev_ops_ext include/linux/netdevice.h +EFX_HAVE_NET_DEVICE_OPS_EXT_GET_PHYS_PORT_ID member struct_net_device_ops_ext ndo_get_phys_port_id include/linux/netdevice.h +EFX_HAVE_NET_DEVICE_OPS_EXT_SET_VF_SPOOFCHK member struct_net_device_ops_ext ndo_set_vf_spoofchk include/linux/netdevice.h +EFX_HAVE_NET_DEVICE_OPS_EXT_SET_VF_LINK_STATE member struct_net_device_ops_ext ndo_set_vf_link_state include/linux/netdevice.h +EFX_NEED_SKB_GSO_TCPV6 nsymbol SKB_GSO_TCPV6 include/linux/skbuff.h +EFX_HAVE_GSO_PARTIAL symbol SKB_GSO_PARTIAL include/linux/skbuff.h +EFX_HAVE_GSO_UDP_TUNNEL symbol SKB_GSO_UDP_TUNNEL include/linux/skbuff.h +EFX_HAVE_GSO_UDP_TUNNEL_CSUM symbol SKB_GSO_UDP_TUNNEL_CSUM include/linux/skbuff.h +EFX_NEED_SKB_IS_GSO_TCP nsymbol skb_is_gso_tcp include/linux/skbuff.h +EFX_HAVE_GSO_H file include/net/gso.h +EFX_NEED_IS_ERR_OR_NULL nsymbol IS_ERR_OR_NULL include/linux/err.h +EFX_NEED_NETDEV_RSS_KEY_FILL nsymbol netdev_rss_key_fill include/linux/netdevice.h +EFX_HAVE_NETIF_SET_XPS_QUEUE symbol netif_set_xps_queue include/linux/netdevice.h +EFX_HAVE_NETIF_SET_XPS_QUEUE_NON_CONST symtype netif_set_xps_queue include/linux/netdevice.h int (struct net_device *, struct cpumask *, u16) +EFX_HAVE_ALLOC_PAGES_NODE symbol alloc_pages_node include/linux/gfp.h +EFX_HAVE_NETIF_XMIT_STOPPED symbol netif_xmit_stopped include/linux/netdevice.h +EFX_NEED_CPUMASK_LOCAL_SPREAD nsymbol cpumask_local_spread include/linux/cpumask.h +EFX_HAVE_CONST_PCI_ERR_HANDLER memtype struct_pci_driver err_handler include/linux/pci.h const struct pci_error_handlers * +EFX_HAVE_HW_ENC_FEATURES member struct_net_device hw_enc_features include/linux/netdevice.h +EFX_NEED_SKB_INNER_TRANSPORT_OFFSET nsymbol skb_inner_transport_offset include/linux/skbuff.h +EFX_HAVE_SKB_XMIT_MORE bitfield struct_sk_buff xmit_more include/linux/skbuff.h +EFX_HAVE_SK_BUFF_LIST symbol sk_buff_list include/linux/skbuff.h +EFX_HAVE_NETDEV_XMIT_MORE symbol netdev_xmit_more include/linux/netdevice.h +EFX_HAVE_NDO_ADD_VXLAN_PORT member struct_net_device_ops ndo_add_vxlan_port include/linux/netdevice.h +EFX_NEED_PAGE_REF_ADD nfile include/linux/page_ref.h +EFX_NEED_D_HASH_AND_LOOKUP nexport d_hash_and_lookup include/linux/dcache.h fs/dcache.c +EFX_HAVE_KTIME_UNION custom +EFX_NEED_HWMON_DEVICE_REGISTER_WITH_INFO nsymbol hwmon_device_register_with_info include/linux/hwmon.h +EFX_HAVE_NDO_UDP_TUNNEL_ADD member struct_net_device_ops ndo_udp_tunnel_add include/linux/netdevice.h +EFX_HAVE_UDP_TUNNEL_NIC_INFO symbol udp_tunnel_nic_info include/net/udp_tunnel.h +EFX_HAVE_NEW_FLOW_KEYS member struct_flow_keys basic include/net/flow_dissector.h +EFX_HAVE_SKB_ENCAPSULATION bitfield struct_sk_buff encapsulation include/linux/skbuff.h +EFX_HAVE_NDO_ADD_GENEVE_PORT member struct_net_device_ops ndo_add_geneve_port include/linux/netdevice.h +EFX_HAVE_NETDEV_MTU_LIMITS member 
struct_net_device max_mtu include/linux/netdevice.h +EFX_NEED_BOOL_NAPI_COMPLETE_DONE nsymtype napi_complete_done include/linux/netdevice.h bool (struct napi_struct *, int) +EFX_HAVE_XDP symbol netdev_bpf include/linux/netdevice.h +EFX_HAVE_XDP_OLD symbol netdev_xdp include/linux/netdevice.h +EFX_HAVE_XDP_TRACE file include/trace/events/xdp.h +EFX_HAVE_XDP_HEAD member struct_xdp_buff data_hard_start include/linux/filter.h +EFX_HAVE_XDP_TX symbol XDP_TX include/uapi/linux/bpf.h +EFX_HAVE_XDP_TX_FLAGS memtype struct_net_device_ops ndo_xdp_xmit include/linux/netdevice.h int (*)(struct net_device *, int, struct xdp_frame **, u32) +EFX_HAVE_XDP_REDIR symbol XDP_REDIRECT include/uapi/linux/bpf.h +EFX_HAVE_XDP_RXQ_INFO symbol xdp_rxq_info include/net/xdp.h +EFX_HAVE_XDP_RXQ_INFO_NAPI_ID symtype xdp_rxq_info_reg include/net/xdp.h int(struct xdp_rxq_info *, struct net_device *, u32, unsigned int) +EFX_HAVE_XDP_EXT member struct_net_device_ops_extended ndo_xdp include/linux/netdevice.h +EFX_NEED_XDP_FLUSH member struct_net_device_ops ndo_xdp_flush include/linux/netdevice.h +EFX_HAVE_XDP_PROG_ATTACHED member struct_netdev_bpf prog_attached include/linux/netdevice.h +EFX_HAVE_XDP_PROG_ID member struct_netdev_bpf prog_id include/linux/netdevice.h +EFX_HAVE_BPF_WARN_INVALID_XDP_ACTION_3PARAM symtype bpf_warn_invalid_xdp_action include/linux/filter.h void(struct net_device *, struct bpf_prog *, u32) +EFX_NEED_PAGE_FRAG_FREE nsymbol page_frag_free include/linux/gfp.h +EFX_HAVE_FREE_PAGE_FRAG symbol __free_page_frag include/linux/gfp.h +EFX_NEED_VOID_SKB_PUT nsymtype skb_put include/linux/skbuff.h void *(struct sk_buff *, unsigned int) +EFX_HAVE_ETHTOOL_FCS symbol NETIF_F_RXALL include/linux/netdev_features.h +EFX_HAVE_ETHTOOL_LINKSETTINGS symbol ethtool_link_ksettings include/linux/ethtool.h +EFX_HAVE_ETHTOOL_LEGACY symbol __ethtool_get_settings include/linux/ethtool.h +EFX_HAVE_LINK_MODE_1000X symbol ETHTOOL_LINK_MODE_1000baseX_Full_BIT include/uapi/linux/ethtool.h +EFX_HAVE_LINK_MODE_25_50_100 symbol ETHTOOL_LINK_MODE_25000baseCR_Full_BIT include/uapi/linux/ethtool.h +EFX_HAVE_LINK_MODE_FEC_BITS symbol ETHTOOL_LINK_MODE_FEC_BASER_BIT include/uapi/linux/ethtool.h +EFX_HAVE_NETDEV_EXT_MTU_LIMITS member struct_net_device_extended max_mtu include/linux/netdevice.h +EFX_HAVE_NDO_EXT_CHANGE_MTU memtype struct_net_device_ops_extended ndo_change_mtu include/linux/netdevice.h int (*)(struct net_device *, int) +EFX_HAVE_NDO_TX_TIMEOUT_TXQUEUE memtype struct_net_device_ops ndo_tx_timeout include/linux/netdevice.h void (*)(struct net_device *, unsigned int) +EFX_HAVE_ETHTOOL_FECSTATS member struct_ethtool_ops get_fec_stats include/linux/ethtool.h +EFX_HAVE_ETHTOOL_FECPARAM member struct_ethtool_ops get_fecparam include/linux/ethtool.h +EFX_HAVE_ETHTOOL_RXFH_CONTEXT member struct_ethtool_ops get_rxfh_context include/linux/ethtool.h +EFX_HAVE_ETHTOOL_RXNFC_CONTEXT member struct_ethtool_rxnfc rss_context include/linux/ethtool.h +EFX_HAVE_XDP_FRAME_API symbol xdp_frame include/net/xdp.h +EFX_HAVE_XDP_COVERT_XDP_BUFF_FRAME_API symbol xdp_convert_buff_to_frame include/net/xdp.h +EFX_HAVE_XDP_DATA_META member struct_xdp_buff data_meta include/linux/filter.h +EFX_HAVE_OLD_DEV_OPEN symtype dev_open include/linux/netdevice.h int (struct net_device *) +EFX_NEED_CONSUME_SKB_ANY nsymbol dev_consume_skb_any include/linux/netdevice.h +EFX_NEED_CSUM16_SUB nsymbol csum16_sub include/net/checksum.h +EFX_NEED_CSUM_REPLACE_BY_DIFF nsymbol csum_replace_by_diff include/net/checksum.h +EFX_HAVE_NEW_NDO_SETUP_TC memtype 
struct_net_device_ops ndo_setup_tc include/linux/netdevice.h int (*)(struct net_device *, enum tc_setup_type, void *) +EFX_HAVE_TC_BLOCK_OFFLOAD symbol tc_block_offload include/net/pkt_cls.h +EFX_HAVE_FLOW_BLOCK_OFFLOAD symbol flow_block_offload include/net/flow_offload.h +EFX_HAVE_TC_INDR_BLOCK_CB_REGISTER symbol __tc_indr_block_cb_register include/net/pkt_cls.h +EFX_HAVE_FLOW_INDR_BLOCK_CB_REGISTER symbol __flow_indr_block_cb_register include/net/flow_offload.h +EFX_HAVE_FLOW_INDR_BLOCK_CB_ALLOC symbol flow_indr_block_cb_alloc include/net/flow_offload.h +EFX_HAVE_FLOW_INDR_DEV_REGISTER symbol flow_indr_dev_register include/net/flow_offload.h +EFX_HAVE_FLOW_INDR_QDISC member struct_flow_block_indr sch include/net/flow_offload.h +EFX_HAVE_TC_ACTION_COOKIE custom +EFX_HAVE_TC_FLOW_OFFLOAD file include/net/flow_offload.h +EFX_NEED_TCF_MIRRED_DEV nsymbol tcf_mirred_dev include/net/tc_act/tc_mirred.h +EFX_HAVE_STRUCT_SIZE symbol struct_size include/linux/overflow.h +EFX_NEED_ARRAY_SIZE nsymbol array_size include/linux/overflow.h +EFX_NEED_FLOW_RULE_MATCH_CVLAN nsymbol flow_rule_match_cvlan include/net/flow_offload.h +EFX_NEED_FLOW_RULE_MATCH_CT nsymbol flow_rule_match_ct include/net/flow_offload.h +EFX_HAVE_FLOW_DISSECTOR_KEY_CVLAN symbol FLOW_DISSECTOR_KEY_CVLAN include/net/flow_dissector.h +EFX_HAVE_FLOW_DISSECTOR_KEY_ENC_IP symbol FLOW_DISSECTOR_KEY_ENC_IP include/net/flow_dissector.h +EFX_HAVE_FLOW_DISSECTOR_64BIT_USED_KEYS memtype struct_flow_dissector used_keys include/net/flow_dissector.h unsigned long long +EFX_HAVE_FLOW_DISSECTOR_VLAN_TPID member struct_flow_dissector_key_vlan vlan_tpid include/net/flow_dissector.h +EFX_HAVE_OLD_TCF_ACTION_STATS_UPDATE symtype tcf_action_stats_update include/net/act_api.h void(struct tc_action *a, u64 bytes, u64 packets, u64 lastuse) +EFX_HAVE_FLOW_STATS_TYPE symbol flow_action_hw_stats include/net/flow_offload.h +EFX_HAVE_FLOW_STATS_DROPS member struct_flow_stats drops include/net/flow_offload.h +EFX_HAVE_NF_FLOW_TABLE_OFFLOAD symbol nf_flow_table_offload_add_cb include/net/netfilter/nf_flow_table.h +EFX_HAVE_TC_ACT_CT file include/net/tc_act/tc_ct.h +EFX_HAVE_TCB_EXTACK member struct_tc_block_offload extack include/net/pkt_cls.h +EFX_HAVE_TCF_EXTACK member struct_tc_cls_common_offload extack include/net/pkt_cls.h +EFX_HAVE_TC_CAN_EXTACK symbol tc_can_offload_extack include/net/pkt_cls.h +EFX_HAVE_NETIF_IS_VXLAN symbol netif_is_vxlan include/net/vxlan.h +EFX_HAVE_NETIF_IS_GENEVE symbol netif_is_geneve include/net/geneve.h +EFX_NEED_IDA_ALLOC_RANGE nsymbol ida_alloc_range include/linux/idr.h +EFX_HAVE_IPV6_STUBS_DST_LOOKUP_FLOW custom +EFX_HAVE_SKB__LIST member struct_sk_buff list include/linux/skbuff.h +EFX_HAVE_RECEIVE_SKB_LIST symbol netif_receive_skb_list include/linux/netdevice.h +EFX_NEED_SKB_LIST_DEL_INIT nsymbol skb_list_del_init include/linux/skbuff.h +EFX_NEED_SKB_MARK_NOT_ON_LIST nsymbol skb_mark_not_on_list include/linux/skbuff.h +EFX_HAVE_MMIOWB symbol mmiowb include/asm-generic/io.h +EFX_HAVE_NET_DEVLINK_H file include/net/devlink.h +EFX_HAVE_NDO_GET_DEVLINK_PORT member struct_net_device_ops ndo_get_devlink_port include/linux/netdevice.h +EFX_HAVE_SET_NETDEV_DEVLINK_PORT symbol SET_NETDEV_DEVLINK_PORT include/linux/netdevice.h +EFX_HAVE_DEVLINK_INFO symbol devlink_info_version_running_put include/net/devlink.h +EFX_NEED_DEVLINK_INFO_BOARD_SERIAL_NUMBER_PUT nsymbol devlink_info_board_serial_number_put include/net/devlink.h +EFX_HAVE_DEVLINK_INFO_DRIVER_NAME_PUT symbol devlink_info_driver_name_put include/net/devlink.h 
+EFX_HAVE_DEVLINK_OPS_SUPPORTED_FLASH_UPDATE_PARAMS member devlink_ops supported_flash_update_params include/net/devlink.h +EFX_HAVE_DEVLINK_FLASH_UPDATE_PARAMS symbol devlink_flash_update_params include/net/devlink.h +EFX_HAVE_DEVLINK_FLASH_UPDATE_PARAMS_FW member struct_devlink_flash_update_params fw include/net/devlink.h +EFX_HAVE_DEVLINK_FLASH_UPDATE_BEGIN_NOTIFY symbol devlink_flash_update_begin_notify include/net/devlink.h +EFX_NEED_DEVLINK_FLASH_UPDATE_STATUS_NOTIFY nsymbol devlink_flash_update_status_notify include/net/devlink.h +EFX_NEED_DEVLINK_FLASH_UPDATE_TIMEOUT_NOTIFY nsymbol devlink_flash_update_timeout_notify include/net/devlink.h +EFX_HAVE_DEVLINK_ALLOC_DEV symtype devlink_alloc include/net/devlink.h struct devlink *(const struct devlink_ops *, size_t, struct device *) +EFX_HAVE_VOID_DEVLINK_REGISTER symtype devlink_register include/net/devlink.h void(struct devlink *) +EFX_NEED_ETHTOOL_FLASH_DEVICE nsymbol devlink_compat_flash_update include/net/devlink.h +EFX_HAVE_ETHTOOL_COALESCE_CQE memtype struct_ethtool_ops get_coalesce include/linux/ethtool.h int (*)(struct net_device *, struct ethtool_coalesce *, struct kernel_ethtool_coalesce *, struct netlink_ext_ack *) +EFX_HAVE_ETHTOOL_GET_RINGPARAM_EXTACK memtype struct_ethtool_ops get_ringparam include/linux/ethtool.h void (*)(struct net_device *, struct ethtool_ringparam *, struct kernel_ethtool_ringparam *, struct netlink_ext_ack *) +EFX_HAVE_ETHTOOL_SET_RINGPARAM_EXTACK memtype struct_ethtool_ops set_ringparam include/linux/ethtool.h int (*)(struct net_device *, struct ethtool_ringparam *, struct kernel_ethtool_ringparam *, struct netlink_ext_ack *) + +EFX_HAVE_PCI_FIND_NEXT_EXT_CAPABILITY symbol pci_find_next_ext_capability include/linux/pci.h +EFX_HAVE_XDP_SOCK export xdp_get_umem_from_qid include/net/xdp_sock.h +EFX_HAVE_XDP_SOCK_DRV export xdp_get_umem_from_qid include/net/xdp_sock_drv.h +EFX_HAVE_XSK_POOL export xsk_get_pool_from_qid include/net/xdp_sock_drv.h +EFX_HAVE_XSK_OFFSET_ADJUST symbol xsk_umem_adjust_offset include/net/xdp_sock.h +EFX_HAVE_XDP_UMEM_RELEASE_ADDR symbol xsk_umem_release_addr include/net/xdp_sock.h +EFX_HAVE_XSK_UMEM_CONS_TX_2PARAM symtype xsk_umem_consume_tx include/net/xdp_sock.h bool(struct xdp_umem *umem, struct xdp_desc *) +EFX_HAVE_XSK_NEED_WAKEUP symbol xsk_umem_uses_need_wakeup include/net/xdp_sock.h include/net/xdp_sock_drv.h +EFX_HAVE_COALESCE_PARAMS member struct_ethtool_ops supported_coalesce_params include/linux/ethtool.h +EFX_HAVE_XDP_QUERY_PROG symbol XDP_QUERY_PROG include/linux/netdevice.h +EFX_HAVE_XDP_FRAME_SZ member struct_xdp_buff frame_sz include/net/xdp.h +EFX_NEED_XDP_INIT_BUFF nsymbol xdp_init_buff include/net/xdp.h +EFX_NEED_XDP_PREPARE_BUFF nsymbol xdp_prepare_buff include/net/xdp.h +EFX_NEED_XDP_DO_FLUSH nsymbol xdp_do_flush include/linux/filter.h +EFX_HAVE_VDPA_VQ_STATE symbol vdpa_vq_state include/linux/vdpa.h +EFX_HAVE_VDPA_VQ_STATE_SPLIT symbol vdpa_vq_state_split include/linux/vdpa.h +EFX_HAVE_GET_VQ_IRQ member struct_vdpa_config_ops get_vq_irq include/linux/vdpa.h +EFX_HAVE_GET_VQ_NOTIFY member struct_vdpa_config_ops get_vq_notification include/linux/vdpa.h +EFX_HAVE_GET_DEVICE_FEATURES member struct_vdpa_config_ops get_device_features include/linux/vdpa.h +EFX_HAVE_VDPA_RESET member struct_vdpa_config_ops reset include/linux/vdpa.h
+EFX_HAVE_GET_CONFIG_SIZE member struct_vdpa_config_ops get_config_size include/linux/vdpa.h +EFX_HAVE_VDPA_ALLOC_NVQS_PARAM symtype __vdpa_alloc_device include/linux/vdpa.h struct vdpa_device *(struct device *, const struct vdpa_config_ops *, int, size_t) +EFX_HAVE_VDPA_ALLOC_NAME_PARAM symtype __vdpa_alloc_device include/linux/vdpa.h struct vdpa_device *(struct device *, const struct vdpa_config_ops *, size_t, const char *) +EFX_HAVE_VDPA_ALLOC_NAME_USEVA_PARAMS symtype __vdpa_alloc_device include/linux/vdpa.h struct vdpa_device *(struct device *, const struct vdpa_config_ops *, size_t, const char *, bool) +EFX_HAVE_VDPA_ALLOC_ASID_NAME_USEVA_PARAMS symtype __vdpa_alloc_device include/linux/vdpa.h struct vdpa_device *(struct device *, const struct vdpa_config_ops *, unsigned int, unsigned int, size_t, const char *, bool) +EFX_HAVE_VDPA_REGISTER_NVQS_PARAM symtype _vdpa_register_device include/linux/vdpa.h int(struct vdpa_device *, int ) +EFX_HAVE_VDPA_DMA_MAP_OPAQUE_PARAM memtype struct_vdpa_config_ops dma_map include/linux/vdpa.h int (*)(struct vdpa_device *, u64, u64, u64, u32, void *) +EFX_HAVE_VDPA_CONFIG_OP_SUSPEND member struct_vdpa_config_ops suspend include/linux/vdpa.h +EFX_HAVE_RHASHTABLE file include/linux/rhashtable.h +EFX_HAVE_RHASHTABLE_LOOKUP_FAST symbol rhashtable_lookup_fast include/linux/rhashtable.h +EFX_NEED_RHASHTABLE_WALK_ENTER nsymbol rhashtable_walk_enter include/linux/rhashtable.h +EFX_HAVE_RHASHTABLE_WALK_INIT_GFP symtype rhashtable_walk_init include/linux/rhashtable.h int(struct rhashtable *, struct rhashtable_iter *, gfp_t) +EFX_NEED_STRSCPY nsymbol strscpy include/linux/fortify-string.h +EFX_HAVE_NDO_SIOCDEVPRIVATE member struct_net_device_ops ndo_siocdevprivate include/linux/netdevice.h +EFX_HAVE_NDO_ETH_IOCTL member struct_net_device_ops ndo_eth_ioctl include/linux/netdevice.h +EFX_NEED_NETDEV_HOLD nsymbol netdev_hold include/linux/netdevice.h +EFX_HAVE_DEV_HOLD_TRACK symbol dev_hold_track include/linux/netdevice.h +EFX_NEED_KREALLOC_ARRAY nsymbol krealloc_array include/linux/slab.h +EFX_HAVE_VDPA_MGMT_INTERFACE symbol vdpa_mgmtdev_register include/linux/vdpa.h +EFX_HAVE_IOMMU_CAPABLE symbol iommu_capable include/linux/iommu.h +EFX_NEED_DEVICE_IOMMU_CAPABLE nsymbol device_iommu_capable include/linux/iommu.h +EFX_HAVE_IOMMU_MAP_GFP_PARAM symtype iommu_map include/linux/iommu.h int(struct iommu_domain *, unsigned long iova, phys_addr_t paddr, size_t, int, gfp_t) +EFX_HAVE_VDPA_MGMT_OPS_CONFIG_PARAM memtype struct_vdpa_mgmtdev_ops dev_add include/linux/vdpa.h int (*)(struct vdpa_mgmt_dev *, const char *, const struct vdpa_dev_set_config *) +EFX_HAVE_VDPA_SUPPORTED_FEATURES member struct_vdpa_mgmt_dev supported_features include/linux/vdpa.h +EFX_HAVE_VDPA_MAX_SUPPORTED_VQS member struct_vdpa_mgmt_dev max_supported_vqs include/linux/vdpa.h +EFX_HAVE_VIRTIO_NET_SPEED_LE32 memtype struct_virtio_net_config speed include/uapi/linux/virtio_net.h __le32 +EFX_NEED_TIMESPEC64_TO_NS_SIGNED custom +EFX_HAVE_KOBJECT_DEFAULT_GROUPS member struct_kobj_type default_groups include/linux/kobject.h +EFX_NEED_REFCOUNT_T nsymbol refcount_t include/linux/refcount.h +EFX_NEED_DEBUGFS_LOOKUP_AND_REMOVE nsymbol debugfs_lookup_and_remove include/linux/debugfs.h +" | egrep -v -e '^#' -e '^$' | sed 's/[ \t][ \t]*/:/g' +} + +###################################################################### +# Implementation for more tricky types + +function do_EFX_HAVE_MTD_DIRECT_ACCESS() +{ + # RHEL 4 is missing <mtd/mtd-abi.h>; assume old operation names + # in this case + # kernels post 3.5 changed to use _ for
function pointers + # kernels post 3.7 changed the location of mtd-abi.h to uapi/.. + (! test -f $KBUILD_SRC/include/mtd/mtd-abi.h && \ + ! test -f $KBUILD_SRC/include/uapi/mtd/mtd-abi.h ) || \ + defer_test_memtype pos struct_mtd_info erase include/linux/mtd/mtd.h void +} + +function do_EFX_USE_NETDEV_STATS() +{ + local source=" +#include <linux/netdevice.h> +struct net_device_stats *stats; +void test(struct net_device *net_dev); +void test(struct net_device *net_dev) { stats = &net_dev->stats; }" + defer_test_compile pos "$source" +} + +function do_EFX_HAVE_XEN_START_INFO() +{ + case $SRCARCH in + i386 | x86) + test_export xen_start_info arch/$SRCARCH/xen/enlighten.c || return + ;; + ia64) + test_export xen_start_info arch/ia64/xen/hypervisor.c || return + ;; + *) + return 1 + ;; + esac + + test_symbol xen_start_info \ + include/asm/xen/hypervisor.h \ + arch/$SRCARCH/include/asm/xen/hypervisor.h +} + +function do_EFX_HAVE_GRO() +{ + # We check symbol types here because in Linux 2.6.29 and 2.6.30 + # napi_gro_frags() took an extra parameter. We don't bother to + # support GRO on those versions; no major distribution used them. + if test_symbol napi_gro_receive_gr include/linux/netdevice.h; then + true + elif test_symbol gro_result_t include/linux/netdevice.h; then + defer_test_symtype pos napi_gro_frags include/linux/netdevice.h "gro_result_t(struct napi_struct *)" + else + defer_test_symtype pos napi_gro_frags include/linux/netdevice.h "int(struct napi_struct *)" + fi +} + +function do_EFX_NEED_SET_NORMALIZED_TIMESPEC +{ + ! test_inline_symbol set_normalized_timespec include/linux/time.h && \ + ! test_export set_normalized_timespec include/linux/time.h kernel/time.c +} + +function do_EFX_HAVE_OLD_ETHTOOL_RXFH_INDIR +{ + test_symbol ETHTOOL_GRXFHINDIR include/linux/ethtool.h && \ + ! test_symbol get_rxfh_indir_size include/linux/ethtool.h +} + +function do_EFX_HAVE_ETHTOOL_SET_RXFH_NOCONST +{ + defer_test_compile pos " +#include <linux/ethtool.h> +static int test_func(struct net_device *a, u32 *b, u8 *c) +{ + return 0; +} +struct ethtool_ops_ext test = { + .set_rxfh = test_func +}; +" +} + +function do_EFX_NEED_IS_COMPAT_TASK +{ + defer_test_compile neg " +#include <linux/compat.h> + +int test(void); +int test(void) { return is_compat_task(); } +" +} + +function do_EFX_NEED_PTP_CLOCK_PPSUSR +{ + # If the enum is not complete + test_symbol PTP_CLOCK_PPS include/linux/ptp_clock_kernel.h && \ + ! test_symbol PTP_CLOCK_PPSUSR include/linux/ptp_clock_kernel.h +} + +function do_EFX_HAVE_PHC_SUPPORT +{ + if [ "${CONFIG_PTP_1588_CLOCK:-}" = "y" ] || [ "${CONFIG_PTP_1588_CLOCK:-}" = "m" ]; then + # Ideally this would use the following (but it is a deferred test): + # test_member struct ptp_clock_event ptp_evt pps_times + # NB pps_times is needed for the PTP_CLOCK_PPSUSR event + test_export ptp_clock_register && \ + test_symbol pps_times include/linux/ptp_clock_kernel.h + else + return 1 + fi +} + +function do_EFX_HAVE_OLD_KMAP_ATOMIC +{ + # This is a negative test because the new implementation of + # kmap_atomic() was a macro that accepts and ignores extra + # arguments. + defer_test_compile neg " +#include <linux/highmem.h> + +void *f(struct page *p); +void *f(struct page *p) +{ + return kmap_atomic(p); +} +" +} + +function do_EFX_HAVE_KTIME_UNION +{ + defer_test_compile pos " +#include <linux/ktime.h> + +void f(void); +void f(void) +{ + ktime_t t; + t.tv64 = 0; +} +" +} + +function do_EFX_HAVE_NEW_ALLOC_WORKQUEUE +{ + # The old macro only accepts 3 arguments.
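 + # The newer alloc_workqueue() is printf-style and takes varargs, so + # passing a fourth argument here only compiles against that form.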
 + defer_test_compile pos ' +#include <linux/workqueue.h> + +void f(void); +void f(void) +{ + alloc_workqueue("%s", 0, 0, "test"); +} +' +} + +function do_EFX_HAVE_TC_ACTION_COOKIE +{ + # TC action cookie isn't upstream yet. When it is, we expect something + # like the following compat rule to work: + # member struct_flow_action_entry cookie include/net/flow_offload.h + # In the meantime, we keep around our code to support it but make the + # compat symbol always false, in case the eventually merged version is + # different to that against which our current code was developed. + return 1 +} + +function do_EFX_HAVE_IPV6_STUBS_DST_LOOKUP_FLOW +{ + if test_symbol ipv6_dst_lookup_flow include/net/addrconf.h; then + defer_test_memtype pos struct_ipv6_stub ipv6_dst_lookup_flow include/net/addrconf.h void + else + defer_test_memtype pos struct_ipv6_stub ipv6_dst_lookup_flow include/net/ipv6_stubs.h void + fi +} + +function do_EFX_NEED_TIMESPEC64_TO_NS_SIGNED +{ + test -f $KBUILD_SRC/include/linux/time64.h && + grep -q 'Prevent multiplication overflow ./' $KBUILD_SRC/include/linux/time64.h +} + +TOPDIR=$(dirname "$0")/../../../.. +source $TOPDIR/scripts/kernel_compat_funcs.sh diff --git a/src/drivers/net/ethernet/sfc/linux_mtd_mtd.h b/src/drivers/net/ethernet/sfc/linux_mtd_mtd.h new file mode 100644 index 0000000..0cc1cd1 --- /dev/null +++ b/src/drivers/net/ethernet/sfc/linux_mtd_mtd.h @@ -0,0 +1,23 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2012 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +/* Prior to Linux 3.2, <linux/mtd/mtd.h> would define a DEBUG + * function-like macro, which we really don't want. Save and + * restore the defined-ness of DEBUG across this #include. + */ + +#ifdef DEBUG +#define EFX_MTD_DEBUG +#undef DEBUG +#endif +#include <linux/mtd/mtd.h> +#undef DEBUG +#ifdef EFX_MTD_DEBUG +#define DEBUG +#endif diff --git a/src/drivers/net/ethernet/sfc/mae.c b/src/drivers/net/ethernet/sfc/mae.c new file mode 100644 index 0000000..815c11c --- /dev/null +++ b/src/drivers/net/ethernet/sfc/mae.c @@ -0,0 +1,2408 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2019 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference.
+ */ + +#include "mae.h" +#include "mcdi.h" +#include "mcdi_pcol.h" +#include "mcdi_pcol_mae.h" +#include "tc_encap_actions.h" +#include "tc_conntrack.h" + +int efx_mae_allocate_mport(struct efx_nic *efx, u32 *id, u32 *label) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_MPORT_ALLOC_ALIAS_OUT_LEN); + MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_MPORT_ALLOC_ALIAS_IN_LEN); + size_t outlen; + int rc; + + if (WARN_ON_ONCE(!id)) + return -EINVAL; + if (WARN_ON_ONCE(!label)) + return -EINVAL; + + MCDI_SET_DWORD(inbuf, MAE_MPORT_ALLOC_ALIAS_IN_TYPE, + MC_CMD_MAE_MPORT_ALLOC_ALIAS_IN_MPORT_TYPE_ALIAS); + MCDI_SET_DWORD(inbuf, MAE_MPORT_ALLOC_ALIAS_IN_DELIVER_MPORT, + MAE_MPORT_SELECTOR_ASSIGNED); + rc = efx_mcdi_rpc(efx, MC_CMD_MAE_MPORT_ALLOC, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + if (outlen < sizeof(outbuf)) + return -EIO; + *id = MCDI_DWORD(outbuf, MAE_MPORT_ALLOC_ALIAS_OUT_MPORT_ID); + *label = MCDI_DWORD(outbuf, MAE_MPORT_ALLOC_ALIAS_OUT_LABEL); + return 0; +} + +int efx_mae_free_mport(struct efx_nic *efx, u32 id) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_MPORT_FREE_IN_LEN); + + BUILD_BUG_ON(MC_CMD_MAE_MPORT_FREE_OUT_LEN); + MCDI_SET_DWORD(inbuf, MAE_MPORT_FREE_IN_MPORT_ID, id); + return efx_mcdi_rpc(efx, MC_CMD_MAE_MPORT_FREE, inbuf, sizeof(inbuf), + NULL, 0, NULL); +} + +void efx_mae_mport_wire(struct efx_nic *efx, u32 *out) +{ + efx_dword_t mport; + + EFX_POPULATE_DWORD_2(mport, + MAE_MPORT_SELECTOR_TYPE, MAE_MPORT_SELECTOR_TYPE_PPORT, + MAE_MPORT_SELECTOR_PPORT_ID, efx->port_num); + *out = EFX_DWORD_VAL(mport); +} + +void efx_mae_mport_uplink(struct efx_nic *efx __always_unused, u32 *out) +{ + efx_dword_t mport; + + EFX_POPULATE_DWORD_3(mport, + MAE_MPORT_SELECTOR_TYPE, MAE_MPORT_SELECTOR_TYPE_FUNC, + MAE_MPORT_SELECTOR_FUNC_PF_ID, MAE_MPORT_SELECTOR_FUNC_PF_ID_CALLER, + MAE_MPORT_SELECTOR_FUNC_VF_ID, MAE_MPORT_SELECTOR_FUNC_VF_ID_NULL); + *out = EFX_DWORD_VAL(mport); +} + +void efx_mae_mport_vf(struct efx_nic *efx __always_unused, u32 vf_id, u32 *out) +{ + efx_dword_t mport; + + EFX_POPULATE_DWORD_3(mport, + MAE_MPORT_SELECTOR_TYPE, MAE_MPORT_SELECTOR_TYPE_FUNC, + MAE_MPORT_SELECTOR_FUNC_PF_ID, MAE_MPORT_SELECTOR_FUNC_PF_ID_CALLER, + MAE_MPORT_SELECTOR_FUNC_VF_ID, vf_id); + *out = EFX_DWORD_VAL(mport); +} + +/* Constructs an mport selector from an mport ID, because they're not the same */ +void efx_mae_mport_mport(struct efx_nic *efx __always_unused, u32 mport_id, u32 *out) +{ + efx_dword_t mport; + + EFX_POPULATE_DWORD_2(mport, + MAE_MPORT_SELECTOR_TYPE, MAE_MPORT_SELECTOR_TYPE_MPORT_ID, + MAE_MPORT_SELECTOR_MPORT_ID, mport_id); + *out = EFX_DWORD_VAL(mport); +} + +/* id is really only 24 bits wide */ +int efx_mae_lookup_mport(struct efx_nic *efx, u32 selector, u32 *id) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_MPORT_LOOKUP_OUT_LEN); + MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_MPORT_LOOKUP_IN_LEN); + size_t outlen; + int rc; + + MCDI_SET_DWORD(inbuf, MAE_MPORT_LOOKUP_IN_MPORT_SELECTOR, selector); + rc = efx_mcdi_rpc(efx, MC_CMD_MAE_MPORT_LOOKUP, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + if (outlen < sizeof(outbuf)) + return -EIO; + *id = MCDI_DWORD(outbuf, MAE_MPORT_LOOKUP_OUT_MPORT_ID); + return 0; +} + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_TC_OFFLOAD) +int efx_mae_start_counters(struct efx_nic *efx, struct efx_rx_queue *rx_queue) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_COUNTERS_STREAM_START_OUT_LEN); + u32 out_flags; + size_t outlen; + int rc; + 
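 + /* Counter updates are delivered to the driver as packets on this RX + * queue, so tell the MC which queue to use and (via the MTU) how big + * a counter packet it may build. + */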
+ MCDI_SET_WORD(inbuf, MAE_COUNTERS_STREAM_START_V2_IN_QID, rx_queue->queue); + MCDI_SET_WORD(inbuf, MAE_COUNTERS_STREAM_START_V2_IN_PACKET_SIZE, + efx->net_dev->mtu); + MCDI_SET_DWORD(inbuf, MAE_COUNTERS_STREAM_START_V2_IN_COUNTER_TYPES_MASK, + BIT(MAE_COUNTER_TYPE_AR) | BIT(MAE_COUNTER_TYPE_CT) | + BIT(MAE_COUNTER_TYPE_OR)); + rc = efx_mcdi_rpc(efx, MC_CMD_MAE_COUNTERS_STREAM_START, + inbuf, sizeof(inbuf), outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + if (outlen < sizeof(outbuf)) + return -EIO; + out_flags = MCDI_DWORD(outbuf, MAE_COUNTERS_STREAM_START_OUT_FLAGS); + if (out_flags & BIT(MC_CMD_MAE_COUNTERS_STREAM_START_OUT_USES_CREDITS_OFST)) { + netif_dbg(efx, drv, efx->net_dev, + "MAE counter stream uses credits\n"); + rx_queue->grant_credits = true; + out_flags &= ~BIT(MC_CMD_MAE_COUNTERS_STREAM_START_OUT_USES_CREDITS_OFST); + } + if (out_flags) { + netif_err(efx, drv, efx->net_dev, + "MAE counter stream start: unrecognised flags %x\n", + out_flags); + goto out_stop; + } + return 0; +out_stop: + efx_mae_stop_counters(efx, rx_queue); + return -EOPNOTSUPP; +} + +static bool efx_mae_counters_flushed(u32 *flush_gen, u32 *seen_gen) +{ + int i; + + /* Serial-number arithmetic: a signed comparison of the generation + * counts stays correct even if they have wrapped. + */ + for (i = 0; i < EFX_TC_COUNTER_TYPE_MAX; i++) + if ((s32)(flush_gen[i] - seen_gen[i]) > 0) + return false; + return true; +} + +int efx_mae_stop_counters(struct efx_nic *efx, struct efx_rx_queue *rx_queue) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_COUNTERS_STREAM_STOP_V2_OUT_LENMAX); + MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_COUNTERS_STREAM_STOP_IN_LEN); + size_t outlen; + int rc, i; + + MCDI_SET_WORD(inbuf, MAE_COUNTERS_STREAM_STOP_IN_QID, rx_queue->queue); + rc = efx_mcdi_rpc(efx, MC_CMD_MAE_COUNTERS_STREAM_STOP, + inbuf, sizeof(inbuf), outbuf, sizeof(outbuf), &outlen); + + if (rc) + return rc; + + netif_dbg(efx, drv, efx->net_dev, "Draining counters:\n"); + /* Only process received generation counts */ + for (i = 0; (i < (outlen / 4)) && (i < EFX_TC_COUNTER_TYPE_MAX); i++) { + efx->tc->flush_gen[i] = MCDI_ARRAY_DWORD( + outbuf, + MAE_COUNTERS_STREAM_STOP_V2_OUT_GENERATION_COUNT, + i); + netif_dbg(efx, drv, efx->net_dev, + "\ttype %u, awaiting gen %u\n", i, + efx->tc->flush_gen[i]); + } + + efx->tc->flush_counters = true; + + /* Drain can take up to 2 seconds owing to FWRIVERHD-2884; whatever + * timeout we use, that delay is added to unload on nonresponsive + * hardware, so 2500ms seems like a reasonable compromise.
+ */ + if (!wait_event_timeout(efx->tc->flush_wq, + efx_mae_counters_flushed(efx->tc->flush_gen, + efx->tc->seen_gen), + msecs_to_jiffies(2500))) + netif_warn(efx, drv, efx->net_dev, + "Failed to drain counters RXQ, FW may be unhappy\n"); + + efx->tc->flush_counters = false; + + return rc; +} + +void efx_mae_counters_grant_credits(struct work_struct *work) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_COUNTERS_STREAM_GIVE_CREDITS_IN_LEN); + struct efx_rx_queue *rx_queue = container_of(work, struct efx_rx_queue, + grant_work); + struct efx_nic *efx = rx_queue->efx; + unsigned int credits; + + BUILD_BUG_ON(MC_CMD_MAE_COUNTERS_STREAM_GIVE_CREDITS_OUT_LEN); + credits = READ_ONCE(rx_queue->notified_count) - rx_queue->granted_count; + MCDI_SET_DWORD(inbuf, MAE_COUNTERS_STREAM_GIVE_CREDITS_IN_NUM_CREDITS, + credits); + if (!efx_mcdi_rpc(efx, MC_CMD_MAE_COUNTERS_STREAM_GIVE_CREDITS, + inbuf, sizeof(inbuf), NULL, 0, NULL)) + rx_queue->granted_count += credits; +} +#endif + +/* mport handling + */ +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_RHASHTABLE_LOOKUP_FAST) +static const struct rhashtable_params efx_mae_mports_ht_params = { + .key_len = sizeof(u32), + .key_offset = offsetof(struct mae_mport_desc, mport_id), + .head_offset = offsetof(struct mae_mport_desc, linkage), +}; + +struct mae_mport_desc *efx_mae_get_mport(struct efx_nic *efx, u32 mport_id) +{ + struct mae_mport_desc *desc; + + if (!efx->mae) + return NULL; + rcu_read_lock(); + desc = rhashtable_lookup_fast(&efx->mae->mports_ht, &mport_id, + efx_mae_mports_ht_params); + if (desc) + /* try to take a ref */ + if (!refcount_inc_not_zero(&desc->ref)) + desc = ERR_PTR(-EAGAIN); + rcu_read_unlock(); + return desc; +} + +static int efx_mae_add_mport(struct efx_nic *efx, struct mae_mport_desc *desc) +{ + struct efx_mae *mae = efx->mae; + int rc; + + /* One ref for the hashtable, and one for us in case it gets removed + * in parallel before we get to add_mport() + */ + refcount_set(&desc->ref, 2); + rc = rhashtable_insert_fast(&mae->mports_ht, &desc->linkage, + efx_mae_mports_ht_params); + + if (rc) { + pci_err(efx->pci_dev, "Failed to insert MPORT %08x, rc %d\n", + desc->mport_id, rc); + kfree(desc); + return rc; + } + if (efx->type->add_mport) + rc = efx->type->add_mport(efx, desc); + /* drop our extra ref */ + efx_mae_put_mport(efx, desc); + return rc; +} + +void efx_mae_put_mport(struct efx_nic *efx, struct mae_mport_desc *desc) +{ + if (!refcount_dec_and_test(&desc->ref)) + return; /* still in use */ + synchronize_rcu(); + if (efx->type->remove_mport) + efx->type->remove_mport(efx, desc); + kfree(desc); +} + +static void efx_mae_remove_mport(struct efx_nic *efx, + struct mae_mport_desc *desc) +{ + struct efx_mae *mae = efx->mae; + int rc; + + rc = rhashtable_remove_fast(&mae->mports_ht, &desc->linkage, + efx_mae_mports_ht_params); + if (rc) + netif_err(efx, drv, efx->net_dev, + "Failed to remove MPORT %08x, rc %d\n", + desc->mport_id, rc); + else + efx_mae_put_mport(efx, desc); +} + +static int efx_mae_process_mport(struct efx_nic *efx, struct mae_mport_desc *desc) +{ + struct mae_mport_desc *mport; + + mport = efx_mae_get_mport(efx, desc->mport_id); + if (!IS_ERR_OR_NULL(mport)) { + efx_mae_remove_mport(efx, mport); + /* remove will put the HT's ref, but we still need to put + * the one we just got. 
+ */ + efx_mae_put_mport(efx, mport); + } + if (desc->caller_flags & MAE_MPORT_DESC_FLAG_IS_ZOMBIE) { + kfree(desc); + return 0; + } + return efx_mae_add_mport(efx, desc); +} +#else /* EFX_HAVE_RHASHTABLE_LOOKUP_FAST */ +struct mae_mport_desc *efx_mae_get_mport(struct efx_nic *efx, u32 mport_id) +{ + return NULL; +} + +void efx_mae_put_mport(struct efx_nic *efx, struct mae_mport_desc *desc) +{ + /* Shouldn't ever be reached; potential callers should never have a + * reference to put, since efx_mae_get_mport() never gives one out + */ + WARN_ON(1); +} + +static int efx_mae_process_mport(struct efx_nic *efx, struct mae_mport_desc *desc) +{ + return 0; +} +#endif /* EFX_HAVE_RHASHTABLE_LOOKUP_FAST */ + +int efx_mae_enumerate_mports(struct efx_nic *efx) +{ +#define MCDI_MPORT_JOURNAL_LEN \ + sizeof(efx_dword_t[DIV_ROUND_UP(MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_LENMAX_MCDI2, 4)]) + efx_dword_t *outbuf = kzalloc(MCDI_MPORT_JOURNAL_LEN, GFP_KERNEL); + MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_MPORT_READ_JOURNAL_IN_LEN); + MCDI_DECLARE_STRUCT_PTR(desc); + size_t outlen, stride, count; + int rc, i; + + if (!outbuf) + return -ENOMEM; + do { + rc = efx_mcdi_rpc(efx, MC_CMD_MAE_MPORT_READ_JOURNAL, inbuf, sizeof(inbuf), + outbuf, MCDI_MPORT_JOURNAL_LEN, &outlen); + if (rc) + goto fail; + if (outlen < MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_MPORT_DESC_DATA_OFST) { + rc = -EIO; + goto fail; + } + count = MCDI_DWORD(outbuf, MAE_MPORT_READ_JOURNAL_OUT_MPORT_DESC_COUNT); + if (!count) + continue; /* not break; we want to look at MORE flag */ + stride = MCDI_DWORD(outbuf, MAE_MPORT_READ_JOURNAL_OUT_SIZEOF_MPORT_DESC); + if (stride < MAE_MPORT_DESC_LEN) { + rc = -EIO; + goto fail; + } + if (outlen < MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_LEN(count * stride)) { + rc = -EIO; + goto fail; + } + for (i = 0; i < count; i++) { + struct mae_mport_desc *d; + + d = kzalloc(sizeof(*d), GFP_KERNEL); + if (!d) { + rc = -ENOMEM; + goto fail; + } + + desc = (efx_dword_t *)_MCDI_PTR(outbuf, MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_MPORT_DESC_DATA_OFST + i * stride); + d->mport_id = MCDI_STRUCT_DWORD(desc, MAE_MPORT_DESC_MPORT_ID); + d->flags = MCDI_STRUCT_DWORD(desc, MAE_MPORT_DESC_FLAGS); + d->caller_flags = MCDI_STRUCT_DWORD(desc, + MAE_MPORT_DESC_CALLER_FLAGS); + d->mport_type = MCDI_STRUCT_DWORD(desc, + MAE_MPORT_DESC_MPORT_TYPE); + switch (d->mport_type) { + case MAE_MPORT_DESC_MPORT_TYPE_NET_PORT: + d->port_idx = MCDI_STRUCT_DWORD(desc, + MAE_MPORT_DESC_NET_PORT_IDX); + break; + case MAE_MPORT_DESC_MPORT_TYPE_ALIAS: + d->alias_mport_id = MCDI_STRUCT_DWORD(desc, + MAE_MPORT_DESC_ALIAS_DELIVER_MPORT_ID); + break; + case MAE_MPORT_DESC_MPORT_TYPE_VNIC: + d->vnic_client_type = MCDI_STRUCT_DWORD(desc, + MAE_MPORT_DESC_VNIC_CLIENT_TYPE); + d->interface_idx = MCDI_STRUCT_DWORD(desc, + MAE_MPORT_DESC_VNIC_FUNCTION_INTERFACE); + d->pf_idx = MCDI_STRUCT_WORD(desc, + MAE_MPORT_DESC_VNIC_FUNCTION_PF_IDX); + d->vf_idx = MCDI_STRUCT_WORD(desc, + MAE_MPORT_DESC_VNIC_FUNCTION_VF_IDX); + break; + default: + /* Unknown mport_type, just accept it */ + break; + } + efx_mae_process_mport(efx, d); + } + } while (MCDI_FIELD(outbuf, MAE_MPORT_READ_JOURNAL_OUT, MORE) && + !WARN_ON(!count)); +fail: + kfree(outbuf); + return rc; +} + +static void efx_mae_mport_work(struct work_struct *data) +{ + struct efx_mae *mae = container_of(data, struct efx_mae, mport_work); + struct efx_nic *efx = mae->efx; + + efx_mae_enumerate_mports(efx); +} + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_TC_OFFLOAD) +static int efx_mae_table_get_desc(struct efx_nic *efx, + struct 
efx_tc_table_desc *desc, + u32 table_id) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_TABLE_DESCRIPTOR_OUT_LEN(16)); + MCDI_DECLARE_BUF(inbuf, MC_CMD_TABLE_DESCRIPTOR_IN_LEN); + unsigned int offset = 0, i; + size_t outlen; + int rc; + + memset(desc, 0, sizeof(*desc)); + + MCDI_SET_DWORD(inbuf, TABLE_DESCRIPTOR_IN_TABLE_ID, table_id); +more: + MCDI_SET_DWORD(inbuf, TABLE_DESCRIPTOR_IN_FIRST_FIELDS_INDEX, offset); + rc = efx_mcdi_rpc(efx, MC_CMD_TABLE_DESCRIPTOR, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc) + goto fail; + if (outlen < MC_CMD_TABLE_DESCRIPTOR_OUT_LEN(1)) { + rc = -EIO; + goto fail; + } + if (!offset) { /* first iteration: get metadata */ + desc->type = MCDI_WORD(outbuf, TABLE_DESCRIPTOR_OUT_TYPE); + desc->key_width = MCDI_WORD(outbuf, TABLE_DESCRIPTOR_OUT_KEY_WIDTH); + desc->resp_width = MCDI_WORD(outbuf, TABLE_DESCRIPTOR_OUT_RESP_WIDTH); + desc->n_keys = MCDI_WORD(outbuf, TABLE_DESCRIPTOR_OUT_N_KEY_FIELDS); + desc->n_resps = MCDI_WORD(outbuf, TABLE_DESCRIPTOR_OUT_N_RESP_FIELDS); + desc->n_prios = MCDI_WORD(outbuf, TABLE_DESCRIPTOR_OUT_N_PRIORITIES); + desc->flags = MCDI_BYTE(outbuf, TABLE_DESCRIPTOR_OUT_FLAGS); + rc = -EOPNOTSUPP; + if (desc->flags) + goto fail; + desc->scheme = MCDI_BYTE(outbuf, TABLE_DESCRIPTOR_OUT_SCHEME); + if (desc->scheme) + goto fail; + rc = -ENOMEM; + desc->keys = kcalloc(desc->n_keys, + sizeof(struct efx_tc_table_field_fmt), + GFP_KERNEL); + if (!desc->keys) + goto fail; + desc->resps = kcalloc(desc->n_resps, + sizeof(struct efx_tc_table_field_fmt), + GFP_KERNEL); + if (!desc->resps) + goto fail; + } + /* FW could have returned more than the 16 field_descrs we + * made room for in our outbuf + */ + outlen = min(outlen, sizeof(outbuf)); + for (i = 0; i + offset < desc->n_keys + desc->n_resps; i++) { + struct efx_tc_table_field_fmt *field; + MCDI_DECLARE_STRUCT_PTR(fdesc); + + if (outlen < MC_CMD_TABLE_DESCRIPTOR_OUT_LEN(i + 1)) { + offset += i; + goto more; + } + if (i + offset < desc->n_keys) + field = desc->keys + i + offset; + else + field = desc->resps + (i + offset - desc->n_keys); + fdesc = MCDI_ARRAY_STRUCT_PTR(outbuf, + TABLE_DESCRIPTOR_OUT_FIELDS, i); + field->field_id = MCDI_STRUCT_WORD(fdesc, + TABLE_FIELD_DESCR_FIELD_ID); + field->lbn = MCDI_STRUCT_WORD(fdesc, TABLE_FIELD_DESCR_LBN); + field->width = MCDI_STRUCT_WORD(fdesc, TABLE_FIELD_DESCR_WIDTH); + field->masking = MCDI_STRUCT_BYTE(fdesc, TABLE_FIELD_DESCR_MASK_TYPE); + field->scheme = MCDI_STRUCT_BYTE(fdesc, TABLE_FIELD_DESCR_SCHEME); + } + return 0; + +fail: + kfree(desc->keys); + kfree(desc->resps); + return rc; +} + +static int efx_mae_table_hook_find(u16 n_fields, + struct efx_tc_table_field_fmt *fields, + u16 field_id) +{ + unsigned int i; + + for (i = 0; i < n_fields; i++) { + if (fields[i].field_id == field_id) + return i; + } + return -EPROTO; +} + +#define TABLE_FIND_KEY(_desc, _id) \ + efx_mae_table_hook_find((_desc)->n_keys, (_desc)->keys, _id) +#define TABLE_FIND_RESP(_desc, _id) \ + efx_mae_table_hook_find((_desc)->n_resps, (_desc)->resps, _id) + +#define TABLE_HOOK_KEY(_meta, _name, _mcdi_name) ({ \ + int _rc = TABLE_FIND_KEY(&_meta->desc, TABLE_FIELD_ID_##_mcdi_name); \ + \ + if (_rc > U8_MAX) \ + _rc = -EOPNOTSUPP; \ + if (_rc >= 0) { \ + _meta->keys._name##_idx = _rc; \ + _rc = 0; \ + } \ + _rc; \ +}) +#define TABLE_HOOK_RESP(_meta, _name, _mcdi_name) ({ \ + int _rc = TABLE_FIND_RESP(&_meta->desc, TABLE_FIELD_ID_##_mcdi_name); \ + \ + if (_rc > U8_MAX) \ + _rc = -EOPNOTSUPP; \ + if (_rc >= 0) { \ + _meta->resps._name##_idx = _rc; \ + _rc = 0; \ + } \ + 
_rc; \ +}) + +static int efx_mae_table_hook_ct(struct efx_nic *efx, + struct efx_tc_table_ct *meta_ct) +{ + int rc; + + rc = TABLE_HOOK_KEY(meta_ct, eth_proto, ETHER_TYPE); + if (rc) + return rc; + rc = TABLE_HOOK_KEY(meta_ct, ip_proto, IP_PROTO); + if (rc) + return rc; + rc = TABLE_HOOK_KEY(meta_ct, src_ip, SRC_IP); + if (rc) + return rc; + rc = TABLE_HOOK_KEY(meta_ct, dst_ip, DST_IP); + if (rc) + return rc; + rc = TABLE_HOOK_KEY(meta_ct, l4_sport, SRC_PORT); + if (rc) + return rc; + rc = TABLE_HOOK_KEY(meta_ct, l4_dport, DST_PORT); + if (rc) + return rc; + rc = TABLE_HOOK_KEY(meta_ct, zone, DOMAIN); + if (rc) + return rc; + rc = TABLE_HOOK_RESP(meta_ct, dnat, NAT_DIR); + if (rc) + return rc; + rc = TABLE_HOOK_RESP(meta_ct, nat_ip, NAT_IP); + if (rc) + return rc; + rc = TABLE_HOOK_RESP(meta_ct, l4_natport, NAT_PORT); + if (rc) + return rc; + rc = TABLE_HOOK_RESP(meta_ct, mark, CT_MARK); + if (rc) + return rc; + rc = TABLE_HOOK_RESP(meta_ct, counter_id, COUNTER_ID); + if (rc) + return rc; + meta_ct->hooked = true; + return 0; +} + +static void efx_mae_table_free_desc(struct efx_tc_table_desc *desc) +{ + kfree(desc->keys); + kfree(desc->resps); + memset(desc, 0, sizeof(*desc)); +} + +static bool efx_mae_check_table_exists(struct efx_nic *efx, u32 tbl_req) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_TABLE_LIST_OUT_LEN(16)); + MCDI_DECLARE_BUF(inbuf, MC_CMD_TABLE_LIST_IN_LEN); + u32 tbl_id, tbl_total, tbl_cnt, pos = 0; + size_t outlen, msg_max; + bool ct_tbl = false; + int rc, idx; + + msg_max = sizeof(outbuf); + efx->tc->meta_ct.hooked = false; +more: + memset(outbuf, 0, sizeof(outbuf)); + MCDI_SET_DWORD(inbuf, TABLE_LIST_IN_FIRST_TABLE_ID_INDEX, pos); + rc = efx_mcdi_rpc(efx, MC_CMD_TABLE_LIST, inbuf, sizeof(inbuf), outbuf, + msg_max, &outlen); + if (rc) + return false; + + if (outlen < MC_CMD_TABLE_LIST_OUT_LEN(1)) + return false; + + tbl_total = MCDI_DWORD(outbuf, TABLE_LIST_OUT_N_TABLES); + tbl_cnt = MC_CMD_TABLE_LIST_OUT_TABLE_ID_NUM(min(outlen, msg_max)); + + for (idx = 0; idx < tbl_cnt; idx++) { + tbl_id = MCDI_ARRAY_DWORD(outbuf, TABLE_LIST_OUT_TABLE_ID, idx); + if (tbl_id == tbl_req) { + ct_tbl = true; + break; + } + } + + pos += tbl_cnt; + if (!ct_tbl && pos < tbl_total) + goto more; + + return ct_tbl; +} + +int efx_mae_get_tables(struct efx_nic *efx) +{ + int rc; + + efx->tc->meta_ct.hooked = false; + if (efx_mae_check_table_exists(efx, TABLE_ID_CONNTRACK_TABLE)) { + rc = efx_mae_table_get_desc(efx, &efx->tc->meta_ct.desc, + TABLE_ID_CONNTRACK_TABLE); + if (rc) { + pci_info(efx->pci_dev, + "FW does not support conntrack desc, rc %d\n", + rc); + return 0; + } + + rc = efx_mae_table_hook_ct(efx, &efx->tc->meta_ct); + if (rc) { + pci_info(efx->pci_dev, + "FW does not support conntrack hook, rc %d\n", + rc); + return 0; + } + } else { + pci_info(efx->pci_dev, + "FW does not support conntrack table\n"); + } + return 0; +} + +void efx_mae_free_tables(struct efx_nic *efx) +{ + efx_mae_table_free_desc(&efx->tc->meta_ct.desc); + efx->tc->meta_ct.hooked = false; +} +#else /* EFX_TC_OFFLOAD */ +int efx_mae_get_tables(struct efx_nic *efx) { return 0; } +void efx_mae_free_tables(struct efx_nic *efx) {} +#endif + +static int efx_mae_get_basic_caps(struct efx_nic *efx, struct mae_caps *caps) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_GET_CAPS_OUT_LEN); + size_t outlen; + int rc; + + BUILD_BUG_ON(MC_CMD_MAE_GET_CAPS_IN_LEN); + + rc = efx_mcdi_rpc(efx, MC_CMD_MAE_GET_CAPS, NULL, 0, outbuf, + sizeof(outbuf), &outlen); + if (rc) + return rc; + if (outlen < sizeof(outbuf)) + return -EIO; +
caps->match_field_count = MCDI_DWORD(outbuf, MAE_GET_CAPS_OUT_MATCH_FIELD_COUNT); + caps->encap_types = MCDI_DWORD(outbuf, MAE_GET_CAPS_OUT_ENCAP_TYPES_SUPPORTED); + caps->action_prios = MCDI_DWORD(outbuf, MAE_GET_CAPS_OUT_ACTION_PRIOS); + return 0; +} + +static int efx_mae_get_rule_fields(struct efx_nic *efx, u32 cmd, + u8 *field_support) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_GET_AR_CAPS_OUT_LEN(MAE_NUM_FIELDS)); + MCDI_DECLARE_STRUCT_PTR(caps); + unsigned int count; + size_t outlen; + int rc, i; + + BUILD_BUG_ON(MC_CMD_MAE_GET_AR_CAPS_OUT_LEN(MAE_NUM_FIELDS) < + MC_CMD_MAE_GET_OR_CAPS_OUT_LEN(MAE_NUM_FIELDS)); + BUILD_BUG_ON(MC_CMD_MAE_GET_AR_CAPS_IN_LEN); + BUILD_BUG_ON(MC_CMD_MAE_GET_OR_CAPS_IN_LEN); + + rc = efx_mcdi_rpc(efx, cmd, NULL, 0, outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + BUILD_BUG_ON(MC_CMD_MAE_GET_AR_CAPS_OUT_COUNT_OFST != + MC_CMD_MAE_GET_OR_CAPS_OUT_COUNT_OFST); + count = MCDI_DWORD(outbuf, MAE_GET_AR_CAPS_OUT_COUNT); + memset(field_support, MAE_FIELD_UNSUPPORTED, MAE_NUM_FIELDS); + BUILD_BUG_ON(MC_CMD_MAE_GET_AR_CAPS_OUT_FIELD_FLAGS_OFST != + MC_CMD_MAE_GET_OR_CAPS_OUT_FIELD_FLAGS_OFST); + caps = _MCDI_DWORD(outbuf, MAE_GET_AR_CAPS_OUT_FIELD_FLAGS); + /* We're only interested in the support status enum, not any other + * flags, so just extract that from each entry. + */ + for (i = 0; i < count; i++) + if (i * sizeof(*outbuf) + MC_CMD_MAE_GET_AR_CAPS_OUT_FIELD_FLAGS_OFST < outlen) + field_support[i] = EFX_DWORD_FIELD(caps[i], MAE_FIELD_FLAGS_SUPPORT_STATUS); + return 0; +} + +int efx_mae_get_caps(struct efx_nic *efx, struct mae_caps *caps) +{ + int rc; + + rc = efx_mae_get_basic_caps(efx, caps); + if (rc) + return rc; + rc = efx_mae_get_rule_fields(efx, MC_CMD_MAE_GET_AR_CAPS, + caps->action_rule_fields); + if (rc) + return rc; + return efx_mae_get_rule_fields(efx, MC_CMD_MAE_GET_OR_CAPS, + caps->outer_rule_fields); +} + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_TC_OFFLOAD) +/* Bit twiddling: + * Prefix: 1...110...0 + * ~: 0...001...1 + * + 1: 0...010...0 is power of two + * so (~x) & ((~x) + 1) == 0. Converse holds also. 
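+ * Worked example on a byte (with ~ computed as x ^ 0xff, as below):
+ *   x = 0xf0: ~x = 0x0f, (~x) + 1 = 0x10, 0x0f & 0x10 == 0    -> prefix
+ *   x = 0xb0: ~x = 0x4f, (~x) + 1 = 0x50, 0x4f & 0x50 == 0x40 -> not a prefix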
+ */ +#define is_prefix_byte(_x) !(((_x) ^ 0xff) & (((_x) ^ 0xff) + 1)) + +enum mask_type { MASK_ONES, MASK_ZEROES, MASK_PREFIX, MASK_OTHER }; + +static const char *mask_type_name(enum mask_type typ) +{ + switch (typ) { + case MASK_ONES: + return "all-1s"; + case MASK_ZEROES: + return "all-0s"; + case MASK_PREFIX: + return "prefix"; + case MASK_OTHER: + return "arbitrary"; + default: /* can't happen */ + return "unknown"; + } +} + +/* Checks a (big-endian) bytestring is a bit prefix */ +static enum mask_type classify_mask(const u8 *mask, size_t len) +{ + bool zeroes = true; /* All bits seen so far are zeroes */ + bool ones = true; /* All bits seen so far are ones */ + bool prefix = true; /* Valid prefix so far */ + size_t i; + + for (i = 0; i < len; i++) { + if (ones) { + if (!is_prefix_byte(mask[i])) + prefix = false; + } else if (mask[i]) { + prefix = false; + } + if (mask[i] != 0xff) + ones = false; + if (mask[i]) + zeroes = false; + } + if (ones) + return MASK_ONES; + if (zeroes) + return MASK_ZEROES; + if (prefix) + return MASK_PREFIX; + return MASK_OTHER; +} + +static int efx_mae_match_check_cap_typ(u8 support, enum mask_type typ) +{ + switch (support) { + case MAE_FIELD_UNSUPPORTED: + case MAE_FIELD_SUPPORTED_MATCH_NEVER: + if (typ == MASK_ZEROES) + return 0; + return -EOPNOTSUPP; + case MAE_FIELD_SUPPORTED_MATCH_OPTIONAL: + if (typ == MASK_ZEROES) + return 0; + fallthrough; + case MAE_FIELD_SUPPORTED_MATCH_ALWAYS: + if (typ == MASK_ONES) + return 0; + return -EINVAL; + case MAE_FIELD_SUPPORTED_MATCH_PREFIX: + if (typ == MASK_OTHER) + return -EOPNOTSUPP; + return 0; + case MAE_FIELD_SUPPORTED_MATCH_MASK: + return 0; + default: + return -EIO; + } +} + +#define CHECK(_mcdi, _field) do { \ + enum mask_type typ = classify_mask((const u8 *)&mask->_field, \ + sizeof(mask->_field)); \ + \ + rc = efx_mae_match_check_cap_typ(supported_fields[MAE_FIELD_ ## _mcdi],\ + typ); \ + if (rc) { \ + efx_tc_err(efx, "No support for %s mask in field %s\n", \ + mask_type_name(typ), #_field); \ + if (typ == MASK_ONES) \ + NL_SET_ERR_MSG_MOD(extack, "Unsupported match field " #_field);\ + else \ + NL_SET_ERR_MSG_MOD(extack, "Unsupported mask type for " #_field);\ + return rc; \ + } \ +} while (0) +#define UNSUPPORTED(_field) \ +if (classify_mask((const u8 *)&mask->_field, \ + sizeof(mask->_field)) != MASK_ZEROES) { \ + EFX_TC_ERR_MSG(efx, extack, "Unsupported match field " #_field); \ + return -EOPNOTSUPP; \ +} +/* Bitfield members need special handling. For now we only have 1-bit fields */ +#define CHECK_BIT(_mcdi, _field) do { \ + enum mask_type typ = mask->_field ? 
MASK_ONES : MASK_ZEROES; \ + \ + rc = efx_mae_match_check_cap_typ(supported_fields[MAE_FIELD_ ## _mcdi],\ + typ); \ + if (rc) { \ + efx_tc_err(efx, "No support for %s mask in field %s\n", \ + mask_type_name(typ), #_field); \ + if (typ == MASK_ONES) \ + NL_SET_ERR_MSG_MOD(extack, "Unsupported match field " #_field);\ + else \ + NL_SET_ERR_MSG_MOD(extack, "Unsupported mask type for " #_field);\ + return rc; \ + } \ +} while (0) +#define UNSUPPORTED_BIT(_field) \ +if (mask->_field) { \ + EFX_TC_ERR_MSG(efx, extack, "Unsupported match field " #_field); \ + return -EOPNOTSUPP; \ +} + +int efx_mae_match_check_caps(struct efx_nic *efx, + const struct efx_tc_match_fields *mask, + struct netlink_ext_ack *extack) +{ + const u8 *supported_fields = efx->tc->caps->action_rule_fields; + __be32 ingress_port = cpu_to_be32(mask->ingress_port); + enum mask_type ingress_port_mask_type; + int rc; + + /* Check for _PREFIX assumes big-endian, so we need to convert */ + ingress_port_mask_type = classify_mask((const u8 *)&ingress_port, + sizeof(ingress_port)); + rc = efx_mae_match_check_cap_typ(supported_fields[MAE_FIELD_INGRESS_PORT], + ingress_port_mask_type); + if (rc) { + efx_tc_err(efx, "No support for %s mask in field %s\n", + mask_type_name(ingress_port_mask_type), "ingress_port"); + NL_SET_ERR_MSG_MOD(extack, "Unsupported mask type for ingress_port"); + return rc; + } + CHECK(ETHER_TYPE, eth_proto); + CHECK(VLAN0_TCI, vlan_tci[0]); + CHECK(VLAN0_PROTO, vlan_proto[0]); + CHECK(VLAN1_TCI, vlan_tci[1]); + CHECK(VLAN1_PROTO, vlan_proto[1]); + CHECK(ETH_SADDR, eth_saddr); + CHECK(ETH_DADDR, eth_daddr); + CHECK(IP_PROTO, ip_proto); + CHECK(IP_TOS, ip_tos); + CHECK(IP_TTL, ip_ttl); + CHECK(SRC_IP4, src_ip); + CHECK(DST_IP4, dst_ip); +#ifdef CONFIG_IPV6 + CHECK(SRC_IP6, src_ip6); + CHECK(DST_IP6, dst_ip6); +#endif + CHECK(L4_SPORT, l4_sport); + CHECK(L4_DPORT, l4_dport); + CHECK(TCP_FLAGS, tcp_flags); + CHECK_BIT(TCP_SYN_FIN_RST, tcp_syn_fin_rst); + if (efx_tc_match_is_encap(mask)) { + rc = efx_mae_match_check_cap_typ( + supported_fields[MAE_FIELD_OUTER_RULE_ID], + MASK_ONES); + if (rc) { + EFX_TC_ERR_MSG(efx, extack, "No support for encap rule ID matches"); + return rc; + } + CHECK(ENC_VNET_ID, enc_keyid); + } else if (mask->enc_keyid) { + EFX_TC_ERR_MSG(efx, extack, "Match on enc_keyid requires other encap fields"); + return -EINVAL; + } + CHECK_BIT(IS_IP_FRAG, ip_frag); + CHECK_BIT(IP_FIRST_FRAG, ip_firstfrag); + CHECK_BIT(DO_CT, ct_state_trk); + CHECK_BIT(CT_HIT, ct_state_est); + UNSUPPORTED_BIT(ct_state_new); + UNSUPPORTED_BIT(ct_state_rel); + CHECK(CT_MARK, ct_mark); + CHECK(CT_DOMAIN, ct_zone); + CHECK(RECIRC_ID, recirc_id); + return 0; +} + +int efx_mae_match_check_caps_lhs(struct efx_nic *efx, + const struct efx_tc_match_fields *mask, + struct netlink_ext_ack *extack) +{ + const u8 *supported_fields = efx->tc->caps->outer_rule_fields; + __be32 ingress_port = cpu_to_be32(mask->ingress_port); + enum mask_type ingress_port_mask_type; + int rc; + + /* Check for _PREFIX assumes big-endian, so we need to convert */ + ingress_port_mask_type = classify_mask((const u8 *)&ingress_port, + sizeof(ingress_port)); + rc = efx_mae_match_check_cap_typ(supported_fields[MAE_FIELD_INGRESS_PORT], + ingress_port_mask_type); + if (rc) { + efx_tc_err(efx, "No support for %s mask in field %s\n", + mask_type_name(ingress_port_mask_type), "ingress_port"); + NL_SET_ERR_MSG_MOD(extack, "Unsupported mask type for ingress_port"); + return rc; + } + CHECK(ENC_ETHER_TYPE, eth_proto); + CHECK(ENC_VLAN0_TCI, vlan_tci[0]); + 
CHECK(ENC_VLAN0_PROTO, vlan_proto[0]); + CHECK(ENC_VLAN1_TCI, vlan_tci[1]); + CHECK(ENC_VLAN1_PROTO, vlan_proto[1]); + CHECK(ENC_ETH_SADDR, eth_saddr); + CHECK(ENC_ETH_DADDR, eth_daddr); + CHECK(ENC_IP_PROTO, ip_proto); + CHECK(ENC_IP_TOS, ip_tos); + CHECK(ENC_IP_TTL, ip_ttl); + CHECK_BIT(ENC_IP_FRAG, ip_frag); + CHECK(ENC_SRC_IP4, src_ip); + CHECK(ENC_DST_IP4, dst_ip); +#ifdef CONFIG_IPV6 + CHECK(ENC_SRC_IP6, src_ip6); + CHECK(ENC_DST_IP6, dst_ip6); +#endif + CHECK(ENC_L4_SPORT, l4_sport); + CHECK(ENC_L4_DPORT, l4_dport); + UNSUPPORTED(tcp_flags); + CHECK_BIT(TCP_SYN_FIN_RST, tcp_syn_fin_rst); + if (efx_tc_match_is_encap(mask)) { + /* can't happen; disallowed for local rules, translated + * for foreign rules. + */ + EFX_TC_ERR_MSG(efx, extack, "Unexpected encap match in LHS rule"); + return -EOPNOTSUPP; + } + UNSUPPORTED(enc_keyid); + /* Can't filter on conntrack in LHS rules */ + UNSUPPORTED_BIT(ct_state_trk); + UNSUPPORTED_BIT(ct_state_est); + UNSUPPORTED_BIT(ct_state_new); + UNSUPPORTED_BIT(ct_state_rel); + UNSUPPORTED(ct_mark); + UNSUPPORTED(recirc_id); + /* Can't filter on ip_firstfrag either */ + UNSUPPORTED_BIT(ip_firstfrag); + return 0; +} +#undef CHECK +#undef UNSUPPORTED + +#define CHECK(_mcdi) do { \ + rc = efx_mae_match_check_cap_typ(supported_fields[MAE_FIELD_ ## _mcdi],\ + MASK_ONES); \ + if (rc) { \ + efx_tc_err(efx, "No support for field %s\n", #_mcdi); \ + return rc; \ + } \ +} while (0) +/* Checks that the fields needed for encap-rule matches are supported by the + * MAE. All the fields are exact-match, except possibly ENC_IP_TOS. + */ +int efx_mae_check_encap_match_caps(struct efx_nic *efx, bool ipv6, + u8 ip_tos_mask) +{ + u8 *supported_fields = efx->tc->caps->outer_rule_fields; + enum mask_type typ; + int rc; + + CHECK(ENC_ETHER_TYPE); + if (ipv6) { + CHECK(ENC_SRC_IP6); + CHECK(ENC_DST_IP6); + } else { + CHECK(ENC_SRC_IP4); + CHECK(ENC_DST_IP4); + } + CHECK(ENC_L4_DPORT); + CHECK(ENC_IP_PROTO); + typ = classify_mask(&ip_tos_mask, sizeof(ip_tos_mask)); + rc = efx_mae_match_check_cap_typ(supported_fields[MAE_FIELD_ENC_IP_TOS], + typ); + if (rc) { + efx_tc_err(efx, "No support for %s mask in field %s\n", + mask_type_name(typ), "enc_ip_tos"); + return rc; + } + return 0; +} +#undef CHECK + +int efx_mae_check_encap_type_supported(struct efx_nic *efx, enum efx_encap_type typ) +{ + unsigned int bit; + + switch (typ & EFX_ENCAP_TYPES_MASK) { + case EFX_ENCAP_TYPE_VXLAN: + bit = MC_CMD_MAE_GET_CAPS_OUT_ENCAP_TYPE_VXLAN_LBN; + break; + case EFX_ENCAP_TYPE_NVGRE: + bit = MC_CMD_MAE_GET_CAPS_OUT_ENCAP_TYPE_NVGRE_LBN; + break; + case EFX_ENCAP_TYPE_GENEVE: + bit = MC_CMD_MAE_GET_CAPS_OUT_ENCAP_TYPE_GENEVE_LBN; + break; + default: + return -EOPNOTSUPP; + } + if (efx->tc->caps->encap_types & BIT(bit)) + return 0; + return -EOPNOTSUPP; +} + +int efx_mae_allocate_counter(struct efx_nic *efx, struct efx_tc_counter *cnt) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_COUNTER_ALLOC_OUT_LEN(1)); + MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_COUNTER_ALLOC_V2_IN_LEN); + size_t outlen; + int rc; + + if (!cnt) + return -EINVAL; + + MCDI_SET_DWORD(inbuf, MAE_COUNTER_ALLOC_V2_IN_REQUESTED_COUNT, 1); + MCDI_SET_DWORD(inbuf, MAE_COUNTER_ALLOC_V2_IN_COUNTER_TYPE, cnt->type); + rc = efx_mcdi_rpc(efx, MC_CMD_MAE_COUNTER_ALLOC, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + /* pcol says this can't happen, since count is 1 */ + if (outlen < sizeof(outbuf)) + return -EIO; + cnt->fw_id = MCDI_DWORD(outbuf, MAE_COUNTER_ALLOC_OUT_COUNTER_ID); + cnt->gen = MCDI_DWORD(outbuf, 
MAE_COUNTER_ALLOC_OUT_GENERATION_COUNT); + return 0; +} + +int efx_mae_free_counter(struct efx_nic *efx, struct efx_tc_counter *cnt) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_COUNTER_FREE_OUT_LEN(1)); + MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_COUNTER_FREE_V2_IN_LEN); + size_t outlen; + int rc; + + MCDI_SET_DWORD(inbuf, MAE_COUNTER_FREE_V2_IN_COUNTER_ID_COUNT, 1); + MCDI_SET_DWORD(inbuf, MAE_COUNTER_FREE_V2_IN_FREE_COUNTER_ID, cnt->fw_id); + MCDI_SET_DWORD(inbuf, MAE_COUNTER_FREE_V2_IN_COUNTER_TYPE, cnt->type); + rc = efx_mcdi_rpc(efx, MC_CMD_MAE_COUNTER_FREE, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + /* pcol says this can't happen, since count is 1 */ + if (outlen < sizeof(outbuf)) + return -EIO; + /* FW freed a different ID than we asked for, should also never happen. + * Warn because it means we've now got a different idea to the FW of + * what counters exist, which could cause mayhem later. + */ + if (WARN_ON(MCDI_DWORD(outbuf, MAE_COUNTER_FREE_OUT_FREED_COUNTER_ID) != + cnt->fw_id)) + return -EIO; + return 0; +} + +static int efx_mae_encap_type_to_mae_type(enum efx_encap_type type) +{ + switch (type & EFX_ENCAP_TYPES_MASK) { + case EFX_ENCAP_TYPE_NONE: + return MAE_MCDI_ENCAP_TYPE_NONE; + case EFX_ENCAP_TYPE_VXLAN: + return MAE_MCDI_ENCAP_TYPE_VXLAN; + case EFX_ENCAP_TYPE_GENEVE: + return MAE_MCDI_ENCAP_TYPE_GENEVE; + default: + return -EOPNOTSUPP; + } +} + +int efx_mae_allocate_encap_md(struct efx_nic *efx, + struct efx_tc_encap_action *encap) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN_LEN(EFX_TC_MAX_ENCAP_HDR)); + MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ENCAP_HEADER_ALLOC_OUT_LEN); + size_t inlen, outlen; + int rc; + + rc = efx_mae_encap_type_to_mae_type(encap->type); + if (rc < 0) + return rc; + MCDI_SET_DWORD(inbuf, MAE_ENCAP_HEADER_ALLOC_IN_ENCAP_TYPE, rc); + inlen = MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN_LEN(encap->encap_hdr_len); + if (WARN_ON(inlen > sizeof(inbuf))) /* can't happen */ + return -EINVAL; + memcpy(MCDI_PTR(inbuf, MAE_ENCAP_HEADER_ALLOC_IN_HDR_DATA), + encap->encap_hdr, + encap->encap_hdr_len); + rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ENCAP_HEADER_ALLOC, inbuf, + inlen, outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + if (outlen < sizeof(outbuf)) + return -EIO; + encap->fw_id = MCDI_DWORD(outbuf, MAE_ENCAP_HEADER_ALLOC_OUT_ENCAP_HEADER_ID); + return 0; +} + +int efx_mae_update_encap_md(struct efx_nic *efx, + struct efx_tc_encap_action *encap) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_ENCAP_HEADER_UPDATE_IN_LEN(EFX_TC_MAX_ENCAP_HDR)); + size_t inlen; + int rc; + + rc = efx_mae_encap_type_to_mae_type(encap->type); + if (rc < 0) + return rc; + MCDI_SET_DWORD(inbuf, MAE_ENCAP_HEADER_UPDATE_IN_ENCAP_TYPE, rc); + MCDI_SET_DWORD(inbuf, MAE_ENCAP_HEADER_UPDATE_IN_EH_ID, + encap->fw_id); + inlen = MC_CMD_MAE_ENCAP_HEADER_UPDATE_IN_LEN(encap->encap_hdr_len); + if (WARN_ON(inlen > sizeof(inbuf))) /* can't happen */ + return -EINVAL; + memcpy(MCDI_PTR(inbuf, MAE_ENCAP_HEADER_UPDATE_IN_HDR_DATA), + encap->encap_hdr, + encap->encap_hdr_len); + + BUILD_BUG_ON(MC_CMD_MAE_ENCAP_HEADER_UPDATE_OUT_LEN != 0); + return efx_mcdi_rpc(efx, MC_CMD_MAE_ENCAP_HEADER_UPDATE, inbuf, + inlen, NULL, 0, NULL); +} + +int efx_mae_free_encap_md(struct efx_nic *efx, + struct efx_tc_encap_action *encap) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ENCAP_HEADER_FREE_OUT_LEN(1)); + MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_ENCAP_HEADER_FREE_IN_LEN(1)); + size_t outlen; + int rc; + + MCDI_SET_DWORD(inbuf, MAE_ENCAP_HEADER_FREE_IN_EH_ID, encap->fw_id); + rc = 
efx_mcdi_rpc(efx, MC_CMD_MAE_ENCAP_HEADER_FREE, inbuf, + sizeof(inbuf), outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + if (outlen < sizeof(outbuf)) + return -EIO; + /* FW freed a different ID than we asked for, should also never happen. + * Warn because it means we've now got a different idea to the FW of + * what encap_mds exist, which could cause mayhem later. + */ + if (WARN_ON(MCDI_DWORD(outbuf, MAE_ENCAP_HEADER_FREE_OUT_FREED_EH_ID) != encap->fw_id)) + return -EIO; + /* We're probably about to free @encap, but let's just make sure its + * fw_id is blatted so that it won't look valid if it leaks out. + */ + encap->fw_id = MC_CMD_MAE_ENCAP_HEADER_ALLOC_OUT_ENCAP_HEADER_ID_NULL; + return 0; +} + +int efx_mae_allocate_pedit_mac(struct efx_nic *efx, + struct efx_tc_mac_pedit_action *ped) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_LEN); + MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_MAC_ADDR_ALLOC_IN_LEN); + size_t outlen; + int rc; + + BUILD_BUG_ON(MC_CMD_MAE_MAC_ADDR_ALLOC_IN_MAC_ADDR_LEN != + sizeof(ped->h_addr)); + memcpy(MCDI_PTR(inbuf, MAE_MAC_ADDR_ALLOC_IN_MAC_ADDR), ped->h_addr, + sizeof(ped->h_addr)); + rc = efx_mcdi_rpc(efx, MC_CMD_MAE_MAC_ADDR_ALLOC, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + if (outlen < sizeof(outbuf)) + return -EIO; + ped->fw_id = MCDI_DWORD(outbuf, MAE_MAC_ADDR_ALLOC_OUT_MAC_ID); + return 0; +} + +int efx_mae_free_pedit_mac(struct efx_nic *efx, + struct efx_tc_mac_pedit_action *ped) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_MAC_ADDR_FREE_OUT_LEN(1)); + MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_MAC_ADDR_FREE_IN_LEN(1)); + size_t outlen; + int rc; + + MCDI_SET_DWORD(inbuf, MAE_MAC_ADDR_FREE_IN_MAC_ID, ped->fw_id); + rc = efx_mcdi_rpc(efx, MC_CMD_MAE_MAC_ADDR_FREE, inbuf, + sizeof(inbuf), outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + if (outlen < sizeof(outbuf)) + return -EIO; + /* FW freed a different ID than we asked for, should also never happen. + * Warn because it means we've now got a different idea to the FW of + * what MAC addresses exist, which could cause mayhem later. + */ + if (WARN_ON(MCDI_DWORD(outbuf, MAE_MAC_ADDR_FREE_OUT_FREED_MAC_ID) != ped->fw_id)) + return -EIO; + /* We're probably about to free @ped, but let's just make sure its + * fw_id is blatted so that it won't look valid if it leaks out. 
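+ * (MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_MAC_ID_NULL is the pcol "no such ID"
+ * sentinel, so any leaked reference shows up as an obviously bogus ID.)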
+ */ + ped->fw_id = MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_MAC_ID_NULL; + return 0; +} +#endif /* EFX_TC_OFFLOAD */ + +static bool efx_mae_asl_id(u32 id) +{ + return !!(id & BIT(31)); +} + +int efx_mae_alloc_action_set(struct efx_nic *efx, struct efx_tc_action_set *act) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_SET_ALLOC_OUT_LEN); + MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_ACTION_SET_ALLOC_IN_LEN); + unsigned char vlan_push, vlan_pop; + size_t outlen; + int rc; + + /* Translate vlan actions from bitmask to count */ + switch (act->vlan_push) { + case 0: + case 1: + vlan_push = act->vlan_push; + break; + case 2: /* can't happen */ + default: + return -EINVAL; + case 3: + vlan_push = 2; + break; + } + switch (act->vlan_pop) { + case 0: + case 1: + vlan_pop = act->vlan_pop; + break; + case 2: /* can't happen */ + default: + return -EINVAL; + case 3: + vlan_pop = 2; + break; + } + + MCDI_POPULATE_DWORD_5(inbuf, MAE_ACTION_SET_ALLOC_IN_FLAGS, + MAE_ACTION_SET_ALLOC_IN_VLAN_PUSH, vlan_push, + MAE_ACTION_SET_ALLOC_IN_VLAN_POP, vlan_pop, + MAE_ACTION_SET_ALLOC_IN_DECAP, act->decap, + MAE_ACTION_SET_ALLOC_IN_DO_NAT, act->do_nat, + MAE_ACTION_SET_ALLOC_IN_DO_DECR_IP_TTL, + act->do_ttl_dec); +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_TC_OFFLOAD) + if (act->src_mac) + MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_SRC_MAC_ID, + act->src_mac->fw_id); + else +#endif + MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_SRC_MAC_ID, + MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_MAC_ID_NULL); +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_TC_OFFLOAD) + if (act->dst_mac) + MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_DST_MAC_ID, + act->dst_mac->fw_id); + else +#endif + MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_DST_MAC_ID, + MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_MAC_ID_NULL); +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_TC_OFFLOAD) + if (act->count && !WARN_ON(!act->count->cnt)) + MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_COUNTER_ID, + act->count->cnt->fw_id); + else +#endif + MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_COUNTER_ID, + MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_NULL); + MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_COUNTER_LIST_ID, + MC_CMD_MAE_COUNTER_LIST_ALLOC_OUT_COUNTER_LIST_ID_NULL); + if (act->vlan_push & 1) { + MCDI_SET_WORD_BE(inbuf, MAE_ACTION_SET_ALLOC_IN_VLAN0_TCI_BE, + act->vlan_tci[0]); + MCDI_SET_WORD_BE(inbuf, MAE_ACTION_SET_ALLOC_IN_VLAN0_PROTO_BE, + act->vlan_proto[0]); + } + if (act->vlan_push & 2) { + MCDI_SET_WORD_BE(inbuf, MAE_ACTION_SET_ALLOC_IN_VLAN1_TCI_BE, + act->vlan_tci[1]); + MCDI_SET_WORD_BE(inbuf, MAE_ACTION_SET_ALLOC_IN_VLAN1_PROTO_BE, + act->vlan_proto[1]); + } +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_TC_OFFLOAD) + if (act->encap_md) + MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_ENCAP_HEADER_ID, + act->encap_md->fw_id); + else +#endif + MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_ENCAP_HEADER_ID, + MC_CMD_MAE_ENCAP_HEADER_ALLOC_OUT_ENCAP_HEADER_ID_NULL); + if (act->deliver) + MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_DELIVER, + act->dest_mport); + BUILD_BUG_ON(MAE_MPORT_SELECTOR_NULL); + rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_SET_ALLOC, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + if (outlen < sizeof(outbuf)) + return -EIO; + act->fw_id = MCDI_DWORD(outbuf, MAE_ACTION_SET_ALLOC_OUT_AS_ID); + /* We rely on the high bit of AS IDs always being clear. + * The firmware API guarantees this, but let's check it ourselves. 
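+ * (Both ID spaces share a u32: efx_mae_asl_id() above keys on BIT(31),
+ * which is set for ASL_IDs and clear for AS_IDs.)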
+ */ + if (WARN_ON_ONCE(efx_mae_asl_id(act->fw_id))) { + efx_mae_free_action_set(efx, act->fw_id); + return -EIO; + } + return 0; +} + +int efx_mae_free_action_set(struct efx_nic *efx, u32 fw_id) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_SET_FREE_OUT_LEN(1)); + MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_ACTION_SET_FREE_IN_LEN(1)); + size_t outlen; + int rc; + + MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_FREE_IN_AS_ID, fw_id); + rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_SET_FREE, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + if (outlen < sizeof(outbuf)) + return -EIO; + /* FW freed a different ID than we asked for, should never happen. + * Warn because it means we've now got a different idea to the FW of + * what encap_mds exist, which could cause mayhem later. + */ + if (WARN_ON(MCDI_DWORD(outbuf, MAE_ACTION_SET_FREE_OUT_FREED_AS_ID) != fw_id)) + return -EIO; + return 0; +} + +int efx_mae_alloc_action_set_list(struct efx_nic *efx, + struct efx_tc_action_set_list *acts) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_LEN); + struct efx_tc_action_set *act; + size_t inlen, outlen, i = 0; + efx_dword_t *inbuf; + int rc; + + list_for_each_entry(act, &acts->list, list) + i++; + if (i == 0) + return -EINVAL; + if (i == 1) { + /* Don't wrap an ASL around a single AS, just use the AS_ID + * directly. ASLs are a more limited resource. + */ + act = list_first_entry(&acts->list, struct efx_tc_action_set, list); + acts->fw_id = act->fw_id; + return 0; + } + if (i > MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_AS_IDS_MAXNUM_MCDI2) + return -EOPNOTSUPP; /* Too many actions */ + inlen = MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_LEN(i); + inbuf = kzalloc(inlen, GFP_KERNEL); + if (!inbuf) + return -ENOMEM; + i = 0; + list_for_each_entry(act, &acts->list, list) { + MCDI_SET_ARRAY_DWORD(inbuf, MAE_ACTION_SET_LIST_ALLOC_IN_AS_IDS, + i, act->fw_id); + i++; + } + MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_LIST_ALLOC_IN_COUNT, i); + rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_SET_LIST_ALLOC, inbuf, inlen, + outbuf, sizeof(outbuf), &outlen); + if (rc) + goto out_free; + if (outlen < sizeof(outbuf)) { + rc = -EIO; + goto out_free; + } + acts->fw_id = MCDI_DWORD(outbuf, MAE_ACTION_SET_LIST_ALLOC_OUT_ASL_ID); + /* We rely on the high bit of ASL IDs always being set. + * The firmware API guarantees this, but let's check it ourselves. + */ + if (WARN_ON_ONCE(!efx_mae_asl_id(acts->fw_id))) { + efx_mae_free_action_set_list(efx, acts); + rc = -EIO; + } +out_free: + kfree(inbuf); + return rc; +} + +int efx_mae_free_action_set_list(struct efx_nic *efx, + struct efx_tc_action_set_list *acts) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT_LEN(1)); + MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_ACTION_SET_LIST_FREE_IN_LEN(1)); + size_t outlen; + int rc; + + /* If this is just an AS_ID with no ASL wrapper, then there is + * nothing for us to free. (The AS will be freed later.) + */ + if (efx_mae_asl_id(acts->fw_id)) { + MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_LIST_FREE_IN_ASL_ID, + acts->fw_id); + rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_SET_LIST_FREE, inbuf, + sizeof(inbuf), outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + if (outlen < sizeof(outbuf)) + return -EIO; + /* FW freed a different ID than we asked for, should never happen. + * Warn because it means we've now got a different idea to the FW of + * what encap_mds exist, which could cause mayhem later. 
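+ * (The IDs in question here are action set lists rather than encap
+ * headers, but the failure mode is the same.)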
+ */ + if (WARN_ON(MCDI_DWORD(outbuf, MAE_ACTION_SET_LIST_FREE_OUT_FREED_ASL_ID) != acts->fw_id)) + return -EIO; + } + /* We're probably about to free @acts, but let's just make sure its + * fw_id is blatted so that it won't look valid if it leaks out. + */ + acts->fw_id = MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_ACTION_SET_LIST_ID_NULL; + return 0; +} + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_TC_OFFLOAD) +int efx_mae_register_encap_match(struct efx_nic *efx, + struct efx_tc_encap_match *encap) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_OUTER_RULE_INSERT_IN_LEN(MAE_ENC_FIELD_PAIRS_LEN)); + MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_OUTER_RULE_INSERT_OUT_LEN); + MCDI_DECLARE_STRUCT_PTR(match_crit); + size_t outlen; + int rc; + + rc = efx_mae_encap_type_to_mae_type(encap->tun_type); + if (rc < 0) + return rc; + match_crit = _MCDI_DWORD(inbuf, MAE_OUTER_RULE_INSERT_IN_FIELD_MATCH_CRITERIA); + /* The struct contains IP src and dst, and udp dport. + * So we actually need to filter on IP src and dst, L4 dport, and + * ipproto == udp. + */ + MCDI_SET_DWORD(inbuf, MAE_OUTER_RULE_INSERT_IN_ENCAP_TYPE, rc); +#ifdef CONFIG_IPV6 + if (encap->src_ip | encap->dst_ip) { +#endif + MCDI_STRUCT_SET_DWORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_SRC_IP4_BE, + encap->src_ip); + MCDI_STRUCT_SET_DWORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_SRC_IP4_BE_MASK, + ~(__be32)0); + MCDI_STRUCT_SET_DWORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_DST_IP4_BE, + encap->dst_ip); + MCDI_STRUCT_SET_DWORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_DST_IP4_BE_MASK, + ~(__be32)0); + MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_ETHER_TYPE_BE, + htons(ETH_P_IP)); +#ifdef CONFIG_IPV6 + } else { + memcpy(MCDI_STRUCT_PTR(match_crit, MAE_ENC_FIELD_PAIRS_ENC_SRC_IP6_BE), + &encap->src_ip6, sizeof(encap->src_ip6)); + memset(MCDI_STRUCT_PTR(match_crit, MAE_ENC_FIELD_PAIRS_ENC_SRC_IP6_BE_MASK), + 0xff, sizeof(encap->src_ip6)); + memcpy(MCDI_STRUCT_PTR(match_crit, MAE_ENC_FIELD_PAIRS_ENC_DST_IP6_BE), + &encap->dst_ip6, sizeof(encap->dst_ip6)); + memset(MCDI_STRUCT_PTR(match_crit, MAE_ENC_FIELD_PAIRS_ENC_DST_IP6_BE_MASK), + 0xff, sizeof(encap->dst_ip6)); + MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_ETHER_TYPE_BE, + htons(ETH_P_IPV6)); + } +#endif + MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_ETHER_TYPE_BE_MASK, + ~(__be16)0); + MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_L4_DPORT_BE, + encap->udp_dport); + MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_L4_DPORT_BE_MASK, + ~(__be16)0); + MCDI_STRUCT_SET_BYTE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_IP_PROTO, IPPROTO_UDP); + MCDI_STRUCT_SET_BYTE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_IP_PROTO_MASK, ~0); + MCDI_STRUCT_SET_BYTE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_IP_TOS, + encap->ip_tos); + MCDI_STRUCT_SET_BYTE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_IP_TOS_MASK, + encap->ip_tos_mask); + rc = efx_mcdi_rpc(efx, MC_CMD_MAE_OUTER_RULE_INSERT, inbuf, + sizeof(inbuf), outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + if (outlen < sizeof(outbuf)) + return -EIO; + encap->fw_id = MCDI_DWORD(outbuf, MAE_OUTER_RULE_INSERT_OUT_OR_ID); + return 0; +} + +int efx_mae_unregister_encap_match(struct efx_nic *efx, + struct efx_tc_encap_match *encap) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_OUTER_RULE_REMOVE_OUT_LEN(1)); + MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_OUTER_RULE_REMOVE_IN_LEN(1)); + size_t outlen; + int rc; + + MCDI_SET_DWORD(inbuf, MAE_OUTER_RULE_REMOVE_IN_OR_ID, encap->fw_id); + rc = efx_mcdi_rpc(efx, MC_CMD_MAE_OUTER_RULE_REMOVE, inbuf, + 
sizeof(inbuf), outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + if (outlen < sizeof(outbuf)) + return -EIO; + /* FW freed a different ID than we asked for, should also never happen. + * Warn because it means we've now got a different idea to the FW of + * what encap_mds exist, which could cause mayhem later. + */ + if (WARN_ON(MCDI_DWORD(outbuf, MAE_OUTER_RULE_REMOVE_OUT_REMOVED_OR_ID) != encap->fw_id)) + return -EIO; + /* We're probably about to free @encap, but let's just make sure its + * fw_id is blatted so that it won't look valid if it leaks out. + */ + encap->fw_id = MC_CMD_MAE_OUTER_RULE_INSERT_OUT_OUTER_RULE_ID_NULL; + return 0; +} + +static int efx_mae_populate_lhs_match_criteria(MCDI_DECLARE_STRUCT_PTR(match_crit), + const struct efx_tc_match *match) +{ + if (match->mask.ingress_port) { + if (~match->mask.ingress_port) + return -EOPNOTSUPP; + MCDI_STRUCT_SET_DWORD(match_crit, + MAE_ENC_FIELD_PAIRS_INGRESS_MPORT_SELECTOR, + match->value.ingress_port); + } + MCDI_STRUCT_SET_DWORD(match_crit, MAE_ENC_FIELD_PAIRS_INGRESS_MPORT_SELECTOR_MASK, + match->mask.ingress_port); + MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_ETHER_TYPE_BE, + match->value.eth_proto); + MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_ETHER_TYPE_BE_MASK, + match->mask.eth_proto); + MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_VLAN0_TCI_BE, + match->value.vlan_tci[0]); + MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_VLAN0_TCI_BE_MASK, + match->mask.vlan_tci[0]); + MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_VLAN0_PROTO_BE, + match->value.vlan_proto[0]); + MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_VLAN0_PROTO_BE_MASK, + match->mask.vlan_proto[0]); + MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_VLAN1_TCI_BE, + match->value.vlan_tci[1]); + MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_VLAN1_TCI_BE_MASK, + match->mask.vlan_tci[1]); + MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_VLAN1_PROTO_BE, + match->value.vlan_proto[1]); + MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_VLAN1_PROTO_BE_MASK, + match->mask.vlan_proto[1]); + memcpy(MCDI_STRUCT_PTR(match_crit, MAE_ENC_FIELD_PAIRS_ENC_ETH_SADDR_BE), + match->value.eth_saddr, ETH_ALEN); + memcpy(MCDI_STRUCT_PTR(match_crit, MAE_ENC_FIELD_PAIRS_ENC_ETH_SADDR_BE_MASK), + match->mask.eth_saddr, ETH_ALEN); + memcpy(MCDI_STRUCT_PTR(match_crit, MAE_ENC_FIELD_PAIRS_ENC_ETH_DADDR_BE), + match->value.eth_daddr, ETH_ALEN); + memcpy(MCDI_STRUCT_PTR(match_crit, MAE_ENC_FIELD_PAIRS_ENC_ETH_DADDR_BE_MASK), + match->mask.eth_daddr, ETH_ALEN); + MCDI_STRUCT_SET_BYTE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_IP_PROTO, + match->value.ip_proto); + MCDI_STRUCT_SET_BYTE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_IP_PROTO_MASK, + match->mask.ip_proto); + MCDI_STRUCT_SET_BYTE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_IP_TOS, + match->value.ip_tos); + MCDI_STRUCT_SET_BYTE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_IP_TOS_MASK, + match->mask.ip_tos); + MCDI_STRUCT_SET_BYTE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_IP_TTL, + match->value.ip_ttl); + MCDI_STRUCT_SET_BYTE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_IP_TTL_MASK, + match->mask.ip_ttl); + MCDI_STRUCT_POPULATE_BYTE_1(match_crit, + MAE_ENC_FIELD_PAIRS_ENC_VLAN_FLAGS, + MAE_ENC_FIELD_PAIRS_ENC_IP_FRAG, + match->value.ip_frag); + MCDI_STRUCT_POPULATE_BYTE_1(match_crit, + MAE_ENC_FIELD_PAIRS_ENC_VLAN_FLAGS_MASK, + MAE_ENC_FIELD_PAIRS_ENC_IP_FRAG_MASK, + match->mask.ip_frag); + MCDI_STRUCT_SET_DWORD_BE(match_crit, 
MAE_ENC_FIELD_PAIRS_ENC_SRC_IP4_BE, + match->value.src_ip); + MCDI_STRUCT_SET_DWORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_SRC_IP4_BE_MASK, + match->mask.src_ip); + MCDI_STRUCT_SET_DWORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_DST_IP4_BE, + match->value.dst_ip); + MCDI_STRUCT_SET_DWORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_DST_IP4_BE_MASK, + match->mask.dst_ip); +#ifdef CONFIG_IPV6 + memcpy(MCDI_STRUCT_PTR(match_crit, MAE_ENC_FIELD_PAIRS_ENC_SRC_IP6_BE), + &match->value.src_ip6, sizeof(struct in6_addr)); + memcpy(MCDI_STRUCT_PTR(match_crit, MAE_ENC_FIELD_PAIRS_ENC_SRC_IP6_BE_MASK), + &match->mask.src_ip6, sizeof(struct in6_addr)); + memcpy(MCDI_STRUCT_PTR(match_crit, MAE_ENC_FIELD_PAIRS_ENC_DST_IP6_BE), + &match->value.dst_ip6, sizeof(struct in6_addr)); + memcpy(MCDI_STRUCT_PTR(match_crit, MAE_ENC_FIELD_PAIRS_ENC_DST_IP6_BE_MASK), + &match->mask.dst_ip6, sizeof(struct in6_addr)); +#endif + MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_L4_SPORT_BE, + match->value.l4_sport); + MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_L4_SPORT_BE_MASK, + match->mask.l4_sport); + MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_L4_DPORT_BE, + match->value.l4_dport); + MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_L4_DPORT_BE_MASK, + match->mask.l4_dport); + /* No enc-keys in LHS rules. Caps check should have caught this; any + * enc-keys from an fLHS should have been translated to regular keys + * and any EM should be a pseudo (we're an OR so can't have a direct + * EM with another OR). + */ + if (WARN_ON_ONCE(match->encap && !match->encap->type)) + return -EOPNOTSUPP; + if (WARN_ON_ONCE(match->mask.enc_src_ip)) + return -EOPNOTSUPP; + if (WARN_ON_ONCE(match->mask.enc_dst_ip)) + return -EOPNOTSUPP; +#ifdef CONFIG_IPV6 + if (WARN_ON_ONCE(!ipv6_addr_any(&match->mask.enc_src_ip6))) + return -EOPNOTSUPP; + if (WARN_ON_ONCE(!ipv6_addr_any(&match->mask.enc_dst_ip6))) + return -EOPNOTSUPP; +#endif + if (WARN_ON_ONCE(match->mask.enc_ip_tos)) + return -EOPNOTSUPP; + if (WARN_ON_ONCE(match->mask.enc_ip_ttl)) + return -EOPNOTSUPP; + if (WARN_ON_ONCE(match->mask.enc_sport)) + return -EOPNOTSUPP; + if (WARN_ON_ONCE(match->mask.enc_dport)) + return -EOPNOTSUPP; + if (WARN_ON_ONCE(match->mask.enc_keyid)) + return -EOPNOTSUPP; + return 0; +} + +static int efx_mae_insert_lhs_outer_rule(struct efx_nic *efx, + struct efx_tc_lhs_rule *rule, u32 prio) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_OUTER_RULE_INSERT_IN_LEN(MAE_ENC_FIELD_PAIRS_LEN)); + MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_OUTER_RULE_INSERT_OUT_LEN); + MCDI_DECLARE_STRUCT_PTR(match_crit); + const struct efx_tc_lhs_action *act; + size_t outlen; + int rc; + + MCDI_SET_DWORD(inbuf, MAE_OUTER_RULE_INSERT_IN_PRIO, prio); + /* match */ + match_crit = _MCDI_DWORD(inbuf, MAE_OUTER_RULE_INSERT_IN_FIELD_MATCH_CRITERIA); + rc = efx_mae_populate_lhs_match_criteria(match_crit, &rule->match); + if (rc) + return rc; + + /* action */ + act = &rule->lhs_act; + rc = efx_mae_encap_type_to_mae_type(act->tun_type); + if (rc < 0) + return rc; + MCDI_SET_DWORD(inbuf, MAE_OUTER_RULE_INSERT_IN_ENCAP_TYPE, rc); +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_CONNTRACK_OFFLOAD) + /* We always inhibit CT lookup on TCP_INTERESTING_FLAGS, since the + * SW path needs to process the packet to update the conntrack tables + * on connection establishment (SYN) or termination (FIN, RST). 
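+ * (That is, any segment with SYN, FIN or RST set skips the hardware
+ * lookup and still reaches the kernel's conntrack state machine.)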
+ */ + MCDI_POPULATE_DWORD_6(inbuf, MAE_OUTER_RULE_INSERT_IN_LOOKUP_CONTROL, + MAE_OUTER_RULE_INSERT_IN_DO_CT, !!act->zone, + MAE_OUTER_RULE_INSERT_IN_CT_TCP_FLAGS_INHIBIT, 1, + MAE_OUTER_RULE_INSERT_IN_CT_DOMAIN, + act->zone ? act->zone->domain : 0, + MAE_OUTER_RULE_INSERT_IN_CT_VNI_MODE, + act->zone ? act->zone->vni_mode : 0, + MAE_OUTER_RULE_INSERT_IN_DO_COUNT, !!act->count, + MAE_OUTER_RULE_INSERT_IN_RECIRC_ID, act->rid ? act->rid->fw_id : 0); +#else + MCDI_POPULATE_DWORD_2(inbuf, MAE_OUTER_RULE_INSERT_IN_LOOKUP_CONTROL, + MAE_OUTER_RULE_INSERT_IN_DO_COUNT, !!act->count, + MAE_OUTER_RULE_INSERT_IN_RECIRC_ID, act->rid ? act->rid->fw_id : 0); +#endif + if (act->count) + MCDI_SET_DWORD(inbuf, MAE_OUTER_RULE_INSERT_IN_COUNTER_ID, + act->count->cnt->fw_id); + rc = efx_mcdi_rpc(efx, MC_CMD_MAE_OUTER_RULE_INSERT, inbuf, + sizeof(inbuf), outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + if (outlen < sizeof(outbuf)) + return -EIO; + rule->fw_id = MCDI_DWORD(outbuf, MAE_OUTER_RULE_INSERT_OUT_OR_ID); + return 0; +} + +static int efx_mae_populate_match_criteria(MCDI_DECLARE_STRUCT_PTR(match_crit), + const struct efx_tc_match *match); + +static int efx_mae_insert_lhs_action_rule(struct efx_nic *efx, + struct efx_tc_lhs_rule *rule, + u32 prio) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_ACTION_RULE_INSERT_IN_LEN(MAE_FIELD_MASK_VALUE_PAIRS_V2_LEN)); + MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_RULE_INSERT_OUT_LEN); + struct efx_tc_lhs_action *act = &rule->lhs_act; + MCDI_DECLARE_STRUCT_PTR(match_crit); + MCDI_DECLARE_STRUCT_PTR(response); + size_t outlen; + int rc; + + match_crit = _MCDI_DWORD(inbuf, MAE_ACTION_RULE_INSERT_IN_MATCH_CRITERIA); + response = _MCDI_DWORD(inbuf, MAE_ACTION_RULE_INSERT_IN_RESPONSE); + MCDI_STRUCT_SET_DWORD(response, MAE_ACTION_RULE_RESPONSE_ASL_ID, + MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_ACTION_SET_LIST_ID_NULL); + MCDI_STRUCT_SET_DWORD(response, MAE_ACTION_RULE_RESPONSE_AS_ID, + MC_CMD_MAE_ACTION_SET_ALLOC_OUT_ACTION_SET_ID_NULL); +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_CONNTRACK_OFFLOAD) + EFX_POPULATE_DWORD_5(*_MCDI_STRUCT_DWORD(response, MAE_ACTION_RULE_RESPONSE_LOOKUP_CONTROL), + MAE_ACTION_RULE_RESPONSE_DO_CT, !!act->zone, + MAE_ACTION_RULE_RESPONSE_DO_RECIRC, + act->rid && !act->zone, + MAE_ACTION_RULE_RESPONSE_CT_VNI_MODE, + act->zone ? act->zone->vni_mode : MAE_CT_VNI_MODE_ZERO, + MAE_ACTION_RULE_RESPONSE_RECIRC_ID, + act->rid ? act->rid->fw_id : 0, + MAE_ACTION_RULE_RESPONSE_CT_DOMAIN, + act->zone ? act->zone->domain : 0); +#else + EFX_POPULATE_DWORD_2(*_MCDI_STRUCT_DWORD(response, MAE_ACTION_RULE_RESPONSE_LOOKUP_CONTROL), + MAE_ACTION_RULE_RESPONSE_DO_RECIRC, !!act->rid, + MAE_ACTION_RULE_RESPONSE_RECIRC_ID, + act->rid ? act->rid->fw_id : 0); +#endif + MCDI_STRUCT_SET_DWORD(response, MAE_ACTION_RULE_RESPONSE_COUNTER_ID, + act->count ? 
act->count->cnt->fw_id : + MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_NULL); + MCDI_SET_DWORD(inbuf, MAE_ACTION_RULE_INSERT_IN_PRIO, prio); + rc = efx_mae_populate_match_criteria(match_crit, &rule->match); + if (rc) + return rc; + + rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_RULE_INSERT, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + if (outlen < sizeof(outbuf)) + return -EIO; + rule->fw_id = MCDI_DWORD(outbuf, MAE_ACTION_RULE_INSERT_OUT_AR_ID); + return 0; +} + +int efx_mae_insert_lhs_rule(struct efx_nic *efx, struct efx_tc_lhs_rule *rule, + u32 prio) +{ + if (rule->is_ar) + return efx_mae_insert_lhs_action_rule(efx, rule, prio); + return efx_mae_insert_lhs_outer_rule(efx, rule, prio); +} + +static int efx_mae_remove_lhs_outer_rule(struct efx_nic *efx, + struct efx_tc_lhs_rule *rule) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_OUTER_RULE_REMOVE_OUT_LEN(1)); + MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_OUTER_RULE_REMOVE_IN_LEN(1)); + size_t outlen; + int rc; + + MCDI_SET_DWORD(inbuf, MAE_OUTER_RULE_REMOVE_IN_OR_ID, rule->fw_id); + rc = efx_mcdi_rpc(efx, MC_CMD_MAE_OUTER_RULE_REMOVE, inbuf, + sizeof(inbuf), outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + if (outlen < sizeof(outbuf)) + return -EIO; + /* FW freed a different ID than we asked for, should also never happen. + * Warn because it means we've now got a different idea to the FW of + * what encap_mds exist, which could cause mayhem later. + */ + if (WARN_ON(MCDI_DWORD(outbuf, MAE_OUTER_RULE_REMOVE_OUT_REMOVED_OR_ID) != rule->fw_id)) + return -EIO; + /* We're probably about to free @rule, but let's just make sure its + * fw_id is blatted so that it won't look valid if it leaks out. + */ + rule->fw_id = MC_CMD_MAE_OUTER_RULE_INSERT_OUT_OUTER_RULE_ID_NULL; + return 0; +} + +int efx_mae_remove_lhs_rule(struct efx_nic *efx, struct efx_tc_lhs_rule *rule) +{ + if (rule->is_ar) + return efx_mae_delete_rule(efx, rule->fw_id); + return efx_mae_remove_lhs_outer_rule(efx, rule); +} + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_CONNTRACK_OFFLOAD) +/* Populating is done by taking each byte of @value in turn and storing + * it in the appropriate bits of @row. @value must be big-endian; we + * convert it to little-endianness as we go. + */ +static int efx_mae_table_populate(struct efx_tc_table_field_fmt field, + __le32 *row, size_t row_bits, + void *value, size_t value_size) +{ + unsigned int i; + + /* For now only scheme 0 is supported for any field, so we check here + * (rather than, say, in calling code, which knows the semantics and + * could in principle encode for other schemes). + */ + if (field.scheme) + return -EOPNOTSUPP; + if (DIV_ROUND_UP(field.width, 8) != value_size) + return -EINVAL; + if (field.lbn + field.width > row_bits) + return -EINVAL; + for (i = 0; i < value_size; i++) { + unsigned int bn = field.lbn + i * 8; + unsigned int wn = bn / 32; + u64 v; + + v = ((u8 *)value)[value_size - i - 1]; + v <<= (bn % 32); + row[wn] |= cpu_to_le32(v & 0xffffffff); + if (wn * 32 < row_bits) + row[wn + 1] |= cpu_to_le32(v >> 32); + } + return 0; +} + +static int efx_mae_table_populate_bool(struct efx_tc_table_field_fmt field, + __le32 *row, size_t row_bits, bool value) +{ + u8 v = value ? 
1 : 0; + + if (field.width != 1) + return -EINVAL; + return efx_mae_table_populate(field, row, row_bits, &v, 1); +} + +static int efx_mae_table_populate_ipv4(struct efx_tc_table_field_fmt field, + __le32 *row, size_t row_bits, __be32 value) +{ + /* IPv4 is placed in the first 4 bytes of an IPv6-sized field */ + struct in6_addr v = {}; + + if (field.width != 128) + return -EINVAL; + v.s6_addr32[0] = value; + return efx_mae_table_populate(field, row, row_bits, &v, sizeof(v)); +} + +static int efx_mae_table_populate_u24(struct efx_tc_table_field_fmt field, + __le32 *row, size_t row_bits, u32 value) +{ + __be32 v = cpu_to_be32(value); + + /* We adjust value_size here since just 3 bytes will be copied, and + * the pointer to the value is set discarding the first byte which is + * the most significant byte for a big-endian 4-bytes value. + */ + return efx_mae_table_populate(field, row, row_bits, ((void *)&v) + 1, + sizeof(v) - 1); +} + +#define _TABLE_POPULATE(dst, dw, _field, _value) ({ \ + typeof(_value) _v = _value; \ + \ + (_field.width == sizeof(_value) * 8) ? \ + efx_mae_table_populate(_field, dst, dw, &_v, \ + sizeof(_v)) : -EINVAL; \ +}) +#define TABLE_POPULATE_KEY_IPV4(dst, _table, _field, _value) \ + efx_mae_table_populate_ipv4(efx->tc->meta_##_table.desc.keys[ \ + efx->tc->meta_##_table.keys._field##_idx], \ + dst, efx->tc->meta_##_table.desc.key_width,\ + _value) +#define TABLE_POPULATE_KEY(dst, _table, _field, _value) \ + _TABLE_POPULATE(dst, efx->tc->meta_##_table.desc.key_width, \ + efx->tc->meta_##_table.desc.keys[ \ + efx->tc->meta_##_table.keys._field##_idx], \ + _value) + +#define TABLE_POPULATE_RESP_BOOL(dst, _table, _field, _value) \ + efx_mae_table_populate_bool(efx->tc->meta_##_table.desc.resps[ \ + efx->tc->meta_##_table.resps._field##_idx], \ + dst, efx->tc->meta_##_table.desc.resp_width,\ + _value) +#define TABLE_POPULATE_RESP(dst, _table, _field, _value) \ + _TABLE_POPULATE(dst, efx->tc->meta_##_table.desc.resp_width, \ + efx->tc->meta_##_table.desc.resps[ \ + efx->tc->meta_##_table.resps._field##_idx], \ + _value) + +#define TABLE_POPULATE_RESP_U24(dst, _table, _field, _value) \ + efx_mae_table_populate_u24(efx->tc->meta_##_table.desc.resps[ \ + efx->tc->meta_##_table.resps._field##_idx], \ + dst, efx->tc->meta_##_table.desc.resp_width, \ + _value) + +static int efx_mae_populate_ct_key(struct efx_nic *efx, __le32 *key, size_t kw, + struct efx_tc_ct_entry *conn) +{ + bool ipv6 = conn->eth_proto == htons(ETH_P_IPV6); + int rc; + + rc = TABLE_POPULATE_KEY(key, ct, eth_proto, conn->eth_proto); + if (rc) + return rc; + rc = TABLE_POPULATE_KEY(key, ct, ip_proto, conn->ip_proto); + if (rc) + return rc; + if (ipv6) + rc = TABLE_POPULATE_KEY(key, ct, src_ip, conn->src_ip6); + else + rc = TABLE_POPULATE_KEY_IPV4(key, ct, src_ip, conn->src_ip); + if (rc) + return rc; + if (ipv6) + rc = TABLE_POPULATE_KEY(key, ct, dst_ip, conn->dst_ip6); + else + rc = TABLE_POPULATE_KEY_IPV4(key, ct, dst_ip, conn->dst_ip); + if (rc) + return rc; + rc = TABLE_POPULATE_KEY(key, ct, l4_sport, conn->l4_sport); + if (rc) + return rc; + rc = TABLE_POPULATE_KEY(key, ct, l4_dport, conn->l4_dport); + if (rc) + return rc; + return TABLE_POPULATE_KEY(key, ct, zone, cpu_to_be16(conn->domain)); +} + +int efx_mae_insert_ct(struct efx_nic *efx, struct efx_tc_ct_entry *conn) +{ + bool ipv6 = conn->eth_proto == htons(ETH_P_IPV6); + __le32 *key = NULL, *resp = NULL; + size_t inlen, kw, rw; + efx_dword_t *inbuf; + int rc = -ENOMEM; + + /* Check table access is supported */ + if (!efx->tc->meta_ct.hooked) + return 
-EOPNOTSUPP; + + /* key/resp widths are in bits; convert to dwords for IN_LEN */ + kw = DIV_ROUND_UP(efx->tc->meta_ct.desc.key_width, 32); + rw = DIV_ROUND_UP(efx->tc->meta_ct.desc.resp_width, 32); + BUILD_BUG_ON(sizeof(__le32) != MC_CMD_TABLE_INSERT_IN_DATA_LEN); + inlen = MC_CMD_TABLE_INSERT_IN_LEN(kw + rw); + if (inlen > MC_CMD_TABLE_INSERT_IN_LENMAX_MCDI2) + return -E2BIG; + inbuf = kzalloc(inlen, GFP_KERNEL); + if (!inbuf) + return -ENOMEM; + + key = kcalloc(sizeof(__le32), kw, GFP_KERNEL); + if (!key) + goto out_free; + resp = kcalloc(sizeof(__le32), rw, GFP_KERNEL); + if (!resp) + goto out_free; + + rc = efx_mae_populate_ct_key(efx, key, kw, conn); + if (rc) + goto out_free; + + rc = TABLE_POPULATE_RESP_BOOL(resp, ct, dnat, conn->dnat); + if (rc) + goto out_free; + /* No support in hw for IPv6 NAT; field is only 32 bits */ + if (!ipv6) + rc = TABLE_POPULATE_RESP(resp, ct, nat_ip, conn->nat_ip); + if (rc) + goto out_free; + rc = TABLE_POPULATE_RESP(resp, ct, l4_natport, conn->l4_natport); + if (rc) + goto out_free; + rc = TABLE_POPULATE_RESP(resp, ct, mark, cpu_to_be32(conn->mark)); + if (rc) + goto out_free; + rc = TABLE_POPULATE_RESP_U24(resp, ct, counter_id, conn->cnt->fw_id); + if (rc) + goto out_free; + + MCDI_SET_DWORD(inbuf, TABLE_INSERT_IN_TABLE_ID, TABLE_ID_CONNTRACK_TABLE); + MCDI_SET_WORD(inbuf, TABLE_INSERT_IN_KEY_WIDTH, + efx->tc->meta_ct.desc.key_width); + /* MASK_WIDTH is zero as CT is a BCAM */ + MCDI_SET_WORD(inbuf, TABLE_INSERT_IN_RESP_WIDTH, + efx->tc->meta_ct.desc.resp_width); + memcpy(MCDI_PTR(inbuf, TABLE_INSERT_IN_DATA), key, kw * sizeof(__le32)); + memcpy(MCDI_PTR(inbuf, TABLE_INSERT_IN_DATA) + kw * sizeof(__le32), + resp, rw * sizeof(__le32)); + + BUILD_BUG_ON(MC_CMD_TABLE_INSERT_OUT_LEN); + + rc = efx_mcdi_rpc(efx, MC_CMD_TABLE_INSERT, inbuf, inlen, NULL, 0, NULL); + +out_free: + kfree(resp); + kfree(key); + kfree(inbuf); + return rc; +} + +int efx_mae_remove_ct(struct efx_nic *efx, struct efx_tc_ct_entry *conn) +{ + __le32 *key = NULL; + efx_dword_t *inbuf; + size_t inlen, kw; + int rc = -ENOMEM; + + /* Check table access is supported */ + if (!efx->tc->meta_ct.hooked) + return -EOPNOTSUPP; + + /* key width is in bits; convert to dwords for IN_LEN */ + kw = DIV_ROUND_UP(efx->tc->meta_ct.desc.key_width, 32); + BUILD_BUG_ON(sizeof(__le32) != MC_CMD_TABLE_DELETE_IN_DATA_LEN); + inlen = MC_CMD_TABLE_DELETE_IN_LEN(kw); + if (inlen > MC_CMD_TABLE_DELETE_IN_LENMAX_MCDI2) + return -E2BIG; + inbuf = kzalloc(inlen, GFP_KERNEL); + if (!inbuf) + return -ENOMEM; + + key = kcalloc(sizeof(__le32), kw, GFP_KERNEL); + if (!key) + goto out_free; + + rc = efx_mae_populate_ct_key(efx, key, kw, conn); + if (rc) + goto out_free; + + MCDI_SET_DWORD(inbuf, TABLE_DELETE_IN_TABLE_ID, TABLE_ID_CONNTRACK_TABLE); + MCDI_SET_WORD(inbuf, TABLE_DELETE_IN_KEY_WIDTH, + efx->tc->meta_ct.desc.key_width); + /* MASK_WIDTH is zero as CT is a BCAM */ + /* RESP_WIDTH is zero for DELETE */ + memcpy(MCDI_PTR(inbuf, TABLE_DELETE_IN_DATA), key, kw * sizeof(__le32)); + + BUILD_BUG_ON(MC_CMD_TABLE_DELETE_OUT_LEN); + + rc = efx_mcdi_rpc(efx, MC_CMD_TABLE_DELETE, inbuf, inlen, NULL, 0, NULL); + +out_free: + kfree(key); + kfree(inbuf); + return rc; +} +#endif +#endif /* EFX_TC_OFFLOAD */ + +static int efx_mae_populate_match_criteria(MCDI_DECLARE_STRUCT_PTR(match_crit), + const struct efx_tc_match *match) +{ + if (match->mask.ingress_port) { + if (~match->mask.ingress_port) + return -EOPNOTSUPP; + MCDI_STRUCT_SET_DWORD(match_crit, + MAE_FIELD_MASK_VALUE_PAIRS_V2_INGRESS_MPORT_SELECTOR, + 
match->value.ingress_port); + } + MCDI_STRUCT_SET_DWORD(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_INGRESS_MPORT_SELECTOR_MASK, + match->mask.ingress_port); +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_TC_OFFLOAD) + EFX_POPULATE_DWORD_5(*_MCDI_STRUCT_DWORD(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_FLAGS), + MAE_FIELD_MASK_VALUE_PAIRS_V2_DO_CT, + match->value.ct_state_trk, + MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_HIT, + match->value.ct_state_est, + MAE_FIELD_MASK_VALUE_PAIRS_V2_IS_IP_FRAG, + match->value.ip_frag, + MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_FIRST_FRAG, + match->value.ip_firstfrag, + MAE_FIELD_MASK_VALUE_PAIRS_V2_TCP_SYN_FIN_RST, + match->value.tcp_syn_fin_rst); + EFX_POPULATE_DWORD_5(*_MCDI_STRUCT_DWORD(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_FLAGS_MASK), + MAE_FIELD_MASK_VALUE_PAIRS_V2_DO_CT, + match->mask.ct_state_trk, + MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_HIT, + match->mask.ct_state_est, + MAE_FIELD_MASK_VALUE_PAIRS_V2_IS_IP_FRAG, + match->mask.ip_frag, + MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_FIRST_FRAG, + match->mask.ip_firstfrag, + MAE_FIELD_MASK_VALUE_PAIRS_V2_TCP_SYN_FIN_RST, + match->mask.tcp_syn_fin_rst); + MCDI_STRUCT_SET_BYTE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_RECIRC_ID, + match->value.recirc_id); + MCDI_STRUCT_SET_BYTE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_RECIRC_ID_MASK, + match->mask.recirc_id); + MCDI_STRUCT_SET_DWORD(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_MARK, + match->value.ct_mark); + MCDI_STRUCT_SET_DWORD(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_MARK_MASK, + match->mask.ct_mark); + MCDI_STRUCT_SET_WORD(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_DOMAIN, + match->value.ct_zone); + MCDI_STRUCT_SET_WORD(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_DOMAIN_MASK, + match->mask.ct_zone); + MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_ETHER_TYPE_BE, + match->value.eth_proto); + MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_ETHER_TYPE_BE_MASK, + match->mask.eth_proto); + MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_TCI_BE, + match->value.vlan_tci[0]); + MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_TCI_BE_MASK, + match->mask.vlan_tci[0]); + MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_PROTO_BE, + match->value.vlan_proto[0]); + MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_PROTO_BE_MASK, + match->mask.vlan_proto[0]); + MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_TCI_BE, + match->value.vlan_tci[1]); + MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_TCI_BE_MASK, + match->mask.vlan_tci[1]); + MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_PROTO_BE, + match->value.vlan_proto[1]); + MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_PROTO_BE_MASK, + match->mask.vlan_proto[1]); + memcpy(MCDI_STRUCT_PTR(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_SADDR_BE), + match->value.eth_saddr, ETH_ALEN); + memcpy(MCDI_STRUCT_PTR(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_SADDR_BE_MASK), + match->mask.eth_saddr, ETH_ALEN); + memcpy(MCDI_STRUCT_PTR(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_DADDR_BE), + match->value.eth_daddr, ETH_ALEN); + memcpy(MCDI_STRUCT_PTR(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_DADDR_BE_MASK), + match->mask.eth_daddr, ETH_ALEN); + MCDI_STRUCT_SET_BYTE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_PROTO, + match->value.ip_proto); + MCDI_STRUCT_SET_BYTE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_PROTO_MASK, 
+ match->mask.ip_proto); + MCDI_STRUCT_SET_BYTE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TOS, + match->value.ip_tos); + MCDI_STRUCT_SET_BYTE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TOS_MASK, + match->mask.ip_tos); + MCDI_STRUCT_SET_BYTE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TTL, + match->value.ip_ttl); + MCDI_STRUCT_SET_BYTE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TTL_MASK, + match->mask.ip_ttl); + MCDI_STRUCT_SET_DWORD_BE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP4_BE, + match->value.src_ip); + MCDI_STRUCT_SET_DWORD_BE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP4_BE_MASK, + match->mask.src_ip); + MCDI_STRUCT_SET_DWORD_BE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP4_BE, + match->value.dst_ip); + MCDI_STRUCT_SET_DWORD_BE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP4_BE_MASK, + match->mask.dst_ip); +#ifdef CONFIG_IPV6 + memcpy(MCDI_STRUCT_PTR(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP6_BE), + &match->value.src_ip6, sizeof(struct in6_addr)); + memcpy(MCDI_STRUCT_PTR(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP6_BE_MASK), + &match->mask.src_ip6, sizeof(struct in6_addr)); + memcpy(MCDI_STRUCT_PTR(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP6_BE), + &match->value.dst_ip6, sizeof(struct in6_addr)); + memcpy(MCDI_STRUCT_PTR(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP6_BE_MASK), + &match->mask.dst_ip6, sizeof(struct in6_addr)); +#endif + MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_SPORT_BE, + match->value.l4_sport); + MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_SPORT_BE_MASK, + match->mask.l4_sport); + MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_DPORT_BE, + match->value.l4_dport); + MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_DPORT_BE_MASK, + match->mask.l4_dport); + MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_TCP_FLAGS_BE, + match->value.tcp_flags); + MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_TCP_FLAGS_BE_MASK, + match->mask.tcp_flags); + /* enc-keys are handled indirectly, through encap_match ID */ + if (match->encap) { + MCDI_STRUCT_SET_DWORD(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_OUTER_RULE_ID, + match->encap->fw_id); + MCDI_STRUCT_SET_DWORD(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_OUTER_RULE_ID_MASK, + (u32)-1); + /* enc_keyid (VNI/VSID) is not part of the encap_match */ + MCDI_STRUCT_SET_DWORD_BE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VNET_ID_BE, + match->value.enc_keyid); + MCDI_STRUCT_SET_DWORD_BE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VNET_ID_BE_MASK, + match->mask.enc_keyid); + } else { + /* No enc-keys should appear in a rule without an encap_match */ + if (WARN_ON_ONCE(match->mask.enc_src_ip)) + return -EOPNOTSUPP; + if (WARN_ON_ONCE(match->mask.enc_dst_ip)) + return -EOPNOTSUPP; + if (WARN_ON_ONCE(!ipv6_addr_any(&match->mask.enc_src_ip6))) + return -EOPNOTSUPP; + if (WARN_ON_ONCE(!ipv6_addr_any(&match->mask.enc_dst_ip6))) + return -EOPNOTSUPP; + if (WARN_ON_ONCE(match->mask.enc_ip_tos)) + return -EOPNOTSUPP; + if (WARN_ON_ONCE(match->mask.enc_ip_ttl)) + return -EOPNOTSUPP; + if (WARN_ON_ONCE(match->mask.enc_sport)) + return -EOPNOTSUPP; + if (WARN_ON_ONCE(match->mask.enc_dport)) + return -EOPNOTSUPP; + if (WARN_ON_ONCE(match->mask.enc_keyid)) + return -EOPNOTSUPP; + } +#endif /* EFX_TC_OFFLOAD */ + return 0; +} + +int efx_mae_insert_rule(struct efx_nic *efx, const struct efx_tc_match *match, + u32 prio, u32 acts_id, u32 *id) +{ + MCDI_DECLARE_BUF(inbuf, 
MC_CMD_MAE_ACTION_RULE_INSERT_IN_LEN(MAE_FIELD_MASK_VALUE_PAIRS_V2_LEN)); + MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_RULE_INSERT_OUT_LEN); + MCDI_DECLARE_STRUCT_PTR(match_crit); + MCDI_DECLARE_STRUCT_PTR(response); + size_t outlen; + int rc; + + if (!id) + return -EINVAL; + + match_crit = _MCDI_DWORD(inbuf, MAE_ACTION_RULE_INSERT_IN_MATCH_CRITERIA); + response = _MCDI_DWORD(inbuf, MAE_ACTION_RULE_INSERT_IN_RESPONSE); + if (efx_mae_asl_id(acts_id)) { + MCDI_STRUCT_SET_DWORD(response, MAE_ACTION_RULE_RESPONSE_ASL_ID, acts_id); + MCDI_STRUCT_SET_DWORD(response, MAE_ACTION_RULE_RESPONSE_AS_ID, + MC_CMD_MAE_ACTION_SET_ALLOC_OUT_ACTION_SET_ID_NULL); + } else { + /* We only had one AS, so we didn't wrap it in an ASL */ + MCDI_STRUCT_SET_DWORD(response, MAE_ACTION_RULE_RESPONSE_ASL_ID, + MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_ACTION_SET_LIST_ID_NULL); + MCDI_STRUCT_SET_DWORD(response, MAE_ACTION_RULE_RESPONSE_AS_ID, acts_id); + } + MCDI_SET_DWORD(inbuf, MAE_ACTION_RULE_INSERT_IN_PRIO, prio); + rc = efx_mae_populate_match_criteria(match_crit, match); + if (rc) + return rc; + + rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_RULE_INSERT, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + if (outlen < sizeof(outbuf)) + return -EIO; + *id = MCDI_DWORD(outbuf, MAE_ACTION_RULE_INSERT_OUT_AR_ID); + return 0; +} + +int efx_mae_update_rule(struct efx_nic *efx, u32 acts_id, u32 id) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_ACTION_RULE_UPDATE_IN_LEN); + MCDI_DECLARE_STRUCT_PTR(response); + + BUILD_BUG_ON(MC_CMD_MAE_ACTION_RULE_UPDATE_OUT_LEN); + response = _MCDI_DWORD(inbuf, MAE_ACTION_RULE_UPDATE_IN_RESPONSE); + + MCDI_SET_DWORD(inbuf, MAE_ACTION_RULE_UPDATE_IN_AR_ID, id); + if (efx_mae_asl_id(acts_id)) { + MCDI_STRUCT_SET_DWORD(response, MAE_ACTION_RULE_RESPONSE_ASL_ID, acts_id); + MCDI_STRUCT_SET_DWORD(response, MAE_ACTION_RULE_RESPONSE_AS_ID, + MC_CMD_MAE_ACTION_SET_ALLOC_OUT_ACTION_SET_ID_NULL); + } else { + /* We only had one AS, so we didn't wrap it in an ASL */ + MCDI_STRUCT_SET_DWORD(response, MAE_ACTION_RULE_RESPONSE_ASL_ID, + MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_ACTION_SET_LIST_ID_NULL); + MCDI_STRUCT_SET_DWORD(response, MAE_ACTION_RULE_RESPONSE_AS_ID, acts_id); + } + return efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_RULE_UPDATE, inbuf, sizeof(inbuf), + NULL, 0, NULL); +} + +int efx_mae_delete_rule(struct efx_nic *efx, u32 id) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_RULE_DELETE_OUT_LEN(1)); + MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_ACTION_RULE_DELETE_IN_LEN(1)); + size_t outlen; + int rc; + + MCDI_SET_DWORD(inbuf, MAE_ACTION_RULE_DELETE_IN_AR_ID, id); + rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_RULE_DELETE, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + if (outlen < sizeof(outbuf)) + return -EIO; + /* FW freed a different ID than we asked for, should also never happen. + * Warn because it means we've now got a different idea to the FW of + * what rules exist, which could cause mayhem later. 
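+ * (The FW echoes back the AR_ID it actually deleted, so we can
+ * cross-check it against the one we asked for.)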
+ */ + if (WARN_ON(MCDI_DWORD(outbuf, MAE_ACTION_RULE_DELETE_OUT_DELETED_AR_ID) != id)) + return -EIO; + return 0; +} + +int efx_init_mae(struct efx_nic *efx) +{ + struct efx_mae *mae = kmalloc(sizeof(*mae), GFP_KERNEL); +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_RHASHTABLE_LOOKUP_FAST) + int rc; +#endif + + if (!mae) + return -ENOMEM; + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_RHASHTABLE_LOOKUP_FAST) + rc = rhashtable_init(&mae->mports_ht, &efx_mae_mports_ht_params); + if (rc < 0) { + kfree(mae); + return rc; + } +#endif + efx->mae = mae; + mae->efx = efx; + INIT_WORK(&mae->mport_work, efx_mae_mport_work); + return 0; +} + +void efx_fini_mae(struct efx_nic *efx) +{ + struct efx_mae *mae = efx->mae; +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_RHASHTABLE_LOOKUP_FAST) + struct rhashtable_iter walk; + struct mae_mport_desc *m; +#endif + + if (!mae) + return; + + cancel_work_sync(&mae->mport_work); +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_RHASHTABLE_LOOKUP_FAST) + /* Empty the mports_ht, dropping all its references */ + rhashtable_walk_enter(&mae->mports_ht, &walk); + rhashtable_walk_start(&walk); + while ((m = rhashtable_walk_next(&walk)) != NULL) { + rhashtable_walk_stop(&walk); + efx_mae_remove_mport(efx, m); /* might sleep */ + rhashtable_walk_start(&walk); + } + rhashtable_walk_stop(&walk); + rhashtable_walk_exit(&walk); + rhashtable_destroy(&mae->mports_ht); +#endif + kfree(mae); + efx->mae = NULL; +} diff --git a/src/drivers/net/ethernet/sfc/mae.h b/src/drivers/net/ethernet/sfc/mae.h new file mode 100644 index 0000000..2d3d6b3 --- /dev/null +++ b/src/drivers/net/ethernet/sfc/mae.h @@ -0,0 +1,164 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2019 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
+ */
+
+#ifndef EF100_MAE_H
+#define EF100_MAE_H
+/* MCDI interface for the ef100 Match-Action Engine */
+
+#include "net_driver.h"
+#include "tc.h"
+#include "mcdi_pcol.h" /* needed for various MC_CMD_MAE_*_NULL defines */
+#if !defined(EFX_USE_KCOMPAT) || !defined(EFX_NEED_REFCOUNT_T)
+#include <linux/refcount.h>
+#endif
+
+int efx_mae_allocate_mport(struct efx_nic *efx, u32 *id, u32 *label);
+int efx_mae_free_mport(struct efx_nic *efx, u32 id);
+
+void efx_mae_mport_wire(struct efx_nic *efx, u32 *out);
+void efx_mae_mport_uplink(struct efx_nic *efx, u32 *out);
+void efx_mae_mport_vf(struct efx_nic *efx, u32 vf_id, u32 *out);
+void efx_mae_mport_mport(struct efx_nic *efx, u32 mport_id, u32 *out);
+
+int efx_mae_lookup_mport(struct efx_nic *efx, u32 selector, u32 *id);
+
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_TC_OFFLOAD)
+int efx_mae_start_counters(struct efx_nic *efx, struct efx_rx_queue *rx_queue);
+int efx_mae_stop_counters(struct efx_nic *efx, struct efx_rx_queue *rx_queue);
+void efx_mae_counters_grant_credits(struct work_struct *work);
+#endif
+
+enum mae_mport_desc_caller_flags {
+	MAE_MPORT_DESC_FLAG_CAN_RECEIVE_ON = BIT(MAE_MPORT_DESC_CAN_RECEIVE_ON_LBN),
+	MAE_MPORT_DESC_FLAG_CAN_DELIVER_TO = BIT(MAE_MPORT_DESC_CAN_DELIVER_TO_LBN),
+	MAE_MPORT_DESC_FLAG_CAN_DELETE = BIT(MAE_MPORT_DESC_CAN_DELETE_LBN),
+	MAE_MPORT_DESC_FLAG_IS_ZOMBIE = BIT(MAE_MPORT_DESC_IS_ZOMBIE_LBN),
+
+	MAE_MPORT_DESC_FLAG__MASK = MAE_MPORT_DESC_FLAG_CAN_RECEIVE_ON |
+				    MAE_MPORT_DESC_FLAG_CAN_DELIVER_TO |
+				    MAE_MPORT_DESC_FLAG_CAN_DELETE |
+				    MAE_MPORT_DESC_FLAG_IS_ZOMBIE
+};
+
+struct mae_mport_desc {
+	u32 mport_id;
+	u32 flags;
+	u32 caller_flags; /* enum mae_mport_desc_caller_flags */
+	u32 mport_type; /* MAE_MPORT_DESC_MPORT_TYPE_* */
+	union {
+		u32 port_idx; /* for mport_type == NET_PORT */
+		u32 alias_mport_id; /* for mport_type == ALIAS */
+		struct { /* for mport_type == VNIC */
+			u32 vnic_client_type; /* MAE_MPORT_DESC_VNIC_CLIENT_TYPE_* */
+			u32 interface_idx;
+			u16 pf_idx;
+			u16 vf_idx;
+		};
+	};
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_RHASHTABLE_LOOKUP_FAST)
+	struct rhash_head linkage;
+	refcount_t ref;
+#endif
+	struct efx_rep *efv;
+};
+
+int efx_mae_enumerate_mports(struct efx_nic *efx);
+struct mae_mport_desc *efx_mae_get_mport(struct efx_nic *efx, u32 mport_id);
+void efx_mae_put_mport(struct efx_nic *efx, struct mae_mport_desc *desc);
+
+int efx_mae_get_tables(struct efx_nic *efx);
+void efx_mae_free_tables(struct efx_nic *efx);
+
+#define MAE_NUM_FIELDS (MAE_FIELD_ENC_VNET_ID + 1)
+
+struct mae_caps {
+	u32 match_field_count;
+	u32 encap_types;
+	u32 action_prios;
+	u8 action_rule_fields[MAE_NUM_FIELDS];
+	u8 outer_rule_fields[MAE_NUM_FIELDS];
+};
+
+#define MAE_ENCAP_TYPE_SUPPORTED(_caps, _type) ((_caps)->encap_types & \
+	BIT(MC_CMD_MAE_GET_CAPABILITIES_OUT_ENCAP_TYPE_ ## _type ## _LBN))
+
+/**
+ * struct efx_mae - MAE information
+ *
+ * @efx: The associated NIC
+ * @mport_work: Work item to handle MPORT journal changes
+ * @mports_ht: m-port descriptions from MC_CMD_MAE_MPORT_READ_JOURNAL
+ */
+struct efx_mae {
+	struct efx_nic *efx;
+	struct work_struct mport_work;
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_RHASHTABLE_LOOKUP_FAST)
+	struct rhashtable mports_ht;
+#endif
+};
+
+int efx_mae_get_caps(struct efx_nic *efx, struct mae_caps *caps);
+
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_TC_OFFLOAD)
+int efx_mae_match_check_caps(struct efx_nic *efx,
+			     const struct efx_tc_match_fields *mask,
+			     struct netlink_ext_ack *extack);
+int efx_mae_match_check_caps_lhs(struct efx_nic *efx,
+ const struct efx_tc_match_fields *mask, + struct netlink_ext_ack *extack); +int efx_mae_check_encap_match_caps(struct efx_nic *efx, bool ipv6, + u8 ip_tos_mask); +int efx_mae_check_encap_type_supported(struct efx_nic *efx, enum efx_encap_type typ); + +int efx_mae_allocate_counter(struct efx_nic *efx, struct efx_tc_counter *cnt); +int efx_mae_free_counter(struct efx_nic *efx, struct efx_tc_counter *cnt); + +int efx_mae_allocate_encap_md(struct efx_nic *efx, + struct efx_tc_encap_action *encap); +int efx_mae_update_encap_md(struct efx_nic *efx, + struct efx_tc_encap_action *encap); +int efx_mae_free_encap_md(struct efx_nic *efx, + struct efx_tc_encap_action *encap); +int efx_mae_allocate_pedit_mac(struct efx_nic *efx, + struct efx_tc_mac_pedit_action *ped); +int efx_mae_free_pedit_mac(struct efx_nic *efx, + struct efx_tc_mac_pedit_action *ped); +#endif + +int efx_mae_alloc_action_set(struct efx_nic *efx, struct efx_tc_action_set *act); +int efx_mae_free_action_set(struct efx_nic *efx, u32 fw_id); + +int efx_mae_alloc_action_set_list(struct efx_nic *efx, + struct efx_tc_action_set_list *acts); +int efx_mae_free_action_set_list(struct efx_nic *efx, + struct efx_tc_action_set_list *acts); + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_TC_OFFLOAD) +int efx_mae_register_encap_match(struct efx_nic *efx, + struct efx_tc_encap_match *encap); +int efx_mae_unregister_encap_match(struct efx_nic *efx, + struct efx_tc_encap_match *encap); +int efx_mae_insert_lhs_rule(struct efx_nic *efx, struct efx_tc_lhs_rule *rule, + u32 prio); +int efx_mae_remove_lhs_rule(struct efx_nic *efx, struct efx_tc_lhs_rule *rule); +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_CONNTRACK_OFFLOAD) +struct efx_tc_ct_entry; /* see tc_conntrack.h */ +int efx_mae_insert_ct(struct efx_nic *efx, struct efx_tc_ct_entry *conn); +int efx_mae_remove_ct(struct efx_nic *efx, struct efx_tc_ct_entry *conn); +#endif +#endif + +int efx_mae_insert_rule(struct efx_nic *efx, const struct efx_tc_match *match, + u32 prio, u32 acts_id, u32 *id); +int efx_mae_update_rule(struct efx_nic *efx, u32 acts_id, u32 id); +int efx_mae_delete_rule(struct efx_nic *efx, u32 id); + +int efx_init_mae(struct efx_nic *efx); +void efx_fini_mae(struct efx_nic *efx); +#endif /* EF100_MAE_H */ diff --git a/src/drivers/net/ethernet/sfc/mae_counter_format.h b/src/drivers/net/ethernet/sfc/mae_counter_format.h new file mode 100644 index 0000000..a95f2d9 --- /dev/null +++ b/src/drivers/net/ethernet/sfc/mae_counter_format.h @@ -0,0 +1,72 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2020 Xilinx, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
+ */ + +/* Format of counter packets (version 2) from the ef100 Match-Action Engine */ + +#ifndef EFX_MAE_COUNTER_FORMAT_H +#define EFX_MAE_COUNTER_FORMAT_H + + +/*------------------------------------------------------------*/ +/* + * ER_RX_SL_PACKETISER_HEADER_WORD(160bit): + * + */ +#define ER_RX_SL_PACKETISER_HEADER_WORD_SIZE 20 +#define ER_RX_SL_PACKETISER_HEADER_WORD_WIDTH 160 + +#define ERF_SC_PACKETISER_HEADER_VERSION_LBN 0 +#define ERF_SC_PACKETISER_HEADER_VERSION_WIDTH 8 +#define ERF_SC_PACKETISER_HEADER_VERSION_VALUE 2 +#define ERF_SC_PACKETISER_HEADER_IDENTIFIER_LBN 8 +#define ERF_SC_PACKETISER_HEADER_IDENTIFIER_WIDTH 8 +#define ERF_SC_PACKETISER_HEADER_IDENTIFIER_AR 0 +#define ERF_SC_PACKETISER_HEADER_IDENTIFIER_CT 1 +#define ERF_SC_PACKETISER_HEADER_IDENTIFIER_OR 2 +#define ERF_SC_PACKETISER_HEADER_HEADER_OFFSET_LBN 16 +#define ERF_SC_PACKETISER_HEADER_HEADER_OFFSET_WIDTH 8 +#define ERF_SC_PACKETISER_HEADER_HEADER_OFFSET_DEFAULT 0x4 +#define ERF_SC_PACKETISER_HEADER_PAYLOAD_OFFSET_LBN 24 +#define ERF_SC_PACKETISER_HEADER_PAYLOAD_OFFSET_WIDTH 8 +#define ERF_SC_PACKETISER_HEADER_PAYLOAD_OFFSET_DEFAULT 0x14 +#define ERF_SC_PACKETISER_HEADER_INDEX_LBN 32 +#define ERF_SC_PACKETISER_HEADER_INDEX_WIDTH 16 +#define ERF_SC_PACKETISER_HEADER_COUNT_LBN 48 +#define ERF_SC_PACKETISER_HEADER_COUNT_WIDTH 16 +#define ERF_SC_PACKETISER_HEADER_RESERVED_0_LBN 64 +#define ERF_SC_PACKETISER_HEADER_RESERVED_0_WIDTH 32 +#define ERF_SC_PACKETISER_HEADER_RESERVED_1_LBN 96 +#define ERF_SC_PACKETISER_HEADER_RESERVED_1_WIDTH 32 +#define ERF_SC_PACKETISER_HEADER_RESERVED_2_LBN 128 +#define ERF_SC_PACKETISER_HEADER_RESERVED_2_WIDTH 32 + + +/*------------------------------------------------------------*/ +/* + * ER_RX_SL_PACKETISER_PAYLOAD_WORD(128bit): + * + */ +#define ER_RX_SL_PACKETISER_PAYLOAD_WORD_SIZE 16 +#define ER_RX_SL_PACKETISER_PAYLOAD_WORD_WIDTH 128 + +#define ERF_SC_PACKETISER_PAYLOAD_COUNTER_INDEX_LBN 0 +#define ERF_SC_PACKETISER_PAYLOAD_COUNTER_INDEX_WIDTH 24 +#define ERF_SC_PACKETISER_PAYLOAD_RESERVED_LBN 24 +#define ERF_SC_PACKETISER_PAYLOAD_RESERVED_WIDTH 8 +#define ERF_SC_PACKETISER_PAYLOAD_PACKET_COUNT_OFST 4 +#define ERF_SC_PACKETISER_PAYLOAD_PACKET_COUNT_SIZE 6 +#define ERF_SC_PACKETISER_PAYLOAD_PACKET_COUNT_LBN 32 +#define ERF_SC_PACKETISER_PAYLOAD_PACKET_COUNT_WIDTH 48 +#define ERF_SC_PACKETISER_PAYLOAD_BYTE_COUNT_OFST 10 +#define ERF_SC_PACKETISER_PAYLOAD_BYTE_COUNT_SIZE 6 +#define ERF_SC_PACKETISER_PAYLOAD_BYTE_COUNT_LBN 80 +#define ERF_SC_PACKETISER_PAYLOAD_BYTE_COUNT_WIDTH 48 + + +#endif /* EFX_MAE_COUNTER_FORMAT_H */ diff --git a/src/drivers/net/ethernet/sfc/mcdi.c b/src/drivers/net/ethernet/sfc/mcdi.c new file mode 100644 index 0000000..3783943 --- /dev/null +++ b/src/drivers/net/ethernet/sfc/mcdi.c @@ -0,0 +1,2981 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2008-2017 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
+ */
+#include <linux/delay.h>
+#include <linux/moduleparam.h>
+#include "net_driver.h"
+#include "nic.h"
+#include "efx_common.h"
+#include "efx_devlink.h"
+#include "io.h"
+#include "mcdi_pcol.h"
+
+struct efx_mcdi_copy_buffer {
+	_MCDI_DECLARE_BUF(buffer, MCDI_CTL_SDU_LEN_MAX);
+};
+
+/**************************************************************************
+ *
+ * Management-Controller-to-Driver Interface
+ *
+ **************************************************************************
+ */
+
+/* Default RPC timeout for NIC types that don't specify. */
+#define MCDI_RPC_TIMEOUT (10 * HZ)
+/* Timeout for acquiring the bus; there may be multiple outstanding requests. */
+#define MCDI_ACQUIRE_TIMEOUT (MCDI_RPC_TIMEOUT * 5)
+/* Timeout waiting for a command to be authorised */
+#define MCDI_PROXY_TIMEOUT (10 * HZ)
+
+#ifdef CONFIG_SFC_MCDI_LOGGING
+/* printk has this internal limit. Taken from printk.c. */
+#define LOG_LINE_MAX (1024 - 32)
+#endif
+
+/* A reboot/assertion causes the MCDI status word to be set after the
+ * command word is set or a REBOOT event is sent. If we notice a reboot
+ * via these mechanisms then wait 250ms for the status word to be set.
+ */
+#define MCDI_STATUS_DELAY_US 100
+#define MCDI_STATUS_DELAY_COUNT 2500
+#define MCDI_STATUS_SLEEP_MS \
+	(MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000)
+
+#ifdef CONFIG_SFC_MCDI_LOGGING
+static bool mcdi_logging_default;
+module_param(mcdi_logging_default, bool, 0644);
+MODULE_PARM_DESC(mcdi_logging_default,
+		 "Enable MCDI logging on newly-probed functions");
+#ifdef EFX_NOT_UPSTREAM
+static char *mcdi_log_commands;
+module_param(mcdi_log_commands, charp, 0444);
+MODULE_PARM_DESC(mcdi_log_commands,
+		 "Specify which MCDI commands to log when logging is enabled. Comma or space separated list of hex values e.g. 'b,4c'. 
An empty string will log all commands"); +#endif +#endif + +static int efx_mcdi_rpc_async_internal(struct efx_nic *efx, + struct efx_mcdi_cmd *cmd, + unsigned int *handle, + bool immediate_poll, + bool immediate_only); +static void efx_mcdi_start_or_queue(struct efx_mcdi_iface *mcdi, + bool allow_retry, + struct efx_mcdi_copy_buffer *copybuf, + struct list_head *cleanup_list); +static void efx_mcdi_cmd_start_or_queue(struct efx_mcdi_iface *mcdi, + struct efx_mcdi_cmd *cmd, + struct efx_mcdi_copy_buffer *copybuf, + struct list_head *cleanup_list); +static int efx_mcdi_cmd_start_or_queue_ext(struct efx_mcdi_iface *mcdi, + struct efx_mcdi_cmd *cmd, + struct efx_mcdi_copy_buffer *copybuf, + bool immediate_only, + struct list_head *cleanup_list); +static void efx_mcdi_poll_start(struct efx_mcdi_iface *mcdi, + struct efx_mcdi_cmd *cmd, + struct efx_mcdi_copy_buffer *copybuf, + struct list_head *cleanup_list); +static bool efx_mcdi_poll_once(struct efx_mcdi_iface *mcdi, + struct efx_mcdi_cmd *cmd); +static bool efx_mcdi_complete_cmd(struct efx_mcdi_iface *mcdi, + struct efx_mcdi_cmd *cmd, + struct efx_mcdi_copy_buffer *copybuf, + struct list_head *cleanup_list); +static void efx_mcdi_timeout_cmd(struct efx_mcdi_iface *mcdi, + struct efx_mcdi_cmd *cmd, + struct list_head *cleanup_list); +static void efx_mcdi_reset_during_cmd(struct efx_mcdi_iface *mcdi, + struct efx_mcdi_cmd *cmd); +static void efx_mcdi_cmd_work(struct work_struct *work); +static void _efx_mcdi_mode_poll(struct efx_mcdi_iface *mcdi); +static void efx_mcdi_mode_fail(struct efx_nic *efx, struct list_head *cleanup_list); +static void _efx_mcdi_display_error(struct efx_nic *efx, unsigned int cmd, + size_t inlen, int raw, int arg, int rc); + +static bool efx_cmd_running(struct efx_mcdi_cmd *cmd) +{ + return cmd->state == MCDI_STATE_RUNNING || + cmd->state == MCDI_STATE_RUNNING_CANCELLED; +} + +static bool efx_cmd_cancelled(struct efx_mcdi_cmd *cmd) +{ + return cmd->state == MCDI_STATE_RUNNING_CANCELLED || + cmd->state == MCDI_STATE_PROXY_CANCELLED; +} + +static void efx_mcdi_cmd_release(struct kref *ref) +{ + kfree(container_of(ref, struct efx_mcdi_cmd, ref)); +} + +static unsigned int efx_mcdi_cmd_handle(struct efx_mcdi_cmd *cmd) +{ + return cmd->handle; +} + +static void _efx_mcdi_remove_cmd(struct efx_mcdi_iface *mcdi, + struct efx_mcdi_cmd *cmd, + struct list_head *cleanup_list) +{ + /* if cancelled, the completers have already been called */ + if (efx_cmd_cancelled(cmd)) + return; + + if (cmd->atomic_completer) + cmd->atomic_completer(mcdi->efx, cmd->cookie, cmd->rc, + cmd->outbuf, cmd->outlen); + if (cmd->completer) { + list_add_tail(&cmd->cleanup_list, cleanup_list); + ++mcdi->outstanding_cleanups; + kref_get(&cmd->ref); + } +} + +static void efx_mcdi_remove_cmd(struct efx_mcdi_iface *mcdi, + struct efx_mcdi_cmd *cmd, + struct list_head *cleanup_list) +{ + list_del(&cmd->list); + _efx_mcdi_remove_cmd(mcdi, cmd, cleanup_list); + cmd->state = MCDI_STATE_FINISHED; + kref_put(&cmd->ref, efx_mcdi_cmd_release); + if (list_empty(&mcdi->cmd_list)) + wake_up(&mcdi->cmd_complete_wq); +} + +static unsigned long efx_mcdi_rpc_timeout(struct efx_nic *efx, unsigned int cmd) +{ + if (!efx->type->mcdi_rpc_timeout) + return MCDI_RPC_TIMEOUT; + else + return efx->type->mcdi_rpc_timeout(efx, cmd); +} + +#ifdef EFX_NOT_UPSTREAM +static void efx_mcdi_parse_mcdi_list(struct efx_nic *efx, char *list, + unsigned long *bitmap) +{ + if (!list || *list == '\0') { + bitmap_fill(bitmap, MCDI_NUM_LOG_COMMANDS); + return; + } + + bitmap_zero(bitmap, 
MCDI_NUM_LOG_COMMANDS); + + do { + int val = -1; + int digits, matched; + + matched = sscanf(list, " %x%n", &val, &digits); + if (matched >= 1) { + list += digits; + } else { + netif_err(efx, drv, efx->net_dev, + "bad input in mcdi_log_commands\n"); + break; + } + + if (*list == ' ' || *list == ',' || *list == '\0') { + /* valid entry */ + if (val >= 0 && val < MCDI_NUM_LOG_COMMANDS) { + set_bit(val, bitmap); + } else { + netif_warn(efx, drv, efx->net_dev, + "value too large in mcdi_log_commands\n"); + } + + if (*list) + ++list; + } else { + netif_err(efx, drv, efx->net_dev, + "bad input in mcdi_log_commands\n"); + break; + } + } while (*list); +} +#endif + +int efx_mcdi_init(struct efx_nic *efx) +{ + struct efx_mcdi_iface *mcdi; + int rc = -ENOMEM; + + efx->mcdi = kzalloc(sizeof(*efx->mcdi), GFP_KERNEL); + if (!efx->mcdi) + goto fail; + + mcdi = efx_mcdi(efx); + mcdi->efx = efx; + +#ifdef CONFIG_SFC_MCDI_LOGGING + mcdi->logging_buffer = kmalloc(LOG_LINE_MAX, GFP_KERNEL); + if (!mcdi->logging_buffer) + goto fail2; + mcdi->logging_enabled = mcdi_logging_default; +#ifdef EFX_NOT_UPSTREAM + efx_mcdi_parse_mcdi_list(efx, mcdi_log_commands, mcdi->log_commands); +#endif +#endif + mcdi->workqueue = create_workqueue("mcdi_wq"); + if (!mcdi->workqueue) + goto fail3; + spin_lock_init(&mcdi->iface_lock); + mcdi->mode = MCDI_MODE_POLL; + INIT_LIST_HEAD(&mcdi->cmd_list); + init_waitqueue_head(&mcdi->cmd_complete_wq); + + (void) efx_mcdi_poll_reboot(efx); + mcdi->new_epoch = true; + + /* Recover from a failed assertion before probing */ + rc = efx_mcdi_handle_assertion(efx); + if (rc) + goto fail4; + + /* Let the MC (and BMC, if this is a LOM) know that the driver + * is loaded. We should do this before we reset the NIC. + * This operation can specify the required firmware variant. This will + * fail with EPERM if we are not the primary PF. In this case the + * caller should retry with variant "don't care". 
+ */ + rc = efx_mcdi_drv_attach(efx, MC_CMD_FW_LOW_LATENCY, + &efx->mcdi->fn_flags, false); + if (rc == -EPERM) + rc = efx_mcdi_drv_attach(efx, MC_CMD_FW_DONT_CARE, + &efx->mcdi->fn_flags, false); + if (rc) { + netif_err(efx, probe, efx->net_dev, + "Unable to register driver with MCPU\n"); + goto fail4; + } + + return 0; +fail4: + destroy_workqueue(mcdi->workqueue); +fail3: +#ifdef CONFIG_SFC_MCDI_LOGGING + kfree(mcdi->logging_buffer); +fail2: +#endif + kfree(efx->mcdi); + efx->mcdi = NULL; +fail: + return rc; +} + +void efx_mcdi_detach(struct efx_nic *efx) +{ + if (!efx->mcdi) + return; + + if (!efx_nic_hw_unavailable(efx)) + /* Relinquish the device (back to the BMC, if this is a LOM) */ + efx_mcdi_drv_detach(efx); +} + +void efx_mcdi_fini(struct efx_nic *efx) +{ + struct efx_mcdi_iface *iface; + + if (!efx->mcdi) + return; + + efx_mcdi_wait_for_cleanup(efx); + + iface = efx_mcdi(efx); +#ifdef CONFIG_SFC_MCDI_LOGGING + kfree(iface->logging_buffer); +#endif + + destroy_workqueue(iface->workqueue); + kfree(efx->mcdi); + efx->mcdi = NULL; +} + +static bool efx_mcdi_reset_cmd_running(struct efx_mcdi_iface *mcdi) +{ + struct efx_mcdi_cmd *cmd; + + list_for_each_entry(cmd, &mcdi->cmd_list, list) + if (cmd->cmd == MC_CMD_REBOOT && + efx_cmd_running(cmd)) + return true; + return false; +} + +static void efx_mcdi_reboot_detected(struct efx_nic *efx) +{ + struct efx_mcdi_cmd *cmd; + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + + _efx_mcdi_mode_poll(mcdi); + list_for_each_entry(cmd, &mcdi->cmd_list, list) + if (efx_cmd_running(cmd)) + cmd->reboot_seen = true; + efx->type->mcdi_reboot_detected(efx); +} + +static bool efx_mcdi_wait_for_reboot(struct efx_nic *efx) +{ + size_t count; + + for (count = 0; count < MCDI_STATUS_DELAY_COUNT; ++count) { + if (efx_mcdi_poll_reboot(efx)) { + efx_mcdi_reboot_detected(efx); + return true; + } + udelay(MCDI_STATUS_DELAY_US); + } + + return false; +} + +static bool efx_mcdi_flushed(struct efx_mcdi_iface *mcdi, bool ignore_cleanups) +{ + bool flushed; + + spin_lock_bh(&mcdi->iface_lock); + flushed = list_empty(&mcdi->cmd_list) && + (ignore_cleanups || !mcdi->outstanding_cleanups); + spin_unlock_bh(&mcdi->iface_lock); + return flushed; +} + +/* Wait for outstanding MCDI commands to complete. */ +void efx_mcdi_wait_for_cleanup(struct efx_nic *efx) +{ + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + + wait_event(mcdi->cmd_complete_wq, + efx_mcdi_flushed(mcdi, false)); +} + +int efx_mcdi_wait_for_quiescence(struct efx_nic *efx, + unsigned int timeout_jiffies) +{ + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + + int rc = wait_event_timeout(mcdi->cmd_complete_wq, + efx_mcdi_flushed(mcdi, true), + timeout_jiffies); + + if (rc > 0) + rc = 0; + else if (rc == 0) + rc = -ETIMEDOUT; + + return rc; +} + +/* Indicate to the MCDI module that we're now sending commands for a new + * epoch. 
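+ *
+ * Setting new_epoch here means the next request is sent with
+ * MCDI_HEADER_NOT_EPOCH clear (see efx_mcdi_send_request(), which also
+ * clears new_epoch again after sending), which is how the MC learns
+ * that the driver has observed the reboot.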
+ */
+void efx_mcdi_post_reset(struct efx_nic *efx)
+{
+	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+
+	efx_mcdi_wait_for_cleanup(efx);
+
+	mcdi->new_epoch = true;
+}
+
+#ifdef EFX_NOT_UPSTREAM
+static bool should_log_command(unsigned int cmd, unsigned long *log_commands)
+{
+	if (cmd >= MCDI_NUM_LOG_COMMANDS)
+		return true; /* allow out of range values to be logged */
+	return test_bit(cmd, log_commands);
+}
+#endif
+
+static void efx_mcdi_send_request(struct efx_nic *efx,
+				  struct efx_mcdi_cmd *cmd)
+{
+	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+#ifdef CONFIG_SFC_MCDI_LOGGING
+	char *buf = mcdi->logging_buffer; /* page-sized */
+#endif
+	efx_dword_t hdr[5];
+	size_t hdr_len;
+	u32 xflags;
+	const efx_dword_t *inbuf = cmd->inbuf;
+	size_t inlen = cmd->inlen;
+
+	mcdi->prev_seq = cmd->seq;
+	mcdi->seq_held_by[cmd->seq] = cmd;
+	mcdi->db_held_by = cmd;
+	cmd->started = jiffies;
+
+	xflags = 0;
+	if (mcdi->mode == MCDI_MODE_EVENTS)
+		xflags |= MCDI_HEADER_XFLAGS_EVREQ;
+
+	if (efx->type->mcdi_max_ver == 1) {
+		/* MCDI v1 */
+		EFX_POPULATE_DWORD_7(hdr[0],
+				     MCDI_HEADER_RESPONSE, 0,
+				     MCDI_HEADER_RESYNC, 1,
+				     MCDI_HEADER_CODE, cmd->cmd,
+				     MCDI_HEADER_DATALEN, inlen,
+				     MCDI_HEADER_SEQ, cmd->seq,
+				     MCDI_HEADER_XFLAGS, xflags,
+				     MCDI_HEADER_NOT_EPOCH, !mcdi->new_epoch);
+		hdr_len = 4;
+	} else if (cmd->client_id == MC_CMD_CLIENT_ID_SELF) {
+		/* MCDI v2 */
+		WARN_ON(inlen > MCDI_CTL_SDU_LEN_MAX_V2);
+		EFX_POPULATE_DWORD_7(hdr[0],
+				     MCDI_HEADER_RESPONSE, 0,
+				     MCDI_HEADER_RESYNC, 1,
+				     MCDI_HEADER_CODE, MC_CMD_V2_EXTN,
+				     MCDI_HEADER_DATALEN, 0,
+				     MCDI_HEADER_SEQ, cmd->seq,
+				     MCDI_HEADER_XFLAGS, xflags,
+				     MCDI_HEADER_NOT_EPOCH, !mcdi->new_epoch);
+		EFX_POPULATE_DWORD_2(hdr[1],
+				     MC_CMD_V2_EXTN_IN_EXTENDED_CMD, cmd->cmd,
+				     MC_CMD_V2_EXTN_IN_ACTUAL_LEN, inlen);
+		hdr_len = 8;
+	} else {
+		WARN_ON(inlen > MCDI_CTL_SDU_LEN_MAX_V2);
+		/* MCDI v2 with credentials of a different client */
+		BUILD_BUG_ON(MC_CMD_CLIENT_CMD_IN_LEN != 4);
+		/* Outer CLIENT_CMD wrapper command with client ID */
+		EFX_POPULATE_DWORD_7(hdr[0],
+				     MCDI_HEADER_RESPONSE, 0,
+				     MCDI_HEADER_RESYNC, 1,
+				     MCDI_HEADER_CODE, MC_CMD_V2_EXTN,
+				     MCDI_HEADER_DATALEN, 0,
+				     MCDI_HEADER_SEQ, cmd->seq,
+				     MCDI_HEADER_XFLAGS, xflags,
+				     MCDI_HEADER_NOT_EPOCH, !mcdi->new_epoch);
+		EFX_POPULATE_DWORD_2(hdr[1],
+				     MC_CMD_V2_EXTN_IN_EXTENDED_CMD,
+				     MC_CMD_CLIENT_CMD,
+				     MC_CMD_V2_EXTN_IN_ACTUAL_LEN, inlen + 12);
+		MCDI_SET_DWORD(&hdr[2],
+			       CLIENT_CMD_IN_CLIENT_ID, cmd->client_id);
+
+		/* MCDIv2 header for inner command */
+		EFX_POPULATE_DWORD_2(hdr[3],
+				     MCDI_HEADER_CODE, MC_CMD_V2_EXTN,
+				     MCDI_HEADER_DATALEN, 0);
+		EFX_POPULATE_DWORD_2(hdr[4],
+				     MC_CMD_V2_EXTN_IN_EXTENDED_CMD,
+				     cmd->cmd,
+				     MC_CMD_V2_EXTN_IN_ACTUAL_LEN,
+				     inlen);
+		hdr_len = 20;
+	}
+
+#ifdef CONFIG_SFC_MCDI_LOGGING
+#ifdef EFX_NOT_UPSTREAM
+	if (mcdi->logging_enabled && !WARN_ON_ONCE(!buf) &&
+	    should_log_command(cmd->cmd, mcdi->log_commands)) {
+#else
+	if (mcdi->logging_enabled && !WARN_ON_ONCE(!buf)) {
+#endif
+		const efx_dword_t *frags[] = { hdr, inbuf };
+		size_t frag_len[] = { hdr_len, round_up(inlen, 4) };
+		const efx_dword_t *frag;
+		int bytes = 0;
+		int i, j;
+		unsigned int dcount = 0;
+		/* Header length should always be a whole number of dwords,
+		 * so scream if it's not.
+		 */
+		WARN_ON_ONCE(hdr_len % 4);
+
+		for (j = 0; j < ARRAY_SIZE(frags); j++) {
+			frag = frags[j];
+			for (i = 0;
+			     i < frag_len[j] / 4;
+			     i++) {
+				/* Do not exceed the internal printk limit.
+				 * The string before that is just over 70 bytes. 
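+				 *
+				 * Worked example: LOG_LINE_MAX is
+				 * 1024 - 32 = 992 bytes and each dword is
+				 * rendered as " %08x" (9 bytes), so the
+				 * buffer is flushed and a continuation line
+				 * started after roughly (992 - 75) / 9, i.e.
+				 * about 100, dwords.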
+ */ + if ((bytes + 75) > LOG_LINE_MAX) { + netif_info(efx, hw, efx->net_dev, + "MCDI RPC REQ:%s \\\n", buf); + dcount = 0; + bytes = 0; + } + bytes += scnprintf(buf + bytes, + LOG_LINE_MAX - bytes, " %08x", + le32_to_cpu(frag[i].u32[0])); + dcount++; + } + } + + netif_info(efx, hw, efx->net_dev, "MCDI RPC REQ:%s\n", buf); + } +#endif + + efx->type->mcdi_request(efx, cmd->bufid, hdr, hdr_len, inbuf, inlen); + + mcdi->new_epoch = false; +} + +static int efx_mcdi_errno(struct efx_nic *efx, unsigned int mcdi_err) +{ + switch (mcdi_err) { + case 0: + case MC_CMD_ERR_PROXY_PENDING: + case MC_CMD_ERR_QUEUE_FULL: + return mcdi_err; +#define TRANSLATE_ERROR(name) \ + case MC_CMD_ERR_ ## name: \ + return -name; + TRANSLATE_ERROR(EPERM); + TRANSLATE_ERROR(ENOENT); + TRANSLATE_ERROR(EINTR); + TRANSLATE_ERROR(EAGAIN); + TRANSLATE_ERROR(EACCES); + TRANSLATE_ERROR(EBUSY); + TRANSLATE_ERROR(EINVAL); + TRANSLATE_ERROR(ERANGE); + TRANSLATE_ERROR(EDEADLK); + TRANSLATE_ERROR(ENOSYS); + TRANSLATE_ERROR(ETIME); + TRANSLATE_ERROR(EALREADY); + TRANSLATE_ERROR(ENOSPC); + TRANSLATE_ERROR(ENOMEM); +#undef TRANSLATE_ERROR + case MC_CMD_ERR_ENOTSUP: + return -EOPNOTSUPP; + case MC_CMD_ERR_ALLOC_FAIL: + return -ENOBUFS; + case MC_CMD_ERR_MAC_EXIST: + return -EADDRINUSE; + case MC_CMD_ERR_NO_EVB_PORT: + if (efx->type->is_vf) + return -EAGAIN; + fallthrough; + default: + return -EPROTO; + } +} + +/* Test and clear MC-rebooted flag for this port/function; reset + * software state as necessary. + */ +int efx_mcdi_poll_reboot(struct efx_nic *efx) +{ + if (!efx->mcdi) + return 0; + + return efx->type->mcdi_poll_reboot(efx); +} + +static void efx_mcdi_process_cleanup_list(struct efx_nic *efx, + struct list_head *cleanup_list) +{ + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + unsigned int cleanups = 0; + + while (!list_empty(cleanup_list)) { + struct efx_mcdi_cmd *cmd = + list_first_entry(cleanup_list, + struct efx_mcdi_cmd, cleanup_list); + cmd->completer(efx, cmd->cookie, cmd->rc, + cmd->outbuf, cmd->outlen); + list_del(&cmd->cleanup_list); + kref_put(&cmd->ref, efx_mcdi_cmd_release); + ++cleanups; + } + + if (cleanups) { + bool all_done; + + spin_lock_bh(&mcdi->iface_lock); + EFX_WARN_ON_PARANOID(cleanups > mcdi->outstanding_cleanups); + all_done = (mcdi->outstanding_cleanups -= cleanups) == 0; + spin_unlock_bh(&mcdi->iface_lock); + if (all_done) + wake_up(&mcdi->cmd_complete_wq); + } +} + +static void _efx_mcdi_cancel_cmd(struct efx_mcdi_iface *mcdi, + unsigned int handle, + struct list_head *cleanup_list) +{ + struct efx_nic *efx = mcdi->efx; + struct efx_mcdi_cmd *cmd; + + list_for_each_entry(cmd, &mcdi->cmd_list, list) + if (efx_mcdi_cmd_handle(cmd) == handle) { + switch (cmd->state) { + case MCDI_STATE_QUEUED: + case MCDI_STATE_RETRY: + netif_dbg(efx, drv, efx->net_dev, + "command %#x inlen %zu cancelled in queue\n", + cmd->cmd, cmd->inlen); + /* if not yet running, properly cancel it */ + cmd->rc = -EPIPE; + efx_mcdi_remove_cmd(mcdi, cmd, cleanup_list); + break; + case MCDI_STATE_RUNNING: + case MCDI_STATE_PROXY: + netif_dbg(efx, drv, efx->net_dev, + "command %#x inlen %zu cancelled after sending\n", + cmd->cmd, cmd->inlen); + /* It's running. We can't cancel it on the MC, + * so we need to keep track of it so we can + * handle the response. We *also* need to call + * the command's completion function, and make + * sure it's not called again later, by + * marking it as cancelled. + */ + cmd->rc = -EPIPE; + _efx_mcdi_remove_cmd(mcdi, cmd, cleanup_list); + cmd->state = cmd->state == MCDI_STATE_RUNNING ? 
+					    MCDI_STATE_RUNNING_CANCELLED :
+					    MCDI_STATE_PROXY_CANCELLED;
+				break;
+			case MCDI_STATE_RUNNING_CANCELLED:
+			case MCDI_STATE_PROXY_CANCELLED:
+				netif_warn(efx, drv, efx->net_dev,
+					   "command %#x inlen %zu double cancelled\n",
+					   cmd->cmd, cmd->inlen);
+				break;
+			case MCDI_STATE_FINISHED:
+			default:
+				/* invalid state? */
+				WARN_ON(1);
+			}
+			break;
+		}
+}
+
+void efx_mcdi_cancel_cmd(struct efx_nic *efx, unsigned int handle)
+{
+	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+	LIST_HEAD(cleanup_list);
+
+	spin_lock_bh(&mcdi->iface_lock);
+	_efx_mcdi_cancel_cmd(mcdi, handle, &cleanup_list);
+	spin_unlock_bh(&mcdi->iface_lock);
+	efx_mcdi_process_cleanup_list(efx, &cleanup_list);
+}
+
+static void efx_mcdi_proxy_response(struct efx_mcdi_iface *mcdi,
+				    struct efx_mcdi_cmd *cmd,
+				    int status,
+				    struct list_head *cleanup_list)
+{
+	mcdi->db_held_by = NULL;
+
+	if (status) {
+		/* status != 0 means don't retry */
+		if (status == -EIO || status == -EINTR)
+			efx_mcdi_reset_during_cmd(mcdi, cmd);
+		kref_get(&cmd->ref);
+		cmd->rc = status;
+		efx_mcdi_remove_cmd(mcdi, cmd, cleanup_list);
+		if (cancel_delayed_work(&cmd->work))
+			kref_put(&cmd->ref, efx_mcdi_cmd_release);
+		kref_put(&cmd->ref, efx_mcdi_cmd_release);
+	} else {
+		/* status = 0 means ok to retry */
+		efx_mcdi_cmd_start_or_queue(mcdi, cmd, NULL, cleanup_list);
+	}
+}
+
+static void efx_mcdi_ev_proxy_response(struct efx_nic *efx,
+				       u32 handle, int status)
+{
+	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+	struct efx_mcdi_cmd *cmd;
+	bool found = false;
+	LIST_HEAD(cleanup_list);
+
+	spin_lock_bh(&mcdi->iface_lock);
+	list_for_each_entry(cmd, &mcdi->cmd_list, list)
+		if (cmd->state == MCDI_STATE_PROXY &&
+		    cmd->proxy_handle == handle) {
+			efx_mcdi_proxy_response(mcdi, cmd, status, &cleanup_list);
+			found = true;
+			break;
+		}
+	spin_unlock_bh(&mcdi->iface_lock);
+
+	efx_mcdi_process_cleanup_list(efx, &cleanup_list);
+
+	if (!found) {
+		netif_err(efx, drv, efx->net_dev,
+			  "MCDI proxy unexpected handle %#x\n",
+			  handle);
+		efx_schedule_reset(efx, RESET_TYPE_WORLD);
+	}
+}
+
+static void efx_mcdi_cmd_mode_poll(struct efx_mcdi_iface *mcdi,
+				   struct efx_mcdi_cmd *cmd)
+{
+	cmd->polled = true;
+	if (cancel_delayed_work(&cmd->work))
+		queue_delayed_work(mcdi->workqueue, &cmd->work, 0);
+}
+
+static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
+			    unsigned int datalen, unsigned int mcdi_err)
+{
+	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+	struct efx_mcdi_cmd *cmd;
+	LIST_HEAD(cleanup_list);
+	struct efx_mcdi_copy_buffer *copybuf =
+		kmalloc(sizeof(struct efx_mcdi_copy_buffer), GFP_ATOMIC);
+
+	spin_lock(&mcdi->iface_lock);
+	cmd = mcdi->seq_held_by[seqno];
+	if (cmd) {
+		if (efx_mcdi_poll_once(mcdi, cmd)) {
+			kref_get(&cmd->ref);
+			if (efx_mcdi_complete_cmd(mcdi, cmd, copybuf,
+						  &cleanup_list))
+				if (cancel_delayed_work(&cmd->work))
+					kref_put(&cmd->ref,
+						 efx_mcdi_cmd_release);
+			kref_put(&cmd->ref, efx_mcdi_cmd_release);
+		} else {
+			/* on some EF100 hardware a completion event can
+			 * overtake the write to the MCDI buffer.
+			 * If so, convert the command to polled mode.
+			 * If this is a genuine error, then the
+			 * command will eventually time out. 
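+			 * Polling works around this because
+			 * efx_mcdi_poll_once() re-reads the response buffer
+			 * itself rather than trusting the (already consumed)
+			 * completion event.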
+ */ + if (efx_nic_rev(efx) != EFX_REV_EF100) + netif_warn(mcdi->efx, drv, mcdi->efx->net_dev, + "command %#x inlen %zu event received before response\n", + cmd->cmd, cmd->inlen); + efx_mcdi_cmd_mode_poll(mcdi, cmd); + } + } else { + netif_err(efx, hw, efx->net_dev, + "MC response unexpected tx seq 0x%x\n", + seqno); + /* this could theoretically just be a race between command + * time out and processing the completion event, so while not + * a good sign, it'd be premature to attempt any recovery. + */ + } + spin_unlock(&mcdi->iface_lock); + + efx_mcdi_process_cleanup_list(efx, &cleanup_list); + + kfree(copybuf); +} + +static int +efx_mcdi_check_supported(struct efx_nic *efx, unsigned int cmd, size_t inlen) +{ + if (efx->type->mcdi_max_ver < 0 || + (efx->type->mcdi_max_ver < 2 && + cmd > MC_CMD_CMD_SPACE_ESCAPE_7)) + return -EINVAL; + + if (inlen > MCDI_CTL_SDU_LEN_MAX_V2 || + (efx->type->mcdi_max_ver < 2 && + inlen > MCDI_CTL_SDU_LEN_MAX_V1)) + return -EMSGSIZE; + + return 0; +} + +struct efx_mcdi_blocking_data { + struct kref ref; + bool done; + wait_queue_head_t wq; + int rc; + efx_dword_t *outbuf; + size_t outlen; + size_t outlen_actual; +}; + +static void efx_mcdi_blocking_data_release(struct kref *ref) +{ + kfree(container_of(ref, struct efx_mcdi_blocking_data, ref)); +} + +static void efx_mcdi_rpc_completer(struct efx_nic *efx, unsigned long cookie, + int rc, efx_dword_t *outbuf, + size_t outlen_actual) +{ + struct efx_mcdi_blocking_data *wait_data = + (struct efx_mcdi_blocking_data *)cookie; + + wait_data->rc = rc; + memcpy(wait_data->outbuf, outbuf, + min(outlen_actual, wait_data->outlen)); + wait_data->outlen_actual = outlen_actual; + smp_wmb(); + wait_data->done = true; + wake_up(&wait_data->wq); + kref_put(&wait_data->ref, efx_mcdi_blocking_data_release); +} + +int efx_mcdi_rpc_async_ext(struct efx_nic *efx, unsigned int cmd, + const efx_dword_t *inbuf, size_t inlen, + efx_mcdi_async_completer *atomic_completer, + efx_mcdi_async_completer *completer, + unsigned long cookie, bool quiet, + bool immediate_only, unsigned int *handle) +{ + struct efx_mcdi_cmd *cmd_item = + kmalloc(sizeof(struct efx_mcdi_cmd) + inlen, GFP_ATOMIC); + + if (!cmd_item) + return -ENOMEM; + + kref_init(&cmd_item->ref); + cmd_item->quiet = quiet; + cmd_item->cookie = cookie; + cmd_item->completer = completer; + cmd_item->atomic_completer = atomic_completer; + cmd_item->client_id = MC_CMD_CLIENT_ID_SELF; + cmd_item->cmd = cmd; + cmd_item->inlen = inlen; + /* inbuf is probably not valid after return, so take a copy */ + cmd_item->inbuf = (efx_dword_t *) (cmd_item + 1); + memcpy(cmd_item + 1, inbuf, inlen); + + return efx_mcdi_rpc_async_internal(efx, cmd_item, handle, false, + immediate_only); +} + +static bool efx_mcdi_get_seq(struct efx_mcdi_iface *mcdi, unsigned char *seq) +{ + *seq = mcdi->prev_seq; + do { + *seq = (*seq + 1) % ARRAY_SIZE(mcdi->seq_held_by); + } while (mcdi->seq_held_by[*seq] && *seq != mcdi->prev_seq); + return !mcdi->seq_held_by[*seq]; +} + +static int efx_mcdi_rpc_async_internal(struct efx_nic *efx, + struct efx_mcdi_cmd *cmd, + unsigned int *handle, + bool immediate_poll, bool immediate_only) +{ + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + struct efx_mcdi_copy_buffer *copybuf; + LIST_HEAD(cleanup_list); + int rc; + + rc = efx_mcdi_check_supported(efx, cmd->cmd, cmd->inlen); + if (rc) { + kref_put(&cmd->ref, efx_mcdi_cmd_release); + return rc; + } + if (!mcdi || efx->mc_bist_for_other_fn) { + kref_put(&cmd->ref, efx_mcdi_cmd_release); + return -ENETDOWN; + } + + copybuf = 
immediate_poll ? + kmalloc(sizeof(struct efx_mcdi_copy_buffer), GFP_KERNEL) : + NULL; + + cmd->mcdi = mcdi; + INIT_DELAYED_WORK(&cmd->work, efx_mcdi_cmd_work); + INIT_LIST_HEAD(&cmd->list); + INIT_LIST_HEAD(&cmd->cleanup_list); + cmd->proxy_handle = 0; + cmd->rc = 0; + cmd->outbuf = NULL; + cmd->outlen = 0; + + spin_lock_bh(&mcdi->iface_lock); + + if (mcdi->mode == MCDI_MODE_FAIL) { + spin_unlock_bh(&mcdi->iface_lock); + kref_put(&cmd->ref, efx_mcdi_cmd_release); + kfree(copybuf); + return -ENETDOWN; + } + + cmd->handle = mcdi->prev_handle++; + if (handle) + *handle = efx_mcdi_cmd_handle(cmd); + + list_add_tail(&cmd->list, &mcdi->cmd_list); + rc = efx_mcdi_cmd_start_or_queue_ext(mcdi, cmd, copybuf, immediate_only, + &cleanup_list); + if (rc) { + list_del(&cmd->list); + kref_put(&cmd->ref, efx_mcdi_cmd_release); + } + + spin_unlock_bh(&mcdi->iface_lock); + + efx_mcdi_process_cleanup_list(efx, &cleanup_list); + + kfree(copybuf); + + return rc; +} + +static int efx_mcdi_cmd_start_or_queue_ext(struct efx_mcdi_iface *mcdi, + struct efx_mcdi_cmd *cmd, + struct efx_mcdi_copy_buffer *copybuf, + bool immediate_only, + struct list_head *cleanup_list) +{ + struct efx_nic *efx = mcdi->efx; + u8 seq, bufid; + + if (!mcdi->db_held_by && + efx_mcdi_get_seq(mcdi, &seq) && + efx->type->mcdi_get_buf(efx, &bufid)) { + cmd->seq = seq; + cmd->bufid = bufid; + cmd->polled = mcdi->mode == MCDI_MODE_POLL; + cmd->reboot_seen = false; + efx_mcdi_send_request(efx, cmd); + cmd->state = MCDI_STATE_RUNNING; + + if (cmd->polled) + efx_mcdi_poll_start(mcdi, cmd, copybuf, cleanup_list); + else { + kref_get(&cmd->ref); + queue_delayed_work(mcdi->workqueue, &cmd->work, + efx_mcdi_rpc_timeout(efx, cmd->cmd)); + } + } else if (immediate_only) { + return -EAGAIN; + } else { + cmd->state = MCDI_STATE_QUEUED; + } + + return 0; +} + +static void efx_mcdi_cmd_start_or_queue(struct efx_mcdi_iface *mcdi, + struct efx_mcdi_cmd *cmd, + struct efx_mcdi_copy_buffer *copybuf, + struct list_head *cleanup_list) +{ + /* when immediate_only=false this can only return success */ + (void) efx_mcdi_cmd_start_or_queue_ext(mcdi, cmd, copybuf, false, + cleanup_list); +} + +/* try to advance other commands */ +static void efx_mcdi_start_or_queue(struct efx_mcdi_iface *mcdi, + bool allow_retry, + struct efx_mcdi_copy_buffer *copybuf, + struct list_head *cleanup_list) +{ + struct efx_mcdi_cmd *cmd, *tmp; + + list_for_each_entry_safe(cmd, tmp, &mcdi->cmd_list, list) + if (cmd->state == MCDI_STATE_QUEUED || + (cmd->state == MCDI_STATE_RETRY && allow_retry)) + efx_mcdi_cmd_start_or_queue(mcdi, cmd, copybuf, + cleanup_list); +} + +static void efx_mcdi_poll_start(struct efx_mcdi_iface *mcdi, + struct efx_mcdi_cmd *cmd, + struct efx_mcdi_copy_buffer *copybuf, + struct list_head *cleanup_list) +{ + /* Poll for completion. Poll quickly (once a us) for the 1st ms, + * because generally mcdi responses are fast. After that, back off + * and poll once a jiffy (approximately) + */ + int spins = copybuf ? 1000 : 0; + + while (spins) { + if (efx_mcdi_poll_once(mcdi, cmd)) { + efx_mcdi_complete_cmd(mcdi, cmd, copybuf, cleanup_list); + return; + } + + --spins; + udelay(1); + } + + /* didn't get a response in the first millisecond. 
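+	 * Responses normally arrive well inside that window; once we fall
+	 * through to the work item, efx_mcdi_cmd_work() re-polls with the
+	 * backoff from efx_mcdi_poll_interval(): every 1 ms for the
+	 * command's first 10 ms, every 10 ms up to 100 ms, every 100 ms up
+	 * to 1 s, then once a second.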
+	 * schedule poll after one jiffy
+	 */
+	kref_get(&cmd->ref);
+	queue_delayed_work(mcdi->workqueue, &cmd->work, 1);
+}
+
+static bool efx_mcdi_poll_once(struct efx_mcdi_iface *mcdi,
+			       struct efx_mcdi_cmd *cmd)
+{
+	struct efx_nic *efx = mcdi->efx;
+
+	/* complete or error, either way return true */
+	return efx_nic_hw_unavailable(efx) ||
+	       efx->type->mcdi_poll_response(efx, cmd->bufid);
+}
+
+static unsigned long efx_mcdi_poll_interval(struct efx_mcdi_iface *mcdi,
+					    struct efx_mcdi_cmd *cmd)
+{
+	if (time_before(jiffies, cmd->started + msecs_to_jiffies(10)))
+		return msecs_to_jiffies(1);
+	else if (time_before(jiffies, cmd->started + msecs_to_jiffies(100)))
+		return msecs_to_jiffies(10);
+	else if (time_before(jiffies, cmd->started + msecs_to_jiffies(1000)))
+		return msecs_to_jiffies(100);
+	else
+		return msecs_to_jiffies(1000);
+}
+
+static bool efx_mcdi_check_timeout(struct efx_mcdi_iface *mcdi,
+				   struct efx_mcdi_cmd *cmd)
+{
+	return time_after(jiffies, cmd->started +
+				   efx_mcdi_rpc_timeout(mcdi->efx, cmd->cmd));
+}
+
+static void efx_mcdi_proxy_timeout_cmd(struct efx_mcdi_iface *mcdi,
+				       struct efx_mcdi_cmd *cmd,
+				       struct list_head *cleanup_list)
+{
+	struct efx_nic *efx = mcdi->efx;
+
+	netif_err(efx, drv, efx->net_dev, "MCDI proxy timeout (handle %#x)\n",
+		  cmd->proxy_handle);
+
+	cmd->rc = -ETIMEDOUT;
+	efx_mcdi_remove_cmd(mcdi, cmd, cleanup_list);
+
+	efx_mcdi_mode_fail(efx, cleanup_list);
+	efx_schedule_reset(efx, RESET_TYPE_MCDI_TIMEOUT);
+}
+
+static void efx_mcdi_cmd_work(struct work_struct *context)
+{
+	struct efx_mcdi_cmd *cmd =
+		container_of(context, struct efx_mcdi_cmd, work.work);
+	struct efx_mcdi_iface *mcdi = cmd->mcdi;
+	struct efx_mcdi_copy_buffer *copybuf =
+		kmalloc(sizeof(struct efx_mcdi_copy_buffer), GFP_KERNEL);
+	LIST_HEAD(cleanup_list);
+
+	spin_lock_bh(&mcdi->iface_lock);
+
+	if (cmd->state == MCDI_STATE_FINISHED) {
+		/* The command is done and this is a race between the
+		 * completion in another thread and the work item running.
+		 * All processing has been done, so just release it.
+		 */
+		spin_unlock_bh(&mcdi->iface_lock);
+		kref_put(&cmd->ref, efx_mcdi_cmd_release);
+		kfree(copybuf);
+		return;
+	}
+
+	EFX_WARN_ON_PARANOID(cmd->state == MCDI_STATE_QUEUED);
+	EFX_WARN_ON_PARANOID(cmd->state == MCDI_STATE_RETRY);
+
+	/* if state PROXY, then proxy time out */
+	if (cmd->state == MCDI_STATE_PROXY) {
+		efx_mcdi_proxy_timeout_cmd(mcdi, cmd, &cleanup_list);
+	/* else running, check for completion */
+	} else if (efx_mcdi_poll_once(mcdi, cmd)) {
+		if (!cmd->polled) {
+			/* check whether the event is pending on EVQ0 */
+			if (efx_nic_mcdi_ev_pending(efx_get_channel(mcdi->efx, 0)))
+				netif_err(mcdi->efx, drv, mcdi->efx->net_dev,
+					  "MC command 0x%x inlen %zu mode %d completed without an interrupt after %u ms\n",
+					  cmd->cmd, cmd->inlen,
+					  cmd->polled ? MCDI_MODE_POLL : MCDI_MODE_EVENTS,
+					  jiffies_to_msecs(jiffies - cmd->started));
+			else
+				netif_err(mcdi->efx, drv, mcdi->efx->net_dev,
+					  "MC command 0x%x inlen %zu mode %d completed without an event after %u ms\n",
+					  cmd->cmd, cmd->inlen,
+					  cmd->polled ? MCDI_MODE_POLL : MCDI_MODE_EVENTS,
+					  jiffies_to_msecs(jiffies - cmd->started));
+			/* things are going wrong.
+			 * switch to polled mode so we tear down faster.
+			 */
+			_efx_mcdi_mode_poll(mcdi);
+		}
+		efx_mcdi_complete_cmd(mcdi, cmd, copybuf, &cleanup_list);
+	/* then check for timeout. 
If evented, it must have timed out */ + } else if (!cmd->polled || efx_mcdi_check_timeout(mcdi, cmd)) { + efx_mcdi_timeout_cmd(mcdi, cmd, &cleanup_list); + /* else reschedule for another poll */ + } else { + kref_get(&cmd->ref); + queue_delayed_work(mcdi->workqueue, &cmd->work, + efx_mcdi_poll_interval(mcdi, cmd)); + } + + spin_unlock_bh(&mcdi->iface_lock); + + kref_put(&cmd->ref, efx_mcdi_cmd_release); + + efx_mcdi_process_cleanup_list(mcdi->efx, &cleanup_list); + + kfree(copybuf); +} + +static void efx_mcdi_reset_during_cmd(struct efx_mcdi_iface *mcdi, + struct efx_mcdi_cmd *cmd) +{ + struct efx_nic *efx = mcdi->efx; + bool reset_running = efx_mcdi_reset_cmd_running(mcdi); + + if (!reset_running) + netif_err(efx, hw, efx->net_dev, + "Command %#x inlen %zu cancelled by MC reboot\n", + cmd->cmd, cmd->inlen); + /* consume the reset notification if we haven't already */ + if (!cmd->reboot_seen && efx_mcdi_wait_for_reboot(efx)) + if (!reset_running) + efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE); +} + +/* Returns true if the MCDI module is finished with the command. + * (examples of false would be if the command was proxied, or it was + * rejected by the MC due to lack of resources and requeued). + */ +static bool efx_mcdi_complete_cmd(struct efx_mcdi_iface *mcdi, + struct efx_mcdi_cmd *cmd, + struct efx_mcdi_copy_buffer *copybuf, + struct list_head *cleanup_list) +{ + struct efx_nic *efx = mcdi->efx; + int rc; + size_t resp_hdr_len, resp_data_len; + unsigned int respcmd, error; + efx_dword_t hdr; + efx_dword_t *outbuf = copybuf ? copybuf->buffer : NULL; + u8 bufid = cmd->bufid; + bool completed = false; + + /* ensure the command can't go away before this function returns */ + kref_get(&cmd->ref); + + efx->type->mcdi_read_response(efx, bufid, &hdr, 0, 4); + respcmd = EFX_DWORD_FIELD(hdr, MCDI_HEADER_CODE); + error = EFX_DWORD_FIELD(hdr, MCDI_HEADER_ERROR); + + if (respcmd != MC_CMD_V2_EXTN) { + resp_hdr_len = 4; + resp_data_len = EFX_DWORD_FIELD(hdr, MCDI_HEADER_DATALEN); + } else { + efx->type->mcdi_read_response(efx, bufid, &hdr, 4, 4); + respcmd = EFX_DWORD_FIELD(hdr, MC_CMD_V2_EXTN_IN_EXTENDED_CMD); + resp_hdr_len = 8; + resp_data_len = + EFX_DWORD_FIELD(hdr, MC_CMD_V2_EXTN_IN_ACTUAL_LEN); + } + +#ifdef CONFIG_SFC_MCDI_LOGGING +#ifdef EFX_NOT_UPSTREAM + if (mcdi->logging_enabled && !WARN_ON_ONCE(!mcdi->logging_buffer) && + should_log_command(cmd->cmd, mcdi->log_commands)) { +#else + if (mcdi->logging_enabled && !WARN_ON_ONCE(!mcdi->logging_buffer)) { +#endif + size_t len; + int bytes = 0; + int i; + unsigned int dcount = 0; + char *log = mcdi->logging_buffer; + + WARN_ON_ONCE(resp_hdr_len % 4); + /* MCDI_DECLARE_BUF ensures that underlying buffer is padded + * to dword size, and the MCDI buffer is always dword size + */ + len = resp_hdr_len / 4 + DIV_ROUND_UP(resp_data_len, 4); + + for (i = 0; i < len; i++) { + if ((bytes + 75) > LOG_LINE_MAX) { + netif_info(efx, hw, efx->net_dev, + "MCDI RPC RESP:%s \\\n", log); + dcount = 0; + bytes = 0; + } + efx->type->mcdi_read_response(efx, bufid, + &hdr, (i * 4), 4); + bytes += scnprintf(log + bytes, LOG_LINE_MAX - bytes, + " %08x", le32_to_cpu(hdr.u32[0])); + dcount++; + } + + netif_info(efx, hw, efx->net_dev, "MCDI RPC RESP:%s\n", log); + } +#endif + + if (error && resp_data_len == 0) { + /* MC rebooted during command */ + efx_mcdi_reset_during_cmd(mcdi, cmd); + rc = -EIO; + } else if (!outbuf) { + rc = -ENOMEM; + } else { + if (WARN_ON_ONCE(error && resp_data_len < 4)) + resp_data_len = 4; + + efx->type->mcdi_read_response(efx, bufid, outbuf, 
+					      resp_hdr_len, resp_data_len);
+
+		if (error) {
+			rc = EFX_DWORD_FIELD(outbuf[0], EFX_DWORD_0);
+			if (!cmd->quiet) {
+				int err_arg = 0;
+
+				if (resp_data_len >= MC_CMD_ERR_ARG_OFST + 4) {
+					efx->type->mcdi_read_response(
+						efx, bufid, &hdr,
+						resp_hdr_len +
+						MC_CMD_ERR_ARG_OFST, 4);
+					err_arg = EFX_DWORD_VAL(hdr);
+				}
+				_efx_mcdi_display_error(efx, cmd->cmd,
+							cmd->inlen, rc, err_arg,
+							efx_mcdi_errno(efx, rc));
+			}
+			rc = efx_mcdi_errno(efx, rc);
+		} else {
+			rc = 0;
+		}
+	}
+
+	if (rc == MC_CMD_ERR_PROXY_PENDING) {
+		if (mcdi->db_held_by != cmd || cmd->proxy_handle ||
+		    resp_data_len < MC_CMD_ERR_PROXY_PENDING_HANDLE_OFST + 4) {
+			/* The MC shouldn't return the doorbell early and then
+			 * proxy. It also shouldn't return PROXY_PENDING with
+			 * no handle or proxy a command that's already been
+			 * proxied. Schedule an FLR to reset the state.
+			 */
+			if (mcdi->db_held_by != cmd)
+				netif_err(efx, drv, efx->net_dev,
+					  "MCDI proxy pending with early db return\n");
+			if (cmd->proxy_handle)
+				netif_err(efx, drv, efx->net_dev,
+					  "MCDI proxy pending twice\n");
+			if (resp_data_len <
+			    MC_CMD_ERR_PROXY_PENDING_HANDLE_OFST + 4)
+				netif_err(efx, drv, efx->net_dev,
+					  "MCDI proxy pending with no handle\n");
+			cmd->rc = -EIO;
+			efx_mcdi_remove_cmd(mcdi, cmd, cleanup_list);
+			completed = true;
+
+			efx_mcdi_mode_fail(efx, cleanup_list);
+			efx_schedule_reset(efx, RESET_TYPE_MCDI_TIMEOUT);
+		} else {
+			/* keep the doorbell. no commands
+			 * can be issued until the proxy response.
+			 */
+			cmd->state = MCDI_STATE_PROXY;
+			efx->type->mcdi_read_response(efx, bufid, &hdr,
+						      resp_hdr_len +
+						      MC_CMD_ERR_PROXY_PENDING_HANDLE_OFST,
+						      4);
+			cmd->proxy_handle = EFX_DWORD_FIELD(hdr, EFX_DWORD_0);
+			kref_get(&cmd->ref);
+			queue_delayed_work(mcdi->workqueue, &cmd->work,
+					   MCDI_PROXY_TIMEOUT);
+		}
+	} else {
+		/* free doorbell */
+		if (mcdi->db_held_by == cmd)
+			mcdi->db_held_by = NULL;
+
+		if (efx_cmd_cancelled(cmd)) {
+			list_del(&cmd->list);
+			kref_put(&cmd->ref, efx_mcdi_cmd_release);
+			completed = true;
+		} else if (rc == MC_CMD_ERR_QUEUE_FULL) {
+			cmd->state = MCDI_STATE_RETRY;
+		} else {
+			cmd->rc = rc;
+			cmd->outbuf = outbuf;
+			cmd->outlen = outbuf ? resp_data_len : 0;
+			efx_mcdi_remove_cmd(mcdi, cmd, cleanup_list);
+			completed = true;
+		}
+	}
+
+	/* free sequence number and buffer */
+	mcdi->seq_held_by[cmd->seq] = NULL;
+	efx->type->mcdi_put_buf(efx, bufid);
+
+	efx_mcdi_start_or_queue(mcdi, rc != MC_CMD_ERR_QUEUE_FULL,
+				NULL, cleanup_list);
+
+	/* wake up anyone waiting for flush */
+	wake_up(&mcdi->cmd_complete_wq);
+
+	kref_put(&cmd->ref, efx_mcdi_cmd_release);
+
+	return completed;
+}
+
+static void efx_mcdi_timeout_cmd(struct efx_mcdi_iface *mcdi,
+				 struct efx_mcdi_cmd *cmd,
+				 struct list_head *cleanup_list)
+{
+	struct efx_nic *efx = mcdi->efx;
+
+	netif_err(efx, drv, efx->net_dev,
+		  "MC command 0x%x inlen %zu state %d mode %d timed out after %u ms\n",
+		  cmd->cmd, cmd->inlen, cmd->state,
+		  cmd->polled ? MCDI_MODE_POLL : MCDI_MODE_EVENTS,
+		  jiffies_to_msecs(jiffies - cmd->started));
+
+	efx->type->mcdi_put_buf(efx, cmd->bufid);
+
+	cmd->rc = -ETIMEDOUT;
+	efx_mcdi_remove_cmd(mcdi, cmd, cleanup_list);
+
+	efx_mcdi_mode_fail(efx, cleanup_list);
+	efx_schedule_reset(efx, RESET_TYPE_MCDI_TIMEOUT);
+}
+
+/**
+ * efx_mcdi_rpc_async - Schedule an MCDI command to run asynchronously
+ * @efx: NIC through which to issue the command
+ * @cmd: Command type number
+ * @inbuf: Command parameters
+ * @inlen: Length of command parameters, in bytes
+ * @complete: Function to be called on completion or cancellation. 
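+ *	Called exactly once, in atomic context, so it must not sleep.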
+ * @cookie: Arbitrary value to be passed to @complete. + * + * This function does not sleep and therefore may be called in atomic + * context. It will fail if event queues are disabled or if MCDI + * event completions have been disabled due to an error. + * + * If it succeeds, the @complete function will be called exactly once + * in atomic context, when one of the following occurs: + * (a) the completion event is received (in NAPI context) + * (b) event queues are disabled (in the process that disables them) + * (c) the request times-out (in timer context) + * + * Return: a negative error code or 0 on success. + */ +int +efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd, + const efx_dword_t *inbuf, size_t inlen, + efx_mcdi_async_completer *complete, unsigned long cookie) +{ + return efx_mcdi_rpc_async_ext(efx, cmd, inbuf, inlen, NULL, + complete, cookie, false, false, NULL); +} + +int efx_mcdi_rpc_async_quiet(struct efx_nic *efx, unsigned int cmd, + const efx_dword_t *inbuf, size_t inlen, + efx_mcdi_async_completer *complete, + unsigned long cookie) +{ + return efx_mcdi_rpc_async_ext(efx, cmd, inbuf, inlen, NULL, + complete, cookie, true, false, NULL); +} + +int efx_mcdi_rpc_client_sync(struct efx_nic *efx, u32 client_id, + unsigned int cmd, const efx_dword_t *inbuf, + size_t inlen, efx_dword_t *outbuf, size_t outlen, + size_t *outlen_actual, bool quiet) +{ + struct efx_mcdi_blocking_data *wait_data; + struct efx_mcdi_cmd *cmd_item; + unsigned int handle; + int rc; + + if (outlen_actual) + *outlen_actual = 0; + + wait_data = kmalloc(sizeof(*wait_data), GFP_KERNEL); + if (!wait_data) + return -ENOMEM; + + cmd_item = kmalloc(sizeof(*cmd_item), GFP_KERNEL); + if (!cmd_item) { + kfree(wait_data); + return -ENOMEM; + } + + kref_init(&wait_data->ref); + wait_data->done = false; + init_waitqueue_head(&wait_data->wq); + wait_data->outbuf = outbuf; + wait_data->outlen = outlen; + + kref_init(&cmd_item->ref); + cmd_item->quiet = quiet; + cmd_item->cookie = (unsigned long)wait_data; + cmd_item->atomic_completer = NULL; + cmd_item->completer = &efx_mcdi_rpc_completer; + cmd_item->cmd = cmd; + cmd_item->inlen = inlen; + cmd_item->inbuf = inbuf; + cmd_item->client_id = client_id; + + /* Claim an extra reference for the completer to put. 
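+	 * wait_data then holds two references: this one, dropped by
+	 * efx_mcdi_rpc_completer(), and ours, dropped at 'out' below. That
+	 * keeps wait_data alive even if we time out and the completer only
+	 * runs later.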
*/ + kref_get(&wait_data->ref); + rc = efx_mcdi_rpc_async_internal(efx, cmd_item, &handle, true, false); + if (rc) { + kref_put(&wait_data->ref, efx_mcdi_blocking_data_release); + goto out; + } + + if (!wait_event_timeout(wait_data->wq, wait_data->done, + MCDI_ACQUIRE_TIMEOUT + + efx_mcdi_rpc_timeout(efx, cmd)) && + !wait_data->done) { + netif_err(efx, drv, efx->net_dev, + "MC command 0x%x inlen %zu timed out (sync)\n", + cmd, inlen); + + efx_mcdi_cancel_cmd(efx, handle); + + wait_data->rc = -ETIMEDOUT; + wait_data->outlen_actual = 0; + } + + if (outlen_actual) + *outlen_actual = wait_data->outlen_actual; + rc = wait_data->rc; + +out: + kref_put(&wait_data->ref, efx_mcdi_blocking_data_release); + + return rc; +} + +static void _efx_mcdi_display_error(struct efx_nic *efx, unsigned int cmd, + size_t inlen, int raw, int arg, int rc) +{ + if (efx->net_dev) + netif_cond_dbg(efx, hw, efx->net_dev, + rc == -EPERM || efx_nic_hw_unavailable(efx), err, + "MC command 0x%x inlen %d failed rc=%d (raw=%d) arg=%d\n", + cmd, (int)inlen, rc, raw, arg); + else + pci_dbg(efx->pci_dev, + "MC command 0x%x inlen %d failed rc=%d (raw=%d) arg=%d\n", + cmd, (int)inlen, rc, raw, arg); +} + +void efx_mcdi_display_error(struct efx_nic *efx, unsigned int cmd, + size_t inlen, efx_dword_t *outbuf, + size_t outlen, int rc) +{ + int code = 0, arg = 0; + + if (outlen >= MC_CMD_ERR_CODE_OFST + 4) + code = MCDI_DWORD(outbuf, ERR_CODE); + if (outlen >= MC_CMD_ERR_ARG_OFST + 4) + arg = MCDI_DWORD(outbuf, ERR_ARG); + + _efx_mcdi_display_error(efx, cmd, inlen, code, arg, rc); +} + +/* Switch to polled MCDI completions. */ +static void _efx_mcdi_mode_poll(struct efx_mcdi_iface *mcdi) +{ + /* If already in polling mode, nothing to do. + * If in fail-fast state, don't switch to polled completion, FLR + * recovery will do that later. + */ + if (mcdi->mode == MCDI_MODE_EVENTS) { + struct efx_mcdi_cmd *cmd; + + mcdi->mode = MCDI_MODE_POLL; + + list_for_each_entry(cmd, &mcdi->cmd_list, list) + if (efx_cmd_running(cmd) && !cmd->polled) { + netif_dbg(mcdi->efx, drv, mcdi->efx->net_dev, + "converting command %#x inlen %zu to polled mode\n", + cmd->cmd, cmd->inlen); + efx_mcdi_cmd_mode_poll(mcdi, cmd); + } + } +} + +void efx_mcdi_mode_poll(struct efx_nic *efx) +{ + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + + if (!mcdi) + return; + + spin_lock_bh(&mcdi->iface_lock); + _efx_mcdi_mode_poll(mcdi); + spin_unlock_bh(&mcdi->iface_lock); +} + +void efx_mcdi_mode_event(struct efx_nic *efx) +{ + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + + if (!mcdi) + return; + + spin_lock_bh(&mcdi->iface_lock); + /* If already in event completion mode, nothing to do. + * If in fail-fast state, don't switch to event completion. FLR + * recovery will do that later. + */ + if (mcdi->mode == MCDI_MODE_POLL) + mcdi->mode = MCDI_MODE_EVENTS; + spin_unlock_bh(&mcdi->iface_lock); +} + +/* Set MCDI mode to fail to prevent any new commands, then cancel any + * outstanding commands. + * Caller must hold the mcdi iface_lock. 
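+ * There is no way out of MCDI_MODE_FAIL short of a reset:
+ * efx_mcdi_mode_poll() and efx_mcdi_mode_event() deliberately leave it
+ * alone, and each caller here schedules RESET_TYPE_MCDI_TIMEOUT so that
+ * FLR recovery can restore a working interface.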
+ */ +static void efx_mcdi_mode_fail(struct efx_nic *efx, struct list_head *cleanup_list) +{ + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + struct efx_mcdi_cmd *cmd; + + mcdi->mode = MCDI_MODE_FAIL; + + while (!list_empty(&mcdi->cmd_list)) { + cmd = list_first_entry(&mcdi->cmd_list, struct efx_mcdi_cmd, + list); + _efx_mcdi_cancel_cmd(mcdi, efx_mcdi_cmd_handle(cmd), cleanup_list); + } +} + +static void efx_mcdi_ev_death(struct efx_nic *efx, bool bist) +{ + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + + if (bist) { + efx->mc_bist_for_other_fn = true; + efx->type->mcdi_record_bist_event(efx); + } + spin_lock(&mcdi->iface_lock); + efx_mcdi_reboot_detected(efx); + /* if this is the result of a MC_CMD_REBOOT then don't schedule reset */ + if (bist || !efx_mcdi_reset_cmd_running(mcdi)) + efx_schedule_reset(efx, bist ? RESET_TYPE_MC_BIST : + RESET_TYPE_MC_FAILURE); + spin_unlock(&mcdi->iface_lock); +} + +bool efx_mcdi_process_event(struct efx_channel *channel, + efx_qword_t *event) +{ + struct efx_nic *efx = channel->efx; + int code = EFX_QWORD_FIELD(*event, MCDI_EVENT_CODE); + u32 data = EFX_QWORD_FIELD(*event, MCDI_EVENT_DATA); + + switch (code) { + case MCDI_EVENT_CODE_BADSSERT: + netif_err(efx, hw, efx->net_dev, + "MC watchdog or assertion failure at 0x%x\n", data); + efx_mcdi_ev_death(efx, false); + return true; + + case MCDI_EVENT_CODE_PMNOTICE: + netif_info(efx, wol, efx->net_dev, "MCDI PM event.\n"); + return true; + + case MCDI_EVENT_CODE_CMDDONE: + efx_mcdi_ev_cpl(efx, + MCDI_EVENT_FIELD(*event, CMDDONE_SEQ), + MCDI_EVENT_FIELD(*event, CMDDONE_DATALEN), + MCDI_EVENT_FIELD(*event, CMDDONE_ERRNO)); + return true; + case MCDI_EVENT_CODE_PROXY_RESPONSE: + efx_mcdi_ev_proxy_response(efx, + MCDI_EVENT_FIELD(*event, PROXY_RESPONSE_HANDLE), + MCDI_EVENT_FIELD(*event, PROXY_RESPONSE_RC)); + return true; + case MCDI_EVENT_CODE_SCHEDERR: + netif_dbg(efx, hw, efx->net_dev, + "MC Scheduler alert (0x%x)\n", data); + return true; + case MCDI_EVENT_CODE_REBOOT: + case MCDI_EVENT_CODE_MC_REBOOT: /* XXX should handle this differently? */ + efx_mcdi_ev_death(efx, false); + return true; + case MCDI_EVENT_CODE_MC_BIST: + netif_info(efx, hw, efx->net_dev, "MC entered BIST mode\n"); + efx_mcdi_ev_death(efx, true); + return true; + } + + return false; +} + +/************************************************************************** + * + * Specific request functions + * + ************************************************************************** + */ + +void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_VERSION_OUT_LEN); + size_t outlength; + const __le16 *ver_words; + size_t offset; + int rc; + + BUILD_BUG_ON(MC_CMD_GET_VERSION_IN_LEN != 0); + rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0, + outbuf, sizeof(outbuf), &outlength); + if (rc) + goto fail; + if (outlength < MC_CMD_GET_VERSION_OUT_LEN) { + rc = -EIO; + goto fail; + } + + ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION); + offset = scnprintf(buf, len, "%u.%u.%u.%u", + le16_to_cpu(ver_words[0]), + le16_to_cpu(ver_words[1]), + le16_to_cpu(ver_words[2]), + le16_to_cpu(ver_words[3])); + + /* EF10 may have multiple datapath firmware variants within a + * single version. Report which variants are running. 
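+	 * The variant IDs are appended to the version string, giving output
+	 * of the form "4.2.1.1003 rx1 tx1" (values here are illustrative).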
+ */ + if (efx_nic_rev(efx) == EFX_REV_HUNT_A0) { + MCDI_DECLARE_BUF(capbuf, MC_CMD_GET_CAPABILITIES_OUT_LEN); + unsigned int rx_id, tx_id; + size_t caplen; + + BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0); + rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0, + capbuf, sizeof(capbuf), &caplen); + if (rc) + goto fail; + if (caplen < MC_CMD_GET_CAPABILITIES_OUT_LEN) { + rc = -EIO; + goto fail; + } + + rx_id = MCDI_WORD(capbuf, GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID); + tx_id = MCDI_WORD(capbuf, GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID); + + offset += scnprintf(buf + offset, len - offset, " rx%x tx%x", + rx_id, tx_id); + + /* It's theoretically possible for the string to exceed 31 + * characters, though in practice the first three version + * components are short enough that this doesn't happen. + */ + if (WARN_ON(offset >= len)) + buf[0] = 0; + } + + return; + +fail: + pci_err(efx->pci_dev, "%s: failed rc=%d\n", __func__, rc); + buf[0] = 0; +} + +void efx_mcdi_print_fw_bundle_ver(struct efx_nic *efx, char *buf, size_t len) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_VERSION_V5_OUT_LEN); + MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_VERSION_EXT_IN_LEN); + unsigned int flags; + size_t outlength; + int rc; + + MCDI_SET_DWORD(inbuf, GET_VERSION_EXT_IN_EXT_FLAGS, 0); + + rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlength); + if (rc) + goto fail; + if (outlength < MC_CMD_GET_VERSION_V5_OUT_LEN) { + rc = -EIO; + pci_err(efx->pci_dev, "%s: failed rc=%d\n", __func__, rc); + goto fail; + } + + flags = MCDI_DWORD(outbuf, GET_VERSION_V5_OUT_FLAGS); + if (flags & BIT(MC_CMD_GET_VERSION_V5_OUT_BUNDLE_VERSION_PRESENT_LBN)) { + const __le32 *ver_dwords = (__le32 *)MCDI_PTR(outbuf, + GET_VERSION_V5_OUT_BUNDLE_VERSION); + size_t needed; + + needed = scnprintf(buf, len, "%u.%u.%u.%u", + le32_to_cpu(ver_dwords[0]), + le32_to_cpu(ver_dwords[1]), + le32_to_cpu(ver_dwords[2]), + le32_to_cpu(ver_dwords[3])); + if (WARN_ON(needed >= len)) + goto fail; + } else { + strscpy(buf, "N/A", len); + } + + return; + +fail: + buf[0] = 0; +} + +static int efx_mcdi_drv_attach_attempt(struct efx_nic *efx, + u32 fw_variant, u32 new_state, + u32 *flags, bool reattach) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_DRV_ATTACH_IN_V2_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_EXT_OUT_LEN); + size_t outlen; + int rc; + + MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_NEW_STATE, new_state); + MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1); + MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID, fw_variant); +#ifdef EFX_NOT_UPSTREAM + strscpy(MCDI_PTR(inbuf, DRV_ATTACH_IN_V2_DRIVER_VERSION), + EFX_DRIVER_VERSION, MC_CMD_DRV_ATTACH_IN_V2_DRIVER_VERSION_LEN); +#endif + + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + + /* If we're not the primary PF, trying to ATTACH with a firmware + * variant other than MC_CMD_FW_DONT_CARE will fail with EPERM. + * + * The firmware can also return EOPNOTSUPP, EBUSY or EINVAL if we've + * asked for some combinations of VI spreading. Such failures are + * handled at a slightly higher level. + * + * In these cases we can return without logging an error. 
+ */ + if (rc == -EPERM || rc == -EOPNOTSUPP || rc == -EBUSY || rc == -EINVAL) { + netif_dbg(efx, probe, efx->net_dev, + "efx_mcdi_drv_attach failed: %d\n", rc); + return rc; + } + + if (!reattach && (rc || outlen < MC_CMD_DRV_ATTACH_OUT_LEN)) { + efx_mcdi_display_error(efx, MC_CMD_DRV_ATTACH, sizeof(inbuf), + outbuf, outlen, rc); + if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN) + rc = -EIO; + return rc; + } + + if (new_state & (1 << MC_CMD_DRV_ATTACH_IN_ATTACH_LBN)) { + /* Were we already attached? */ + u32 old_state = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE); + + if ((old_state & (1 << MC_CMD_DRV_ATTACH_IN_ATTACH_LBN)) && + !reattach) + netif_warn(efx, probe, efx->net_dev, + "efx_mcdi_drv_attach attached when already attached\n"); + } + + if (!flags) + return rc; + + if (outlen >= MC_CMD_DRV_ATTACH_EXT_OUT_LEN) + *flags = MCDI_DWORD(outbuf, DRV_ATTACH_EXT_OUT_FUNC_FLAGS); + else + /* Mock up flags for older NICs */ + *flags = 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL | + 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED | + (efx_port_num(efx) == 0) << + MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY; + + return rc; +} + +static bool efx_mcdi_drv_attach_bad_spreading(u32 flags) +{ + /* We don't support full VI spreading, only the tx-only version. */ + return flags & (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_VI_SPREADING_ENABLED); +} + +int efx_mcdi_drv_detach(struct efx_nic *efx) +{ + return efx_mcdi_drv_attach_attempt(efx, MC_CMD_FW_DONT_CARE, 0, NULL, + false); +} + +int efx_mcdi_drv_attach(struct efx_nic *efx, u32 fw_variant, u32 *out_flags, + bool reattach) +{ +#ifdef EFX_NOT_UPSTREAM + bool request_spreading = false; +#endif + u32 flags; + u32 in; + int rc; + + in = (1 << MC_CMD_DRV_ATTACH_IN_ATTACH_LBN) | + (1 << MC_CMD_DRV_ATTACH_IN_WANT_V2_LINKCHANGES_LBN); + +#ifdef EFX_NOT_UPSTREAM + /* We request TX-only VI spreading. The firmware will only provide + * this if we're a single port device where this is actually useful. + */ + if (efx->performance_profile == EFX_PERFORMANCE_PROFILE_THROUGHPUT) { + request_spreading = true; + in |= 1 << MC_CMD_DRV_ATTACH_IN_WANT_TX_ONLY_SPREADING_LBN; + } +#endif + + rc = efx_mcdi_drv_attach_attempt(efx, fw_variant, in, &flags, reattach); + +#ifdef EFX_NOT_UPSTREAM + /* If we requested spreading and the firmware failed to provide that + * we should retry the attach without the request. + */ + if (request_spreading && (rc == -EINVAL || rc == -EOPNOTSUPP)) { + pci_dbg(efx->pci_dev, + "%s failed (%d) when requesting VI spreading mode; retrying\n", + __func__, rc); + + /* Retry without asking for spreading. 
*/ + in &= ~(1 << MC_CMD_DRV_ATTACH_IN_WANT_TX_ONLY_SPREADING_LBN); + rc = efx_mcdi_drv_attach_attempt(efx, fw_variant, + in, &flags, reattach); + } +#endif + + if (rc == 0 && efx_mcdi_drv_attach_bad_spreading(flags)) { + efx_mcdi_drv_detach(efx); + pci_err(efx->pci_dev, + "%s gave unsupported VI spreading mode\n", __func__); + rc = -EINVAL; + } + + if (rc == 0) { + pci_dbg(efx->pci_dev, + "%s attached with flags %#x\n", __func__, flags); + if (out_flags) + *out_flags = flags; + } + + return rc; +} + +int efx_mcdi_get_board_perm_mac(struct efx_nic *efx, u8 *mac_address) +{ + return efx_mcdi_get_board_cfg(efx, 0, mac_address, NULL, NULL); +} + +int efx_mcdi_get_board_cfg(struct efx_nic *efx, int port_num, u8 *mac_address, + u16 *fw_subtype_list, u32 *capabilities) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_BOARD_CFG_OUT_LENMAX); + size_t outlen, i; + int rc; + + BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0); + /* we need __aligned(2) for ether_addr_copy */ + BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST & 1); + BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST & 1); + + rc = efx_mcdi_rpc(efx, MC_CMD_GET_BOARD_CFG, NULL, 0, + outbuf, sizeof(outbuf), &outlen); + if (rc) + goto fail; + + if (outlen < MC_CMD_GET_BOARD_CFG_OUT_LENMIN) { + rc = -EIO; + goto fail; + } + + if (mac_address) + ether_addr_copy(mac_address, + port_num ? + MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1) : + MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0)); + if (fw_subtype_list) { + for (i = 0; + i < MCDI_VAR_ARRAY_LEN(outlen, + GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST); + i++) + fw_subtype_list[i] = MCDI_ARRAY_WORD( + outbuf, GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST, i); + for (; i < MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM; i++) + fw_subtype_list[i] = 0; + } + if (capabilities) { + if (port_num) + *capabilities = MCDI_DWORD(outbuf, + GET_BOARD_CFG_OUT_CAPABILITIES_PORT1); + else + *capabilities = MCDI_DWORD(outbuf, + GET_BOARD_CFG_OUT_CAPABILITIES_PORT0); + } + + return 0; + +fail: + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d len=%d\n", + __func__, rc, (int)outlen); + + return rc; +} + +int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_LOG_CTRL_IN_LEN); + u32 dest = 0; + int rc; + + if (uart) + dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_UART; + if (evq) + dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ; + + MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST, dest); + MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST_EVQ, dest_evq); + + BUILD_BUG_ON(MC_CMD_LOG_CTRL_OUT_LEN != 0); + + rc = efx_mcdi_rpc(efx, MC_CMD_LOG_CTRL, inbuf, sizeof(inbuf), + NULL, 0, NULL); + return rc; +} + +void efx_mcdi_log_puts(struct efx_nic *efx, const char *text) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_PUTS_IN_LENMAX); + struct timespec64 tv; + int inlen; + + ktime_get_real_ts64(&tv); + inlen = scnprintf(MCDI_PTR(inbuf, PUTS_IN_STRING), + MC_CMD_PUTS_IN_STRING_MAXNUM, + "{%lld %s}", (long long int)tv.tv_sec, + (text ? 
text : "")); + /* Count the NULL byte as well */ + inlen += MC_CMD_PUTS_IN_STRING_OFST + 1; + + MCDI_SET_DWORD(inbuf, PUTS_IN_DEST, 1); + efx_mcdi_rpc_quiet(efx, MC_CMD_PUTS, inbuf, inlen, NULL, 0, NULL); +} + +int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TYPES_OUT_LEN); + size_t outlen; + int rc; + + BUILD_BUG_ON(MC_CMD_NVRAM_TYPES_IN_LEN != 0); + + rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TYPES, NULL, 0, + outbuf, sizeof(outbuf), &outlen); + if (rc) + goto fail; + if (outlen < MC_CMD_NVRAM_TYPES_OUT_LEN) { + rc = -EIO; + goto fail; + } + + *nvram_types_out = MCDI_DWORD(outbuf, NVRAM_TYPES_OUT_TYPES); + return 0; + +fail: + if (rc != -EPERM) + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", + __func__, rc); + return rc; +} + +/* This function finds types using the new NVRAM_PARTITIONS mcdi. */ +static int efx_new_mcdi_nvram_types(struct efx_nic *efx, u32 *number, + u32 *nvram_types) +{ + efx_dword_t *outbuf = kzalloc(MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX_MCDI2, + GFP_KERNEL); + size_t outlen; + int rc; + + if (!outbuf) + return -ENOMEM; + + BUILD_BUG_ON(MC_CMD_NVRAM_PARTITIONS_IN_LEN != 0); + + rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_PARTITIONS, NULL, 0, + outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX_MCDI2, &outlen); + if (rc) + goto fail; + + *number = MCDI_DWORD(outbuf, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS); + + memcpy(nvram_types, MCDI_PTR(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID), + *number * sizeof(u32)); + +fail: + kfree(outbuf); + return rc; +} + +#define EFX_MCDI_NVRAM_DEFAULT_WRITE_LEN 128 + +int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type, + size_t *size_out, size_t *erase_size_out, + size_t *write_size_out, bool *protected_out) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_INFO_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_INFO_V2_OUT_LEN); + size_t write_size = 0; + size_t outlen; + int rc; + + MCDI_SET_DWORD(inbuf, NVRAM_INFO_IN_TYPE, type); + + rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_INFO, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc) + goto fail; + if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN) { + rc = -EIO; + goto fail; + } + + if (outlen >= MC_CMD_NVRAM_INFO_V2_OUT_LEN) + write_size = MCDI_DWORD(outbuf, NVRAM_INFO_V2_OUT_WRITESIZE); + else + write_size = EFX_MCDI_NVRAM_DEFAULT_WRITE_LEN; + + *write_size_out = write_size; + *size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE); + *erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE); + *protected_out = !!(MCDI_DWORD(outbuf, NVRAM_INFO_OUT_FLAGS) & + (1 << MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN)); + return 0; + +fail: + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); + return rc; +} + +static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_TEST_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TEST_OUT_LEN); + int rc; + + MCDI_SET_DWORD(inbuf, NVRAM_TEST_IN_TYPE, type); + + rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TEST, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), NULL); + if (rc) + return rc; + + switch (MCDI_DWORD(outbuf, NVRAM_TEST_OUT_RESULT)) { + case MC_CMD_NVRAM_TEST_PASS: + case MC_CMD_NVRAM_TEST_NOTSUPP: + return 0; + default: + netif_err(efx, hw, efx->net_dev, "%s: failed type=%u\n", + __func__, type); + return -EIO; + } +} + +/* This function tests all nvram partitions */ +int efx_mcdi_nvram_test_all(struct efx_nic *efx) +{ + u32 *nvram_types = kzalloc(MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX_MCDI2, + GFP_KERNEL); + unsigned int number; + int rc, i; + + if 
(!nvram_types) + return -ENOMEM; + + rc = efx_new_mcdi_nvram_types(efx, &number, nvram_types); + if (rc) + goto fail; + + /* Require at least one check */ + rc = -EAGAIN; + + for (i = 0; i < number; i++) { + if (nvram_types[i] == NVRAM_PARTITION_TYPE_PARTITION_MAP || + nvram_types[i] == NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG) + continue; + + rc = efx_mcdi_nvram_test(efx, nvram_types[i]); + if (rc) + goto fail; + } + +fail: + kfree(nvram_types); + return rc; +} + +/* Returns 1 if an assertion was read, 0 if no assertion had fired, + * negative on error. + */ +static int efx_mcdi_read_assertion(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_ASSERTS_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN); + unsigned int flags, index; + const char *reason; + size_t outlen; + int retry; + int rc; + + /* Attempt to read any stored assertion state before we reboot + * the mcfw out of the assertion handler. Retry twice, once + * because a boot-time assertion might cause this command to fail + * with EINTR. And once again because GET_ASSERTS can race with + * MC_CMD_REBOOT running on the other port. */ + retry = 2; + do { + MCDI_SET_DWORD(inbuf, GET_ASSERTS_IN_CLEAR, 1); + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_ASSERTS, + inbuf, MC_CMD_GET_ASSERTS_IN_LEN, + outbuf, sizeof(outbuf), &outlen); + if (rc == -EPERM) + return 0; + } while ((rc == -EINTR || rc == -EIO) && retry-- > 0); + + if (rc) { + efx_mcdi_display_error(efx, MC_CMD_GET_ASSERTS, + MC_CMD_GET_ASSERTS_IN_LEN, outbuf, + outlen, rc); + return rc; + } + if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN) + return -EIO; + + /* Print out any recorded assertion state */ + flags = MCDI_DWORD(outbuf, GET_ASSERTS_OUT_GLOBAL_FLAGS); + if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS) + return 0; + + reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL) + ? "system-level assertion" + : (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL) + ? "thread-level assertion" + : (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED) + ? "watchdog reset" + : "unknown assertion"; + netif_err(efx, hw, efx->net_dev, + "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason, + MCDI_DWORD(outbuf, GET_ASSERTS_OUT_SAVED_PC_OFFS), + MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS)); + + /* Print out the registers */ + for (index = 0; + index < MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM; + index++) + netif_err(efx, hw, efx->net_dev, "R%.2d (?): 0x%.8x\n", + 1 + index, + MCDI_ARRAY_DWORD(outbuf, GET_ASSERTS_OUT_GP_REGS_OFFS, + index)); + + return 1; +} + +static int efx_mcdi_exit_assertion(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN); + int rc; + + /* If the MC is running debug firmware, it might now be + * waiting for a debugger to attach, but we just want it to + * reboot. We set a flag that makes the command a no-op if it + * has already done so. + * The MCDI will thus return either 0 or -EIO. 
+ */ + BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0); + MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, + MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION); + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN, + NULL, 0, NULL); + if (rc == -EIO) + rc = 0; + if (rc) + efx_mcdi_display_error(efx, MC_CMD_REBOOT, MC_CMD_REBOOT_IN_LEN, + NULL, 0, rc); + return rc; +} + +int efx_mcdi_handle_assertion(struct efx_nic *efx) +{ + int rc; + + rc = efx_mcdi_read_assertion(efx); + if (rc <= 0) + return rc; + + return efx_mcdi_exit_assertion(efx); +} + +int efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_ID_LED_IN_LEN); + + BUILD_BUG_ON(EFX_LED_OFF != MC_CMD_LED_OFF); + BUILD_BUG_ON(EFX_LED_ON != MC_CMD_LED_ON); + BUILD_BUG_ON(EFX_LED_DEFAULT != MC_CMD_LED_DEFAULT); + + BUILD_BUG_ON(MC_CMD_SET_ID_LED_OUT_LEN != 0); + + MCDI_SET_DWORD(inbuf, SET_ID_LED_IN_STATE, mode); + + return efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf), + NULL, 0, NULL); +} + +static int efx_mcdi_reset_func(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_ENTITY_RESET_IN_LEN); + int rc; + + BUILD_BUG_ON(MC_CMD_ENTITY_RESET_OUT_LEN != 0); + MCDI_POPULATE_DWORD_1(inbuf, ENTITY_RESET_IN_FLAG, + ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET, 1); + rc = efx_mcdi_rpc(efx, MC_CMD_ENTITY_RESET, inbuf, sizeof(inbuf), + NULL, 0, NULL); + return rc; +} + +static int efx_mcdi_reset_mc(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN); + int rc; + + BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0); + MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, 0); + rc = efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, sizeof(inbuf), + NULL, 0, NULL); + /* White is black, and up is down */ + if (rc == -EIO) + return 0; + if (rc == 0) + rc = -EIO; + return rc; +} + +static int efx_flr(struct efx_nic *efx) +{ + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + LIST_HEAD(cleanup_list); + u16 seq; + int rc; + + netif_dbg(efx, drv, efx->net_dev, "Beginning FLR\n"); + + rc = pci_reset_function(efx->pci_dev); + if (rc) + return rc; + + if (!mcdi) + return 0; + + spin_lock_bh(&mcdi->iface_lock); + while (!list_empty(&mcdi->cmd_list)) { + struct efx_mcdi_cmd *cmd = + list_first_entry(&mcdi->cmd_list, + struct efx_mcdi_cmd, list); + + netif_dbg(efx, drv, efx->net_dev, + "aborting command %#x inlen %zu due to FLR\n", + cmd->cmd, cmd->inlen); + + kref_get(&cmd->ref); + + cmd->rc = -EIO; + + if (efx_cmd_running(cmd)) + efx->type->mcdi_put_buf(efx, cmd->bufid); + + efx_mcdi_remove_cmd(mcdi, cmd, &cleanup_list); + + if (cancel_delayed_work(&cmd->work)) + kref_put(&cmd->ref, efx_mcdi_cmd_release); + + kref_put(&cmd->ref, efx_mcdi_cmd_release); + } + + mcdi->db_held_by = NULL; + for (seq = 0; seq < ARRAY_SIZE(mcdi->seq_held_by); ++seq) + mcdi->seq_held_by[seq] = NULL; + mcdi->mode = MCDI_MODE_POLL; + + spin_unlock_bh(&mcdi->iface_lock); + + netif_dbg(efx, drv, efx->net_dev, "Cleaning up for FLR\n"); + + efx_mcdi_process_cleanup_list(efx, &cleanup_list); + + netif_dbg(efx, drv, efx->net_dev, "FLR complete\n"); + + return 0; +} + +int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method) +{ + int rc; + + /* If MCDI is down, we can't handle_assertion */ + if (method == RESET_TYPE_MCDI_TIMEOUT) + return efx_flr(efx); + + /* Recover from a failed assertion pre-reset */ + rc = efx_mcdi_handle_assertion(efx); + if (rc) + return rc; + + if (method == RESET_TYPE_DATAPATH || method == RESET_TYPE_MC_BIST) + rc = 0; + else if (method == RESET_TYPE_WORLD) + rc = efx_mcdi_reset_mc(efx); + else + rc = 
efx_mcdi_reset_func(efx); + + /* This will have reset our stats; clear our fixup values. */ + if (rc == 0) { + efx->rx_nodesc_drops_total = 0; + efx->rx_nodesc_drops_while_down = 0; + } + + return rc; +} + +static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type, + const u8 *mac, int *id_out) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_SET_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_SET_OUT_LEN); + size_t outlen; + int rc; + + MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_WOL_TYPE, type); + MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_FILTER_MODE, + MC_CMD_FILTER_MODE_SIMPLE); + ether_addr_copy(MCDI_PTR(inbuf, WOL_FILTER_SET_IN_MAGIC_MAC), mac); + + rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_SET, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc) + goto fail; + + if (outlen < MC_CMD_WOL_FILTER_SET_OUT_LEN) { + rc = -EIO; + goto fail; + } + + *id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_SET_OUT_FILTER_ID); + + return 0; + +fail: + *id_out = -1; + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); + return rc; + +} + +int +efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac, int *id_out) +{ + return efx_mcdi_wol_filter_set(efx, MC_CMD_WOL_TYPE_MAGIC, mac, id_out); +} + +int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_GET_OUT_LEN); + size_t outlen; + int rc; + + rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_GET, NULL, 0, + outbuf, sizeof(outbuf), &outlen); + if (rc) + goto fail; + + if (outlen < MC_CMD_WOL_FILTER_GET_OUT_LEN) { + rc = -EIO; + goto fail; + } + + *id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_GET_OUT_FILTER_ID); + + return 0; + +fail: + *id_out = -1; + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); + return rc; +} + +int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_REMOVE_IN_LEN); + int rc; + + MCDI_SET_DWORD(inbuf, WOL_FILTER_REMOVE_IN_FILTER_ID, (u32)id); + + rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_REMOVE, inbuf, sizeof(inbuf), + NULL, 0, NULL); + return rc; +} + +int efx_mcdi_wol_filter_reset(struct efx_nic *efx) +{ + int rc; + + rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_RESET, NULL, 0, NULL, 0, NULL); + return rc; +} + +int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled, + unsigned int *flags) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_WORKAROUND_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_WORKAROUND_EXT_OUT_LEN); + size_t outlen; + int rc; + + BUILD_BUG_ON(MC_CMD_WORKAROUND_OUT_LEN != 0); + MCDI_SET_DWORD(inbuf, WORKAROUND_IN_TYPE, type); + MCDI_SET_DWORD(inbuf, WORKAROUND_IN_ENABLED, enabled); + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_WORKAROUND, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + + if (!flags) + return 0; + + if (outlen >= MC_CMD_WORKAROUND_EXT_OUT_LEN) + *flags = MCDI_DWORD(outbuf, WORKAROUND_EXT_OUT_FLAGS); + else + *flags = 0; + + return 0; +} + +int efx_mcdi_get_workarounds(struct efx_nic *efx, unsigned int *impl_out, + unsigned int *enabled_out) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_WORKAROUNDS_OUT_LEN); + size_t outlen; + int rc; + + rc = efx_mcdi_rpc(efx, MC_CMD_GET_WORKAROUNDS, NULL, 0, + outbuf, sizeof(outbuf), &outlen); + if (rc) + goto fail; + + if (outlen < MC_CMD_GET_WORKAROUNDS_OUT_LEN) { + rc = -EIO; + goto fail; + } + + if (impl_out) + *impl_out = MCDI_DWORD(outbuf, GET_WORKAROUNDS_OUT_IMPLEMENTED); + + if (enabled_out) + *enabled_out = MCDI_DWORD(outbuf, GET_WORKAROUNDS_OUT_ENABLED); + + return 0; + 
+fail:
+	/* Older firmware lacks GET_WORKAROUNDS and this isn't especially
+	 * terrifying. The call site will have to deal with it though.
+	 */
+	netif_cond_dbg(efx, hw, efx->net_dev, rc == -ENOSYS, err,
+		       "%s: failed rc=%d\n", __func__, rc);
+	return rc;
+}
+
+int efx_mcdi_get_privilege_mask(struct efx_nic *efx, u32 *mask)
+{
+	MCDI_DECLARE_BUF(fi_outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
+	MCDI_DECLARE_BUF(pm_inbuf, MC_CMD_PRIVILEGE_MASK_IN_LEN);
+	MCDI_DECLARE_BUF(pm_outbuf, MC_CMD_PRIVILEGE_MASK_OUT_LEN);
+	size_t outlen;
+	u16 pf, vf;
+	int rc;
+
+	if (!efx || !mask)
+		return -EINVAL;
+
+	/* Get our function number */
+	rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0,
+			  fi_outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN, &outlen);
+	if (rc != 0)
+		return rc;
+	if (outlen < MC_CMD_GET_FUNCTION_INFO_OUT_LEN)
+		return -EIO;
+
+	pf = MCDI_DWORD(fi_outbuf, GET_FUNCTION_INFO_OUT_PF);
+	vf = MCDI_DWORD(fi_outbuf, GET_FUNCTION_INFO_OUT_VF);
+
+	MCDI_POPULATE_DWORD_2(pm_inbuf, PRIVILEGE_MASK_IN_FUNCTION,
+			      PRIVILEGE_MASK_IN_FUNCTION_PF, pf,
+			      PRIVILEGE_MASK_IN_FUNCTION_VF, vf);
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_PRIVILEGE_MASK,
+			  pm_inbuf, sizeof(pm_inbuf),
+			  pm_outbuf, sizeof(pm_outbuf), &outlen);
+
+	if (rc != 0)
+		return rc;
+	if (outlen < MC_CMD_PRIVILEGE_MASK_OUT_LEN)
+		return -EIO;
+
+	*mask = MCDI_DWORD(pm_outbuf, PRIVILEGE_MASK_OUT_OLD_MASK);
+
+	return 0;
+}
+
+/**
+ * efx_mcdi_rpc_proxy_cmd - Issue an MCDI command through a proxy
+ * @efx: NIC through which to issue the command
+ * @pf: Target physical function
+ * @vf: Target virtual function or MC_CMD_PROXY_CMD_IN_VF_NULL
+ * @request_buf: MCDI request to be issued
+ * @request_size: MCDI request size
+ * @response_buf: Buffer to put MCDI response (including header) to. May
+ *	be updated in the case of failure as well.
+ * @response_size: Size of the buffer for MCDI response
+ * @response_size_actual: Optional location to put actual response size
+ *
+ * Return: a negative error code or 0 on success.
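+ *
+ * Note that the proxied request is appended to the PROXY_CMD header as
+ * whole dwords, so @request_size must be a multiple of 4; other sizes
+ * are rejected with -EINVAL.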
+ */ +int efx_mcdi_rpc_proxy_cmd(struct efx_nic *efx, u32 pf, u32 vf, + const void *request_buf, size_t request_size, + void *response_buf, size_t response_size, + size_t *response_size_actual) +{ + size_t inlen; + efx_dword_t *inbuf; + int rc; + + BUILD_BUG_ON(MC_CMD_PROXY_CMD_IN_LEN % sizeof(*inbuf)); + BUILD_BUG_ON(MC_CMD_PROXY_CMD_OUT_LEN != 0); + + if (request_size % sizeof(*inbuf) != 0) + return -EINVAL; + + inlen = MC_CMD_PROXY_CMD_IN_LEN + request_size; + inbuf = kzalloc(inlen, GFP_KERNEL); + if (!inbuf) + return -ENOMEM; + + MCDI_POPULATE_DWORD_2(inbuf, PROXY_CMD_IN_TARGET, + PROXY_CMD_IN_TARGET_PF, pf, + PROXY_CMD_IN_TARGET_VF, vf); + + /* Proxied command should be located just after PROXY_CMD */ + memcpy(&inbuf[MC_CMD_PROXY_CMD_IN_LEN / sizeof(*inbuf)], + request_buf, request_size); + + rc = efx_mcdi_rpc(efx, MC_CMD_PROXY_CMD, inbuf, inlen, + response_buf, response_size, + response_size_actual); + + kfree(inbuf); + + return rc; +} + + +#define EFX_MCDI_NVRAM_LEN_MAX 128 + +int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_START_V2_IN_LEN); + int rc; + + MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type); + MCDI_POPULATE_DWORD_1(inbuf, NVRAM_UPDATE_START_V2_IN_FLAGS, + NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT, 1); + + BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0); + + rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf), + NULL, 0, NULL); + + return rc; +} + +int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type, + loff_t offset, u8 *buffer, size_t length) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_READ_IN_V2_LEN); + MCDI_DECLARE_BUF(outbuf, + MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX)); + size_t outlen; + int rc; + + MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type); + MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset); + MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length); + MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_V2_MODE, + MC_CMD_NVRAM_READ_IN_V2_DEFAULT); + + rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + + memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length); + return 0; +} + +int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type, + loff_t offset, const u8 *buffer, size_t length) +{ + efx_dword_t *inbuf; + size_t inlen; + int rc; + + inlen = ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4); + inbuf = kzalloc(inlen, GFP_KERNEL); + if (!inbuf) + return -ENOMEM; + + MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type); + MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset); + MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length); + memcpy(MCDI_PTR(inbuf, NVRAM_WRITE_IN_WRITE_BUFFER), buffer, length); + + BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0); + + rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf, inlen, NULL, 0, NULL); + kfree(inbuf); + + return rc; +} + +int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type, + loff_t offset, size_t length) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_ERASE_IN_LEN); + int rc; + + MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_TYPE, type); + MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_OFFSET, offset); + MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_LENGTH, length); + + BUILD_BUG_ON(MC_CMD_NVRAM_ERASE_OUT_LEN != 0); + + rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf), + NULL, 0, NULL); + return rc; +} + +int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type, + enum efx_update_finish_mode mode) +{ + 
MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_LEN);
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN);
+	size_t outlen;
+	u32 reboot;
+	int rc, rc2;
+
+	/* Reboot PHYs into the new firmware. mcfw reboot is handled
+	 * explicitly via ethtool.
+	 */
+	reboot = (type == MC_CMD_NVRAM_TYPE_PHY_PORT0 ||
+		  type == MC_CMD_NVRAM_TYPE_PHY_PORT1 ||
+		  type == MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO);
+	MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type);
+	MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_REBOOT, reboot);
+
+	/* Old firmware doesn't support background update finish and abort
+	 * operations. Fall back to waiting if the requested mode is not
+	 * supported.
+	 */
+	if (!efx_has_cap(efx, NVRAM_UPDATE_POLL_VERIFY_RESULT) ||
+	    (!efx_has_cap(efx, NVRAM_UPDATE_ABORT_SUPPORTED) &&
+	     mode == EFX_UPDATE_FINISH_ABORT))
+		mode = EFX_UPDATE_FINISH_WAIT;
+
+	MCDI_POPULATE_DWORD_4(inbuf, NVRAM_UPDATE_FINISH_V2_IN_FLAGS,
+			      NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT,
+			      (mode != EFX_UPDATE_FINISH_ABORT),
+			      NVRAM_UPDATE_FINISH_V2_IN_FLAG_RUN_IN_BACKGROUND,
+			      (mode == EFX_UPDATE_FINISH_BACKGROUND),
+			      NVRAM_UPDATE_FINISH_V2_IN_FLAG_POLL_VERIFY_RESULT,
+			      (mode == EFX_UPDATE_FINISH_POLL),
+			      NVRAM_UPDATE_FINISH_V2_IN_FLAG_ABORT,
+			      (mode == EFX_UPDATE_FINISH_ABORT));
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf),
+			  outbuf, sizeof(outbuf), &outlen);
+	if (!rc && outlen >= MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN) {
+		rc2 = MCDI_DWORD(outbuf, NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE);
+		if (rc2 != MC_CMD_NVRAM_VERIFY_RC_SUCCESS &&
+		    rc2 != MC_CMD_NVRAM_VERIFY_RC_PENDING)
+			netif_err(efx, drv, efx->net_dev,
+				  "NVRAM update failed verification with code 0x%x\n",
+				  rc2);
+		switch (rc2) {
+		case MC_CMD_NVRAM_VERIFY_RC_SUCCESS:
+			break;
+		case MC_CMD_NVRAM_VERIFY_RC_PENDING:
+			rc = -EAGAIN;
+			break;
+		case MC_CMD_NVRAM_VERIFY_RC_CMS_CHECK_FAILED:
+		case MC_CMD_NVRAM_VERIFY_RC_MESSAGE_DIGEST_CHECK_FAILED:
+		case MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHECK_FAILED:
+		case MC_CMD_NVRAM_VERIFY_RC_TRUSTED_APPROVERS_CHECK_FAILED:
+		case MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHAIN_CHECK_FAILED:
+			rc = -EIO;
+			break;
+		case MC_CMD_NVRAM_VERIFY_RC_INVALID_CMS_FORMAT:
+		case MC_CMD_NVRAM_VERIFY_RC_BAD_MESSAGE_DIGEST:
+			rc = -EINVAL;
+			break;
+		case MC_CMD_NVRAM_VERIFY_RC_NO_VALID_SIGNATURES:
+		case MC_CMD_NVRAM_VERIFY_RC_NO_TRUSTED_APPROVERS:
+		case MC_CMD_NVRAM_VERIFY_RC_NO_SIGNATURE_MATCH:
+		case MC_CMD_NVRAM_VERIFY_RC_REJECT_TEST_SIGNED:
+		case MC_CMD_NVRAM_VERIFY_RC_SECURITY_LEVEL_DOWNGRADE:
+			rc = -EPERM;
+			break;
+		default:
+			netif_err(efx, drv, efx->net_dev,
+				  "Unknown response to NVRAM_UPDATE_FINISH\n");
+			rc = -EIO;
+		}
+	}
+	return rc;
+}
+
+#define EFX_MCDI_NVRAM_UPDATE_FINISH_INITIAL_POLL_DELAY_MS 5
+#define EFX_MCDI_NVRAM_UPDATE_FINISH_MAX_POLL_DELAY_MS 5000
+#define EFX_MCDI_NVRAM_UPDATE_FINISH_RETRIES 185
+
+int efx_mcdi_nvram_update_finish_polled(struct efx_nic *efx, unsigned int type)
+{
+	unsigned int delay = EFX_MCDI_NVRAM_UPDATE_FINISH_INITIAL_POLL_DELAY_MS;
+	unsigned int retry = 0;
+	int rc;
+
+	/* NVRAM updates can take a long time (e.g. up to 1 minute for bundle
+	 * images). Polling for NVRAM update completion ensures that other MCDI
+	 * commands can be issued while the background NVRAM update is still
+	 * in progress.
+	 *
+	 * The initial call either completes the update synchronously, or
+	 * returns -EAGAIN to indicate processing is continuing. In the latter
+	 * case, we poll for at least 900 seconds, at increasing intervals
+	 * (5ms, 50ms, 500ms, 5s).
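+ *
+ * With EFX_MCDI_NVRAM_UPDATE_FINISH_RETRIES == 185 that works out as
+ * sleeps of 5ms + 50ms + 500ms followed by up to 183 sleeps of 5s,
+ * i.e. roughly 915 seconds in the worst case.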
+ */ + rc = efx_mcdi_nvram_update_finish(efx, type, EFX_UPDATE_FINISH_BACKGROUND); + while (rc == -EAGAIN) { + if (retry > EFX_MCDI_NVRAM_UPDATE_FINISH_RETRIES) + return -ETIMEDOUT; + retry++; + + msleep(delay); + if (delay < EFX_MCDI_NVRAM_UPDATE_FINISH_MAX_POLL_DELAY_MS) + delay *= 10; + + rc = efx_mcdi_nvram_update_finish(efx, type, EFX_UPDATE_FINISH_POLL); + } + return rc; +} + +int efx_mcdi_nvram_metadata(struct efx_nic *efx, unsigned int type, + u32 *subtype, u16 version[4], char *desc, + size_t descsize) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN); + efx_dword_t *outbuf; + size_t outlen; + u32 flags; + int rc; + + outbuf = kzalloc(MC_CMD_NVRAM_METADATA_OUT_LENMAX_MCDI2, GFP_KERNEL); + if (!outbuf) + return -ENOMEM; + + MCDI_SET_DWORD(inbuf, NVRAM_METADATA_IN_TYPE, type); + + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_NVRAM_METADATA, inbuf, + sizeof(inbuf), outbuf, + MC_CMD_NVRAM_METADATA_OUT_LENMAX_MCDI2, + &outlen); + if (rc) + goto out_free; + if (outlen < MC_CMD_NVRAM_METADATA_OUT_LENMIN) { + rc = -EIO; + goto out_free; + } + + flags = MCDI_DWORD(outbuf, NVRAM_METADATA_OUT_FLAGS); + + if (desc && descsize > 0) { + if (flags & BIT(MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_LBN)) { + if (descsize <= + MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_NUM(outlen)) { + rc = -E2BIG; + goto out_free; + } + + strncpy(desc, + MCDI_PTR(outbuf, NVRAM_METADATA_OUT_DESCRIPTION), + MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_NUM(outlen)); + desc[MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_NUM(outlen)] = '\0'; + } else { + desc[0] = '\0'; + } + } + + if (subtype) { + if (flags & BIT(MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN)) + *subtype = MCDI_DWORD(outbuf, NVRAM_METADATA_OUT_SUBTYPE); + else + *subtype = 0; + } + + if (version) { + if (flags & BIT(MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_LBN)) { + version[0] = MCDI_WORD(outbuf, NVRAM_METADATA_OUT_VERSION_W); + version[1] = MCDI_WORD(outbuf, NVRAM_METADATA_OUT_VERSION_X); + version[2] = MCDI_WORD(outbuf, NVRAM_METADATA_OUT_VERSION_Y); + version[3] = MCDI_WORD(outbuf, NVRAM_METADATA_OUT_VERSION_Z); + } else { + version[0] = 0; + version[1] = 0; + version[2] = 0; + version[3] = 0; + } + } + +out_free: + kfree(outbuf); + return rc; +} + +#ifdef CONFIG_SFC_MTD + +int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start, + size_t len, size_t *retlen, u8 *buffer) +{ + struct efx_mtd_partition *part = mtd->priv; + loff_t offset = start; + loff_t end = min_t(loff_t, start + len, mtd->size); + size_t chunk; + int rc = 0; + + while (offset < end) { + chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX); + rc = efx_mcdi_nvram_read(part->mtd_struct->efx, + part->nvram_type, + offset, buffer, chunk); + if (rc) + goto out; + offset += chunk; + buffer += chunk; + } +out: + *retlen = offset - start; + return rc; +} + +int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len) +{ + struct efx_mtd_partition *part = mtd->priv; + loff_t offset = start & ~((loff_t)(mtd->erasesize - 1)); + loff_t end = min_t(loff_t, start + len, mtd->size); + struct efx_nic *efx = part->mtd_struct->efx; + size_t chunk = part->mtd.erasesize; + int rc = 0; + + if (!part->updating) { + rc = efx_mcdi_nvram_update_start(efx, part->nvram_type); + if (rc) + goto out; + part->updating = true; + } + + /* The MCDI interface can in fact do multiple erase blocks at once; + * but erasing may be slow, so we make multiple calls here to avoid + * tripping the MCDI RPC timeout. 
*/
+	while (offset < end) {
+		rc = efx_mcdi_nvram_erase(efx, part->nvram_type, offset,
+					  chunk);
+		if (rc)
+			goto out;
+		offset += chunk;
+	}
+out:
+	return rc;
+}
+
+int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start,
+		       size_t len, size_t *retlen, const u8 *buffer)
+{
+	struct efx_mtd_partition *part = mtd->priv;
+	struct efx_nic *efx = part->mtd_struct->efx;
+	loff_t offset = start;
+	loff_t end = min_t(loff_t, start + len, mtd->size);
+	size_t chunk;
+	int rc = 0;
+
+	if (!part->updating) {
+		rc = efx_mcdi_nvram_update_start(efx, part->nvram_type);
+		if (rc)
+			goto out;
+		part->updating = true;
+	}
+
+	while (offset < end) {
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_USE_MTD_WRITESIZE)
+		chunk = min_t(size_t, end - offset, mtd->writesize);
+#else
+		chunk = min_t(size_t, end - offset, part->common.writesize);
+#endif
+		rc = efx_mcdi_nvram_write(efx, part->nvram_type, offset,
+					  buffer, chunk);
+		if (rc)
+			goto out;
+		offset += chunk;
+		buffer += chunk;
+	}
+out:
+	*retlen = offset - start;
+	return rc;
+}
+
+int efx_mcdi_mtd_sync(struct mtd_info *mtd)
+{
+	struct efx_mtd_partition *part = mtd->priv;
+	int rc = 0;
+
+	if (part->updating) {
+		part->updating = false;
+		rc = efx_mcdi_nvram_update_finish(part->mtd_struct->efx,
+						  part->nvram_type,
+						  EFX_UPDATE_FINISH_WAIT);
+	}
+
+	return rc;
+}
+
+void efx_mcdi_mtd_rename(struct efx_mtd_partition *part)
+{
+	struct efx_nic *efx = part->mtd_struct->efx;
+
+	if (!efx)
+		return;
+
+	snprintf(part->name, sizeof(part->name), "%s %s:%02x",
+		 efx->name, part->type_name, part->fw_subtype);
+}
+
+#endif /* CONFIG_SFC_MTD */
diff --git a/src/drivers/net/ethernet/sfc/mcdi.h b/src/drivers/net/ethernet/sfc/mcdi.h
new file mode 100644
index 0000000..d58ea62
--- /dev/null
+++ b/src/drivers/net/ethernet/sfc/mcdi.h
@@ -0,0 +1,809 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2008-2017 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_MCDI_H
+#define EFX_MCDI_H
+
+#include <linux/mutex.h>
+#include <linux/kref.h>
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_RHASHTABLE)
+#include <linux/rhashtable.h>
+#endif
+#include "mcdi_pcol.h"
+#ifdef EFX_NOT_UPSTREAM
+#include
+#endif
+
+/**
+ * enum efx_mcdi_mode - MCDI transaction mode
+ * @MCDI_MODE_POLL: poll for MCDI completion, until timeout
+ * @MCDI_MODE_EVENTS: wait for an mcdi_event. On timeout, poll once
+ * @MCDI_MODE_FAIL: we think MCDI is dead, so fail-fast all calls
+ */
+enum efx_mcdi_mode {
+	MCDI_MODE_POLL,
+	MCDI_MODE_EVENTS,
+	MCDI_MODE_FAIL,
+};
+
+/* On older firmware there is only a single thread on the MC, so even
+ * the shortest operation can be blocked for some time by an operation
+ * requested by a different function.
+ * See bug61269 for further discussion.
+ *
+ * On newer firmware that supports multithreaded MCDI commands we extend
+ * the timeout for commands we know can run longer.
+ */
+#define MCDI_RPC_TIMEOUT	(10 * HZ)
+#define MCDI_RPC_LONG_TIMEOUT	(60 * HZ)
+#define MCDI_RPC_POST_RST_TIME	(10 * HZ)
+
+#define MCDI_BUF_LEN (8 + MCDI_CTL_SDU_LEN_MAX)
+
+/**
+ * enum efx_mcdi_cmd_state - State for an individual MCDI command
+ * @MCDI_STATE_QUEUED: Command not started
+ * @MCDI_STATE_RETRY: Command was submitted and MC rejected with no resources.
+ *	Command will be retried once another command returns.
+ * @MCDI_STATE_PROXY: Command needs authenticating with proxy auth. Will be sent
+ *	again after a PROXY_COMPLETE event.
+ * @MCDI_STATE_RUNNING: Command was accepted and is running.
+ * @MCDI_STATE_PROXY_CANCELLED: Sender cancelled a running proxy auth command.
+ * @MCDI_STATE_RUNNING_CANCELLED: Sender cancelled a running command.
+ * @MCDI_STATE_FINISHED: Command has been completed or aborted. Used to resolve
+ *	a race between completion in another thread and the worker.
+ */
+enum efx_mcdi_cmd_state {
+	/* waiting to run */
+	MCDI_STATE_QUEUED,
+	/* we tried to run, but the MC said we have too many outstanding
+	 * commands
+	 */
+	MCDI_STATE_RETRY,
+	/* we sent the command and the MC is waiting for proxy auth */
+	MCDI_STATE_PROXY,
+	/* the command is running */
+	MCDI_STATE_RUNNING,
+	/* state was PROXY but the issuer cancelled the command */
+	MCDI_STATE_PROXY_CANCELLED,
+	/* the command is running but the issuer cancelled the command */
+	MCDI_STATE_RUNNING_CANCELLED,
+	/* processing of this command has completed.
+	 * Used to break races between contexts.
+	 */
+	MCDI_STATE_FINISHED,
+};
+
+typedef void efx_mcdi_async_completer(struct efx_nic *efx,
+				      unsigned long cookie, int rc,
+				      efx_dword_t *outbuf,
+				      size_t outlen_actual);
+
+/**
+ * struct efx_mcdi_cmd - An outstanding MCDI command
+ * @ref: Reference count. There will be one reference if the command is
+ *	in the mcdi_iface cmd_list, another if it's on a cleanup list,
+ *	and a third if it's queued in the work queue.
+ * @list: The data for this entry in mcdi->cmd_list
+ * @cleanup_list: The data for this entry in a cleanup list
+ * @work: The work item for this command, queued in mcdi->workqueue
+ * @mcdi: The mcdi_iface for this command
+ * @state: The state of this command
+ * @inlen: Size of @inbuf
+ * @inbuf: Input buffer
+ * @quiet: Whether to silence errors
+ * @polled: Whether this command is polled or evented
+ * @reboot_seen: Whether a reboot has been seen during this command,
+ *	to prevent duplicates
+ * @seq: Sequence number
+ * @bufid: Buffer ID from the NIC implementation
+ * @started: Jiffies this command was started at
+ * @cookie: Context for completion function
+ * @atomic_completer: Completion function for atomic context
+ * @completer: Completion function
+ * @handle: Handle for this command
+ * @cmd: Command number
+ * @rc: Command result
+ * @outlen: Size of @outbuf
+ * @outbuf: Output buffer
+ * @proxy_handle: Handle if this command was proxied
+ * @client_id: client ID on which to send this MCDI command
+ */
+struct efx_mcdi_cmd {
+	struct kref ref;
+	struct list_head list;
+	struct list_head cleanup_list;
+	struct delayed_work work;
+	struct efx_mcdi_iface *mcdi;
+	enum efx_mcdi_cmd_state state;
+	size_t inlen;
+	const efx_dword_t *inbuf;
+	bool quiet;
+	bool polled;
+	bool reboot_seen;
+	u8 seq;
+	u8 bufid;
+	unsigned long started;
+	unsigned long cookie;
+	efx_mcdi_async_completer *atomic_completer;
+	efx_mcdi_async_completer *completer;
+	unsigned int handle;
+	unsigned int cmd;
+	int rc;
+	size_t outlen;
+	efx_dword_t *outbuf;
+	u32 proxy_handle;
+	u32 client_id;
+	/* followed by inbuf data if necessary */
+};
+
+#ifdef EFX_NOT_UPSTREAM
+#define MCDI_NUM_LOG_COMMANDS 0x300
+#endif
+
+/**
+ * struct efx_mcdi_iface - MCDI protocol context
+ * @efx: The associated NIC
+ * @iface_lock: Serialise access to this structure
+ * @cmd_list: List of outstanding and running commands
+ * @workqueue: Workqueue used for delayed processing
+ * @outstanding_cleanups: Count of outstanding cleanup operations
+ * @cmd_complete_wq: Waitqueue for command completion
+ * @db_held_by: Command the MC doorbell is in use by
+ * @seq_held_by: Command each sequence number is in use by
+ * @prev_seq: The last used sequence number
+ * @prev_handle: The last used command handle
+ * @mode: Poll for mcdi completion, or wait for an mcdi_event
+ * @new_epoch: Indicates start of day or start of MC reboot recovery
+ * @logging_buffer: Buffer that may be used to build MCDI tracing messages
+ * @logging_enabled: Whether to trace MCDI
+ */
+struct efx_mcdi_iface {
+	struct efx_nic *efx;
+	spinlock_t iface_lock;
+	unsigned int outstanding_cleanups;
+	struct list_head cmd_list;
+	struct workqueue_struct *workqueue;
+	wait_queue_head_t cmd_complete_wq;
+	struct efx_mcdi_cmd *db_held_by;
+	struct efx_mcdi_cmd *seq_held_by[16];
+	unsigned int prev_handle;
+	enum efx_mcdi_mode mode;
+	u8 prev_seq;
+	bool new_epoch;
+#ifdef CONFIG_SFC_MCDI_LOGGING
+	bool logging_enabled;
+	char *logging_buffer;
+#ifdef EFX_NOT_UPSTREAM
+	/** @log_commands: Subset of MCDI commands to log */
+	DECLARE_BITMAP(log_commands, MCDI_NUM_LOG_COMMANDS);
+#endif
+#endif
+};
+
+struct efx_mcdi_mon {
+	struct efx_nic *efx;
+	struct efx_buffer dma_buf;
+	struct mutex update_lock;
+	unsigned long last_update;
+	struct device *device;
+	struct efx_mcdi_mon_attribute *attrs;
+	unsigned int n_attrs;
+	void *sensor_list;
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_RHASHTABLE)
+	struct rhashtable sensor_table;
+#endif
+	unsigned int generation_count;
+	unsigned int n_dynamic_sensors;
+	int pend_sensor_state_handle;
+};
+
+/**
+ * struct efx_mcdi_data - extra state for NICs that implement MCDI
+ * @iface: Interface/protocol state
+ * @hwmon: Hardware monitor state
+ * @fn_flags: Flags for this function, as returned by %MC_CMD_DRV_ATTACH.
+ */
+struct efx_mcdi_data {
+	struct efx_mcdi_iface iface;
+#ifdef CONFIG_SFC_MCDI_MON
+	struct efx_mcdi_mon hwmon;
+#endif
+	u32 fn_flags;
+};
+
+static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
+{
+	return efx->mcdi ? &efx->mcdi->iface : NULL;
+}
+
+#ifdef CONFIG_SFC_MCDI_MON
+static inline struct efx_mcdi_mon *efx_mcdi_mon(struct efx_nic *efx)
+{
+	return efx->mcdi ? &efx->mcdi->hwmon : NULL;
+}
+#endif
+
+#ifdef CONFIG_SFC_VDPA
+static inline bool is_mode_vdpa(struct efx_nic *efx)
+{
+	if (efx->pci_dev->is_virtfn &&
+	    efx->pci_dev->physfn &&
+	    efx->state == STATE_VDPA &&
+	    efx->vdpa_nic)
+		return true;
+
+	return false;
+}
+#else
+static inline bool is_mode_vdpa(struct efx_nic *efx)
+{
+	return false;
+}
+#endif
+
+int efx_mcdi_init(struct efx_nic *efx);
+void efx_mcdi_detach(struct efx_nic *efx);
+void efx_mcdi_fini(struct efx_nic *efx);
+
+int efx_mcdi_rpc_client_sync(struct efx_nic *efx, u32 client_id,
+			     unsigned int cmd, const efx_dword_t *inbuf,
+			     size_t inlen, efx_dword_t *outbuf, size_t outlen,
+			     size_t *outlen_actual, bool quiet);
+
+/**
+ * efx_mcdi_rpc_client - Issue an MCDI command on a non-base client.
+ *
+ * @efx: NIC through which to issue the command.
+ * @client_id: A dynamic client ID on which to send this MCDI command, or
+ *	MC_CMD_CLIENT_ID_SELF to send the command to the base client
+ *	(which makes this function identical to efx_mcdi_rpc()).
+ * @cmd: Command type number.
+ * @inbuf: Command parameters.
+ * @inlen: Length of command parameters, in bytes. Must be a multiple
+ *	of 4 and no greater than %MCDI_CTL_SDU_LEN_MAX_V1.
+ * @outbuf: Response buffer. May be %NULL if @outlen is 0.
+ * @outlen: Length of response buffer, in bytes.
If the actual + * response is longer than @outlen & ~3, it will be truncated + * to that length. + * @outlen_actual: Pointer through which to return the actual response + * length. May be %NULL if this is not needed. + * + * This is a superset of the functionality of efx_mcdi_rpc(), adding the + * @client_id. This function may sleep and therefore must be called in + * process context. + * + * Return: a negative error code or 0 on success. + */ +static inline int efx_mcdi_rpc_client(struct efx_nic *efx, u32 client_id, + unsigned int cmd, const efx_dword_t *inbuf, + size_t inlen, efx_dword_t *outbuf, + size_t outlen, size_t *outlen_actual) +{ + return efx_mcdi_rpc_client_sync(efx, client_id, cmd, inbuf, inlen, + outbuf, outlen, outlen_actual, false); +} + +/** + * efx_mcdi_rpc - Issue an MCDI command and wait for completion + * @efx: NIC through which to issue the command + * @cmd: Command type number + * @inbuf: Command parameters + * @inlen: Length of command parameters, in bytes. Must be a multiple + * of 4 and no greater than %MCDI_CTL_SDU_LEN_MAX_V1. + * @outbuf: Response buffer. May be %NULL if @outlen is 0. + * @outlen: Length of response buffer, in bytes. If the actual + * response is longer than @outlen & ~3, it will be truncated + * to that length. + * @outlen_actual: Pointer through which to return the actual response + * length. May be %NULL if this is not needed. + * + * This function may sleep and therefore must be called in process + * context. + * + * Return: A negative error code, or zero if successful. The error + * code may come from the MCDI response or may indicate a failure + * to communicate with the MC. In the former case, the response + * will still be copied to @outbuf and *@outlen_actual will be + * set accordingly. In the latter case, *@outlen_actual will be + * set to zero. + */ +static inline int efx_mcdi_rpc(struct efx_nic *efx, unsigned int cmd, + const efx_dword_t *inbuf, size_t inlen, + efx_dword_t *outbuf, size_t outlen, + size_t *outlen_actual) +{ + struct efx_nic *efx_pf; + + if (is_mode_vdpa(efx)) { + efx_pf = pci_get_drvdata(efx->pci_dev->physfn); + return efx_mcdi_rpc_client_sync(efx_pf, efx->client_id, cmd, + inbuf, inlen, outbuf, outlen, + outlen_actual, false); + } + + return efx_mcdi_rpc_client_sync(efx, MC_CMD_CLIENT_ID_SELF, cmd, + inbuf, inlen, outbuf, outlen, + outlen_actual, false); +} + +/* Normally, on receiving an error code in the MCDI response, + * efx_mcdi_rpc/efx_mcdi_rpc_client will log an error message + * containing (among other things) the raw error code, by means of + * efx_mcdi_display_error. This _quiet version suppresses that; + * if the caller wishes to log the error conditionally on the + * return code, it should call this function and is then responsible + * for calling efx_mcdi_display_error as needed. 
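+ *
+ * A minimal sketch of that pattern (hypothetical caller; the -ENOENT
+ * handling is illustrative, not prescribed):
+ *
+ *	rc = efx_mcdi_rpc_quiet(efx, cmd, inbuf, inlen,
+ *				outbuf, sizeof(outbuf), &outlen);
+ *	if (rc && rc != -ENOENT)
+ *		efx_mcdi_display_error(efx, cmd, inlen, outbuf, outlen, rc);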
+ */ +static inline int efx_mcdi_rpc_client_quiet(struct efx_nic *efx, u32 client_id, + unsigned int cmd, + const efx_dword_t *inbuf, + size_t inlen, efx_dword_t *outbuf, + size_t outlen, size_t *outlen_actual) +{ + return efx_mcdi_rpc_client_sync(efx, client_id, cmd, inbuf, inlen, + outbuf, outlen, outlen_actual, true); +} + +static inline int efx_mcdi_rpc_quiet(struct efx_nic *efx, unsigned int cmd, + const efx_dword_t *inbuf, size_t inlen, + efx_dword_t *outbuf, size_t outlen, + size_t *outlen_actual) +{ + struct efx_nic *efx_pf; + + if (is_mode_vdpa(efx)) { + efx_pf = pci_get_drvdata(efx->pci_dev->physfn); + return efx_mcdi_rpc_client_quiet(efx_pf, efx->client_id, cmd, + inbuf, inlen, outbuf, outlen, + outlen_actual); + } + + return efx_mcdi_rpc_client_quiet(efx, MC_CMD_CLIENT_ID_SELF, cmd, + inbuf, inlen, outbuf, outlen, + outlen_actual); +} + +int efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd, + const efx_dword_t *inbuf, size_t inlen, + efx_mcdi_async_completer *complete, + unsigned long cookie); + +int efx_mcdi_rpc_async_quiet(struct efx_nic *efx, unsigned int cmd, + const efx_dword_t *inbuf, size_t inlen, + efx_mcdi_async_completer *complete, + unsigned long cookie); +int efx_mcdi_rpc_async_ext(struct efx_nic *efx, unsigned int cmd, + const efx_dword_t *inbuf, size_t inlen, + efx_mcdi_async_completer *atomic_completer, + efx_mcdi_async_completer *completer, + unsigned long cookie, bool quiet, + bool immediate_only, unsigned int *handle); + +/* Attempt to cancel an outstanding command. + * This function guarantees that the completion function will never be called + * after it returns. The command may or may not actually be cancelled. + */ +void efx_mcdi_cancel_cmd(struct efx_nic *efx, unsigned int handle); + +void efx_mcdi_display_error(struct efx_nic *efx, unsigned int cmd, + size_t inlen, efx_dword_t *outbuf, + size_t outlen, int rc); + +int efx_mcdi_poll_reboot(struct efx_nic *efx); +void efx_mcdi_mode_poll(struct efx_nic *efx); +void efx_mcdi_mode_event(struct efx_nic *efx); +/* Wait for all commands and all cleanup for them to be complete */ +void efx_mcdi_wait_for_cleanup(struct efx_nic *efx); +/* Wait for all commands to be complete */ +int efx_mcdi_wait_for_quiescence(struct efx_nic *efx, + unsigned int timeout_jiffies); +/* Indicate to the MCDI module that MC reset processing is complete + * so new commands can now be sent. + */ +void efx_mcdi_post_reset(struct efx_nic *efx); + +bool efx_mcdi_process_event(struct efx_channel *channel, efx_qword_t *event); +#ifdef CONFIG_SFC_MCDI_MON +void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev); +void efx_mcdi_dynamic_sensor_event(struct efx_nic *efx, efx_qword_t *ev); +#else +static inline void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev) +{ +} +static inline void efx_mcdi_dynamic_sensor_event(struct efx_nic *efx, efx_qword_t *ev) +{ +} +#endif + +/* We expect that 16- and 32-bit fields in MCDI requests and responses + * are appropriately aligned, but 64-bit fields are only + * 32-bit-aligned. Also, on Siena we must copy to the MC shared + * memory strictly 32 bits at a time, so add any necessary padding. 
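+ *
+ * For example, MCDI_DECLARE_BUF(buf, 10) declares an array of three
+ * efx_dword_t (12 bytes), rounding the 10-byte payload up to a whole
+ * number of dwords.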
+ */ +#define MCDI_TX_BUF_LEN(_len) DIV_ROUND_UP((_len), 4) +#define _MCDI_DECLARE_BUF(_name, _len) \ + efx_dword_t _name[DIV_ROUND_UP(_len, 4)] +#define MCDI_DECLARE_BUF(_name, _len) \ + _MCDI_DECLARE_BUF(_name, _len) = {{{0}}} +#define MCDI_DECLARE_BUF_ERR(_name) \ + MCDI_DECLARE_BUF(_name, 8) +#define _MCDI_PTR(_buf, _offset) \ + ((u8 *)(_buf) + (_offset)) +#define MCDI_PTR(_buf, _field) \ + _MCDI_PTR(_buf, MC_CMD_ ## _field ## _OFST) +/* Use MCDI_STRUCT_ functions to access members of MCDI structuredefs. + * _buf should point to the start of the structure, typically obtained with + * MCDI_DECLARE_STRUCT_PTR(structure) = _MCDI_DWORD(mcdi_buf, FIELD_WHICH_IS_STRUCT); + */ +#define MCDI_STRUCT_PTR(_buf, _field) \ + _MCDI_PTR(_buf, _field ## _OFST) +#define _MCDI_CHECK_ALIGN(_ofst, _align) \ + ((void)BUILD_BUG_ON_ZERO((_ofst) & (_align - 1)), \ + (_ofst)) +#define _MCDI_DWORD(_buf, _field) \ + ((_buf) + (_MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _OFST, 4) >> 2)) +#define _MCDI_STRUCT_DWORD(_buf, _field) \ + ((_buf) + (_MCDI_CHECK_ALIGN(_field ## _OFST, 4) >> 2)) + +#define MCDI_SET_BYTE(_buf, _field, _value) do { \ + BUILD_BUG_ON(MC_CMD_ ## _field ## _LEN != 1); \ + *(u8 *)MCDI_PTR(_buf, _field) = _value; \ + } while (0) +#define MCDI_STRUCT_SET_BYTE(_buf, _field, _value) do { \ + BUILD_BUG_ON(_field ## _LEN != 1); \ + *(u8 *)MCDI_STRUCT_PTR(_buf, _field) = _value; \ + } while (0) +#define MCDI_STRUCT_POPULATE_BYTE_1(_buf, _field, _name, _value) do { \ + efx_dword_t _temp; \ + EFX_POPULATE_DWORD_1(_temp, _name, _value); \ + MCDI_STRUCT_SET_BYTE(_buf, _field, \ + EFX_DWORD_FIELD(_temp, EFX_BYTE_0)); \ + } while (0) +#define MCDI_BYTE(_buf, _field) \ + ((void)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 1), \ + *MCDI_PTR(_buf, _field)) +#define MCDI_STRUCT_BYTE(_buf, _field) \ + ((void)BUILD_BUG_ON_ZERO(_field ## _LEN != 1), \ + *MCDI_STRUCT_PTR(_buf, _field)) +#define MCDI_SET_WORD(_buf, _field, _value) do { \ + BUILD_BUG_ON(MC_CMD_ ## _field ## _LEN != 2); \ + BUILD_BUG_ON(MC_CMD_ ## _field ## _OFST & 1); \ + *(__force __le16 *)MCDI_PTR(_buf, _field) = cpu_to_le16(_value);\ + } while (0) +#define MCDI_STRUCT_SET_WORD(_buf, _field, _value) do { \ + BUILD_BUG_ON(_field ## _LEN != 2); \ + BUILD_BUG_ON(_field ## _OFST & 1); \ + *(__force __le16 *)MCDI_STRUCT_PTR(_buf, _field) = cpu_to_le16(_value);\ + } while (0) +#define MCDI_WORD(_buf, _field) \ + ((void)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2), \ + le16_to_cpu(*(__force const __le16 *)MCDI_PTR(_buf, _field))) +#define MCDI_STRUCT_WORD(_buf, _field) \ + ((void)BUILD_BUG_ON_ZERO(_field ## _LEN != 2), \ + le16_to_cpu(*(__force const __le16 *)MCDI_STRUCT_PTR(_buf, _field))) +/* Read a 16-bit field defined in the protocol as being big-endian. */ +#define MCDI_WORD_BE(_buf, _field) \ + ((void)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2), \ + *(__force const __be16 *)MCDI_PTR(_buf, _field)) +/* Write a 16-bit field defined in the protocol as being big-endian. 
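+ * Note that _value must already be a __be16: these macros store it
+ * verbatim and perform no byte-swapping of their own.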
*/ +#define MCDI_SET_WORD_BE(_buf, _field, _value) do { \ + BUILD_BUG_ON(MC_CMD_ ## _field ## _LEN != 2); \ + BUILD_BUG_ON(MC_CMD_ ## _field ## _OFST & 1); \ + *(__force __be16 *)MCDI_PTR(_buf, _field) = (_value); \ + } while (0) +#define MCDI_STRUCT_SET_WORD_BE(_buf, _field, _value) do { \ + BUILD_BUG_ON(_field ## _LEN != 2); \ + BUILD_BUG_ON(_field ## _OFST & 1); \ + *(__force __be16 *)MCDI_STRUCT_PTR(_buf, _field) = (_value); \ + } while (0) +#define MCDI_SET_DWORD(_buf, _field, _value) \ + EFX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0, _value) +#define MCDI_STRUCT_SET_DWORD(_buf, _field, _value) \ + EFX_POPULATE_DWORD_1(*_MCDI_STRUCT_DWORD(_buf, _field), EFX_DWORD_0, _value) +#define MCDI_DWORD(_buf, _field) \ + EFX_DWORD_FIELD(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0) +#define MCDI_STRUCT_DWORD(_buf, _field) \ + EFX_DWORD_FIELD(*_MCDI_STRUCT_DWORD(_buf, _field), EFX_DWORD_0) +/* Read a 32-bit field defined in the protocol as being big-endian. */ +#define MCDI_DWORD_BE(_buf, _field) \ + ((void)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 4), \ + *(__force const __be32 *)MCDI_PTR(_buf, _field)) +/* Write a 32-bit field defined in the protocol as being big-endian. */ +#define MCDI_SET_DWORD_BE(_buf, _field, _value) do { \ + BUILD_BUG_ON(MC_CMD_ ## _field ## _LEN < 4); \ + BUILD_BUG_ON(MC_CMD_ ## _field ## _OFST & 3); \ + *(__force __be32 *)MCDI_PTR(_buf, _field) = (_value); \ + } while (0) +#define MCDI_STRUCT_SET_DWORD_BE(_buf, _field, _value) do { \ + BUILD_BUG_ON(_field ## _LEN != 4); \ + BUILD_BUG_ON(_field ## _OFST & 3); \ + *(__force __be32 *)MCDI_STRUCT_PTR(_buf, _field) = (_value); \ + } while (0) +#define MCDI_POPULATE_DWORD_1(_buf, _field, _name1, _value1) \ + EFX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), \ + MC_CMD_ ## _name1, _value1) +#define MCDI_POPULATE_DWORD_2(_buf, _field, _name1, _value1, \ + _name2, _value2) \ + EFX_POPULATE_DWORD_2(*_MCDI_DWORD(_buf, _field), \ + MC_CMD_ ## _name1, _value1, \ + MC_CMD_ ## _name2, _value2) +#define MCDI_POPULATE_DWORD_3(_buf, _field, _name1, _value1, \ + _name2, _value2, _name3, _value3) \ + EFX_POPULATE_DWORD_3(*_MCDI_DWORD(_buf, _field), \ + MC_CMD_ ## _name1, _value1, \ + MC_CMD_ ## _name2, _value2, \ + MC_CMD_ ## _name3, _value3) +#define MCDI_POPULATE_DWORD_4(_buf, _field, _name1, _value1, \ + _name2, _value2, _name3, _value3, \ + _name4, _value4) \ + EFX_POPULATE_DWORD_4(*_MCDI_DWORD(_buf, _field), \ + MC_CMD_ ## _name1, _value1, \ + MC_CMD_ ## _name2, _value2, \ + MC_CMD_ ## _name3, _value3, \ + MC_CMD_ ## _name4, _value4) +#define MCDI_POPULATE_DWORD_5(_buf, _field, _name1, _value1, \ + _name2, _value2, _name3, _value3, \ + _name4, _value4, _name5, _value5) \ + EFX_POPULATE_DWORD_5(*_MCDI_DWORD(_buf, _field), \ + MC_CMD_ ## _name1, _value1, \ + MC_CMD_ ## _name2, _value2, \ + MC_CMD_ ## _name3, _value3, \ + MC_CMD_ ## _name4, _value4, \ + MC_CMD_ ## _name5, _value5) +#define MCDI_POPULATE_DWORD_6(_buf, _field, _name1, _value1, \ + _name2, _value2, _name3, _value3, \ + _name4, _value4, _name5, _value5, \ + _name6, _value6) \ + EFX_POPULATE_DWORD_6(*_MCDI_DWORD(_buf, _field), \ + MC_CMD_ ## _name1, _value1, \ + MC_CMD_ ## _name2, _value2, \ + MC_CMD_ ## _name3, _value3, \ + MC_CMD_ ## _name4, _value4, \ + MC_CMD_ ## _name5, _value5, \ + MC_CMD_ ## _name6, _value6) +#define MCDI_POPULATE_DWORD_7(_buf, _field, _name1, _value1, \ + _name2, _value2, _name3, _value3, \ + _name4, _value4, _name5, _value5, \ + _name6, _value6, _name7, _value7) \ + EFX_POPULATE_DWORD_7(*_MCDI_DWORD(_buf, _field), \ + MC_CMD_ ## 
_name1, _value1, \ + MC_CMD_ ## _name2, _value2, \ + MC_CMD_ ## _name3, _value3, \ + MC_CMD_ ## _name4, _value4, \ + MC_CMD_ ## _name5, _value5, \ + MC_CMD_ ## _name6, _value6, \ + MC_CMD_ ## _name7, _value7) +#define MCDI_POPULATE_DWORD_8(_buf, _field, _name1, _value1, \ + _name2, _value2, _name3, _value3, \ + _name4, _value4, _name5, _value5, \ + _name6, _value6, _name7, _value7, \ + _name8, _value8) \ + EFX_POPULATE_DWORD_8(*_MCDI_DWORD(_buf, _field), \ + MC_CMD_ ## _name1, _value1, \ + MC_CMD_ ## _name2, _value2, \ + MC_CMD_ ## _name3, _value3, \ + MC_CMD_ ## _name4, _value4, \ + MC_CMD_ ## _name5, _value5, \ + MC_CMD_ ## _name6, _value6, \ + MC_CMD_ ## _name7, _value7, \ + MC_CMD_ ## _name8, _value8) +#define MCDI_POPULATE_DWORD_9(_buf, _field, _name1, _value1, \ + _name2, _value2, _name3, _value3, \ + _name4, _value4, _name5, _value5, \ + _name6, _value6, _name7, _value7, \ + _name8, _value8, _name9, _value9) \ + EFX_POPULATE_DWORD_9(*_MCDI_DWORD(_buf, _field), \ + MC_CMD_ ## _name1, _value1, \ + MC_CMD_ ## _name2, _value2, \ + MC_CMD_ ## _name3, _value3, \ + MC_CMD_ ## _name4, _value4, \ + MC_CMD_ ## _name5, _value5, \ + MC_CMD_ ## _name6, _value6, \ + MC_CMD_ ## _name7, _value7, \ + MC_CMD_ ## _name8, _value8, \ + MC_CMD_ ## _name9, _value9) +#define MCDI_POPULATE_DWORD_10(_buf, _field, _name1, _value1, \ + _name2, _value2, _name3, _value3, \ + _name4, _value4, _name5, _value5, \ + _name6, _value6, _name7, _value7, \ + _name8, _value8, _name9, _value9, \ + _name10, _value10) \ + EFX_POPULATE_DWORD_10(*_MCDI_DWORD(_buf, _field), \ + MC_CMD_ ## _name1, _value1, \ + MC_CMD_ ## _name2, _value2, \ + MC_CMD_ ## _name3, _value3, \ + MC_CMD_ ## _name4, _value4, \ + MC_CMD_ ## _name5, _value5, \ + MC_CMD_ ## _name6, _value6, \ + MC_CMD_ ## _name7, _value7, \ + MC_CMD_ ## _name8, _value8, \ + MC_CMD_ ## _name9, _value9, \ + MC_CMD_ ## _name10, _value10) +#define MCDI_POPULATE_DWORD_11(_buf, _field, _name1, _value1, \ + _name2, _value2, _name3, _value3, \ + _name4, _value4, _name5, _value5, \ + _name6, _value6, _name7, _value7, \ + _name8, _value8, _name9, _value9, \ + _name10, _value10, _name11, _value11) \ + EFX_POPULATE_DWORD_11(*_MCDI_DWORD(_buf, _field), \ + MC_CMD_ ## _name1, _value1, \ + MC_CMD_ ## _name2, _value2, \ + MC_CMD_ ## _name3, _value3, \ + MC_CMD_ ## _name4, _value4, \ + MC_CMD_ ## _name5, _value5, \ + MC_CMD_ ## _name6, _value6, \ + MC_CMD_ ## _name7, _value7, \ + MC_CMD_ ## _name8, _value8, \ + MC_CMD_ ## _name9, _value9, \ + MC_CMD_ ## _name10, _value10, \ + MC_CMD_ ## _name11, _value11) +#define MCDI_POPULATE_DWORD_12(_buf, _field, _name1, _value1, \ + _name2, _value2, _name3, _value3, \ + _name4, _value4, _name5, _value5, \ + _name6, _value6, _name7, _value7, \ + _name8, _value8, _name9, _value9, \ + _name10, _value10, _name11, _value11, \ + _name12, _value12) \ + EFX_POPULATE_DWORD_12(*_MCDI_DWORD(_buf, _field), \ + MC_CMD_ ## _name1, _value1, \ + MC_CMD_ ## _name2, _value2, \ + MC_CMD_ ## _name3, _value3, \ + MC_CMD_ ## _name4, _value4, \ + MC_CMD_ ## _name5, _value5, \ + MC_CMD_ ## _name6, _value6, \ + MC_CMD_ ## _name7, _value7, \ + MC_CMD_ ## _name8, _value8, \ + MC_CMD_ ## _name9, _value9, \ + MC_CMD_ ## _name10, _value10, \ + MC_CMD_ ## _name11, _value11, \ + MC_CMD_ ## _name12, _value12) +#define MCDI_SET_QWORD(_buf, _field, _value) \ + do { \ + EFX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[0], \ + EFX_DWORD_0, (u32)(_value)); \ + EFX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[1], \ + EFX_DWORD_0, (u64)(_value) >> 32); \ + } while (0) +#define MCDI_QWORD(_buf, 
_field) \ + (EFX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[0], EFX_DWORD_0) | \ + (u64)EFX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[1], EFX_DWORD_0) << 32) +#define MCDI_FIELD(_ptr, _type, _field) \ + EFX_EXTRACT_DWORD( \ + *(efx_dword_t *) \ + _MCDI_PTR(_ptr, MC_CMD_ ## _type ## _ ## _field ## _OFST & ~3),\ + MC_CMD_ ## _type ## _ ## _field ## _LBN & 0x1f, \ + (MC_CMD_ ## _type ## _ ## _field ## _LBN & 0x1f) + \ + MC_CMD_ ## _type ## _ ## _field ## _WIDTH - 1) + +#define _MCDI_ARRAY_PTR(_buf, _field, _index, _align) \ + (_MCDI_PTR(_buf, _MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _OFST, _align))\ + + (_index) * _MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _LEN, _align)) +#define MCDI_DECLARE_STRUCT_PTR(_name) \ + efx_dword_t *_name +#define MCDI_ARRAY_STRUCT_PTR(_buf, _field, _index) \ + ((efx_dword_t *)_MCDI_ARRAY_PTR(_buf, _field, _index, 4)) +#define MCDI_VAR_ARRAY_LEN(_len, _field) \ + min_t(size_t, MC_CMD_ ## _field ## _MAXNUM, \ + ((_len) - MC_CMD_ ## _field ## _OFST) / MC_CMD_ ## _field ## _LEN) +#define MCDI_ARRAY_WORD(_buf, _field, _index) \ + (BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2) + \ + le16_to_cpu(*(__force const __le16 *) \ + _MCDI_ARRAY_PTR(_buf, _field, _index, 2))) +#define _MCDI_ARRAY_DWORD(_buf, _field, _index) \ + (BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 4) + \ + (efx_dword_t *)_MCDI_ARRAY_PTR(_buf, _field, _index, 4)) +#define MCDI_SET_ARRAY_DWORD(_buf, _field, _index, _value) \ + EFX_SET_DWORD_FIELD(*_MCDI_ARRAY_DWORD(_buf, _field, _index), \ + EFX_DWORD_0, _value) +#define MCDI_ARRAY_DWORD(_buf, _field, _index) \ + EFX_DWORD_FIELD(*_MCDI_ARRAY_DWORD(_buf, _field, _index), EFX_DWORD_0) +#define _MCDI_ARRAY_QWORD(_buf, _field, _index) \ + (BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 8) + \ + (efx_dword_t *)_MCDI_ARRAY_PTR(_buf, _field, _index, 4)) +#define MCDI_SET_ARRAY_QWORD(_buf, _field, _index, _value) \ + do { \ + EFX_SET_DWORD_FIELD(_MCDI_ARRAY_QWORD(_buf, _field, _index)[0],\ + EFX_DWORD_0, (u32)(_value)); \ + EFX_SET_DWORD_FIELD(_MCDI_ARRAY_QWORD(_buf, _field, _index)[1],\ + EFX_DWORD_0, (u64)(_value) >> 32); \ + } while (0) +#define MCDI_ARRAY_FIELD(_buf, _field1, _type, _index, _field2) \ + MCDI_FIELD(MCDI_ARRAY_STRUCT_PTR(_buf, _field1, _index), \ + _type ## _TYPEDEF, _field2) + +#define MCDI_EVENT_FIELD(_ev, _field) \ + EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field) + +#define MCDI_CAPABILITY(field) \ + MC_CMD_GET_CAPABILITIES_V8_OUT_ ## field ## _LBN + +#define MCDI_CAPABILITY_OFST(field) \ + MC_CMD_GET_CAPABILITIES_V8_OUT_ ## field ## _OFST + +#define efx_has_cap(efx, field) \ + efx->type->check_caps(efx, \ + MCDI_CAPABILITY(field), \ + MCDI_CAPABILITY_OFST(field)) + +void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len); +void efx_mcdi_print_fw_bundle_ver(struct efx_nic *efx, char *buf, size_t len); +int efx_mcdi_drv_attach(struct efx_nic *efx, u32 fw_variant, u32 *out_flags, + bool reattach); +int efx_mcdi_drv_detach(struct efx_nic *efx); +int efx_mcdi_get_board_cfg(struct efx_nic *efx, int port_num, u8 *mac_address, + u16 *fw_subtype_list, u32 *capabilities); +int efx_mcdi_get_board_perm_mac(struct efx_nic *efx, u8 *mac_address); +int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq); +void efx_mcdi_log_puts(struct efx_nic *efx, const char *text); +int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out); +int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type, + size_t *size_out, size_t *erase_size_out, + size_t *write_size_out, bool *protected_out); +int efx_mcdi_nvram_test_all(struct efx_nic 
*efx); +int efx_new_mcdi_nvram_test_all(struct efx_nic *efx); +int efx_mcdi_handle_assertion(struct efx_nic *efx); +int efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode); +int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac, + int *id_out); +int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out); +int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id); +int efx_mcdi_wol_filter_reset(struct efx_nic *efx); +int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method); +int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled, + unsigned int *flags); +int efx_mcdi_get_workarounds(struct efx_nic *efx, unsigned int *impl_out, + unsigned int *enabled_out); +int efx_mcdi_get_privilege_mask(struct efx_nic *efx, u32 *mask); +int efx_mcdi_rpc_proxy_cmd(struct efx_nic *efx, u32 pf, u32 vf, + const void *request_buf, size_t request_size, + void *response_buf, size_t response_size, + size_t *response_size_actual); + +#ifdef CONFIG_SFC_MCDI_MON +int efx_mcdi_mon_probe(struct efx_nic *efx); +void efx_mcdi_mon_remove(struct efx_nic *efx); +#else +static inline int efx_mcdi_mon_probe(struct efx_nic *efx) { return 0; } +static inline void efx_mcdi_mon_remove(struct efx_nic *efx) {} +#endif + +int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type); +int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type, + loff_t offset, u8 *buffer, size_t length); +int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type, + loff_t offset, const u8 *buffer, size_t length); +int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type, + loff_t offset, size_t length); +int efx_mcdi_nvram_metadata(struct efx_nic *efx, unsigned int type, + u32 *subtype, u16 version[4], char *desc, + size_t descsize); + +enum efx_update_finish_mode { + EFX_UPDATE_FINISH_WAIT, + EFX_UPDATE_FINISH_BACKGROUND, + EFX_UPDATE_FINISH_POLL, + EFX_UPDATE_FINISH_ABORT, +}; + +int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type, + enum efx_update_finish_mode mode); +int efx_mcdi_nvram_update_finish_polled(struct efx_nic *efx, unsigned int type); + +#ifdef CONFIG_SFC_MTD +int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start, size_t len, + size_t *retlen, u8 *buffer); +int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len); +int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start, size_t len, + size_t *retlen, const u8 *buffer); +int efx_mcdi_mtd_sync(struct mtd_info *mtd); +void efx_mcdi_mtd_rename(struct efx_mtd_partition *part); +#endif + +#endif /* EFX_MCDI_H */ diff --git a/src/drivers/net/ethernet/sfc/mcdi_filters.c b/src/drivers/net/ethernet/sfc/mcdi_filters.c new file mode 100644 index 0000000..41cf3cb --- /dev/null +++ b/src/drivers/net/ethernet/sfc/mcdi_filters.c @@ -0,0 +1,3300 @@ +// SPDX-License-Identifier: GPL-2.0-only +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2018 Solarflare Communications Inc. + * Copyright 2019-2020 Xilinx Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
+ */ + +#include "mcdi_filters.h" +#include "debugfs.h" +#include "mcdi_pcol.h" +#include "mcdi.h" +#include "efx.h" +#include "rx_common.h" + +/* The maximum size of a shared RSS context */ +/* TODO: this should really be from the mcdi protocol export */ +#define EFX_MCDI_MAX_SHARED_RSS_CONTEXT_SIZE 64UL + +#define EFX_MCDI_FILTER_ID_INVALID 0xffff + +#define EFX_MCDI_FILTER_DEV_UC_MAX 32 +#define EFX_MCDI_FILTER_DEV_MC_MAX 512 + +/* An arbitrary search limit for the software hash table */ +#define EFX_MCDI_FILTER_SEARCH_LIMIT 200 + +enum efx_mcdi_filter_default_filters { + EFX_MCDI_BCAST, + EFX_MCDI_UCDEF, + EFX_MCDI_MCDEF, + EFX_MCDI_VXLAN4_UCDEF, + EFX_MCDI_VXLAN4_MCDEF, + EFX_MCDI_VXLAN6_UCDEF, + EFX_MCDI_VXLAN6_MCDEF, + EFX_MCDI_NVGRE4_UCDEF, + EFX_MCDI_NVGRE4_MCDEF, + EFX_MCDI_NVGRE6_UCDEF, + EFX_MCDI_NVGRE6_MCDEF, + EFX_MCDI_GENEVE4_UCDEF, + EFX_MCDI_GENEVE4_MCDEF, + EFX_MCDI_GENEVE6_UCDEF, + EFX_MCDI_GENEVE6_MCDEF, + + EFX_MCDI_NUM_DEFAULT_FILTERS +}; + +/* Per-VLAN filters information */ +struct efx_mcdi_filter_vlan { + struct list_head list; + u16 vid; + u16 uc[EFX_MCDI_FILTER_DEV_UC_MAX]; + u16 mc[EFX_MCDI_FILTER_DEV_MC_MAX]; + u16 default_filters[EFX_MCDI_NUM_DEFAULT_FILTERS]; + bool warn_on_zero_filters; +}; + +struct efx_mcdi_filter_table { +/* The MCDI match masks supported by this fw & hw, in order of priority */ + u32 rx_match_mcdi_flags[ + MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM * 2]; + unsigned int rx_match_count; + + struct rw_semaphore lock; /* Protects entries */ + struct { + unsigned long spec; /* pointer to spec plus flag bits */ +/* AUTO_OLD is used to mark and sweep MAC filters for the device address lists. */ +/* unused flag 1UL */ +#define EFX_MCDI_FILTER_FLAG_AUTO_OLD 2UL +#define EFX_MCDI_FILTER_FLAGS 3UL + u64 handle; /* firmware handle */ + } *entry; + /* are the filters meant to be on the NIC */ + bool push_filters; + /* Function to get addresses from */ + const struct efx_mcdi_filter_addr_ops *addr_ops; + /* Shadow of net_device address lists, guarded by mac_lock */ + struct efx_mcdi_dev_addr dev_uc_list[EFX_MCDI_FILTER_DEV_UC_MAX]; + struct efx_mcdi_dev_addr dev_mc_list[EFX_MCDI_FILTER_DEV_MC_MAX]; + int dev_uc_count; + int dev_mc_count; + bool uc_promisc; + bool mc_promisc; + bool mc_promisc_last; + bool mc_overflow; /* Too many MC addrs; should always imply mc_promisc */ + bool vlan_filter; + struct list_head vlan_list; + bool mc_chaining; + bool encap_supported; + bool must_restore_filters; + bool must_restore_rss_contexts; + bool rss_context_exclusive; + bool additional_rss_modes; +#ifdef EFX_NOT_UPSTREAM +#if IS_MODULE(CONFIG_SFC_DRIVERLINK) + bool kernel_blocked[EFX_DL_FILTER_BLOCK_KERNEL_MAX]; +#endif +#endif +}; + +static void efx_mcdi_filter_remove_old(struct efx_nic *efx); + +static unsigned int efx_mcdi_filter_vlan_count_filters(struct efx_nic *efx, + struct efx_mcdi_filter_vlan *vlan); + +static struct efx_filter_spec * +efx_mcdi_filter_entry_spec(const struct efx_mcdi_filter_table *table, + unsigned int filter_idx) +{ + return (struct efx_filter_spec *)(table->entry[filter_idx].spec & + ~EFX_MCDI_FILTER_FLAGS); +} + +static unsigned int +efx_mcdi_filter_entry_flags(const struct efx_mcdi_filter_table *table, + unsigned int filter_idx) +{ + return table->entry[filter_idx].spec & EFX_MCDI_FILTER_FLAGS; +} + +static u32 efx_mcdi_filter_get_unsafe_id(struct efx_nic *efx, u32 filter_id) +{ + (void) efx; + WARN_ON_ONCE(filter_id == EFX_MCDI_FILTER_ID_INVALID); + return filter_id & (EFX_MCDI_FILTER_TBL_ROWS - 1); +} + +static 
unsigned int efx_mcdi_filter_get_unsafe_pri(u32 filter_id) +{ + return filter_id / (EFX_MCDI_FILTER_TBL_ROWS * 2); +} + +static u32 efx_mcdi_filter_make_filter_id(unsigned int pri, u16 idx) +{ + return pri * EFX_MCDI_FILTER_TBL_ROWS * 2 + idx; +} + +#ifdef CONFIG_DEBUG_FS +static void efx_debugfs_read_dev_list(struct seq_file *file, + struct efx_mcdi_dev_addr *dev_list, + size_t list_len) +{ + size_t i; + static u8 zero[ETH_ALEN]; + + for (i = 0; i < list_len; i++) + if (!ether_addr_equal(dev_list[i].addr, zero)) + seq_printf(file, "%02x:%02x:%02x:%02x:%02x:%02x\n", + dev_list[i].addr[0], dev_list[i].addr[1], + dev_list[i].addr[2], dev_list[i].addr[3], + dev_list[i].addr[4], dev_list[i].addr[5]); +} + +static int efx_debugfs_read_dev_uc_list(struct seq_file *file, void *data) +{ + efx_debugfs_read_dev_list(file, data, + EFX_MCDI_FILTER_DEV_UC_MAX); + return 0; +} + +static int efx_debugfs_read_dev_mc_list(struct seq_file *file, void *data) +{ + efx_debugfs_read_dev_list(file, data, + EFX_MCDI_FILTER_DEV_MC_MAX); + return 0; +} + +int efx_debugfs_read_filter_list(struct seq_file *file, void *data) +{ + struct efx_mcdi_filter_table *table; + struct efx_nic *efx = data; + int i; + + down_read(&efx->filter_sem); + table = efx->filter_state; + if (!table || !table->entry) { + up_read(&efx->filter_sem); + return -ENETDOWN; + } + + /* deliberately don't lock the table->lock, so that we can + * dump the table mid-operation if needed. + */ + for (i = 0; i < EFX_MCDI_FILTER_TBL_ROWS; ++i) { + struct efx_filter_spec *spec = + efx_mcdi_filter_entry_spec(table, i); + char filter[256]; + + if (spec) { + efx_debugfs_print_filter(filter, sizeof(filter), spec); + + seq_printf(file, "%d[%#04llx],%#x = %s\n", + i, table->entry[i].handle & 0xffff, + efx_mcdi_filter_entry_flags(table, i), + filter); + } + } + + up_read(&efx->filter_sem); + return 0; +} + +static const struct efx_debugfs_parameter efx_debugfs[] = { + _EFX_RAW_PARAMETER(filters, efx_debugfs_read_filter_list), + {NULL} +}; + +static const struct efx_debugfs_parameter filter_debugfs[] = { + EFX_INT_PARAMETER(struct efx_mcdi_filter_table, dev_uc_count), + EFX_INT_PARAMETER(struct efx_mcdi_filter_table, dev_mc_count), + _EFX_PARAMETER(struct efx_mcdi_filter_table, dev_uc_list, + efx_debugfs_read_dev_uc_list), + _EFX_PARAMETER(struct efx_mcdi_filter_table, dev_mc_list, + efx_debugfs_read_dev_mc_list), + EFX_BOOL_PARAMETER(struct efx_mcdi_filter_table, uc_promisc), + EFX_BOOL_PARAMETER(struct efx_mcdi_filter_table, mc_promisc), + EFX_BOOL_PARAMETER(struct efx_mcdi_filter_table, mc_promisc_last), + EFX_BOOL_PARAMETER(struct efx_mcdi_filter_table, mc_overflow), + EFX_BOOL_PARAMETER(struct efx_mcdi_filter_table, mc_chaining), +#ifdef EFX_NOT_UPSTREAM +#if IS_MODULE(CONFIG_SFC_DRIVERLINK) + _EFX_PARAMETER(struct efx_mcdi_filter_table, kernel_blocked, + efx_debugfs_read_kernel_blocked), +#endif +#endif + {NULL} +}; +#else /* CONFIG_DEBUG_FS */ +static const struct efx_debugfs_parameter efx_debugfs[] = { + {NULL} +}; + +static const struct efx_debugfs_parameter filter_debugfs[] = { + {NULL} +}; +#endif /* CONFIG_DEBUG_FS */ + +static bool efx_mcdi_filter_vlan_filter(struct efx_nic *efx) +{ + struct efx_mcdi_filter_table *table = efx->filter_state; + + if (WARN_ON(!table)) + return false; + + /* vDPA VLAN filter support to be added in VDPALINUX-184 */ + if (efx->state == STATE_VDPA) + return false; + + if (!(efx->net_dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) + return false; + + /* If we don't support VLAN+mismatch filters then if we + * want promiscuous 
or allmulti mode we'll need + * to do it without vlan filter + */ + if ((table->uc_promisc || table->mc_promisc) && + !efx_mcdi_filter_match_supported(efx, false, + (EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC_IG))) + return false; + + return true; +} + +/* Decide whether a filter should be exclusive or else should allow + * delivery to additional recipients. Currently we decide that + * filters for specific local unicast MAC and IP addresses are + * exclusive. + */ +static bool efx_mcdi_filter_is_exclusive(const struct efx_filter_spec *spec) +{ +#ifdef EFX_NOT_UPSTREAM + /* special case ether type and ip proto filters for onload */ + if (spec->match_flags == EFX_FILTER_MATCH_ETHER_TYPE || + spec->match_flags == EFX_FILTER_MATCH_IP_PROTO || + spec->match_flags == + (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO)) + return true; +#endif + if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC && + !is_multicast_ether_addr(spec->loc_mac)) + return true; + + if ((spec->match_flags & + (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) == + (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) { + if (spec->ether_type == htons(ETH_P_IP) && + !(ipv4_is_multicast(spec->loc_host[0]) || + ipv4_is_lbcast(spec->loc_host[0]))) + return true; + if (spec->ether_type == htons(ETH_P_IPV6) && + ((const u8 *)spec->loc_host)[0] != 0xff) + return true; + } + + return false; +} + +static void +efx_mcdi_filter_invalidate_filter_id(struct efx_mcdi_filter_table *table, + unsigned int filter_idx) +{ + struct efx_mcdi_filter_vlan *vlan; + int i; + + list_for_each_entry(vlan, &table->vlan_list, list) { + for (i = 0; i < EFX_MCDI_NUM_DEFAULT_FILTERS; ++i) + if (vlan->default_filters[i] == filter_idx) { + vlan->default_filters[i] = + EFX_MCDI_FILTER_ID_INVALID; + return; + } + + for (i = 0; i < table->dev_uc_count; i++) + if (vlan->uc[i] == filter_idx) { + vlan->uc[i] = EFX_MCDI_FILTER_ID_INVALID; + return; + } + + for (i = 0; i < table->dev_mc_count; i++) + if (vlan->mc[i] == filter_idx) { + vlan->mc[i] = EFX_MCDI_FILTER_ID_INVALID; + return; + } + } +} + +static void +efx_mcdi_filter_set_entry(struct efx_nic *efx, + unsigned int filter_idx, + const struct efx_filter_spec *spec, + unsigned int flags) +{ + struct efx_mcdi_filter_table *table = efx->filter_state; + + table->entry[filter_idx].spec = (unsigned long)spec | flags; + if (!spec) + efx_mcdi_filter_invalidate_filter_id(table, filter_idx); +} + +static void +efx_mcdi_filter_push_prep_set_match_fields(struct efx_nic *efx, + const struct efx_filter_spec *spec, + efx_dword_t *inbuf) +{ + u32 match_fields = 0; + enum efx_encap_type encap_type = + efx_filter_get_encap_type(spec); + unsigned uc_match, mc_match; + + MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, + efx_mcdi_filter_is_exclusive(spec) ? + MC_CMD_FILTER_OP_IN_OP_INSERT : + MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE); + + /* Convert match flags and values. Unlike almost + * everything else in MCDI, these fields are in + * network byte order. + */ +#define COPY_FIELD(encap, gen_flag, gen_field, mcdi_field) \ + do { \ + if ((spec)->match_flags & \ + EFX_FILTER_MATCH_ ## gen_flag) \ + COPY_VALUE((encap), (spec)->gen_field, \ + mcdi_field); \ + } while (0) + +#define COPY_VALUE(encap, value, mcdi_field) \ + do { \ + match_fields |= (encap) ? \ + 1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ ## \ + mcdi_field ## _LBN : \ + 1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ ## \ + mcdi_field ## _LBN; \ + BUILD_BUG_ON(MC_CMD_FILTER_OP_EXT_IN_ ## \ + mcdi_field ## _LEN < \ + sizeof(value)); \ + memcpy((encap) ? 
\ + MCDI_PTR(inbuf, FILTER_OP_EXT_IN_IFRM_ ## \ + mcdi_field) : \ + MCDI_PTR(inbuf, FILTER_OP_EXT_IN_ ## \ + mcdi_field), \ + &(value), sizeof(value)); \ + } while (0) + + /* first handle encapsulation type */ + if (encap_type) { + __be16 ether_type = + htons(encap_type & EFX_ENCAP_FLAG_IPV6 ? + ETH_P_IPV6 : ETH_P_IP); + u8 outer_ip_proto; /* needs to be a variable for macro magic */ + /* Field in MCDI is big-endian; but MCDI_POPULATE_DWORD_* will + * call cpu_to_le32 on the value. Thus by feeding it a value + * that's "anti-cpu-endian" we ensure that it produces a be32. + * We shift right after swabbing, thereby throwing away the old + * high byte, and effectively getting a swab24. + */ + u32 tni = __swab32(spec->tni) >> 8; + bool vxlan = false; + + switch (encap_type & EFX_ENCAP_TYPES_MASK) { + case EFX_ENCAP_TYPE_VXLAN: + vxlan = true; + fallthrough; + case EFX_ENCAP_TYPE_GENEVE: + COPY_VALUE(false, ether_type, ETHER_TYPE); + outer_ip_proto = IPPROTO_UDP; + COPY_VALUE(false, outer_ip_proto, IP_PROTO); + if (spec->match_flags & + EFX_FILTER_MATCH_ENCAP_TNI) { + match_fields |= 1 << + MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID_LBN; + } + /* We always need to set the type field, even if we're + * not matching on the TNI. + */ + MCDI_POPULATE_DWORD_2(inbuf, + FILTER_OP_EXT_IN_VNI_OR_VSID, + FILTER_OP_EXT_IN_VNI_TYPE, + vxlan ? MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN : + MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE, + FILTER_OP_EXT_IN_VNI_VALUE, + tni); + break; + case EFX_ENCAP_TYPE_NVGRE: + COPY_VALUE(false, ether_type, ETHER_TYPE); + outer_ip_proto = IPPROTO_GRE; + COPY_VALUE(false, outer_ip_proto, IP_PROTO); + if (spec->match_flags & + EFX_FILTER_MATCH_ENCAP_TNI) { + match_fields |= 1 << + MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID_LBN; + MCDI_POPULATE_DWORD_2(inbuf, + FILTER_OP_EXT_IN_VNI_OR_VSID, + FILTER_OP_EXT_IN_VSID_VALUE, + tni, + FILTER_OP_EXT_IN_VSID_TYPE, + MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_NVGRE); + } + break; + default: + WARN_ON(1); + } + COPY_FIELD(false, OUTER_LOC_MAC, outer_loc_mac, DST_MAC); + + uc_match = + MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN; + mc_match = + MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN; + } else { + uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN; + mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN; + } + + /* special case for mismatch */ + if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) + match_fields |= + is_multicast_ether_addr(spec->loc_mac) ? 
+ 1 << mc_match : + 1 << uc_match; + + /* VLAN is always outer */ + COPY_FIELD(false, INNER_VID, inner_vid, INNER_VLAN); + COPY_FIELD(false, OUTER_VID, outer_vid, OUTER_VLAN); + /* outer MAC only applies if encap */ + if (encap_type) + COPY_FIELD(false, OUTER_LOC_MAC, outer_loc_mac, DST_MAC); + /* everything else is inner or outer based on encap type */ + COPY_FIELD(encap_type, REM_HOST, rem_host, SRC_IP); + COPY_FIELD(encap_type, LOC_HOST, loc_host, DST_IP); + COPY_FIELD(encap_type, REM_MAC, rem_mac, SRC_MAC); + COPY_FIELD(encap_type, REM_PORT, rem_port, SRC_PORT); + COPY_FIELD(encap_type, LOC_MAC, loc_mac, DST_MAC); + COPY_FIELD(encap_type, LOC_PORT, loc_port, DST_PORT); + COPY_FIELD(encap_type, ETHER_TYPE, ether_type, ETHER_TYPE); + COPY_FIELD(encap_type, IP_PROTO, ip_proto, IP_PROTO); +#undef COPY_FIELD +#undef COPY_VALUE + MCDI_SET_DWORD(inbuf, FILTER_OP_IN_MATCH_FIELDS, + match_fields); +} + +static void efx_mcdi_filter_push_prep(struct efx_nic *efx, + const struct efx_filter_spec *spec, + efx_dword_t *inbuf, u64 handle, + struct efx_rss_context *ctx, + const struct efx_vport *vpx, + bool replacing) +{ + unsigned int port_id; + u32 flags = spec->flags; + + /* If RSS filter, caller better have given us an RSS context */ + if (flags & EFX_FILTER_FLAG_RX_RSS) { + /* We don't have the ability to return an error, so we'll just + * log a warning and disable RSS for the filter. + */ + if (WARN_ON_ONCE(!ctx)) + flags &= ~EFX_FILTER_FLAG_RX_RSS; + else if (WARN_ON_ONCE(ctx->context_id == EFX_MCDI_RSS_CONTEXT_INVALID)) + flags &= ~EFX_FILTER_FLAG_RX_RSS; + } + + memset(inbuf, 0, MC_CMD_FILTER_OP_EXT_IN_LEN); + + if (replacing) { + MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, + MC_CMD_FILTER_OP_IN_OP_REPLACE); + MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, handle); + } else { + efx_mcdi_filter_push_prep_set_match_fields(efx, spec, inbuf); + } + + port_id = efx->vport.vport_id; + if (flags & EFX_FILTER_FLAG_VPORT_ID) { + /* Again, no ability to return an error. Caller needs to catch + * this case for us, so log a warning if they didn't. + */ + if (WARN_ON_ONCE(!vpx)) + flags &= ~EFX_FILTER_FLAG_VPORT_ID; + else if (WARN_ON_ONCE(vpx->vport_id == EVB_PORT_ID_NULL)) + flags &= ~EFX_FILTER_FLAG_VPORT_ID; + else + port_id = vpx->vport_id; + } + if (flags & EFX_FILTER_FLAG_STACK_ID) + port_id |= EVB_STACK_ID(spec->stack_id); + MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, port_id ); + MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST, + spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ? + MC_CMD_FILTER_OP_IN_RX_DEST_DROP : + MC_CMD_FILTER_OP_IN_RX_DEST_HOST); + +#ifdef EFX_NOT_UPSTREAM + /* Onload might set this flag to request hardware multicast loopback */ +#endif + if (spec->flags & EFX_FILTER_FLAG_TX) + MCDI_POPULATE_DWORD_2(inbuf, FILTER_OP_IN_TX_DEST, + FILTER_OP_IN_TX_DEST_MAC, 1, + FILTER_OP_IN_TX_DEST_PM, 1); + else if (spec->flags & EFX_FILTER_FLAG_LOOPBACK) + MCDI_POPULATE_DWORD_1(inbuf, FILTER_OP_IN_TX_DEST, + FILTER_OP_IN_TX_DEST_MAC, 1); + else + MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST, + MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT); + MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DOMAIN, 0); + + MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE, + spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ? 0 : + efx_rx_queue_id_internal(efx, spec->dmaq_id)); + MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE, + (flags & EFX_FILTER_FLAG_RX_RSS) ? 
+ MC_CMD_FILTER_OP_IN_RX_MODE_RSS : + MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE); + if (flags & EFX_FILTER_FLAG_RX_RSS) + MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_CONTEXT, ctx->context_id); +} + +static int efx_mcdi_filter_push(struct efx_nic *efx, + const struct efx_filter_spec *spec, u64 *handle, + struct efx_rss_context *ctx, + const struct efx_vport *vpx, bool replacing) +{ + struct efx_mcdi_filter_table *table = efx->filter_state; + MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_EXT_OUT_LEN); + size_t outlen; + int rc; + + if (!table->push_filters) { + *handle = EFX_MCDI_FILTER_ID_INVALID; + return 0; + } + + efx_mcdi_filter_push_prep(efx, spec, inbuf, *handle, ctx, vpx, replacing); + + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc && spec->priority != EFX_FILTER_PRI_HINT) + efx_mcdi_display_error(efx, MC_CMD_FILTER_OP, sizeof(inbuf), + outbuf, outlen, rc); + if (rc == 0) + *handle = MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE); +#ifdef EFX_NOT_UPSTREAM + /* Returning EBUSY was originally done to match Falcon/Siena behaviour. + * This code is also called by Onload, so this is now kept to + * keep that ABI the same. + */ +#endif + if (rc == -ENOSPC) + rc = -EBUSY; + return rc; +} + +static u32 efx_mcdi_filter_mcdi_flags_from_spec(const struct efx_filter_spec *spec) +{ + unsigned int match_flags = spec->match_flags; + u32 mcdi_flags = 0; + enum efx_encap_type encap_type = + efx_filter_get_encap_type(spec); + unsigned uc_match, mc_match; + +#define MAP_FILTER_TO_MCDI_FLAG(gen_flag, mcdi_field, encap) { \ + unsigned int old_match_flags = match_flags; \ + match_flags &= ~EFX_FILTER_MATCH_ ## gen_flag; \ + if (match_flags != old_match_flags) \ + mcdi_flags |= \ + (1 << ((encap) ? 
\ + MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ ## \ + mcdi_field ## _LBN : \ + MC_CMD_FILTER_OP_EXT_IN_MATCH_ ## \ + mcdi_field ## _LBN)); \ + } + /* inner or outer based on encap type */ + MAP_FILTER_TO_MCDI_FLAG(REM_HOST, SRC_IP, encap_type); + MAP_FILTER_TO_MCDI_FLAG(LOC_HOST, DST_IP, encap_type); + MAP_FILTER_TO_MCDI_FLAG(REM_MAC, SRC_MAC, encap_type); + MAP_FILTER_TO_MCDI_FLAG(REM_PORT, SRC_PORT, encap_type); + MAP_FILTER_TO_MCDI_FLAG(LOC_MAC, DST_MAC, encap_type); + MAP_FILTER_TO_MCDI_FLAG(LOC_PORT, DST_PORT, encap_type); + MAP_FILTER_TO_MCDI_FLAG(ETHER_TYPE, ETHER_TYPE, encap_type); + MAP_FILTER_TO_MCDI_FLAG(IP_PROTO, IP_PROTO, encap_type); + /* always outer */ + MAP_FILTER_TO_MCDI_FLAG(INNER_VID, INNER_VLAN, false); + MAP_FILTER_TO_MCDI_FLAG(OUTER_VID, OUTER_VLAN, false); +#undef MAP_FILTER_TO_MCDI_FLAG + /* special handling for encap type/tni, and mismatch */ + if (encap_type) { + if (match_flags & EFX_FILTER_MATCH_ENCAP_TNI) { + match_flags &= ~EFX_FILTER_MATCH_ENCAP_TNI; + mcdi_flags |= (1 << + MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID_LBN); + } + match_flags &= ~EFX_FILTER_MATCH_ENCAP_TYPE; + mcdi_flags |= + (1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN); + mcdi_flags |= (1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN); + + if (match_flags & EFX_FILTER_MATCH_OUTER_LOC_MAC) { + match_flags &= ~EFX_FILTER_MATCH_OUTER_LOC_MAC; + mcdi_flags |= (1 << + MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC_LBN); + } + + uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN; + mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN; + } else { + uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN; + mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN; + } + if (match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) { + match_flags &= ~EFX_FILTER_MATCH_LOC_MAC_IG; + mcdi_flags |= + is_multicast_ether_addr(spec->loc_mac) ? + 1 << mc_match : + 1 << uc_match; + } + + /* Did we map them all? 
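+ * As a worked example: a spec matching only LOC_MAC and OUTER_VID
+ * should by now have had both bits cleared from match_flags (leaving
+ * zero) and the DST_MAC and OUTER_VLAN bits set in mcdi_flags; any
+ * residue in match_flags means the spec asked for a match type that
+ * cannot be expressed in MCDI.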
*/ + WARN_ON(match_flags); + + return mcdi_flags; +} + +static int efx_mcdi_filter_pri(struct efx_mcdi_filter_table *table, + const struct efx_filter_spec *spec) +{ + u32 mcdi_flags = efx_mcdi_filter_mcdi_flags_from_spec(spec); + unsigned int match_pri; + + for (match_pri = 0; + match_pri < table->rx_match_count; + match_pri++) + if (table->rx_match_mcdi_flags[match_pri] == mcdi_flags) + return match_pri; + + return -EPROTONOSUPPORT; +} + +static int efx_filter_sanity_check(const struct efx_filter_spec *spec) +{ + /* if ipproto or hosts were specified then only allow the supported + * ether types + */ + if (((spec->match_flags & EFX_FILTER_MATCH_IP_PROTO) || + (spec->match_flags & EFX_FILTER_MATCH_REM_HOST) || + (spec->match_flags & EFX_FILTER_MATCH_LOC_HOST)) && + (!(spec->match_flags & EFX_FILTER_MATCH_ETHER_TYPE) || + ((spec->ether_type != htons(ETH_P_IP) && + spec->ether_type != htons(ETH_P_IPV6))))) + return -EINVAL; + + /* if ports were specified then only allow the supported ip protos */ + if (((spec->match_flags & EFX_FILTER_MATCH_LOC_PORT) || + (spec->match_flags & EFX_FILTER_MATCH_REM_PORT)) && + (!(spec->match_flags & EFX_FILTER_MATCH_IP_PROTO) || + (spec->ip_proto != IPPROTO_TCP && spec->ip_proto != IPPROTO_UDP))) + return -EINVAL; + + return 0; +} + +static s32 efx_mcdi_filter_insert_locked(struct efx_nic *efx, + const struct efx_filter_spec *spec, + bool replace_equal) +{ + struct efx_mcdi_filter_table *table; + DECLARE_BITMAP(mc_rem_map, EFX_MCDI_FILTER_SEARCH_LIMIT); + struct efx_filter_spec *saved_spec; + struct efx_rss_context *ctx = NULL; + unsigned int match_pri, hash; + struct efx_vport *vpx = NULL; + bool vport_locked = false; + unsigned int priv_flags; + bool rss_locked = false; + bool replacing = false; + unsigned int depth, i; + int ins_index = -1; + DEFINE_WAIT(wait); + bool is_mc_recip; + s32 rc; + + WARN_ON(!rwsem_is_locked(&efx->filter_sem)); + table = efx->filter_state; + if (!table || !table->entry) + return -ENETDOWN; + down_write(&table->lock); + + /* Support only RX or RX+TX filters. */ + if ((spec->flags & EFX_FILTER_FLAG_RX) == 0) { + rc = -EINVAL; + goto out_unlock; + } + /* TX and loopback are mutually exclusive */ + if ((spec->flags & EFX_FILTER_FLAG_TX) && + (spec->flags & EFX_FILTER_FLAG_LOOPBACK)) { + rc = -EINVAL; + goto out_unlock; + } + + rc = efx_filter_sanity_check(spec); + if (rc) + goto out_unlock; + + rc = efx_mcdi_filter_pri(table, spec); + if (rc < 0) + goto out_unlock; + match_pri = rc; + + hash = efx_filter_spec_hash(spec); + is_mc_recip = efx_filter_is_mc_recipient(spec); + if (is_mc_recip) + bitmap_zero(mc_rem_map, EFX_MCDI_FILTER_SEARCH_LIMIT); + + if (spec->flags & EFX_FILTER_FLAG_RX_RSS) { + mutex_lock(&efx->rss_lock); + rss_locked = true; + if (spec->rss_context) + ctx = efx_find_rss_context_entry(efx, spec->rss_context); + else + ctx = &efx->rss_context; + if (!ctx) { + rc = -ENOENT; + goto out_unlock; + } + if (ctx->context_id == EFX_MCDI_RSS_CONTEXT_INVALID) { + rc = -EOPNOTSUPP; + goto out_unlock; + } + } + + if (spec->flags & EFX_FILTER_FLAG_VPORT_ID) { + mutex_lock(&efx->vport_lock); + vport_locked = true; + if (spec->vport_id == 0) + vpx = &efx->vport; + else + vpx = efx_find_vport_entry(efx, spec->vport_id); + if (!vpx) { + rc = -ENOENT; + goto out_unlock; + } + if (vpx->vport_id == EVB_PORT_ID_NULL) { + rc = -EOPNOTSUPP; + goto out_unlock; + } + } + + /* Find any existing filters with the same match tuple or + * else a free slot to insert at. 
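+ * The table is probed open-address style: starting from
+ * hash = efx_filter_spec_hash(spec), slot (hash + depth) masked to
+ * the table size is examined for at most
+ * EFX_MCDI_FILTER_SEARCH_LIMIT steps, remembering the first empty
+ * slot as the insertion candidate and noting lower-priority
+ * multicast matches in mc_rem_map for removal once the insert
+ * succeeds.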
+ */ +#ifdef EFX_NOT_UPSTREAM +#if IS_MODULE(CONFIG_SFC_DRIVERLINK) + if (spec->priority <= EFX_FILTER_PRI_AUTO && + table->kernel_blocked[is_mc_recip ? + EFX_DL_FILTER_BLOCK_KERNEL_MCAST : + EFX_DL_FILTER_BLOCK_KERNEL_UCAST]) { + rc = -EPERM; + goto out_unlock; + } +#endif +#endif + + for (depth = 1; depth < EFX_MCDI_FILTER_SEARCH_LIMIT; depth++) { + i = (hash + depth) & (EFX_MCDI_FILTER_TBL_ROWS - 1); + saved_spec = efx_mcdi_filter_entry_spec(table, i); + + if (!saved_spec) { + if (ins_index < 0) + ins_index = i; + } else if (efx_filter_spec_equal(spec, saved_spec)) { + if (spec->priority < saved_spec->priority && + spec->priority != EFX_FILTER_PRI_AUTO) { + rc = -EPERM; + goto out_unlock; + } + if (!is_mc_recip) { + /* This is the only one */ + if (spec->priority == + saved_spec->priority && + !replace_equal) { + rc = -EEXIST; + goto out_unlock; + } + ins_index = i; + break; + } else if (spec->priority > + saved_spec->priority || + (spec->priority == + saved_spec->priority && + replace_equal) || + spec->priority == + EFX_FILTER_PRI_AUTO) { + if (ins_index < 0) + ins_index = i; + else + __set_bit(depth, mc_rem_map); + } + } + } + + /* Once we reach the maximum search depth, use the first suitable + * slot, or return -EBUSY if there was none + */ + if (ins_index < 0) { + rc = -EBUSY; + goto out_unlock; + } + + /* Create a software table entry if necessary. */ + saved_spec = efx_mcdi_filter_entry_spec(table, ins_index); + if (saved_spec) { + if (spec->priority == EFX_FILTER_PRI_AUTO && + saved_spec->priority >= EFX_FILTER_PRI_AUTO) { + /* Just make sure it won't be removed */ + if (saved_spec->priority > EFX_FILTER_PRI_AUTO) + saved_spec->flags |= EFX_FILTER_FLAG_RX_OVER_AUTO; + table->entry[ins_index].spec &= + ~EFX_MCDI_FILTER_FLAG_AUTO_OLD; + rc = ins_index; + goto out_unlock; + } + replacing = true; + priv_flags = efx_mcdi_filter_entry_flags(table, ins_index); + } else { + saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC); + if (!saved_spec) { + rc = -ENOMEM; + goto out_unlock; + } + *saved_spec = *spec; + priv_flags = 0; + } + efx_mcdi_filter_set_entry(efx, ins_index, saved_spec, priv_flags); + + /* Actually insert the filter on the HW */ + rc = efx_mcdi_filter_push(efx, spec, &table->entry[ins_index].handle, + ctx, vpx, replacing); + + /* Finalise the software table entry */ + if (rc == 0) { + if (replacing) { + /* Update the fields that may differ */ + if (saved_spec->priority == EFX_FILTER_PRI_AUTO) + saved_spec->flags |= + EFX_FILTER_FLAG_RX_OVER_AUTO; + saved_spec->priority = spec->priority; + saved_spec->flags &= EFX_FILTER_FLAG_RX_OVER_AUTO; + saved_spec->flags |= spec->flags; + saved_spec->rss_context = spec->rss_context; + saved_spec->dmaq_id = spec->dmaq_id; + saved_spec->stack_id = spec->stack_id; + saved_spec->vport_id = spec->vport_id; + } + } else if (!replacing) { + kfree(saved_spec); + saved_spec = NULL; + } else { + /* We failed to replace, so the old filter is still present. + * Roll back the software table to reflect this. In fact the + * efx_mcdi_filter_set_entry() call below will do the right + * thing, so nothing extra is needed here. 
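+ * (saved_spec still points at the pre-existing spec and priv_flags
+ * were read from the old entry before the push, so re-storing the
+ * same values leaves the software table unchanged.)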
+ */ + } + efx_mcdi_filter_set_entry(efx, ins_index, saved_spec, priv_flags); + + /* Remove and finalise entries for lower-priority multicast + * recipients + */ + if (is_mc_recip) { + MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN); + unsigned int depth, i; + + memset(inbuf, 0, sizeof(inbuf)); + + for (depth = 0; depth < EFX_MCDI_FILTER_SEARCH_LIMIT; depth++) { + if (!test_bit(depth, mc_rem_map)) + continue; + + i = (hash + depth) & (EFX_MCDI_FILTER_TBL_ROWS - 1); + saved_spec = efx_mcdi_filter_entry_spec(table, i); + priv_flags = efx_mcdi_filter_entry_flags(table, i); + + if (rc == 0) { + MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, + MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE); + MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, + table->entry[i].handle); + rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, + inbuf, sizeof(inbuf), + NULL, 0, NULL); + } + + if (rc == 0) { + table->entry[i].handle = + EFX_MCDI_FILTER_ID_INVALID; + kfree(saved_spec); + saved_spec = NULL; + priv_flags = 0; + } + efx_mcdi_filter_set_entry(efx, i, saved_spec, + priv_flags); + } + } + + /* If successful, return the inserted filter ID */ + if (rc == 0) + rc = efx_mcdi_filter_make_filter_id(match_pri, ins_index); + +out_unlock: + if (vport_locked) + mutex_unlock(&efx->vport_lock); + if (rss_locked) + mutex_unlock(&efx->rss_lock); + up_write(&table->lock); + return rc; +} + +s32 efx_mcdi_filter_insert(struct efx_nic *efx, + const struct efx_filter_spec *spec, + bool replace_equal) +{ + s32 ret; + + down_read(&efx->filter_sem); + ret = efx_mcdi_filter_insert_locked(efx, spec, replace_equal); + up_read(&efx->filter_sem); + + return ret; +} + +/* Remove a filter. + * If !by_index, remove by ID + * If by_index, remove by index + * Filter ID may come from userland and must be range-checked. + * Caller must hold efx->filter_sem for read, and efx->filter_state->lock + * for write. + */ +static int efx_mcdi_filter_remove_internal(struct efx_nic *efx, + unsigned int priority_mask, + u32 filter_id, bool by_index) +{ + unsigned int filter_idx = efx_mcdi_filter_get_unsafe_id(efx, filter_id); + MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN); + struct efx_mcdi_filter_table *table = efx->filter_state; + struct efx_filter_spec *spec; + DEFINE_WAIT(wait); + int rc = 0; + + if (!table || !table->entry) + return -ENOENT; + + spec = efx_mcdi_filter_entry_spec(table, filter_idx); + if (!spec || + (!by_index && + efx_mcdi_filter_pri(table, spec) != + efx_mcdi_filter_get_unsafe_pri(filter_id))) + return -ENOENT; + + if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO && + priority_mask == (1U << EFX_FILTER_PRI_AUTO)) { + /* Just remove flags */ + spec->flags &= ~EFX_FILTER_FLAG_RX_OVER_AUTO; + table->entry[filter_idx].spec &= ~EFX_MCDI_FILTER_FLAG_AUTO_OLD; + return 0; + } + + if (!(priority_mask & (1U << spec->priority))) + return -ENOENT; + + if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) { + /* Reset to an automatic filter */ + + struct efx_filter_spec new_spec = *spec; + + new_spec.priority = EFX_FILTER_PRI_AUTO; + new_spec.flags = (EFX_FILTER_FLAG_RX | + (efx_rss_active(&efx->rss_context) ? + EFX_FILTER_FLAG_RX_RSS : 0)); + new_spec.dmaq_id = 0; + new_spec.rss_context = 0; + new_spec.vport_id = 0; + new_spec.stack_id = 0; + + rc = efx_mcdi_filter_push(efx, &new_spec, + &table->entry[filter_idx].handle, + &efx->rss_context, &efx->vport, + true); + + if (rc == 0) + *spec = new_spec; + } else { + /* Really remove the filter */ + + MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, + efx_mcdi_filter_is_exclusive(spec) ? 
+ MC_CMD_FILTER_OP_IN_OP_REMOVE : + MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE); + MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, + table->entry[filter_idx].handle); + if (table->push_filters) + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP, + inbuf, sizeof(inbuf), NULL, 0, + NULL); + + if ((rc == 0) || (rc == -ENOENT) || (rc == -EIO) || + (efx->reset_pending)) { + /* Filter removed OK, it didn't actually exist, or + * the MC is resetting. + */ + kfree(spec); + efx_mcdi_filter_set_entry(efx, filter_idx, NULL, 0); + table->entry[filter_idx].handle = + EFX_MCDI_FILTER_ID_INVALID; + } else { + efx_mcdi_display_error(efx, MC_CMD_FILTER_OP, + MC_CMD_FILTER_OP_IN_LEN, NULL, 0, rc); + } + } + + return rc; +} + +int efx_mcdi_filter_remove_safe(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 filter_id) +{ + struct efx_mcdi_filter_table *table; + int rc; + + down_read(&efx->filter_sem); + table = efx->filter_state; + if (table) { + down_write(&table->lock); + rc = efx_mcdi_filter_remove_internal(efx, 1U << priority, + filter_id, false); + up_write(&table->lock); + } else { + rc = -ENETDOWN; + } + up_read(&efx->filter_sem); + return rc; +} + +/* Caller must hold efx->filter_sem for read */ +static int efx_mcdi_filter_remove_unsafe(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 filter_id) +{ + struct efx_mcdi_filter_table *table = efx->filter_state; + int rc; + + down_write(&table->lock); + rc = efx_mcdi_filter_remove_internal(efx, 1U << priority, filter_id, + true); + up_write(&table->lock); + return rc; +} + +int efx_mcdi_filter_get_safe(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 filter_id, struct efx_filter_spec *spec) +{ + unsigned int filter_idx = efx_mcdi_filter_get_unsafe_id(efx, filter_id); + struct efx_mcdi_filter_table *table; + const struct efx_filter_spec *saved_spec; + int rc; + + down_read(&efx->filter_sem); + table = efx->filter_state; + if (!table || !table->entry) { + up_read(&efx->filter_sem); + return -ENETDOWN; + } + + down_read(&table->lock); + saved_spec = efx_mcdi_filter_entry_spec(table, filter_idx); + if (saved_spec && saved_spec->priority == priority && + efx_mcdi_filter_pri(table, saved_spec) == + efx_mcdi_filter_get_unsafe_pri(filter_id)) { + *spec = *saved_spec; + rc = 0; + } else { + rc = -ENOENT; + } + up_read(&table->lock); + up_read(&efx->filter_sem); + return rc; +} + +static int efx_mcdi_filter_insert_addr_list(struct efx_nic *efx, + struct efx_mcdi_filter_vlan *vlan, + bool multicast, bool rollback) +{ + struct efx_mcdi_filter_table *table = efx->filter_state; + struct efx_mcdi_dev_addr *addr_list; + u16 *ids; + enum efx_filter_flags filter_flags; + struct efx_filter_spec spec; + u8 baddr[ETH_ALEN]; + unsigned int i, j; + int addr_count; + int qid = 0; + int rc; + + WARN_ON(!mutex_is_locked(&efx->mac_lock)); + + if (efx->state == STATE_VDPA) + qid = EFX_VDPA_BASE_RX_QID; + + if (multicast) { + addr_list = table->dev_mc_list; + addr_count = table->dev_mc_count; + ids = vlan->mc; + } else { + addr_list = table->dev_uc_list; + addr_count = table->dev_uc_count; + ids = vlan->uc; + } + + filter_flags = efx_rss_active(&efx->rss_context) ? 
+ EFX_FILTER_FLAG_RX_RSS : 0; + + /* Insert/renew filters */ + for (i = 0; i < addr_count; i++) { + EFX_WARN_ON_PARANOID(ids[i] != EFX_MCDI_FILTER_ID_INVALID); + efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, qid); + efx_filter_set_eth_local(&spec, vlan->vid, addr_list[i].addr); + rc = efx_mcdi_filter_insert_locked(efx, &spec, true); + if (rc < 0) { + if (rollback) { + netif_info(efx, drv, efx->net_dev, + "efx_mcdi_filter_insert failed rc=%d\n", + rc); + /* Fall back to promiscuous */ + for (j = 0; j < i; j++) { + if (ids[j] == EFX_MCDI_FILTER_ID_INVALID) + continue; + efx_mcdi_filter_remove_unsafe( + efx, EFX_FILTER_PRI_AUTO, + ids[j]); + ids[j] = EFX_MCDI_FILTER_ID_INVALID; + } + return rc; + } + /* Keep invalid ID and continue */ + } else { + ids[i] = efx_mcdi_filter_get_unsafe_id(efx, rc); + } + } + +#ifdef EFX_NOT_UPSTREAM +#if IS_MODULE(CONFIG_SFC_DRIVERLINK) + if (!table->kernel_blocked[EFX_DL_FILTER_BLOCK_KERNEL_MCAST]) +#endif +#endif + if (multicast && rollback) { + /* Also need an Ethernet broadcast filter */ + EFX_WARN_ON_PARANOID(vlan->default_filters[EFX_MCDI_BCAST] != + EFX_MCDI_FILTER_ID_INVALID); + efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, qid); + eth_broadcast_addr(baddr); + efx_filter_set_eth_local(&spec, vlan->vid, baddr); + rc = efx_mcdi_filter_insert_locked(efx, &spec, true); + if (rc < 0) { + netif_warn(efx, drv, efx->net_dev, + "Broadcast filter insert failed rc=%d\n", rc); + /* Fall back to promiscuous */ + for (j = 0; j < i; j++) { + if (ids[j] == EFX_MCDI_FILTER_ID_INVALID) + continue; + efx_mcdi_filter_remove_unsafe( + efx, EFX_FILTER_PRI_AUTO, + ids[j]); + ids[j] = EFX_MCDI_FILTER_ID_INVALID; + } + return rc; + } else { + vlan->default_filters[EFX_MCDI_BCAST] = + efx_mcdi_filter_get_unsafe_id(efx, rc); + } + } + + return 0; +} + +static int efx_mcdi_filter_insert_def(struct efx_nic *efx, + struct efx_mcdi_filter_vlan *vlan, + enum efx_encap_type encap_type, + bool multicast, bool rollback) +{ + struct efx_mcdi_filter_table *table = efx->filter_state; + enum efx_filter_flags filter_flags; + struct efx_filter_spec spec; + u8 baddr[ETH_ALEN]; + int rc, qid = 0; + u16 *id; + +#ifdef EFX_NOT_UPSTREAM +#if IS_MODULE(CONFIG_SFC_DRIVERLINK) + if (table->kernel_blocked[multicast ? EFX_DL_FILTER_BLOCK_KERNEL_MCAST : + EFX_DL_FILTER_BLOCK_KERNEL_UCAST]) + return 0; +#endif +#endif + + filter_flags = efx_rss_active(&efx->rss_context) ? + EFX_FILTER_FLAG_RX_RSS : 0; + + if (efx->state == STATE_VDPA) + qid = EFX_VDPA_BASE_RX_QID; + + efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, qid); + + if (multicast) + efx_filter_set_mc_def(&spec); + else + efx_filter_set_uc_def(&spec); + + if (encap_type) { + if (table->encap_supported) + efx_filter_set_encap_type(&spec, encap_type); + else + /* don't insert encap filters on non-supporting + * platforms. ID will be left as INVALID. + */ + return 0; + } + + if (vlan->vid != EFX_FILTER_VID_UNSPEC) + efx_filter_set_eth_local(&spec, vlan->vid, NULL); + + rc = efx_mcdi_filter_insert_locked(efx, &spec, true); + if (rc < 0) { + const char *um = multicast ? 
"Multicast" : "Unicast"; + const char *encap_name = ""; + const char *encap_ipv = ""; + + if ((encap_type & EFX_ENCAP_TYPES_MASK) == + EFX_ENCAP_TYPE_VXLAN) + encap_name = "VXLAN "; + else if ((encap_type & EFX_ENCAP_TYPES_MASK) == + EFX_ENCAP_TYPE_NVGRE) + encap_name = "NVGRE "; + else if ((encap_type & EFX_ENCAP_TYPES_MASK) == + EFX_ENCAP_TYPE_GENEVE) + encap_name = "GENEVE "; + if (encap_type & EFX_ENCAP_FLAG_IPV6) + encap_ipv = "IPv6 "; + else if (encap_type) + encap_ipv = "IPv4 "; + + /* unprivileged functions can't insert mismatch filters + * for encapsulated or unicast traffic, so downgrade + * those warnings to debug. + */ + netif_cond_dbg(efx, drv, efx->net_dev, + rc == -EPERM && (encap_type || !multicast), warn, + "%s%s%s mismatch filter insert failed rc=%d\n", + encap_name, encap_ipv, um, rc); + } else if (multicast) { + /* mapping from encap types to default filter IDs (multicast) */ + static enum efx_mcdi_filter_default_filters map[] = { + [EFX_ENCAP_TYPE_NONE] = EFX_MCDI_MCDEF, + [EFX_ENCAP_TYPE_VXLAN] = EFX_MCDI_VXLAN4_MCDEF, + [EFX_ENCAP_TYPE_NVGRE] = EFX_MCDI_NVGRE4_MCDEF, + [EFX_ENCAP_TYPE_GENEVE] = EFX_MCDI_GENEVE4_MCDEF, + [EFX_ENCAP_TYPE_VXLAN | EFX_ENCAP_FLAG_IPV6] = + EFX_MCDI_VXLAN6_MCDEF, + [EFX_ENCAP_TYPE_NVGRE | EFX_ENCAP_FLAG_IPV6] = + EFX_MCDI_NVGRE6_MCDEF, + [EFX_ENCAP_TYPE_GENEVE | EFX_ENCAP_FLAG_IPV6] = + EFX_MCDI_GENEVE6_MCDEF, + }; + + /* quick bounds check (BCAST result impossible) */ + BUILD_BUG_ON(EFX_MCDI_BCAST != 0); + if (encap_type >= ARRAY_SIZE(map) || map[encap_type] == 0) { + WARN_ON(1); + return -EINVAL; + } + /* then follow map */ + id = &vlan->default_filters[map[encap_type]]; + + EFX_WARN_ON_PARANOID(*id != EFX_MCDI_FILTER_ID_INVALID); + *id = efx_mcdi_filter_get_unsafe_id(efx, rc); + if (!table->mc_chaining && !encap_type) { + /* Also need an Ethernet broadcast filter */ + efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, + filter_flags, qid); + eth_broadcast_addr(baddr); + efx_filter_set_eth_local(&spec, vlan->vid, baddr); + rc = efx_mcdi_filter_insert_locked(efx, &spec, true); + if (rc < 0) { + netif_warn(efx, drv, efx->net_dev, + "Broadcast filter insert failed rc=%d\n", + rc); + if (rollback) { + /* Roll back the mc_def filter */ + efx_mcdi_filter_remove_unsafe( + efx, EFX_FILTER_PRI_AUTO, + *id); + *id = EFX_MCDI_FILTER_ID_INVALID; + return rc; + } + } else { + EFX_WARN_ON_PARANOID( + vlan->default_filters[EFX_MCDI_BCAST] != + EFX_MCDI_FILTER_ID_INVALID); + vlan->default_filters[EFX_MCDI_BCAST] = + efx_mcdi_filter_get_unsafe_id(efx, rc); + } + } + rc = 0; + } else { + /* mapping from encap types to default filter IDs (unicast) */ + static enum efx_mcdi_filter_default_filters map[] = { + [EFX_ENCAP_TYPE_NONE] = EFX_MCDI_UCDEF, + [EFX_ENCAP_TYPE_VXLAN] = EFX_MCDI_VXLAN4_UCDEF, + [EFX_ENCAP_TYPE_NVGRE] = EFX_MCDI_NVGRE4_UCDEF, + [EFX_ENCAP_TYPE_GENEVE] = EFX_MCDI_GENEVE4_UCDEF, + [EFX_ENCAP_TYPE_VXLAN | EFX_ENCAP_FLAG_IPV6] = + EFX_MCDI_VXLAN6_UCDEF, + [EFX_ENCAP_TYPE_NVGRE | EFX_ENCAP_FLAG_IPV6] = + EFX_MCDI_NVGRE6_UCDEF, + [EFX_ENCAP_TYPE_GENEVE | EFX_ENCAP_FLAG_IPV6] = + EFX_MCDI_GENEVE6_UCDEF, + }; + + /* quick bounds check (BCAST result impossible) */ + BUILD_BUG_ON(EFX_MCDI_BCAST != 0); + if (encap_type >= ARRAY_SIZE(map) || map[encap_type] == 0) { + WARN_ON(1); + return -EINVAL; + } + /* then follow map */ + id = &vlan->default_filters[map[encap_type]]; + EFX_WARN_ON_PARANOID(*id != EFX_MCDI_FILTER_ID_INVALID); + *id = rc; + rc = 0; + } + return rc; +} + +/* Caller must hold efx->filter_sem for read if race against + * 
efx_mcdi_filter_table_down() is possible + */ +static void efx_mcdi_filter_vlan_sync_rx_mode(struct efx_nic *efx, + struct efx_mcdi_filter_vlan *vlan) +{ + struct efx_mcdi_filter_table *table = efx->filter_state; + unsigned int n_filters; + + WARN_ON(!mutex_is_locked(&efx->mac_lock)); + + /* Do not install unspecified VID if VLAN filtering is enabled. + * Do not install all specified VIDs if VLAN filtering is disabled. + */ + if ((vlan->vid == EFX_FILTER_VID_UNSPEC) == table->vlan_filter) + return; + + /* Insert/renew unicast filters */ + if (table->uc_promisc) { + efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NONE, + false, false); + efx_mcdi_filter_insert_addr_list(efx, vlan, false, false); + } else { + /* If any of the filters failed to insert, fall back to + * promiscuous mode - add in the uc_def filter. But keep + * our individual unicast filters. + */ + if (efx_mcdi_filter_insert_addr_list(efx, vlan, false, false)) + efx_mcdi_filter_insert_def(efx, vlan, + EFX_ENCAP_TYPE_NONE, + false, false); + } + efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN, + false, false); + efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN | + EFX_ENCAP_FLAG_IPV6, + false, false); + efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE, + false, false); + efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE | + EFX_ENCAP_FLAG_IPV6, + false, false); + efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE, + false, false); + efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE | + EFX_ENCAP_FLAG_IPV6, + false, false); + + /* Insert/renew multicast filters */ + /* If changing promiscuous state with cascaded multicast filters, remove + * old filters first, so that packets are dropped rather than duplicated + */ + if (table->mc_chaining && + table->mc_promisc_last != table->mc_promisc) + efx_mcdi_filter_remove_old(efx); + if (table->mc_promisc) { + if (table->mc_chaining) + { + /* If we failed to insert promiscuous filters, rollback and + * fall back to individual multicast filters + */ + if (efx_mcdi_filter_insert_def(efx, vlan, + EFX_ENCAP_TYPE_NONE, + true, true)) { + /* Changing promisc state, so remove old filters */ + efx_mcdi_filter_remove_old(efx); + efx_mcdi_filter_insert_addr_list(efx, vlan, + true, false); + } + } else { + /* If we failed to insert promiscuous filters, don't + * rollback. Regardless, also insert the mc_list, + * unless it's incomplete due to overflow + */ + efx_mcdi_filter_insert_def(efx, vlan, + EFX_ENCAP_TYPE_NONE, + true, false); + if (!table->mc_overflow) + efx_mcdi_filter_insert_addr_list(efx, vlan, + true, false); + } + } else { + /* If any filters failed to insert, rollback and fall back to + * promiscuous mode - mc_def filter and maybe broadcast. If + * that fails, roll back again and insert as many of our + * individual multicast filters as we can. 
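+ * In other words the fallback order is: the full mc_list with
+ * rollback; on failure, the mc_def filter (plus broadcast where
+ * applicable); and as a last resort the mc_list again without
+ * rollback, keeping whatever subset does insert.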
+ */ + if (efx_mcdi_filter_insert_addr_list(efx, vlan, true, true)) { + /* Changing promisc state, so remove old filters */ + if (table->mc_chaining) + efx_mcdi_filter_remove_old(efx); + if (efx_mcdi_filter_insert_def(efx, vlan, + EFX_ENCAP_TYPE_NONE, + true, true)) + efx_mcdi_filter_insert_addr_list(efx, vlan, + true, false); + } + } + efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN, + true, false); + efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN | + EFX_ENCAP_FLAG_IPV6, + true, false); + efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE, + true, false); + efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE | + EFX_ENCAP_FLAG_IPV6, + true, false); + efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE, + true, false); + efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE | + EFX_ENCAP_FLAG_IPV6, + true, false); + + n_filters = efx_mcdi_filter_vlan_count_filters(efx, vlan); +#if defined(EFX_NOT_UPSTREAM) && IS_MODULE(CONFIG_SFC_DRIVERLINK) + if (n_filters == 0 && vlan->warn_on_zero_filters && + !(table->kernel_blocked[EFX_DL_FILTER_BLOCK_KERNEL_UCAST] && + table->kernel_blocked[EFX_DL_FILTER_BLOCK_KERNEL_MCAST])) { +#else + if (n_filters == 0 && vlan->warn_on_zero_filters) { +#endif + netif_warn(efx, drv, efx->net_dev, + "Cannot install VLAN %u filters\n", + vlan->vid); + netif_warn(efx, drv, efx->net_dev, + "Maybe it is denied by hypervisor or network access control\n"); + vlan->warn_on_zero_filters = false; + } else if (n_filters != 0 && !vlan->warn_on_zero_filters) { + netif_warn(efx, drv, efx->net_dev, + "VLAN %u is now allowed\n", + vlan->vid); + vlan->warn_on_zero_filters = true; + } +} + +int efx_mcdi_filter_clear_rx(struct efx_nic *efx, + enum efx_filter_priority priority) +{ + struct efx_mcdi_filter_table *table; + unsigned int priority_mask; + unsigned int i; + int rc = -ENETDOWN; + + priority_mask = (((1U << (priority + 1)) - 1) & + ~(1U << EFX_FILTER_PRI_AUTO)); + + down_read(&efx->filter_sem); + table = efx->filter_state; + down_write(&table->lock); + + for (i = 0; i < EFX_MCDI_FILTER_TBL_ROWS; i++) { + rc = efx_mcdi_filter_remove_internal(efx, priority_mask, + i, true); + if (rc && rc != -ENOENT) + break; + rc = 0; + } + + up_write(&table->lock); + up_read(&efx->filter_sem); + return rc; +} + +int efx_mcdi_filter_remove_all(struct efx_nic *efx, + enum efx_filter_priority priority) +{ + struct efx_mcdi_filter_table *table; + int rc = -ENETDOWN; + unsigned int i; + + down_read(&efx->filter_sem); + table = efx->filter_state; + down_write(&table->lock); + + for (i = 0; i < EFX_MCDI_FILTER_TBL_ROWS; i++) { + rc = efx_mcdi_filter_remove_internal(efx, priority, + i, true); + if (rc && rc != -ENOENT) + break; + rc = 0; + } + up_write(&table->lock); + up_read(&efx->filter_sem); + return rc; +} + +#ifdef EFX_NOT_UPSTREAM +int efx_mcdi_filter_redirect(struct efx_nic *efx, u32 filter_id, + u32 *rss_context, int rxq_i, int stack_id) +{ + struct efx_mcdi_filter_table *table; + struct efx_filter_spec *spec, new_spec; + struct efx_rss_context *ctx; + struct efx_vport *vpx; + unsigned int filter_idx = efx_mcdi_filter_get_unsafe_id(efx, filter_id); + DEFINE_WAIT(wait); + int rc; + + down_read(&efx->filter_sem); + table = efx->filter_state; + if (!table || !table->entry) { + rc = -ENETDOWN; + goto out; + } + down_write(&table->lock); + /* We only need the RSS lock if this is an RSS filter, but let's just + * take it anyway for simplicity's sake. Same goes for the vport lock. 
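+ * The resulting lock order (filter_sem read, then table->lock write,
+ * then rss_lock, then vport_lock) matches the order used in
+ * efx_mcdi_filter_insert_locked().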
+ */ + mutex_lock(&efx->rss_lock); + mutex_lock(&efx->vport_lock); + spec = efx_mcdi_filter_entry_spec(table, filter_idx); + if (!spec) { + rc = -ENOENT; + netdev_WARN(efx->net_dev, + "invalid filter_id: filter_id=%#x rxq_i=%u\n", + filter_id, rxq_i); + goto out_unlock; + } + + /* Try to redirect */ + new_spec = *spec; + new_spec.dmaq_id = rxq_i; + new_spec.stack_id = stack_id; + if (rss_context) { + new_spec.rss_context = *rss_context; + new_spec.flags |= EFX_FILTER_FLAG_RX_RSS; + } else { + new_spec.flags &= ~EFX_FILTER_FLAG_RX_RSS; + } + if (new_spec.rss_context) + ctx = efx_find_rss_context_entry(efx, new_spec.rss_context); + else + ctx = &efx->rss_context; + if (new_spec.flags & EFX_FILTER_FLAG_RX_RSS) { + if (!ctx) { + rc = -ENOENT; + netdev_WARN(efx->net_dev, + "invalid rss_context %u: filter_id=%#x rxq_i=%u\n", + new_spec.rss_context, filter_id, rxq_i); + goto out_unlock; + } + if (ctx->context_id == EFX_MCDI_RSS_CONTEXT_INVALID) { + rc = -EOPNOTSUPP; + goto out_check; + } + } + if (new_spec.vport_id) + vpx = efx_find_vport_entry(efx, new_spec.vport_id); + else + vpx = &efx->vport; + if (new_spec.flags & EFX_FILTER_FLAG_VPORT_ID) { + if (!vpx) { + rc = -ENOENT; + netdev_WARN(efx->net_dev, + "invalid vport %u: filter_id=%#x rxq_i=%u\n", + new_spec.vport_id, filter_id, rxq_i); + goto out_unlock; + } + if (vpx->vport_id == EVB_PORT_ID_NULL) { + rc = -EOPNOTSUPP; + goto out_check; + } + } + rc = efx_mcdi_filter_push(efx, &new_spec, + &table->entry[filter_idx].handle, ctx, vpx, + true); +out_check: + if (rc && (rc != -ENETDOWN)) + netdev_WARN(efx->net_dev, + "failed to update filter: filter_id=%#x " + "filter_flags = %#x rxq_i=%u " + "stack_id = %d rc=%d \n", + filter_id, spec->flags, rxq_i, + new_spec.stack_id, rc); + if (!rc) + *spec = new_spec; +out_unlock: + mutex_unlock(&efx->vport_lock); + mutex_unlock(&efx->rss_lock); + up_write(&table->lock); +out: + up_read(&efx->filter_sem); + return rc; +} +#endif /* EFX_NOT_UPSTREAM */ + +u32 efx_mcdi_filter_count_rx_used(struct efx_nic *efx, + enum efx_filter_priority priority) +{ + struct efx_mcdi_filter_table *table; + struct efx_filter_spec *spec; + unsigned int filter_idx; + s32 count = 0; + + down_read(&efx->filter_sem); + table = efx->filter_state; + if (!table || !table->entry) { + up_read(&efx->filter_sem); + return -ENETDOWN; + } + + down_read(&table->lock); + + for (filter_idx = 0; + filter_idx < EFX_MCDI_FILTER_TBL_ROWS; + filter_idx++) { + spec = efx_mcdi_filter_entry_spec(table, filter_idx); + + if (spec && spec->priority == priority) + ++count; + } + up_read(&table->lock); + up_read(&efx->filter_sem); + return count; +} + +u32 efx_mcdi_filter_get_rx_id_limit(struct efx_nic *efx) +{ + struct efx_mcdi_filter_table *table = efx->filter_state; + + return table->rx_match_count * EFX_MCDI_FILTER_TBL_ROWS * 2; +} + +s32 efx_mcdi_filter_get_rx_ids(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 *buf, u32 size) +{ + struct efx_mcdi_filter_table *table; + struct efx_filter_spec *spec; + unsigned int filter_idx; + s32 count = 0; + + down_read(&efx->filter_sem); + table = efx->filter_state; + if (!table || !table->entry) { + up_read(&efx->filter_sem); + return -ENETDOWN; + } + down_read(&table->lock); + + for (filter_idx = 0; + filter_idx < EFX_MCDI_FILTER_TBL_ROWS; + filter_idx++) { + spec = efx_mcdi_filter_entry_spec(table, filter_idx); + if (spec && spec->priority == priority) { + if (count == size) { + count = -EMSGSIZE; + break; + } + buf[count++] = + efx_mcdi_filter_make_filter_id( + efx_mcdi_filter_pri(table, + 
spec), + filter_idx); + } + } + up_read(&table->lock); + up_read(&efx->filter_sem); + return count; +} + +static int efx_mcdi_filter_match_flags_from_mcdi(bool encap, u32 mcdi_flags) +{ + int match_flags = 0; + +#define MAP_FLAG(gen_flag, mcdi_field) do { \ + u32 old_mcdi_flags = mcdi_flags; \ + mcdi_flags &= ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ ## \ + mcdi_field ## _LBN); \ + if (mcdi_flags != old_mcdi_flags) \ + match_flags |= EFX_FILTER_MATCH_ ## gen_flag; \ + } while (0) + + if (encap) { + /* encap filters must specify encap type */ + match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE; + /* and imply ethertype and ip proto */ + mcdi_flags &= + ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN); + mcdi_flags &= + ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN); + /* VNI/VSID, VLAN and outer MAC refer to the outer packet */ + MAP_FLAG(ENCAP_TNI, VNI_OR_VSID); + MAP_FLAG(INNER_VID, INNER_VLAN); + MAP_FLAG(OUTER_VID, OUTER_VLAN); + MAP_FLAG(OUTER_LOC_MAC, DST_MAC); + /* everything else refers to the inner packet */ + MAP_FLAG(LOC_MAC_IG, IFRM_UNKNOWN_UCAST_DST); + MAP_FLAG(LOC_MAC_IG, IFRM_UNKNOWN_MCAST_DST); + MAP_FLAG(REM_HOST, IFRM_SRC_IP); + MAP_FLAG(LOC_HOST, IFRM_DST_IP); + MAP_FLAG(REM_MAC, IFRM_SRC_MAC); + MAP_FLAG(REM_PORT, IFRM_SRC_PORT); + MAP_FLAG(LOC_MAC, IFRM_DST_MAC); + MAP_FLAG(LOC_PORT, IFRM_DST_PORT); + MAP_FLAG(ETHER_TYPE, IFRM_ETHER_TYPE); + MAP_FLAG(IP_PROTO, IFRM_IP_PROTO); + } else { + MAP_FLAG(LOC_MAC_IG, UNKNOWN_UCAST_DST); + MAP_FLAG(LOC_MAC_IG, UNKNOWN_MCAST_DST); + MAP_FLAG(REM_HOST, SRC_IP); + MAP_FLAG(LOC_HOST, DST_IP); + MAP_FLAG(REM_MAC, SRC_MAC); + MAP_FLAG(REM_PORT, SRC_PORT); + MAP_FLAG(LOC_MAC, DST_MAC); + MAP_FLAG(LOC_PORT, DST_PORT); + MAP_FLAG(ETHER_TYPE, ETHER_TYPE); + MAP_FLAG(INNER_VID, INNER_VLAN); + MAP_FLAG(OUTER_VID, OUTER_VLAN); + MAP_FLAG(IP_PROTO, IP_PROTO); + } +#undef MAP_FLAG + + /* Did we map them all? 
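+	 * Any bit still set in mcdi_flags at this point is a match field
+	 * reported by firmware that this driver has no EFX_FILTER_MATCH_*
+	 * equivalent for, so the whole flag combination is reported as
+	 * unsupported.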
*/ + if (mcdi_flags) + return -EINVAL; + + return match_flags; +} + +void efx_mcdi_filter_uc_addr(struct efx_nic *efx, + const struct efx_mcdi_dev_addr *addr) +{ + struct efx_mcdi_filter_table *table = efx->filter_state; + + WARN_ON(!mutex_is_locked(&efx->mac_lock)); + + if (table->dev_uc_count >= EFX_MCDI_FILTER_DEV_UC_MAX) { + table->uc_promisc = true; + return; + } + + table->dev_uc_list[table->dev_uc_count++] = *addr; +} + +void efx_mcdi_filter_mc_addr(struct efx_nic *efx, + const struct efx_mcdi_dev_addr *addr) +{ + struct efx_mcdi_filter_table *table = efx->filter_state; + + WARN_ON(!mutex_is_locked(&efx->mac_lock)); + + if (table->dev_mc_count >= EFX_MCDI_FILTER_DEV_MC_MAX) { + table->mc_promisc = true; + table->mc_overflow = true; + return; + } + + table->dev_mc_list[table->dev_mc_count++] = *addr; +} + +static void efx_mcdi_filter_netdev_uc_addrs(struct efx_nic *efx, + bool *uc_promisc) +{ + struct net_device *net_dev = efx->net_dev; + struct efx_mcdi_dev_addr addr; + struct netdev_hw_addr *uc; + +#ifdef EFX_NOT_UPSTREAM +#if IS_MODULE(CONFIG_SFC_DRIVERLINK) + struct efx_mcdi_filter_table *table = efx->filter_state; + + if (table->kernel_blocked[EFX_DL_FILTER_BLOCK_KERNEL_UCAST]) + return; +#endif +#endif + + /* Copy/convert the address lists; add the primary station + * address and broadcast address + */ + *uc_promisc = !!(net_dev->flags & IFF_PROMISC); + + ether_addr_copy(addr.addr, net_dev->dev_addr); + efx_mcdi_filter_uc_addr(efx, &addr); + netdev_for_each_uc_addr(uc, net_dev) { + ether_addr_copy(addr.addr, uc->addr); + efx_mcdi_filter_uc_addr(efx, &addr); + } +} + +static void efx_mcdi_filter_netdev_mc_addrs(struct efx_nic *efx, + bool *mc_promisc) +{ + struct net_device *net_dev = efx->net_dev; + struct efx_mcdi_dev_addr addr; +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_NET_DEVICE_MC) + struct netdev_hw_addr *mc; +#else + struct dev_mc_list *mc; +#endif + +#ifdef EFX_NOT_UPSTREAM +#if IS_MODULE(CONFIG_SFC_DRIVERLINK) + struct efx_mcdi_filter_table *table = efx->filter_state; + + if (table->kernel_blocked[EFX_DL_FILTER_BLOCK_KERNEL_MCAST]) + return; +#endif +#endif + + *mc_promisc = !!(net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI)); + + netdev_for_each_mc_addr(mc, net_dev) { +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_NET_DEVICE_MC) + ether_addr_copy(addr.addr, mc->addr); +#else + ether_addr_copy(addr.addr, mc->dmi_addr); +#endif + efx_mcdi_filter_mc_addr(efx, &addr); + } +} + +static void efx_mcdi_filter_netdev_get_addrs(struct efx_nic *efx, + bool *uc_promisc, + bool *mc_promisc) +{ + struct net_device *net_dev = efx->net_dev; + + /* If we're currently resetting, then this isn't going to go well (and + * we'll try it again when the reset is complete). So skip it. + * This is typically triggered by adding a new UDP tunnel port and + * adding a multicast address (for the UDP tunnel) at the same time. 
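+	 * The address lists should then be picked up by the next call to
+	 * efx_mcdi_filter_sync_rx_mode() once the device is present again.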
+ */ + if (!netif_device_present(net_dev)) + return; + + netif_addr_lock_bh(net_dev); + efx_mcdi_filter_netdev_uc_addrs(efx, uc_promisc); + efx_mcdi_filter_netdev_mc_addrs(efx, mc_promisc); + netif_addr_unlock_bh(net_dev); +} + +static const struct efx_mcdi_filter_addr_ops efx_netdev_addr_ops = { + .get_addrs = efx_mcdi_filter_netdev_get_addrs, +}; + +static struct efx_mcdi_filter_vlan * +efx_mcdi_filter_find_vlan(struct efx_nic *efx, u16 vid) +{ + struct efx_mcdi_filter_table *table = efx->filter_state; + struct efx_mcdi_filter_vlan *vlan; + + WARN_ON(!rwsem_is_locked(&efx->filter_sem)); + + list_for_each_entry(vlan, &table->vlan_list, list) + if (vlan->vid == vid) + return vlan; + + return NULL; +} + +int efx_mcdi_filter_add_vlan(struct efx_nic *efx, u16 vid) +{ + struct efx_mcdi_filter_table *table; + struct efx_mcdi_filter_vlan *vlan; + unsigned int i; + int rc = 0; + + mutex_lock(&efx->mac_lock); + down_write(&efx->filter_sem); + + table = efx->filter_state; + if (!table) { + rc = -ENETDOWN; + goto out; + } + + vlan = efx_mcdi_filter_find_vlan(efx, vid); + if (vlan) { + netif_info(efx, drv, efx->net_dev, + "VLAN %u already added\n", vid); + rc = -EALREADY; + goto out; + } + + vlan = kzalloc(sizeof(*vlan), GFP_KERNEL); + if (!vlan) { + rc = -ENOMEM; + goto out; + } + + vlan->vid = vid; + + for (i = 0; i < ARRAY_SIZE(vlan->uc); i++) + vlan->uc[i] = EFX_MCDI_FILTER_ID_INVALID; + for (i = 0; i < ARRAY_SIZE(vlan->mc); i++) + vlan->mc[i] = EFX_MCDI_FILTER_ID_INVALID; + for (i = 0; i < EFX_MCDI_NUM_DEFAULT_FILTERS; i++) + vlan->default_filters[i] = EFX_MCDI_FILTER_ID_INVALID; + + vlan->warn_on_zero_filters = true; + + list_add_tail(&vlan->list, &table->vlan_list); + + if (efx->datapath_started) + efx_mcdi_filter_vlan_sync_rx_mode(efx, vlan); + +out: + up_write(&efx->filter_sem); + mutex_unlock(&efx->mac_lock); + return rc; +} + +static void efx_mcdi_filter_down_vlan(struct efx_nic *efx, + struct efx_mcdi_filter_vlan *vlan) +{ + unsigned int i; + + efx_rwsem_assert_write_locked(&efx->filter_sem); + + for (i = 0; i < ARRAY_SIZE(vlan->uc); i++) + if (vlan->uc[i] != EFX_MCDI_FILTER_ID_INVALID) { + efx_mcdi_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, + vlan->uc[i]); + vlan->uc[i] = EFX_MCDI_FILTER_ID_INVALID; + } + for (i = 0; i < ARRAY_SIZE(vlan->mc); i++) + if (vlan->mc[i] != EFX_MCDI_FILTER_ID_INVALID) { + efx_mcdi_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, + vlan->mc[i]); + vlan->mc[i] = EFX_MCDI_FILTER_ID_INVALID; + } + for (i = 0; i < EFX_MCDI_NUM_DEFAULT_FILTERS; i++) + if (vlan->default_filters[i] != EFX_MCDI_FILTER_ID_INVALID) { + efx_mcdi_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, + vlan->default_filters[i]); + vlan->default_filters[i] = EFX_MCDI_FILTER_ID_INVALID; + } +} + +static void efx_mcdi_filter_del_vlan_internal(struct efx_nic *efx, + struct efx_mcdi_filter_vlan *vlan) +{ + efx_rwsem_assert_write_locked(&efx->filter_sem); + + efx_mcdi_filter_down_vlan(efx, vlan); + + list_del(&vlan->list); + + kfree(vlan); +} + +int efx_mcdi_filter_del_vlan(struct efx_nic *efx, u16 vid) +{ + struct efx_mcdi_filter_table *table = efx->filter_state; + struct efx_mcdi_filter_vlan *vlan; + int rc = 0; + + mutex_lock(&efx->mac_lock); + down_write(&efx->filter_sem); + + if (!table) { + rc = -ENETDOWN; + goto out; + } + + vlan = efx_mcdi_filter_find_vlan(efx, vid); + if (!vlan) { + netif_err(efx, drv, efx->net_dev, + "VLAN %u not found in filter state\n", vid); + rc = -ENOENT; + goto out; + } + + efx_mcdi_filter_del_vlan_internal(efx, vlan); + +out: + up_write(&efx->filter_sem); + 
mutex_unlock(&efx->mac_lock); + return rc; +} + +static void efx_mcdi_filter_down_vlans(struct efx_nic *efx) +{ + struct efx_mcdi_filter_table *table = efx->filter_state; + struct efx_mcdi_filter_vlan *vlan; + + efx_rwsem_assert_write_locked(&efx->filter_sem); + + list_for_each_entry(vlan, &table->vlan_list, list) + efx_mcdi_filter_down_vlan(efx, vlan); +} + +static void efx_mcdi_filter_del_vlans(struct efx_nic *efx) +{ + struct efx_mcdi_filter_table *table = efx->filter_state; + struct efx_mcdi_filter_vlan *vlan, *next_vlan; + + efx_rwsem_assert_write_locked(&efx->filter_sem); + + list_for_each_entry_safe(vlan, next_vlan, &table->vlan_list, list) + efx_mcdi_filter_del_vlan_internal(efx, vlan); +} + +bool efx_mcdi_filter_match_supported(struct efx_nic *efx, + bool encap, + unsigned int match_flags) +{ + struct efx_mcdi_filter_table *table = efx->filter_state; + unsigned int match_pri; + int mf; + + if (!table) { + netif_warn(efx, drv, efx->net_dev, + "filter match support check before probe\n"); + return false; + } + + for (match_pri = 0; + match_pri < table->rx_match_count; + match_pri++) { + mf = efx_mcdi_filter_match_flags_from_mcdi(encap, + table->rx_match_mcdi_flags[match_pri]); + if (mf == match_flags) + return true; + } + + return false; +} + +static int +efx_mcdi_filter_table_probe_matches(struct efx_nic *efx, + struct efx_mcdi_filter_table *table, + bool encap) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX); + unsigned int pd_match_pri, pd_match_count; + size_t outlen; + int rc; + + /* Find out which RX filter types are supported, and their priorities */ + MCDI_SET_DWORD(inbuf, GET_PARSER_DISP_INFO_IN_OP, + encap ? + MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_ENCAP_RX_MATCHES : + MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES); + rc = efx_mcdi_rpc(efx, MC_CMD_GET_PARSER_DISP_INFO, + inbuf, sizeof(inbuf), outbuf, sizeof(outbuf), + &outlen); + if (rc) + return rc; + + pd_match_count = MCDI_VAR_ARRAY_LEN( + outlen, GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES); + + if (pd_match_count == 0) + netif_warn(efx, probe, efx->net_dev, "%s: pd_match_count = 0\n", + __func__); + + for (pd_match_pri = 0; pd_match_pri < pd_match_count; pd_match_pri++) { + u32 mcdi_flags = + MCDI_ARRAY_DWORD( + outbuf, + GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES, + pd_match_pri); + rc = efx_mcdi_filter_match_flags_from_mcdi(encap, mcdi_flags); + if (rc < 0) { + netif_dbg(efx, probe, efx->net_dev, + "%s: fw flags %#x pri %u not supported in driver\n", + __func__, mcdi_flags, pd_match_pri); + } else { + netif_dbg(efx, probe, efx->net_dev, + "%s: fw flags %#x pri %u supported as driver flags %#x pri %u\n", + __func__, mcdi_flags, pd_match_pri, + rc, table->rx_match_count); + table->rx_match_mcdi_flags[table->rx_match_count++] = + mcdi_flags; + } + } + + return 0; +} + +int efx_mcdi_filter_probe_supported_filters(struct efx_nic *efx) +{ + struct efx_mcdi_filter_table *table = efx->filter_state; + int rc; + + table->rx_match_count = 0; + + rc = efx_mcdi_filter_table_probe_matches(efx, table, false); + + if (!rc && table->encap_supported) + rc = efx_mcdi_filter_table_probe_matches(efx, table, true); + + return rc; +} + +int efx_mcdi_filter_table_probe(struct efx_nic *efx, bool additional_rss_modes) +{ + struct efx_mcdi_filter_table *table; + + if (efx->filter_state) /* already probed */ + return 0; + + table = kzalloc(sizeof(*table), GFP_KERNEL); + if (!table) + return -ENOMEM; + + efx->filter_state = table; + + 
table->additional_rss_modes = additional_rss_modes; + + table->mc_promisc_last = false; + INIT_LIST_HEAD(&table->vlan_list); + init_rwsem(&table->lock); + + /* default to netdev address source */ + efx_mcdi_filter_set_addr_ops(efx, &efx_netdev_addr_ops); + + return 0; +} + +void +efx_mcdi_filter_set_addr_ops(struct efx_nic *efx, + const struct efx_mcdi_filter_addr_ops *addr_ops) +{ + struct efx_mcdi_filter_table *table = efx->filter_state; + + table->addr_ops = addr_ops; +} + +int efx_mcdi_filter_table_init(struct efx_nic *efx, bool mc_chaining, + bool encap) +{ + struct efx_mcdi_filter_table *table = efx->filter_state; + struct net_device *net_dev = efx->net_dev; + unsigned int filter_idx; + int rc; + + table->mc_chaining = mc_chaining; + table->encap_supported = encap; + + table->entry = vzalloc(array_size(EFX_MCDI_FILTER_TBL_ROWS, + sizeof(*table->entry))); + if (!table->entry) { + rc = -ENOMEM; + goto fail; + } + + for (filter_idx = 0; + filter_idx < EFX_MCDI_FILTER_TBL_ROWS; + filter_idx++) + table->entry[filter_idx].handle = EFX_MCDI_FILTER_ID_INVALID; + + rc = efx_mcdi_filter_probe_supported_filters(efx); + if (rc) + goto fail; + + efx_extend_debugfs_port(efx, efx, 0, efx_debugfs); + efx_extend_debugfs_port(efx, efx->filter_state, 0, filter_debugfs); + + /* Ignore net_dev features for vDPA devices */ + if (efx->state == STATE_VDPA) { + table->vlan_filter = efx_mcdi_filter_vlan_filter(efx); + return 0; + } + + if (!efx_mcdi_filter_match_supported(efx, false, + EFX_FILTER_MATCH_FLAGS_RFS)) { + netif_info(efx, probe, net_dev, + "RFS filters are not supported in this firmware variant\n"); + net_dev->features &= ~NETIF_F_NTUPLE; + } + + efx->vlan_filter_available = + efx_mcdi_filter_match_supported(efx, false, + (EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC)); + + if ((efx_supported_features(efx) & NETIF_F_HW_VLAN_CTAG_FILTER) && + !efx->vlan_filter_available) { + + netif_info(efx, probe, net_dev, + "VLAN filters are not supported in this firmware variant\n"); + net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; + efx->fixed_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_NETDEV_HW_FEATURES) + net_dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; +#elif defined(EFX_HAVE_NETDEV_EXTENDED_HW_FEATURES) + netdev_extended(net_dev)->hw_features &= + ~NETIF_F_HW_VLAN_CTAG_FILTER; +#else + efx->hw_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; +#endif + } + table->vlan_filter = efx_mcdi_filter_vlan_filter(efx); + + return 0; + +fail: + kfree(table->entry); + table->entry = NULL; + return rc; +} + +void efx_mcdi_filter_table_reset_mc_allocations(struct efx_nic *efx) +{ + struct efx_mcdi_filter_table *table = efx->filter_state; + + if (table) { + table->must_restore_filters = true; + table->must_restore_rss_contexts = true; + } +} + +/* Caller must hold efx->filter_sem for read if race against + * efx_mcdi_filter_table_down() is possible + */ +int efx_mcdi_filter_table_up(struct efx_nic *efx) +{ + struct efx_mcdi_filter_table *table = efx->filter_state; + unsigned int invalid_filters = 0; + struct efx_filter_spec *spec; + struct efx_rss_context *ctx; + unsigned int filter_idx; + struct efx_vport *vpx; + int fail_rc = 0; + u32 mcdi_flags; + int match_pri; + int rc; + + WARN_ON(!rwsem_is_locked(&efx->filter_sem)); + + if (!table || !table->entry) + return -ENETDOWN; + + if (table->push_filters) + return 0; + + down_write(&table->lock); + mutex_lock(&efx->rss_lock); + mutex_lock(&efx->vport_lock); + + table->push_filters = true; + + /* remove any 
filters that are no longer supported */ + for (filter_idx = 0; filter_idx < EFX_MCDI_FILTER_TBL_ROWS; filter_idx++) { + spec = efx_mcdi_filter_entry_spec(table, filter_idx); + if (!spec) + continue; + + mcdi_flags = efx_mcdi_filter_mcdi_flags_from_spec(spec); + match_pri = 0; + while (match_pri < table->rx_match_count && + table->rx_match_mcdi_flags[match_pri] != mcdi_flags) + ++match_pri; + if (match_pri < table->rx_match_count) + continue; + + table->entry[filter_idx].handle = EFX_MCDI_FILTER_ID_INVALID; + kfree(spec); + efx_mcdi_filter_set_entry(efx, filter_idx, NULL, 0); + ++invalid_filters; + } + /* This can happen validly if the MC's capabilities have changed, so + * is not an error. + */ + if (invalid_filters) + netif_dbg(efx, drv, efx->net_dev, + "Did not restore %u filters that are now unsupported.\n", + invalid_filters); + /* re-add filters in priority order */ + for (match_pri=(table->rx_match_count)-1; match_pri >= 0; match_pri--) { + for (filter_idx = 0; filter_idx < EFX_MCDI_FILTER_TBL_ROWS; filter_idx++) { + spec = efx_mcdi_filter_entry_spec(table, filter_idx); + if (!spec) + continue; + + mcdi_flags = efx_mcdi_filter_mcdi_flags_from_spec(spec); + if (mcdi_flags != table->rx_match_mcdi_flags[match_pri]) + continue; + + if (spec->rss_context) + ctx = efx_find_rss_context_entry(efx, spec->rss_context); + else + ctx = &efx->rss_context; + if (spec->flags & EFX_FILTER_FLAG_RX_RSS) { + if (!ctx) { + netif_warn(efx, drv, efx->net_dev, + "Warning: unable to restore a filter with nonexistent RSS context %u.\n", + spec->rss_context); + rc = -EINVAL; + goto invalid; + } + if (ctx->context_id == EFX_MCDI_RSS_CONTEXT_INVALID) { + netif_warn(efx, drv, efx->net_dev, + "Warning: unable to restore a filter with RSS context %u as it was not created.\n", + spec->rss_context); + rc = -EINVAL; + goto invalid; + } + } + + if (spec->vport_id) + vpx = efx_find_vport_entry(efx, spec->vport_id); + else + vpx = &efx->vport; + if (spec->flags & EFX_FILTER_FLAG_VPORT_ID) { + if (!vpx) { + netif_warn(efx, drv, efx->net_dev, + "Warning: unable to restore a filter with nonexistent v-port %u.\n", + spec->vport_id); + rc = -EINVAL; + goto invalid; + } + if (vpx->vport_id == EVB_PORT_ID_NULL) { + netif_warn(efx, drv, efx->net_dev, + "Warning: unable to restore a filter with v-port %u as it was not created.\n", + spec->vport_id); + rc = -EINVAL; + goto invalid; + } + } + + rc = efx_mcdi_filter_push(efx, spec, + &table->entry[filter_idx].handle, + ctx, vpx, false); + + if (rc) { +invalid: + fail_rc = rc; + kfree(spec); + efx_mcdi_filter_set_entry(efx, filter_idx, + NULL, 0); + } + } + } + + /* if we fail to insert some filters then don't fail whatever + * control operation we're performing, but don't mark filters + * as installed so they'll be tried again later + */ + if (fail_rc) + netif_err(efx, hw, efx->net_dev, + "unable to restore all filters, rc=%d\n", + fail_rc); + else + table->must_restore_filters = false; + + mutex_unlock(&efx->vport_lock); + mutex_unlock(&efx->rss_lock); + up_write(&table->lock); + return 0; +} + +void efx_mcdi_filter_table_restore(struct efx_nic *efx) +{ + struct efx_mcdi_filter_table *table = efx->filter_state; + + if (!table || !table->must_restore_filters) + return; + (void)efx_mcdi_filter_table_up(efx); +} + +void efx_mcdi_filter_table_down(struct efx_nic *efx) +{ + struct efx_mcdi_filter_table *table = efx->filter_state; + MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN); + struct efx_filter_spec *spec; + unsigned int filter_idx; + int rc; + + if (!table || !table->entry) + 
return; + + efx_rwsem_assert_write_locked(&efx->filter_sem); + + efx_mcdi_filter_down_vlans(efx); + + for (filter_idx = 0; filter_idx < EFX_MCDI_FILTER_TBL_ROWS; filter_idx++) { + spec = efx_mcdi_filter_entry_spec(table, filter_idx); + if (!spec) + continue; + + MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, + efx_mcdi_filter_is_exclusive(spec) ? + MC_CMD_FILTER_OP_IN_OP_REMOVE : + MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE); + MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, + table->entry[filter_idx].handle); + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP, inbuf, + sizeof(inbuf), NULL, 0, NULL); + if (rc && (rc != -ENETDOWN)) + netif_info(efx, drv, efx->net_dev, + "%s: filter %04x remove failed %d\n", + __func__, filter_idx, rc); + table->entry[filter_idx].handle = EFX_MCDI_FILTER_ID_INVALID; + } + + table->push_filters = false; +} + +void efx_mcdi_filter_table_fini(struct efx_nic *efx) +{ + struct efx_mcdi_filter_table *table = efx->filter_state; + unsigned int filter_idx; + + if (!table) + return; + + efx_trim_debugfs_port(efx, efx_debugfs); + efx_trim_debugfs_port(efx, filter_debugfs); + if (!table->entry) + return; + + for (filter_idx = 0; + filter_idx < EFX_MCDI_FILTER_TBL_ROWS; + filter_idx++) + kfree(efx_mcdi_filter_entry_spec(table, filter_idx)); + vfree(table->entry); + table->entry = NULL; +} + +void efx_mcdi_filter_table_remove(struct efx_nic *efx) +{ + struct efx_mcdi_filter_table *table = efx->filter_state; + + if (!table) + return; + + down_write(&efx->filter_sem); + efx_mcdi_filter_del_vlans(efx); + + efx->filter_state = NULL; + up_write(&efx->filter_sem); + kfree(table); +} + +static void efx_mcdi_filter_mark_one_old(struct efx_nic *efx, uint16_t *id) +{ + struct efx_mcdi_filter_table *table = efx->filter_state; + unsigned int filter_idx; + + efx_rwsem_assert_write_locked(&table->lock); + + if (*id != EFX_MCDI_FILTER_ID_INVALID) { + filter_idx = efx_mcdi_filter_get_unsafe_id(efx, *id); + if (!table->entry[filter_idx].spec) + netif_dbg(efx, drv, efx->net_dev, + "%s: marked null spec old %04x:%04x\n", + __func__, *id, filter_idx); + table->entry[filter_idx].spec |= EFX_MCDI_FILTER_FLAG_AUTO_OLD; + *id = EFX_MCDI_FILTER_ID_INVALID; + } +} + +/* Mark old per-VLAN filters that may need to be removed */ +static void _efx_mcdi_filter_vlan_mark_old(struct efx_nic *efx, + struct efx_mcdi_filter_vlan *vlan) +{ + struct efx_mcdi_filter_table *table = efx->filter_state; + unsigned int i; + + WARN_ON(!mutex_is_locked(&efx->mac_lock)); + + for (i = 0; i < table->dev_uc_count; i++) + efx_mcdi_filter_mark_one_old(efx, &vlan->uc[i]); + for (i = 0; i < table->dev_mc_count; i++) + efx_mcdi_filter_mark_one_old(efx, &vlan->mc[i]); + for (i = 0; i < EFX_MCDI_NUM_DEFAULT_FILTERS; i++) + efx_mcdi_filter_mark_one_old(efx, &vlan->default_filters[i]); +} + +/* Mark old filters that may need to be removed. + * Caller must hold efx->filter_sem for read if race against + * efx_mcdi_filter_table_down() is possible + */ +static void efx_mcdi_filter_mark_old(struct efx_nic *efx) +{ + struct efx_mcdi_filter_table *table = efx->filter_state; + struct efx_mcdi_filter_vlan *vlan; + + down_write(&table->lock); + list_for_each_entry(vlan, &table->vlan_list, list) + _efx_mcdi_filter_vlan_mark_old(efx, vlan); + up_write(&table->lock); +} + +/* Remove filters that weren't renewed. 
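+ * efx_mcdi_filter_mark_old() flags each auto filter with
+ * EFX_MCDI_FILTER_FLAG_AUTO_OLD; any entry still flagged here was not
+ * renewed by the sync that followed, and is removed at
+ * EFX_FILTER_PRI_AUTO priority.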
*/ +static void efx_mcdi_filter_remove_old(struct efx_nic *efx) +{ + struct efx_mcdi_filter_table *table = efx->filter_state; + int remove_failed = 0; + int remove_noent = 0; + int rc; + int i; + + down_write(&table->lock); + for (i = 0; i < EFX_MCDI_FILTER_TBL_ROWS; i++) { + if (READ_ONCE(table->entry[i].spec) & + EFX_MCDI_FILTER_FLAG_AUTO_OLD) { + rc = efx_mcdi_filter_remove_internal(efx, + 1U << EFX_FILTER_PRI_AUTO, i, true); + if (rc == -ENOENT) + remove_noent++; + else if (rc) + remove_failed++; + } + } + up_write(&table->lock); + + if (remove_failed) + netif_info(efx, drv, efx->net_dev, + "%s: failed to remove %d filters\n", + __func__, remove_failed); + if (remove_noent) + netif_info(efx, drv, efx->net_dev, + "%s: failed to remove %d non-existent filters\n", + __func__, remove_noent); +} + +static unsigned int efx_mcdi_filter_vlan_count_filters(struct efx_nic *efx, + struct efx_mcdi_filter_vlan *vlan) +{ + struct efx_mcdi_filter_table *table = efx->filter_state; + unsigned int count = 0; + unsigned int i; + + WARN_ON(!mutex_is_locked(&efx->mac_lock)); + + for (i = 0; i < table->dev_uc_count; i++) + count += (vlan->uc[i] != EFX_MCDI_FILTER_ID_INVALID); + for (i = 0; i < table->dev_mc_count; i++) + count += (vlan->mc[i] != EFX_MCDI_FILTER_ID_INVALID); + for (i = 0; i < EFX_MCDI_NUM_DEFAULT_FILTERS; i++) + count += (vlan->default_filters[i] != + EFX_MCDI_FILTER_ID_INVALID); + + return count; +} + +/* Caller must hold efx->filter_sem for read if race against + * efx_mcdi_filter_table_down() is possible + */ +void efx_mcdi_filter_sync_rx_mode(struct efx_nic *efx) +{ + struct efx_mcdi_filter_table *table = efx->filter_state; + struct net_device *net_dev = efx->net_dev; + struct efx_mcdi_filter_vlan *vlan; + bool uc_promisc, mc_promisc; + bool vlan_filter; + + WARN_ON(!mutex_is_locked(&efx->mac_lock)); + + if (efx->state != STATE_VDPA && !efx->datapath_started) + return; + + /* If we're currently resetting, then this isn't going to go well (and + * we'll try it again when the reset is complete). So skip it. + * This is typically triggered by adding a new UDP tunnel port and + * adding a multicast address (for the UDP tunnel) at the same time. + */ + if (efx->state != STATE_VDPA && !netif_device_present(net_dev)) + return; + + if (!table || !table->entry) + return; + + efx_mcdi_filter_mark_old(efx); + + /* Get the address list from net device or VDPA */ + table->dev_uc_count = 0; + table->dev_mc_count = 0; + table->uc_promisc = false; + table->mc_promisc = false; + table->mc_overflow = false; + uc_promisc = false; + mc_promisc = false; + WARN_ON(!table->addr_ops); + WARN_ON(!table->addr_ops->get_addrs); + if (table->addr_ops && table->addr_ops->get_addrs) + table->addr_ops->get_addrs(efx, &uc_promisc, &mc_promisc); + if (uc_promisc) + table->uc_promisc = true; + if (mc_promisc) + table->mc_promisc = true; + + /* If VLAN filtering changes, all old filters are finally removed. + * Do it in advance to avoid conflicts for unicast untagged and + * VLAN 0 tagged filters. 
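+	 * (When the vlan_filter flag flips,
+	 * efx_mcdi_filter_vlan_sync_rx_mode() switches between
+	 * unspecified-VID and specific-VID filters, and the two
+	 * generations would otherwise overlap.)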
+ */ + vlan_filter = efx_mcdi_filter_vlan_filter(efx); + if (table->vlan_filter != vlan_filter) { + table->vlan_filter = vlan_filter; + efx_mcdi_filter_remove_old(efx); + } + + list_for_each_entry(vlan, &table->vlan_list, list) + efx_mcdi_filter_vlan_sync_rx_mode(efx, vlan); + + efx_mcdi_filter_remove_old(efx); + table->mc_promisc_last = table->mc_promisc; +} + +#ifdef CONFIG_RFS_ACCEL +bool efx_mcdi_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id, + unsigned int filter_idx) +{ + struct efx_filter_spec *spec, saved_spec; + struct efx_mcdi_filter_table *table; + struct efx_arfs_rule *rule = NULL; + bool ret = true, force = false; + u16 arfs_id; + + down_read(&efx->filter_sem); + table = efx->filter_state; + if (!table || !table->entry) { + up_read(&efx->filter_sem); + return false; + } + down_write(&table->lock); + spec = efx_mcdi_filter_entry_spec(table, filter_idx); + + /* If it's somehow gone, or been replaced with a higher priority filter + * then silently expire it and move on. + */ + if (!spec || spec->priority != EFX_FILTER_PRI_HINT) + goto out_unlock; + + spin_lock_bh(&efx->rps_hash_lock); + if (!efx->rps_hash_table) { + /* In the absence of the table, we always return 0 to ARFS. */ + arfs_id = 0; + } else { + rule = efx_rps_hash_find(efx, spec); + if (!rule) + /* ARFS table doesn't know of this filter, so remove it */ + goto expire; + arfs_id = rule->arfs_id; + ret = efx_rps_check_rule(rule, filter_idx, &force); + if (force) + goto expire; + if (!ret) { + spin_unlock_bh(&efx->rps_hash_lock); + goto out_unlock; + } + } + if (!rps_may_expire_flow(efx->net_dev, spec->dmaq_id, flow_id, arfs_id)) + ret = false; + else if (rule) + rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING; +expire: + saved_spec = *spec; /* remove operation will kfree spec */ + spin_unlock_bh(&efx->rps_hash_lock); + /* At this point (since we dropped the lock), another thread might queue + * up a fresh insertion request (but the actual insertion will be held + * up by our possession of the filter table lock). In that case, it + * will set rule->filter_id to EFX_ARFS_FILTER_ID_PENDING, meaning that + * the rule is not removed by efx_rps_hash_del() below. + */ + if (ret) + ret = efx_mcdi_filter_remove_internal(efx, 1U << spec->priority, + filter_idx, true) == 0; + /* While we can't safely dereference rule (we dropped the lock), we can + * still test it for NULL. 
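+	 * saved_spec was copied above precisely because the remove
+	 * operation frees spec; efx_rps_hash_del() should only need the
+	 * copy to look the rule up.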
+ */ + if (ret && rule) { + /* Expiring, so remove entry from ARFS table */ + spin_lock_bh(&efx->rps_hash_lock); + efx_rps_hash_del(efx, &saved_spec); + spin_unlock_bh(&efx->rps_hash_lock); + } +out_unlock: + up_write(&table->lock); + up_read(&efx->filter_sem); + return ret; +} +#endif /* CONFIG_RFS_ACCEL*/ + +#ifdef EFX_NOT_UPSTREAM +#if IS_MODULE(CONFIG_SFC_DRIVERLINK) +int efx_mcdi_filter_block_kernel(struct efx_nic *efx, + enum efx_dl_filter_block_kernel_type type) +{ + struct efx_mcdi_filter_table *table; + int rc = 0; + + mutex_lock(&efx->mac_lock); + down_read(&efx->filter_sem); + table = efx->filter_state; + if (!table || !table->entry) { + rc = -ENETDOWN; + goto out; + } + down_write(&table->lock); + table->kernel_blocked[type] = true; + up_write(&table->lock); + + efx_mcdi_filter_sync_rx_mode(efx); + + if (type == EFX_DL_FILTER_BLOCK_KERNEL_UCAST) + rc = efx_mcdi_filter_clear_rx(efx, EFX_FILTER_PRI_HINT); +out: + up_read(&efx->filter_sem); + mutex_unlock(&efx->mac_lock); + return rc; +} + +void efx_mcdi_filter_unblock_kernel(struct efx_nic *efx, + enum efx_dl_filter_block_kernel_type type) +{ + struct efx_mcdi_filter_table *table; + + mutex_lock(&efx->mac_lock); + down_read(&efx->filter_sem); + table = efx->filter_state; + if (!table || !table->entry) + goto out; + down_write(&table->lock); + table->kernel_blocked[type] = false; + up_write(&table->lock); + + efx_mcdi_filter_sync_rx_mode(efx); +out: + up_read(&efx->filter_sem); + mutex_unlock(&efx->mac_lock); +} +#endif /* CONFIG_SFC_DRIVERLINK */ +#endif /* EFX_NOT_UPSTREAM */ + +u32 efx_mcdi_get_default_rss_flags(struct efx_nic *efx) +{ + struct efx_mcdi_filter_table *table = efx->filter_state; + u32 flags = RSS_CONTEXT_FLAGS_DEFAULT; + + if (table->additional_rss_modes) + flags |= RSS_CONTEXT_FLAGS_DEFAULT_ADDITIONAL; + return flags; +} + +/* Firmware had a bug (sfc bug 61952) where it would not actually fill in the + * flags field in the response to MC_CMD_RSS_CONTEXT_GET_FLAGS. + * This meant that it would always contain whatever was previously in the MCDI + * buffer. Fortunately, all firmware versions with this bug have a known + * default flags value for a newly-allocated RSS context (either 0xf or + * 0x33f33f0f, depending on the capability flag + * MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES), and when we allocate an + * RSS context we're the only function that can control it; thus we can + * maintain a shadow state in software. + */ +static void efx_mcdi_init_rss_flags(struct efx_nic *efx) +{ + efx->rss_context.flags = efx_mcdi_get_default_rss_flags(efx); +} + +/* The response to MC_CMD_RSS_CONTEXT_GET_FLAGS has a 32-bit hole where the + * context ID would be in the request, so we can use an overlength buffer in + * the request and pre-fill the flags field with what we believe the current + * value to be. Thus if the firmware has the bug, it will leave our pre-filled + * value in the flags field of the response, whereas if the firmware is fixed, + * it will fill in the current value (which should be the same). 
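+ * In the request, the context ID occupies the normal request body, and
+ * the extra space at the end lines up with the response's FLAGS field
+ * (the BUILD_BUG_ON below checks this); that is where the shadow flags
+ * value is pre-filled.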
+ */ +int efx_mcdi_get_rss_context_flags(struct efx_nic *efx, + struct efx_rss_context *ctx) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN); + size_t outlen; + int rc; + + /* Check we have a hole for the context ID */ + BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_LEN != MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST); + MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID, + ctx->context_id); + MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_FLAGS_OUT_FLAGS, ctx->flags); + rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_FLAGS, inbuf, + sizeof(inbuf), outbuf, sizeof(outbuf), &outlen); + if (rc == 0) { + if (outlen < MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN) + rc = -EIO; + else + ctx->flags = MCDI_DWORD(outbuf, RSS_CONTEXT_GET_FLAGS_OUT_FLAGS); + } + return rc; +} + +int efx_mcdi_set_rss_context_flags(struct efx_nic *efx, + struct efx_rss_context *ctx, u32 flags) +{ + struct efx_mcdi_filter_table *table = efx->filter_state; + MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN); + int rc; + + BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN != 0); + + if (flags == ctx->flags) + /* nothing to do */ + return 0; + /* If we're using additional flags, check firmware supports them */ + if ((flags & RSS_CONTEXT_FLAGS_ADDITIONAL_MASK) && + !table->additional_rss_modes) + return -EOPNOTSUPP; + MCDI_SET_DWORD(inbuf, RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID, + ctx->context_id); + MCDI_SET_DWORD(inbuf, RSS_CONTEXT_SET_FLAGS_IN_FLAGS, flags); + rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_FLAGS, inbuf, + sizeof(inbuf), NULL, 0, NULL); + if (!rc) + ctx->flags = flags; + return rc; +} + +static int efx_mcdi_filter_alloc_rss_context(struct efx_nic *efx, bool exclusive, + struct efx_rss_context *ctx, + unsigned int *context_size) +{ + struct efx_mcdi_filter_table *table = efx->filter_state; + MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN); + size_t outlen; + int rc; + u32 alloc_type = exclusive ? + MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE : + MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED; + unsigned int rss_spread = exclusive ? + efx->rss_spread : + min(rounddown_pow_of_two(efx->rss_spread), + EFX_MCDI_MAX_SHARED_RSS_CONTEXT_SIZE); + +#ifdef EFX_NOT_UPSTREAM + if (ctx->num_queues) + rss_spread = ctx->num_queues; +#endif + + if (!exclusive && rss_spread == 1) { + ctx->context_id = EFX_MCDI_RSS_CONTEXT_INVALID; + if (context_size) + *context_size = 1; + return 0; + } + + MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID, + efx->vport.vport_id); + MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE, alloc_type); + MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, + efx_rx_queue_id_internal(efx, rss_spread)); + + rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_ALLOC, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc != 0) + return rc; + + if (outlen < MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN) + return -EIO; + + ctx->context_id = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID); + + if (context_size) + *context_size = rss_spread; + + efx_mcdi_init_rss_flags(efx); + efx_mcdi_get_rss_context_flags(efx, ctx); + + /* Apply our default RSS hashing policy: 4-tuple for TCP and UDP, + * 2-tuple for other IP. + * If we fail, we just leave the RSS context at its default hash + * settings (4-tuple TCP, 2-tuple UDP and other-IP), which is safe + * but may slightly reduce performance. 
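+	 * (The per-protocol modes are built below by shifting an
+	 * RSS_MODE_HASH_* value into each protocol's _RSS_MODE_LBN
+	 * position in the flags word.)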
+ */ + if (table->additional_rss_modes) + efx_mcdi_set_rss_context_flags(efx, ctx, + RSS_CONTEXT_FLAGS_DEFAULT | /* _EN flags */ + RSS_MODE_HASH_4TUPLE << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_LBN | + RSS_MODE_HASH_4TUPLE << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_LBN | + RSS_MODE_HASH_4TUPLE << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN | + RSS_MODE_HASH_4TUPLE << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN | + RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_LBN | + RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_LBN); + + return 0; +} + +static int efx_mcdi_filter_free_rss_context(struct efx_nic *efx, u32 context) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_FREE_IN_LEN); + + MCDI_SET_DWORD(inbuf, RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID, + context); + + return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_FREE, inbuf, sizeof(inbuf), + NULL, 0, NULL); +} + +static int efx_mcdi_filter_populate_rss_table(struct efx_nic *efx, u32 context, + const u32 *rx_indir_table, const u8 *key) +{ + MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN); + MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN); + int i, rc; + + MCDI_SET_DWORD(tablebuf, RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID, + context); + BUILD_BUG_ON(ARRAY_SIZE(efx->rss_context.rx_indir_table) != + MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN); + + /* This iterates over the length of efx->rss_context.rx_indir_table, but + * copies bytes from rx_indir_table. That's because the latter is a + * pointer rather than an array, but should have the same length. + * The efx->rss_context.rx_hash_key loop below is similar. + */ + for (i = 0; i < ARRAY_SIZE(efx->rss_context.rx_indir_table); ++i) { + u8 q = (u8)efx_rx_queue_id_internal(efx, rx_indir_table[i]); + + MCDI_PTR(tablebuf, + RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE)[i] = q; + } + + rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_TABLE, tablebuf, + sizeof(tablebuf), NULL, 0, NULL); + if (rc != 0) + return rc; + + MCDI_SET_DWORD(keybuf, RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID, + context); + BUILD_BUG_ON(ARRAY_SIZE(efx->rss_context.rx_hash_key) != + MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN); + for (i = 0; i < ARRAY_SIZE(efx->rss_context.rx_hash_key); ++i) + MCDI_PTR(keybuf, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY)[i] = key[i]; + + return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_KEY, keybuf, + sizeof(keybuf), NULL, 0, NULL); +} + +void efx_mcdi_rx_free_indir_table(struct efx_nic *efx) +{ + int rc; + + if (efx->rss_context.context_id != EFX_MCDI_RSS_CONTEXT_INVALID) { + rc = efx_mcdi_filter_free_rss_context(efx, efx->rss_context.context_id); + WARN_ON(rc && rc != -ENETDOWN); + } + efx->rss_context.context_id = EFX_MCDI_RSS_CONTEXT_INVALID; +} + +int efx_mcdi_rx_push_shared_rss_config(struct efx_nic *efx, + unsigned int *context_size) +{ + struct efx_mcdi_filter_table *table = efx->filter_state; + int rc = efx_mcdi_filter_alloc_rss_context(efx, false, + &efx->rss_context, context_size); + if (rc != 0) + return rc; + + efx_mcdi_init_rss_flags(efx); + efx_mcdi_get_rss_context_flags(efx, &efx->rss_context); + table->rss_context_exclusive = false; + efx_set_default_rx_indir_table(efx, &efx->rss_context); + return 0; +} + +static int efx_mcdi_filter_rx_push_exclusive_rss_config(struct efx_nic *efx, + const u32 *rx_indir_table, + const u8 *key) +{ + struct efx_mcdi_filter_table *table = efx->filter_state; + u32 old_rx_rss_context = efx->rss_context.context_id; + int rc; + + if 
(efx->rss_context.context_id == EFX_MCDI_RSS_CONTEXT_INVALID || + !table->rss_context_exclusive) { + rc = efx_mcdi_filter_alloc_rss_context(efx, true, &efx->rss_context, + NULL); + if (rc == -EOPNOTSUPP) + return rc; + else if (rc != 0) + goto fail1; + } + + rc = efx_mcdi_filter_populate_rss_table(efx, efx->rss_context.context_id, + rx_indir_table, key); + if (rc) + goto fail2; + if (efx->rss_context.context_id != old_rx_rss_context && + old_rx_rss_context != EFX_MCDI_RSS_CONTEXT_INVALID) + WARN_ON(efx_mcdi_filter_free_rss_context(efx, old_rx_rss_context) != 0); + table->rss_context_exclusive = true; + if (rx_indir_table != efx->rss_context.rx_indir_table) + memcpy(efx->rss_context.rx_indir_table, rx_indir_table, + sizeof(efx->rss_context.rx_indir_table)); + if (key != efx->rss_context.rx_hash_key) + memcpy(efx->rss_context.rx_hash_key, key, + efx->type->rx_hash_key_size); + + return 0; + +fail2: + if (old_rx_rss_context != efx->rss_context.context_id) { + WARN_ON(efx_mcdi_filter_free_rss_context(efx, efx->rss_context.context_id) != 0); + efx->rss_context.context_id = old_rx_rss_context; + } +fail1: + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); + return rc; +} + +int efx_mcdi_rx_push_rss_context_config(struct efx_nic *efx, + struct efx_rss_context *ctx, + const u32 *rx_indir_table, + const u8 *key) +{ + bool allocated = false; + int rc; + + WARN_ON(!mutex_is_locked(&efx->rss_lock)); + + if (ctx->context_id == EFX_MCDI_RSS_CONTEXT_INVALID) { + rc = efx_mcdi_filter_alloc_rss_context(efx, true, ctx, NULL); + if (rc) + return rc; + allocated = true; + } + + if (!rx_indir_table) /* Delete this context */ + return efx_mcdi_filter_free_rss_context(efx, ctx->context_id); + + rc = efx_mcdi_filter_populate_rss_table(efx, ctx->context_id, + rx_indir_table, key); + if (rc) { + if (allocated) + /* try to clean up */ + if (efx_mcdi_filter_free_rss_context(efx, ctx->context_id)) + netif_warn(efx, hw, efx->net_dev, + "Leaked RSS context %u (hw %u)\n", + ctx->user_id, ctx->context_id); + return rc; + } + + memcpy(ctx->rx_indir_table, rx_indir_table, + sizeof(efx->rss_context.rx_indir_table)); + memcpy(ctx->rx_hash_key, key, efx->type->rx_hash_key_size); + + return 0; +} + +static int efx_rx_queue_id_external(struct efx_nic *efx, int rxq_id) +{ + if (efx_tx_vi_spreading(efx)) { + WARN_ON_ONCE(rxq_id & 1); + return rxq_id / 2; + } else { + return rxq_id; + } +} + +int efx_mcdi_rx_pull_rss_context_config(struct efx_nic *efx, + struct efx_rss_context *ctx) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN); + MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN); + MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN); + size_t outlen; + int rc, i; + + WARN_ON(!mutex_is_locked(&efx->rss_lock)); + + BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN != + MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN); + + if (ctx->context_id == EFX_MCDI_RSS_CONTEXT_INVALID) + return -ENOENT; + + MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID, + ctx->context_id); + BUILD_BUG_ON(ARRAY_SIZE(ctx->rx_indir_table) != + MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_LEN); + rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_TABLE, inbuf, sizeof(inbuf), + tablebuf, sizeof(tablebuf), &outlen); + if (rc != 0) + return rc; + + if (WARN_ON(outlen != MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN)) + return -EIO; + + for (i = 0; i < ARRAY_SIZE(ctx->rx_indir_table); i++) { + u8 q = MCDI_PTR(tablebuf, + RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE)[i]; + + ctx->rx_indir_table[i] = 
efx_rx_queue_id_external(efx, q); + } + + MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID, + ctx->context_id); + BUILD_BUG_ON(ARRAY_SIZE(ctx->rx_hash_key) != + MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN); + rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_KEY, inbuf, sizeof(inbuf), + keybuf, sizeof(keybuf), &outlen); + if (rc != 0) + return rc; + + if (WARN_ON(outlen != MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN)) + return -EIO; + + for (i = 0; i < ARRAY_SIZE(ctx->rx_hash_key); ++i) + ctx->rx_hash_key[i] = MCDI_PTR( + keybuf, RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY)[i]; + + return 0; +} + +int efx_mcdi_rx_pull_rss_config(struct efx_nic *efx) +{ + int rc; + + mutex_lock(&efx->rss_lock); + rc = efx_mcdi_rx_pull_rss_context_config(efx, &efx->rss_context); + mutex_unlock(&efx->rss_lock); + return rc; +} + +void efx_mcdi_rx_restore_rss_contexts(struct efx_nic *efx) +{ + struct efx_mcdi_filter_table *table = efx->filter_state; + struct efx_rss_context *ctx; + int rc; + + WARN_ON(!mutex_is_locked(&efx->rss_lock)); + + if (!table->must_restore_rss_contexts) + return; + + list_for_each_entry(ctx, &efx->rss_context.list, list) { + /* previous NIC RSS context is gone */ + ctx->context_id = EFX_MCDI_RSS_CONTEXT_INVALID; + /* so try to allocate a new one */ + rc = efx_mcdi_rx_push_rss_context_config(efx, ctx, + ctx->rx_indir_table, + ctx->rx_hash_key); + if (rc) + netif_warn(efx, probe, efx->net_dev, + "failed to restore RSS context %u, rc=%d" + "; RSS filters may fail to be applied\n", + ctx->user_id, rc); + } + table->must_restore_rss_contexts = false; +} + +int efx_mcdi_rx_push_rss_config(struct efx_nic *efx, bool user, + const u32 *rx_indir_table, + const u8 *key) +{ + u32 flags = efx->rss_context.flags; + int rc; + + if (efx->rss_spread == 1) + return 0; + + if (!key) + key = efx->rss_context.rx_hash_key; + + rc = efx_mcdi_filter_rx_push_exclusive_rss_config(efx, rx_indir_table, key); + + if (rc == -ENOBUFS && !user) { + unsigned int context_size; + bool mismatch = false; + size_t i; + + for (i = 0; + i < ARRAY_SIZE(efx->rss_context.rx_indir_table) && !mismatch; + i++) + mismatch = rx_indir_table[i] != + ethtool_rxfh_indir_default(i, efx->rss_spread); + + rc = efx_mcdi_rx_push_shared_rss_config(efx, &context_size); + if (rc == 0) { + if (context_size != efx->rss_spread) + netif_warn(efx, probe, efx->net_dev, + "Could not allocate an exclusive RSS " + "context; allocated a shared one of " + "different size. 
" + "Wanted %u, got %u.\n", + efx->rss_spread, context_size); + else if (mismatch) + netif_warn(efx, probe, efx->net_dev, + "Could not allocate an exclusive RSS " + "context; allocated a shared one but " + "could not apply custom " + "indirection.\n"); + else + netif_info(efx, probe, efx->net_dev, + "Could not allocate an exclusive RSS " + "context; allocated a shared one.\n"); + if (flags != efx->rss_context.flags) + netif_info(efx, probe, efx->net_dev, + "Could not apply custom flow-hashing; wanted " + "%#08x, got %#08x.\n", flags, + efx->rss_context.flags); + } + } + return rc; +} + +int efx_mcdi_push_default_indir_table(struct efx_nic *efx, + unsigned int rss_spread) +{ + int rc = 0; + + if (efx->rss_spread == rss_spread) + return 0; + + efx->rss_spread = rss_spread; + if (!efx->filter_state) + return 0; + + efx_mcdi_rx_free_indir_table(efx); + if (rss_spread > 1) { + efx_set_default_rx_indir_table(efx, &efx->rss_context); + rc = efx->type->rx_push_rss_config(efx, false, + efx->rss_context.rx_indir_table, NULL); + } + return rc; +} + diff --git a/src/drivers/net/ethernet/sfc/mcdi_filters.h b/src/drivers/net/ethernet/sfc/mcdi_filters.h new file mode 100644 index 0000000..ec63c75 --- /dev/null +++ b/src/drivers/net/ethernet/sfc/mcdi_filters.h @@ -0,0 +1,156 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2019 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#ifndef EFX_MCDI_FILTERS_H +#define EFX_MCDI_FILTERS_H + +#include "net_driver.h" + +/* set up basic operations required for all net devices */ +int efx_mcdi_filter_table_probe(struct efx_nic *efx, bool additional_rss); +/* enable operations we can do once we know the number of queues we have */ +int efx_mcdi_filter_table_init(struct efx_nic *efx, bool mc_chaining, + bool encap); +/* opposite of init */ +void efx_mcdi_filter_table_fini(struct efx_nic *efx); +/* opposite of probe */ +void efx_mcdi_filter_table_remove(struct efx_nic *efx); +/* interface up */ +int efx_mcdi_filter_table_up(struct efx_nic *efx); +/* interface down */ +void efx_mcdi_filter_table_down(struct efx_nic *efx); +/* called post-reset while interface is up */ +void efx_mcdi_filter_table_restore(struct efx_nic *efx); + +void efx_mcdi_filter_table_reset_mc_allocations(struct efx_nic *efx); + +/* The filter table(s) are managed by firmware and we have write-only + * access. When removing filters we must identify them to the + * firmware by a 64-bit handle, but this is too wide for Linux kernel + * interfaces (32-bit for RX NFC, 16-bit for RFS). Also, we need to + * be able to tell in advance whether a requested insertion will + * replace an existing filter. Therefore we maintain a software hash + * table, which should be at least as large as the hardware hash + * table. + * + * Huntington has a single 8K filter table shared between all filter + * types and both ports. + * + * Medford/Medford2 have a single 16K filter table shared between all + * filter types and ports. 
+ */ +#define EFX_MCDI_FILTER_TBL_ROWS 16384 + +int efx_mcdi_filter_probe_supported_filters(struct efx_nic *efx); +bool efx_mcdi_filter_match_supported(struct efx_nic *efx, + bool encap, + unsigned int match_flags); + +struct efx_mcdi_dev_addr { + u8 addr[ETH_ALEN]; +}; + +struct efx_mcdi_filter_addr_ops { + void (*get_addrs)(struct efx_nic *efx, + bool *uc_promisc, + bool *mc_promisc); +}; + +void +efx_mcdi_filter_set_addr_ops(struct efx_nic *efx, + const struct efx_mcdi_filter_addr_ops *addr_ops); +void efx_mcdi_filter_sync_rx_mode(struct efx_nic *efx); +s32 efx_mcdi_filter_insert(struct efx_nic *efx, + const struct efx_filter_spec *spec, + bool replace_equal); +int efx_mcdi_filter_redirect(struct efx_nic *efx, u32 filter_id, + u32 *rss_context, int rxq_i, int stack_id); +int efx_mcdi_filter_remove_safe(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 filter_id); +int efx_mcdi_filter_get_safe(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 filter_id, struct efx_filter_spec *spec); + +#ifdef EFX_NOT_UPSTREAM +#if IS_MODULE(CONFIG_SFC_DRIVERLINK) +int efx_mcdi_filter_block_kernel(struct efx_nic *efx, + enum efx_dl_filter_block_kernel_type type); +void efx_mcdi_filter_unblock_kernel(struct efx_nic *efx, + enum efx_dl_filter_block_kernel_type type); +#endif +#endif + +u32 efx_mcdi_filter_count_rx_used(struct efx_nic *efx, + enum efx_filter_priority priority); +int efx_mcdi_filter_clear_rx(struct efx_nic *efx, + enum efx_filter_priority priority); +int efx_mcdi_filter_remove_all(struct efx_nic *efx, + enum efx_filter_priority priority); +u32 efx_mcdi_filter_get_rx_id_limit(struct efx_nic *efx); +s32 efx_mcdi_filter_get_rx_ids(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 *buf, u32 size); + +int efx_mcdi_filter_add_vlan(struct efx_nic *efx, u16 vid); +int efx_mcdi_filter_del_vlan(struct efx_nic *efx, u16 vid); + +static inline int efx_mcdi_filter_add_vid(struct efx_nic *efx, + __be16 proto, u16 vid) +{ + if (proto != htons(ETH_P_8021Q)) + return -EINVAL; + + return efx_mcdi_filter_add_vlan(efx, vid); +} + +static inline int efx_mcdi_filter_del_vid(struct efx_nic *efx, + __be16 proto, u16 vid) +{ + if (proto != htons(ETH_P_8021Q)) + return -EINVAL; + + return efx_mcdi_filter_del_vlan(efx, vid); +} + +void efx_mcdi_rx_free_indir_table(struct efx_nic *efx); +int efx_mcdi_rx_push_rss_context_config(struct efx_nic *efx, + struct efx_rss_context *ctx, + const u32 *rx_indir_table, + const u8 *key); +int efx_mcdi_rx_push_rss_config(struct efx_nic *efx, bool user, + const u32 *rx_indir_table, + const u8 *key); +int efx_mcdi_rx_push_shared_rss_config(struct efx_nic *efx, + unsigned int *context_size); +int efx_mcdi_push_default_indir_table(struct efx_nic *efx, + unsigned int rss_spread); +int efx_mcdi_rx_pull_rss_config(struct efx_nic *efx); +int efx_mcdi_rx_pull_rss_context_config(struct efx_nic *efx, + struct efx_rss_context *ctx); +u32 efx_mcdi_get_default_rss_flags(struct efx_nic *efx); +int efx_mcdi_get_rss_context_flags(struct efx_nic *efx, + struct efx_rss_context *ctx); +int efx_mcdi_set_rss_context_flags(struct efx_nic *efx, + struct efx_rss_context *ctx, u32 flags); +void efx_mcdi_rx_restore_rss_contexts(struct efx_nic *efx); + +bool efx_mcdi_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id, + unsigned int filter_idx); +#if defined(CONFIG_DEBUG_FS) +int efx_debugfs_read_filter_list(struct seq_file *file, void *data); +#endif + +/* only to be called from an addr_ops->get_addrs() */ +void efx_mcdi_filter_uc_addr(struct efx_nic *efx, + const 
struct efx_mcdi_dev_addr *addr); +void efx_mcdi_filter_mc_addr(struct efx_nic *efx, + const struct efx_mcdi_dev_addr *addr); + +#endif diff --git a/src/drivers/net/ethernet/sfc/mcdi_functions.c b/src/drivers/net/ethernet/sfc/mcdi_functions.c new file mode 100644 index 0000000..a5fc973 --- /dev/null +++ b/src/drivers/net/ethernet/sfc/mcdi_functions.c @@ -0,0 +1,463 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2018 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ +#include "net_driver.h" +#include "efx.h" +#include "nic.h" +#include "mcdi_functions.h" +#include "rx_common.h" +#include "mcdi.h" +#include "ef100_nic.h" +#include "efx_reflash.h" + +int efx_mcdi_free_vis(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF_ERR(outbuf); + size_t outlen; + int rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FREE_VIS, NULL, 0, + outbuf, sizeof(outbuf), &outlen); + + /* -EALREADY means nothing to free, so ignore */ + if (rc == -EALREADY) + rc = 0; + if (rc) + efx_mcdi_display_error(efx, MC_CMD_FREE_VIS, 0, outbuf, outlen, + rc); + return rc; +} + +int efx_mcdi_alloc_vis(struct efx_nic *efx, + unsigned int min_vis, unsigned int max_vis, + unsigned int *vi_base, unsigned int *vi_shift, + unsigned int *allocated_vis) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_ALLOC_VIS_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_VIS_EXT_OUT_LEN); + size_t outlen; + int rc; + + MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MIN_VI_COUNT, min_vis); + MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MAX_VI_COUNT, max_vis); + rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_VIS, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc != 0) + return rc; + + if (outlen < MC_CMD_ALLOC_VIS_OUT_LEN) + return -EIO; + + netif_dbg(efx, drv, efx->net_dev, "base VI is A%#03x\n", + MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE)); + + if (vi_base) + *vi_base = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE); + if (vi_shift) + *vi_shift = MCDI_DWORD(outbuf, ALLOC_VIS_EXT_OUT_VI_SHIFT); + if (allocated_vis) + *allocated_vis = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_COUNT); + return 0; +} + +int efx_mcdi_ev_probe(struct efx_channel *channel) +{ + return efx_nic_alloc_buffer(channel->efx, &channel->eventq, + (channel->eventq_mask + 1) * + sizeof(efx_qword_t), + GFP_KERNEL); +} + +int efx_mcdi_ev_init(struct efx_channel *channel, bool v1_cut_thru, bool v2) +{ + efx_dword_t *inbuf = kzalloc(MC_CMD_INIT_EVQ_V2_IN_LENMAX, GFP_KERNEL); + MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_V2_OUT_LEN); + size_t entries = DIV_ROUND_UP(channel->eventq.len, EFX_BUF_SIZE); + struct efx_nic *efx = channel->efx; + + size_t inlen, outlen; + dma_addr_t dma_addr; + int rc; + int i; + + if (!inbuf) + return -ENOMEM; + + /* Fill event queue with all ones (i.e. 
empty events) */
+	memset(channel->eventq.addr, 0xff, channel->eventq.len);
+
+	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_SIZE, channel->eventq_mask + 1);
+	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_INSTANCE, channel->channel);
+	/* INIT_EVQ expects index in vector table, not absolute */
+	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_IRQ_NUM, channel->channel);
+	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_MODE,
+		       MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
+	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_LOAD, 0);
+	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_RELOAD, 0);
+	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_MODE,
+		       MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
+	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_THRSHLD, 0);
+
+	if (v2) {
+		/* Use the new generic approach to specifying event queue
+		 * configuration, requesting lower latency or higher throughput.
+		 * The options that actually get used appear in the output.
+		 */
+		unsigned int perf_mode = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO;
+
+#ifdef EFX_NOT_UPSTREAM
+		switch (efx->performance_profile) {
+		case EFX_PERFORMANCE_PROFILE_THROUGHPUT:
+			perf_mode = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_THROUGHPUT;
+			break;
+		case EFX_PERFORMANCE_PROFILE_LATENCY:
+			perf_mode = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LOW_LATENCY;
+			break;
+		default:
+			break;
+		}
+#endif
+		MCDI_POPULATE_DWORD_2(inbuf, INIT_EVQ_V2_IN_FLAGS,
+				      INIT_EVQ_V2_IN_FLAG_INTERRUPTING, 1,
+				      INIT_EVQ_V2_IN_FLAG_TYPE, perf_mode);
+	} else {
+		MCDI_POPULATE_DWORD_4(inbuf, INIT_EVQ_IN_FLAGS,
+				      INIT_EVQ_IN_FLAG_INTERRUPTING, 1,
+				      INIT_EVQ_IN_FLAG_RX_MERGE, 1,
+				      INIT_EVQ_IN_FLAG_TX_MERGE, 1,
+				      INIT_EVQ_IN_FLAG_CUT_THRU, v1_cut_thru);
+	}
+
+	if (efx->type->revision == EFX_REV_EF100)
+		entries = 1;	/* No need to split the memory up any more */
+
+	dma_addr = channel->eventq.dma_addr;
+	if (efx->type->regionmap_buffer) {
+		rc = efx->type->regionmap_buffer(efx, &dma_addr);
+		if (rc) {
+			kfree(inbuf);
+			return rc;
+		}
+	}
+
+	for (i = 0; i < entries; ++i) {
+		MCDI_SET_ARRAY_QWORD(inbuf, INIT_EVQ_IN_DMA_ADDR, i, dma_addr);
+		dma_addr += EFX_BUF_SIZE;
+	}
+
+	inlen = MC_CMD_INIT_EVQ_IN_LEN(entries);
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen,
+			  outbuf, sizeof(outbuf), &outlen);
+
+	if (outlen >= MC_CMD_INIT_EVQ_V2_OUT_LEN)
+		netif_dbg(efx, drv, efx->net_dev,
+			  "Channel %d using event queue flags %08x\n",
+			  channel->channel,
+			  MCDI_DWORD(outbuf, INIT_EVQ_V2_OUT_FLAGS));
+
+	kfree(inbuf);
+
+	return rc;
+}
+
+void efx_mcdi_ev_remove(struct efx_channel *channel)
+{
+	efx_nic_free_buffer(channel->efx, &channel->eventq);
+}
+
+void efx_mcdi_ev_fini(struct efx_channel *channel)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN);
+	MCDI_DECLARE_BUF_ERR(outbuf);
+	struct efx_nic *efx = channel->efx;
+	size_t outlen;
+	int rc;
+
+	MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel);
+
+	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
+				outbuf, sizeof(outbuf), &outlen);
+
+	if (rc && rc != -EALREADY)
+		goto fail;
+
+	return;
+
+fail:
+	efx_mcdi_display_error(efx, MC_CMD_FINI_EVQ, MC_CMD_FINI_EVQ_IN_LEN,
+			       outbuf, outlen, rc);
+}
+
+int efx_mcdi_tx_init(struct efx_tx_queue *tx_queue, bool tso_v2)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_EXT_IN_LEN);
+	size_t entries = DIV_ROUND_UP(tx_queue->txd.len, EFX_BUF_SIZE);
+	struct efx_channel *channel = tx_queue->channel;
+	struct efx_nic *efx = tx_queue->efx;
+	dma_addr_t dma_addr;
+	int rc;
+	int i;
+	bool outer_csum_offload =
+		tx_queue->csum_offload & EFX_TXQ_TYPE_CSUM_OFFLOAD;
+	bool inner_csum_offload =
+		tx_queue->csum_offload & EFX_TXQ_TYPE_INNER_CSUM_OFFLOAD;
+
+	BUILD_BUG_ON(MC_CMD_INIT_TXQ_OUT_LEN !=
0); + + MCDI_SET_DWORD(inbuf, INIT_TXQ_EXT_IN_SIZE, tx_queue->ptr_mask + 1); + MCDI_SET_DWORD(inbuf, INIT_TXQ_EXT_IN_TARGET_EVQ, channel->channel); + MCDI_SET_DWORD(inbuf, INIT_TXQ_EXT_IN_LABEL, tx_queue->label); + MCDI_SET_DWORD(inbuf, INIT_TXQ_EXT_IN_INSTANCE, tx_queue->queue); + MCDI_SET_DWORD(inbuf, INIT_TXQ_EXT_IN_OWNER_ID, 0); + MCDI_SET_DWORD(inbuf, INIT_TXQ_EXT_IN_PORT_ID, efx->vport.vport_id); + + dma_addr = tx_queue->txd.dma_addr; + if (efx->type->regionmap_buffer) { + rc = efx->type->regionmap_buffer(efx, &dma_addr); + if (rc) + return rc; + } + + netif_dbg(efx, hw, efx->net_dev, "pushing TXQ %d. %zu entries (%llx)\n", + tx_queue->queue, entries, (u64)dma_addr); + + for (i = 0; i < entries; ++i) { + MCDI_SET_ARRAY_QWORD(inbuf, INIT_TXQ_EXT_IN_DMA_ADDR, i, + dma_addr); + dma_addr += EFX_BUF_SIZE; + } + + do { + /* TSOv2 implies IP header checksum offload for TSO frames, + * so we can safely disable IP header checksum offload for + * everything else. If we don't have TSOv2, then we have to + * enable IP header checksum offload, which is strictly + * incorrect but better than breaking TSO. + */ + MCDI_POPULATE_DWORD_6(inbuf, INIT_TXQ_EXT_IN_FLAGS, + INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, + tso_v2, + INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS, + tso_v2 || !outer_csum_offload, + INIT_TXQ_EXT_IN_FLAG_TCP_CSUM_DIS, + !outer_csum_offload, + INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN, + inner_csum_offload && !tso_v2, + INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN, + inner_csum_offload, + INIT_TXQ_EXT_IN_FLAG_TIMESTAMP, + tx_queue->timestamping); + + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_INIT_TXQ, + inbuf, sizeof(inbuf), + NULL, 0, NULL); + + if (rc == -ENOSPC && tso_v2) { + /* Retry without TSOv2 if we're short on contexts. */ + tso_v2 = false; + netif_warn(efx, probe, efx->net_dev, + "TSOv2 context not available to segment in hardware. 
TCP performance may be reduced.\n"); + } else if (rc) { + efx_mcdi_display_error(efx, MC_CMD_INIT_TXQ, + MC_CMD_INIT_TXQ_EXT_IN_LEN, + NULL, 0, rc); + return rc; + } + } while (rc); + + return 0; +} + +void efx_mcdi_tx_fini(struct efx_tx_queue *tx_queue) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_TXQ_IN_LEN); + MCDI_DECLARE_BUF_ERR(outbuf); + struct efx_nic *efx = tx_queue->efx; + size_t outlen; + int rc; + + MCDI_SET_DWORD(inbuf, FINI_TXQ_IN_INSTANCE, tx_queue->queue); + + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + + if (rc && rc != -EALREADY) + goto fail; + + return; + +fail: + efx_mcdi_display_error(efx, MC_CMD_FINI_TXQ, MC_CMD_FINI_TXQ_IN_LEN, + outbuf, outlen, rc); +} + +int efx_mcdi_rx_probe(struct efx_rx_queue *rx_queue) +{ + return efx_nic_alloc_buffer(rx_queue->efx, &rx_queue->rxd, + (rx_queue->ptr_mask + 1) * + sizeof(efx_qword_t), + GFP_KERNEL); +} + +int efx_mcdi_rx_init(struct efx_rx_queue *rx_queue, bool want_outer_classes) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_RXQ_V4_IN_LEN); + struct efx_channel *channel = efx_rx_queue_channel(rx_queue); + size_t entries = DIV_ROUND_UP(rx_queue->rxd.len, EFX_BUF_SIZE); + struct efx_nic *efx = rx_queue->efx; + unsigned int buffer_size; + dma_addr_t dma_addr; + int rc; + int i; + BUILD_BUG_ON(MC_CMD_INIT_RXQ_V4_OUT_LEN != 0); + + rx_queue->scatter_n = 0; + rx_queue->scatter_len = 0; + if (efx->type->revision == EFX_REV_EF100) + buffer_size = efx->rx_page_buf_step; + else + buffer_size = 0; + + MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_SIZE, rx_queue->ptr_mask + 1); + MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_TARGET_EVQ, channel->channel); + MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_LABEL, efx_rx_queue_index(rx_queue)); + MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_INSTANCE, + efx_rx_queue_instance(rx_queue)); + MCDI_POPULATE_DWORD_3(inbuf, INIT_RXQ_IN_FLAGS, + INIT_RXQ_IN_FLAG_PREFIX, 1, + INIT_RXQ_IN_FLAG_TIMESTAMP, 1, + INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES, + want_outer_classes); + MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0); + MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, efx->vport.vport_id); + MCDI_SET_DWORD(inbuf, INIT_RXQ_V4_IN_BUFFER_SIZE_BYTES, buffer_size); + + dma_addr = rx_queue->rxd.dma_addr; + if (efx->type->regionmap_buffer) { + rc = efx->type->regionmap_buffer(efx, &dma_addr); + if (rc) + return rc; + } + + netif_dbg(efx, hw, efx->net_dev, "pushing RXQ %d. 
%zu entries (%llx)\n", + efx_rx_queue_index(rx_queue), entries, (u64)dma_addr); + + for (i = 0; i < entries; ++i) { + MCDI_SET_ARRAY_QWORD(inbuf, INIT_RXQ_IN_DMA_ADDR, i, dma_addr); + dma_addr += EFX_BUF_SIZE; + } + + rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, + MC_CMD_INIT_RXQ_V4_IN_LEN, NULL, 0, NULL); + if (rc && rc != -ENETDOWN && rc != -EAGAIN) + netdev_WARN(efx->net_dev, "failed to initialise RXQ %d\n", + efx_rx_queue_index(rx_queue)); + return rc; +} + +void efx_mcdi_rx_remove(struct efx_rx_queue *rx_queue) +{ + efx_nic_free_buffer(rx_queue->efx, &rx_queue->rxd); +} + +void efx_mcdi_rx_fini(struct efx_rx_queue *rx_queue) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_RXQ_IN_LEN); + MCDI_DECLARE_BUF_ERR(outbuf); + struct efx_nic *efx = rx_queue->efx; + size_t outlen; + int rc; + + MCDI_SET_DWORD(inbuf, FINI_RXQ_IN_INSTANCE, + efx_rx_queue_instance(rx_queue)); + + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + + if (rc && rc != -EALREADY) + goto fail; + + return; + +fail: + efx_mcdi_display_error(efx, MC_CMD_FINI_RXQ, MC_CMD_FINI_RXQ_IN_LEN, + outbuf, outlen, rc); +} + +int efx_fini_dmaq(struct efx_nic *efx) +{ + struct efx_channel *channel; + struct efx_tx_queue *tx_queue; + struct efx_rx_queue *rx_queue; + int pending; + + /* Do not attempt to write to the NIC whilst unavailable */ + if (efx_nic_hw_unavailable(efx)) + return 0; + + efx_for_each_channel(channel, efx) { + efx_for_each_channel_rx_queue(rx_queue, channel) { + efx_mcdi_rx_fini(rx_queue); + } + efx_for_each_channel_tx_queue(tx_queue, channel) { + efx_mcdi_tx_fini(tx_queue); + } + } + + wait_event_timeout(efx->flush_wq, + atomic_read(&efx->active_queues) == 0, + msecs_to_jiffies(EFX_MAX_FLUSH_TIME)); + pending = atomic_read(&efx->active_queues); + if (pending) { + netif_err(efx, hw, efx->net_dev, "failed to flush %d queues\n", + pending); + return -ETIMEDOUT; + } + return 0; +} + +int efx_mcdi_window_mode_to_stride(struct efx_nic *efx, u8 vi_window_mode) +{ + switch (vi_window_mode) { + case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K: + efx->vi_stride = 8192; + break; + case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K: + efx->vi_stride = 16384; + break; + case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K: + efx->vi_stride = 65536; + break; + default: + netif_err(efx, probe, efx->net_dev, + "Unrecognised VI window mode %d\n", + vi_window_mode); + return -EIO; + } + pci_dbg(efx->pci_dev, "vi_stride = %u\n", efx->vi_stride); + return 0; +} + +int efx_get_fn_info(struct efx_nic *efx, unsigned int *pf_index, + unsigned int *vf_index) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN); + size_t outlen; + int rc; + + rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf, + sizeof(outbuf), &outlen); + if (rc) + return rc; + if (outlen < sizeof(outbuf)) + return -EIO; + + if (pf_index) + *pf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_PF); + + if (efx->type->is_vf && vf_index) + *vf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_VF); + return 0; +} diff --git a/src/drivers/net/ethernet/sfc/mcdi_functions.h b/src/drivers/net/ethernet/sfc/mcdi_functions.h new file mode 100644 index 0000000..b0bc956 --- /dev/null +++ b/src/drivers/net/ethernet/sfc/mcdi_functions.h @@ -0,0 +1,34 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2019 Solarflare Communications Inc. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#ifndef EFX_MCDI_FUNCTIONS_H +#define EFX_MCDI_FUNCTIONS_H + +int efx_mcdi_alloc_vis(struct efx_nic *efx, + unsigned int min_vis, unsigned int max_vis, + unsigned int *vi_base, unsigned int *vi_shift, + unsigned int *allocated_vis); +int efx_mcdi_free_vis(struct efx_nic *efx); + +int efx_mcdi_ev_probe(struct efx_channel *channel); +int efx_mcdi_ev_init(struct efx_channel *channel, bool v1_cut_thru, bool v2); +void efx_mcdi_ev_remove(struct efx_channel *channel); +void efx_mcdi_ev_fini(struct efx_channel *channel); +int efx_mcdi_tx_init(struct efx_tx_queue *tx_queue, bool tso_v2); +void efx_mcdi_tx_fini(struct efx_tx_queue *tx_queue); +int efx_mcdi_rx_probe(struct efx_rx_queue *rx_queue); +int efx_mcdi_rx_init(struct efx_rx_queue *rx_queue, bool want_outer_classes); +void efx_mcdi_rx_remove(struct efx_rx_queue *rx_queue); +void efx_mcdi_rx_fini(struct efx_rx_queue *rx_queue); +int efx_fini_dmaq(struct efx_nic *efx); +int efx_mcdi_window_mode_to_stride(struct efx_nic *efx, u8 vi_window_mode); +int efx_get_fn_info(struct efx_nic *efx, unsigned int *pf_index, + unsigned int *vf_index); + +#endif diff --git a/src/drivers/net/ethernet/sfc/mcdi_mon.c b/src/drivers/net/ethernet/sfc/mcdi_mon.c new file mode 100644 index 0000000..ac3204c --- /dev/null +++ b/src/drivers/net/ethernet/sfc/mcdi_mon.c @@ -0,0 +1,1660 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2011-2017 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
+ */
+
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/hwmon.h>
+#include <linux/stat.h>
+#include <linux/rhashtable.h>
+
+#include "net_driver.h"
+#include "mcdi.h"
+#include "mcdi_pcol.h"
+#include "nic.h"
+
+#define EFX_HWMON_TYPES_COUNT hwmon_pwm
+
+#define EFX_DYNAMIC_SENSOR_READING_UPDATE_MAX_HANDLES 32
+#define EFX_DYNAMIC_SENSOR_INFO_READ_MAX_HANDLES 4
+
+#define MC_CMD_DYN_SENSOR_LIMIT_LO_WARNING_OFST \
+	(MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_LIMITS_OFST + 0)
+#define MC_CMD_DYN_SENSOR_LIMIT_LO_CRITICAL_OFST \
+	(MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_LIMITS_OFST + 4)
+#define MC_CMD_DYN_SENSOR_LIMIT_LO_FATAL_OFST \
+	(MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_LIMITS_OFST + 8)
+#define MC_CMD_DYN_SENSOR_LIMIT_HI_WARNING_OFST \
+	(MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_LIMITS_OFST + 12)
+#define MC_CMD_DYN_SENSOR_LIMIT_HI_CRITICAL_OFST \
+	(MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_LIMITS_OFST + 16)
+#define MC_CMD_DYN_SENSOR_LIMIT_HI_FATAL_OFST \
+	(MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_LIMITS_OFST + 20)
+
+static const char *const efx_hwmon_unit[EFX_HWMON_TYPES_COUNT] = {
+	[hwmon_temp]  = " degC",
+	[hwmon_fan]   = " rpm", /* though nonsense for a heatsink */
+	[hwmon_in]    = " mV",
+	[hwmon_curr]  = " mA",
+	[hwmon_power] = " W",
+};
+
+enum efx_hwmon_attribute {
+	EFX_HWMON_INPUT,
+	EFX_HWMON_MIN,
+	EFX_HWMON_MAX,
+	EFX_HWMON_CRIT,
+	EFX_HWMON_ALARM,
+	EFX_HWMON_LABEL,
+	EFX_HWMON_NAME
+};
+
+#define EFX_HWMON_ATTRIBUTE_COUNT (EFX_HWMON_NAME + 1)
+
+struct efx_mcdi_hwmon_info {
+	const char *label;
+	enum hwmon_sensor_types hwmon_type;
+	int port;
+};
+
+static const struct efx_mcdi_hwmon_info efx_mcdi_sensor_type[] = {
+#define SENSOR(name, label, hwmon_type, port)				\
+	[MC_CMD_SENSOR_##name] = { label, hwmon_ ## hwmon_type, port }
+	SENSOR(CONTROLLER_TEMP, "Controller board temp.", temp, -1),
+	SENSOR(PHY_COMMON_TEMP, "PHY temp.", temp, -1),
+	SENSOR(CONTROLLER_COOLING, "Controller heat sink", fan, -1),
+	SENSOR(PHY0_TEMP, "PHY temp.", temp, 0),
+	SENSOR(PHY0_COOLING, "PHY heat sink", fan, 0),
+	SENSOR(PHY1_TEMP, "PHY temp.", temp, 1),
+	SENSOR(PHY1_COOLING, "PHY heat sink", fan, 1),
+	SENSOR(IN_1V0, "1.0V supply", in, -1),
+	SENSOR(IN_1V2, "1.2V supply", in, -1),
+	SENSOR(IN_1V8, "1.8V supply", in, -1),
+	SENSOR(IN_2V5, "2.5V supply", in, -1),
+	SENSOR(IN_3V3, "3.3V supply", in, -1),
+	SENSOR(IN_12V0, "12.0V supply", in, -1),
+	SENSOR(IN_1V2A, "1.2V analogue supply", in, -1),
+	SENSOR(IN_VREF, "Ref. voltage", in, -1),
+	SENSOR(OUT_VAOE, "AOE FPGA supply", in, -1),
+	SENSOR(AOE_TEMP, "AOE FPGA temp.", temp, -1),
+	SENSOR(PSU_AOE_TEMP, "AOE regulator temp.", temp, -1),
+	SENSOR(PSU_TEMP, "Controller regulator temp.",
+	       temp, -1),
+	SENSOR(FAN_0, "Fan 0", fan, -1),
+	SENSOR(FAN_1, "Fan 1", fan, -1),
+	SENSOR(FAN_2, "Fan 2", fan, -1),
+	SENSOR(FAN_3, "Fan 3", fan, -1),
+	SENSOR(FAN_4, "Fan 4", fan, -1),
+	SENSOR(IN_VAOE, "AOE input supply", in, -1),
+	SENSOR(OUT_IAOE, "AOE output current", curr, -1),
+	SENSOR(IN_IAOE, "AOE input current", curr, -1),
+	SENSOR(NIC_POWER, "Board power use", power, -1),
+	SENSOR(IN_0V9, "0.9V supply", in, -1),
+	SENSOR(IN_I0V9, "0.9V supply current", curr, -1),
+	SENSOR(IN_I1V2, "1.2V supply current", curr, -1),
+	SENSOR(IN_0V9_ADC, "0.9V supply (ext. ADC)", in, -1),
+	SENSOR(CONTROLLER_2_TEMP, "Controller board temp. 2", temp, -1),
+	SENSOR(VREG_INTERNAL_TEMP, "Regulator die temp.", temp, -1),
+	SENSOR(VREG_0V9_TEMP, "0.9V regulator temp.", temp, -1),
+	SENSOR(VREG_1V2_TEMP, "1.2V regulator temp.", temp, -1),
+	SENSOR(CONTROLLER_VPTAT,
+	       "Controller PTAT voltage (int. ADC)", in, -1),
+	SENSOR(CONTROLLER_INTERNAL_TEMP,
+	       "Controller die temp.
(int. ADC)", temp, -1), + SENSOR(CONTROLLER_VPTAT_EXTADC, + "Controller PTAT voltage (ext. ADC)", in, -1), + SENSOR(CONTROLLER_INTERNAL_TEMP_EXTADC, + "Controller die temp. (ext. ADC)", temp, -1), + SENSOR(AMBIENT_TEMP, "Ambient temp.", temp, -1), + SENSOR(AIRFLOW, "Air flow raw", in, -1), + SENSOR(VDD08D_VSS08D_CSR, "0.9V die (int. ADC)", in, -1), + SENSOR(VDD08D_VSS08D_CSR_EXTADC, "0.9V die (ext. ADC)", in, -1), + SENSOR(HOTPOINT_TEMP, "Controller board temp. (hotpoint)", temp, -1), + SENSOR(PHY_POWER_PORT0, "PHY overcurrent", fan, 0), + SENSOR(PHY_POWER_PORT1, "PHY overcurrent", fan, 1), + SENSOR(MUM_VCC, "MUM Vcc", in, -1), + SENSOR(IN_0V9_A, "0.9V phase A supply", in, -1), + SENSOR(IN_I0V9_A, + "0.9V phase A supply current", curr, -1), + SENSOR(VREG_0V9_A_TEMP, + "0.9V phase A regulator temp.", temp, -1), + SENSOR(IN_0V9_B, "0.9V phase B supply", in, -1), + SENSOR(IN_I0V9_B, + "0.9V phase B supply current", curr, -1), + SENSOR(VREG_0V9_B_TEMP, + "0.9V phase B regulator temp.", temp, -1), + SENSOR(CCOM_AVREG_1V2_SUPPLY, + "CCOM 1.2V supply (int. ADC)", in, -1), + SENSOR(CCOM_AVREG_1V2_SUPPLY_EXTADC, + "CCOM 1.2V supply (ext. ADC)", in, -1), + SENSOR(CCOM_AVREG_1V8_SUPPLY, + "CCOM 1.8V supply (int. ADC)", in, -1), + SENSOR(CCOM_AVREG_1V8_SUPPLY_EXTADC, + "CCOM 1.8V supply (ext. ADC)", in, -1), + SENSOR(CONTROLLER_RTS, "CCOM RTS temp.", temp, -1), + SENSOR(CONTROLLER_MASTER_VPTAT, "Master die int. temp.", in, -1), + SENSOR(CONTROLLER_MASTER_INTERNAL_TEMP, + "Master die int. temp.", temp, -1), + SENSOR(CONTROLLER_MASTER_VPTAT_EXTADC, + "Master die int. temp. (ext. ADC)", in, -1), + SENSOR(CONTROLLER_MASTER_INTERNAL_TEMP_EXTADC, + "Master die int. temp. (ext. ADC)", temp, -1), + SENSOR(CONTROLLER_SLAVE_VPTAT, "Slave die int. temp.", in, -1), + SENSOR(CONTROLLER_SLAVE_INTERNAL_TEMP, + "Slave die int. temp.", temp, -1), + SENSOR(CONTROLLER_SLAVE_VPTAT_EXTADC, + "Slave die int. temp. (ext. ADC)", in, -1), + SENSOR(CONTROLLER_SLAVE_INTERNAL_TEMP_EXTADC, + "Slave die int. temp. (ext. 
ADC)", temp, -1), + SENSOR(SODIMM_VOUT, "SODIMM supply.", in, -1), + SENSOR(SODIMM_0_TEMP, "SODIMM 0 temp.", temp, -1), + SENSOR(SODIMM_1_TEMP, "SODIMM 1 temp.", temp, -1), + SENSOR(PHY0_VCC, "PHY0 supply.", in, -1), + SENSOR(PHY1_VCC, "PHY1 supply.", in, -1), + SENSOR(CONTROLLER_TDIODE_TEMP, + "Controller die (TDIODE) temp.", temp, -1), + SENSOR(BOARD_FRONT_TEMP, "Board front temp.", temp, -1), + SENSOR(BOARD_BACK_TEMP, "Board back temp.", temp, -1), + SENSOR(IN_I1V8, "1.8V supply current", curr, -1), + SENSOR(IN_I2V5, "2.5V supply current", curr, -1), + SENSOR(IN_I3V3, "3.3V supply current", curr, -1), + SENSOR(IN_I12V0, "12V supply current", curr, -1), + SENSOR(IN_1V3, "1.3V supply", in, -1), + SENSOR(IN_I1V3, "1.3V supply current", curr, -1), +#undef SENSOR +}; + +static const char *const sensor_status_names[] = { + [MC_CMD_SENSOR_STATE_OK] = "OK", + [MC_CMD_SENSOR_STATE_WARNING] = "Warning", + [MC_CMD_SENSOR_STATE_FATAL] = "Fatal", + [MC_CMD_SENSOR_STATE_BROKEN] = "Device failure", + [MC_CMD_SENSOR_STATE_NO_READING] = "No reading", +}; + +struct efx_mcdi_mon_attribute { + struct device_attribute dev_attr; + unsigned int index; + unsigned int type; + enum hwmon_sensor_types hwmon_type; + unsigned int limit_value; + enum efx_hwmon_attribute hwmon_attribute; + u8 file_index; + bool is_dynamic; +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_HWMON_DEVICE_REGISTER_WITH_INFO) + char name[15]; +#endif +}; + +#ifdef CONFIG_SFC_MCDI_MON + +void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev) +{ + static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, + DEFAULT_RATELIMIT_BURST); + enum hwmon_sensor_types hwmon_type = hwmon_chip; + const char *name = NULL, *state_txt, *unit; + unsigned int type, state, value; + + type = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_MONITOR); + state = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_STATE); + value = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_VALUE); + + /* Deal gracefully with the board having more drivers than we + * know about, but do not expect new sensor states. 
+	 */
+	if (type < ARRAY_SIZE(efx_mcdi_sensor_type)) {
+		name = efx_mcdi_sensor_type[type].label;
+		hwmon_type = efx_mcdi_sensor_type[type].hwmon_type;
+	}
+	if (!name)
+		name = "No sensor name available";
+	EFX_WARN_ON_PARANOID(state >= ARRAY_SIZE(sensor_status_names));
+	state_txt = sensor_status_names[state];
+	EFX_WARN_ON_PARANOID(hwmon_type >= EFX_HWMON_TYPES_COUNT);
+	unit = efx_hwmon_unit[hwmon_type];
+	if (!unit)
+		unit = "";
+
+	switch (state) {
+	case MC_CMD_SENSOR_STATE_OK:
+		if (__ratelimit(&rs))
+			netif_info(efx, hw, efx->net_dev,
+				   "Sensor %d (%s) reports condition '%s' for value %d%s\n",
+				   type, name, state_txt, value, unit);
+		break;
+	case MC_CMD_SENSOR_STATE_WARNING:
+		if (__ratelimit(&rs))
+			netif_warn(efx, hw, efx->net_dev,
+				   "Sensor %d (%s) reports condition '%s' for value %d%s\n",
+				   type, name, state_txt, value, unit);
+		break;
+	default:
+		netif_err(efx, hw, efx->net_dev,
+			  "Sensor %d (%s) reports condition '%s' for value %d%s\n",
+			  type, name, state_txt, value, unit);
+		break;
+	}
+}
+
+enum efx_sensor_limits {
+	EFX_SENSOR_LIMIT_WARNING_LO,
+	EFX_SENSOR_LIMIT_CRITICAL_LO,
+	EFX_SENSOR_LIMIT_FATAL_LO,
+	EFX_SENSOR_LIMIT_WARNING_HI,
+	EFX_SENSOR_LIMIT_CRITICAL_HI,
+	EFX_SENSOR_LIMIT_FATAL_HI,
+	EFX_SENSOR_LIMITS
+};
+
+/**
+ * struct efx_dynamic_sensor_description - dynamic sensor description
+ * @name: name of the sensor
+ * @idx: sensor index in the host driver maintained list
+ * @handle: handle to the sensor
+ * @type: type of sensor
+ * @limits: limits for the sensor reading
+ * @gen_count: generation count corresponding to the description
+ * @entry: an entry in rhashtable of dynamic sensors
+ */
+struct efx_dynamic_sensor_description {
+	char name[MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_NAME_LEN];
+	unsigned int idx;
+	unsigned int handle;
+	unsigned int type;
+	int limits[EFX_SENSOR_LIMITS];
+	unsigned int gen_count;
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_RHASHTABLE_LOOKUP_FAST)
+	struct rhash_head entry;
+#endif
+};
+
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_RHASHTABLE_LOOKUP_FAST)
+static const struct rhashtable_params sensor_entry_params = {
+	.key_len = sizeof(unsigned int),
+	.key_offset = offsetof(struct efx_dynamic_sensor_description, handle),
+	.head_offset = offsetof(struct efx_dynamic_sensor_description, entry),
+};
+#endif
+
+static struct efx_dynamic_sensor_description *
+efx_mcdi_get_dynamic_sensor(struct efx_nic *efx, unsigned int handle)
+{
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_RHASHTABLE_LOOKUP_FAST)
+	struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
+
+	return rhashtable_lookup_fast(&hwmon->sensor_table, &handle,
+				      sensor_entry_params);
+#else
+	return NULL;
+#endif
+}
+
+static void efx_mcdi_handle_dynamic_sensor_state_change(struct efx_nic *efx,
+							efx_qword_t *ev)
+{
+	int count = EFX_QWORD_FIELD(*ev, MCDI_EVENT_CONT);
+	struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
+	struct efx_dynamic_sensor_description *sensor;
+	efx_dword_t *sensor_reading_entry;
+	const char *state_txt;
+	unsigned int handle;
+	int state, value;
+
+	mutex_lock(&hwmon->update_lock);
+	/* Need to receive event with count = 1 first */
+	if (count) {
+		handle = EFX_QWORD_FIELD(*ev, MCDI_EVENT_DATA);
+		sensor = efx_mcdi_get_dynamic_sensor(efx, handle);
+		if (!sensor)
+			goto out_unlock;
+		sensor_reading_entry =
+			MCDI_ARRAY_STRUCT_PTR(((efx_dword_t *)hwmon->dma_buf.addr),
+					      DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES,
+					      sensor->idx);
+		/* Save the sensor handle so the second
+		 * DYNAMIC_SENSOR_STATE_CHANGE event can be matched up.
+		 */
+		hwmon->pend_sensor_state_handle = handle;
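+		/* The first event (CONT set) carries the handle and the new
+		 * state; the value follows in a second event and is written
+		 * into the same cached reading entry.
+		 */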
+		state = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SRC);
+		EFX_WARN_ON_PARANOID(state >= ARRAY_SIZE(sensor_status_names));
+		state_txt = sensor_status_names[state];
+		WARN(1, "%s: sensor %s state changed to %s\n", efx->name,
+		     sensor->name, state_txt);
+
+		MCDI_SET_DWORD(sensor_reading_entry,
+			       DYNAMIC_SENSORS_READING_HANDLE,
+			       handle);
+		MCDI_SET_DWORD(sensor_reading_entry,
+			       DYNAMIC_SENSORS_READING_STATE, state);
+	} else {
+		EFX_WARN_ON_PARANOID(hwmon->pend_sensor_state_handle < 0);
+		handle = hwmon->pend_sensor_state_handle;
+		sensor = efx_mcdi_get_dynamic_sensor(efx, handle);
+		hwmon->pend_sensor_state_handle = -1;
+		if (!sensor)
+			goto out_unlock;
+		sensor_reading_entry =
+			MCDI_ARRAY_STRUCT_PTR(((efx_dword_t *)hwmon->dma_buf.addr),
+					      DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES,
+					      sensor->idx);
+		value = EFX_QWORD_FIELD(*ev, MCDI_EVENT_DATA);
+		MCDI_SET_DWORD(sensor_reading_entry,
+			       DYNAMIC_SENSORS_READING_VALUE, value);
+	}
+out_unlock:
+	mutex_unlock(&hwmon->update_lock);
+}
+
+void efx_mcdi_dynamic_sensor_event(struct efx_nic *efx, efx_qword_t *ev)
+{
+	int code = EFX_QWORD_FIELD(*ev, MCDI_EVENT_CODE);
+
+	switch (code) {
+	case MCDI_EVENT_CODE_DYNAMIC_SENSORS_STATE_CHANGE:
+		efx_mcdi_handle_dynamic_sensor_state_change(efx, ev);
+		return;
+	case MCDI_EVENT_CODE_DYNAMIC_SENSORS_CHANGE:
+		netif_info(efx, drv, efx->net_dev,
+			   "CODE_DYNAMIC_SENSORS_CHANGE event unsupported\n");
+	}
+}
+
+static int
+efx_mcdi_dynamic_sensor_list_reading_update(struct efx_nic *efx,
+					    unsigned int *handle_list,
+					    unsigned int num_handles)
+{
+	MCDI_DECLARE_BUF(outbuf,
+			 MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_LEN(EFX_DYNAMIC_SENSOR_READING_UPDATE_MAX_HANDLES));
+	MCDI_DECLARE_BUF(inbuf,
+			 MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_LEN(EFX_DYNAMIC_SENSOR_READING_UPDATE_MAX_HANDLES));
+	struct efx_dynamic_sensor_description *sensor = NULL;
+	struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
+	efx_dword_t *sensor_reading_entry;
+	efx_dword_t *sensor_out_entry;
+	unsigned int handle;
+	size_t outlen;
+	int i, rc = 0;
+
+	for (i = 0; i < num_handles; i++)
+		MCDI_SET_ARRAY_DWORD(inbuf,
+				     DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES, i,
+				     handle_list[i]);
+	rc = efx_mcdi_rpc(efx, MC_CMD_DYNAMIC_SENSORS_GET_READINGS, inbuf,
+			  MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_LEN(i),
+			  outbuf, sizeof(outbuf), &outlen);
+	if (rc)
+		return rc;
+	/* outlen determines the number of handles returned if any
+	 * handles were dropped by the FW
+	 */
+	if (outlen % MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_LEN) {
+		WARN_ON(1);
+		return -EINVAL;
+	}
+	i = 0;
+	while (outlen) {
+		sensor_out_entry =
+			MCDI_ARRAY_STRUCT_PTR(outbuf,
+					      DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES,
+					      i);
+		handle = MCDI_DWORD(sensor_out_entry,
+				    DYNAMIC_SENSORS_READING_HANDLE);
+		sensor = efx_mcdi_get_dynamic_sensor(efx, handle);
+		/* check if sensor was dropped */
+		if (!sensor) {
+			i++;
+			outlen -= MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_LEN;
+			continue;
+		}
+		sensor_reading_entry =
+			MCDI_ARRAY_STRUCT_PTR(((efx_dword_t *)hwmon->dma_buf.addr),
+					      DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES,
+					      sensor->idx);
+		MCDI_SET_DWORD(sensor_reading_entry,
+			       DYNAMIC_SENSORS_READING_HANDLE, handle);
+		MCDI_SET_DWORD(sensor_reading_entry,
+			       DYNAMIC_SENSORS_READING_VALUE,
+			       (MCDI_DWORD(sensor_out_entry,
+					   DYNAMIC_SENSORS_READING_VALUE)));
+		MCDI_SET_DWORD(sensor_reading_entry,
+			       DYNAMIC_SENSORS_READING_STATE,
+			       (MCDI_DWORD(sensor_out_entry,
+					   DYNAMIC_SENSORS_READING_STATE)));
+		pci_dbg(efx->pci_dev,
+			"Reading sensor, handle %u, value %u, state %u\n",
+			handle,
+			MCDI_DWORD(sensor_out_entry,
+				   DYNAMIC_SENSORS_READING_VALUE),
+			MCDI_DWORD(sensor_out_entry,
+				   DYNAMIC_SENSORS_READING_STATE));
+
+		i++;
+		outlen -= MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_LEN;
+	}
+	if (rc == 0)
+		hwmon->last_update = jiffies;
+
+	return rc;
+}
+
+static int efx_mcdi_dynamic_sensor_reading_update(struct efx_nic *efx)
+{
+	struct efx_dynamic_sensor_description *sensor;
+	/* limiting maximum sensors per read to 4 to avoid
+	 * -Werror=frame-larger-than=
+	 */
+	unsigned int handles[EFX_DYNAMIC_SENSOR_INFO_READ_MAX_HANDLES];
+	struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
+	unsigned int j, i = 0;
+	int rc = 0;
+
+	sensor = (struct efx_dynamic_sensor_description *)hwmon->sensor_list;
+	for (j = 0; j < hwmon->n_dynamic_sensors; j++) {
+		handles[i] = sensor[j].handle;
+		i++;
+		if ((i == EFX_DYNAMIC_SENSOR_INFO_READ_MAX_HANDLES) ||
+		    (j == (hwmon->n_dynamic_sensors - 1))) {
+			rc = efx_mcdi_dynamic_sensor_list_reading_update(efx,
+									 handles,
+									 i);
+			if (!rc)
+				i = 0;
+			else
+				break;
+		}
+	}
+
+	return rc;
+}
+
+static void
+efx_mcdi_mon_add_attr(struct efx_nic *efx,
+		      unsigned int index, unsigned int type,
+		      unsigned int limit_value, u8 file_index,
+		      enum efx_hwmon_attribute attribute,
+		      bool is_dynamic)
+{
+	struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
+	struct efx_mcdi_mon_attribute *attr = &hwmon->attrs[hwmon->n_attrs];
+
+	attr->index = index;
+	attr->type = type;
+
+	if (is_dynamic) {
+		/* Conversion between FW types and kernel types */
+		if (type == 0)
+			attr->hwmon_type = hwmon_in;
+		else if (type == 1)
+			attr->hwmon_type = hwmon_curr;
+		else if (type == 2)
+			attr->hwmon_type = hwmon_power;
+		else if (type == 3)
+			attr->hwmon_type = hwmon_temp;
+		else if (type == 4)
+			attr->hwmon_type = hwmon_fan;
+	} else {
+		if (type < ARRAY_SIZE(efx_mcdi_sensor_type))
+			attr->hwmon_type =
+				efx_mcdi_sensor_type[type].hwmon_type;
+		else
+			attr->hwmon_type = hwmon_chip;
+	}
+
+	attr->limit_value = limit_value;
+	attr->file_index = file_index;
+	attr->hwmon_attribute = attribute;
+	attr->is_dynamic = is_dynamic;
+	++hwmon->n_attrs;
+}
+
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_RHASHTABLE_LOOKUP_FAST)
+static void efx_mcdi_add_dynamic_sensor(struct efx_nic *efx,
+					unsigned int handle,
+					unsigned int idx)
+{
+	struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
+	struct efx_dynamic_sensor_description *sensor;
+
+	sensor = (struct efx_dynamic_sensor_description *)hwmon->sensor_list;
+	sensor += idx;
+	/* sensor->idx needs to be stored as it is used to access the
+	 * saved sensor reading data
+	 */
+	sensor->idx = idx;
+	sensor->handle = handle;
+	rhashtable_lookup_insert_fast(&hwmon->sensor_table,
+				      &sensor->entry,
+				      sensor_entry_params);
+}
+
+static void efx_mcdi_remove_dynamic_sensors(struct efx_nic *efx)
+{
+	struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
+	struct efx_dynamic_sensor_description *sensor;
+	int i = 0;
+
+	sensor = (struct efx_dynamic_sensor_description *)hwmon->sensor_list;
+	for (i = 0; i < hwmon->n_dynamic_sensors; i++) {
+		rhashtable_remove_fast(&hwmon->sensor_table,
+				       &sensor[i].entry,
+				       sensor_entry_params);
+	}
+}
+
+/* This function is assumed to be called only after @has_dynamic_sensors
+ * has returned true or a DYNAMIC_SENSOR_LIST event has been received
+ */
+static int efx_mcdi_read_dynamic_sensor_list(struct efx_nic *efx)
+{
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_DYNAMIC_SENSORS_LIST_OUT_LENMAX);
+	struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
+	unsigned int n_sensors;
+	unsigned int gen_count;
+	void *new_sensor_list;
+	size_t outlen;
+	int rc, i;
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_DYNAMIC_SENSORS_LIST, NULL, 0, outbuf,
+			  sizeof(outbuf), &outlen);
+	if (rc)
+		return rc;
+
+	gen_count = MCDI_DWORD(outbuf, DYNAMIC_SENSORS_LIST_OUT_GENERATION);
+	/* check if generation count changed */
+	if (gen_count == hwmon->generation_count)
+		return 0;
+	hwmon->generation_count = gen_count;
+
+	n_sensors = MCDI_DWORD(outbuf, DYNAMIC_SENSORS_LIST_OUT_COUNT);
+	if (outlen < MC_CMD_DYNAMIC_SENSORS_LIST_OUT_LEN(n_sensors)) {
+		WARN_ON(1);
+		return -EINVAL;
+	}
+	mutex_lock(&hwmon->update_lock);
+	efx_mcdi_remove_dynamic_sensors(efx);
+	hwmon->n_dynamic_sensors = n_sensors;
+	new_sensor_list = krealloc(hwmon->sensor_list,
+				   (n_sensors *
+				    sizeof(struct efx_dynamic_sensor_description)),
+				   GFP_KERNEL);
+	if (!new_sensor_list) {
+		mutex_unlock(&hwmon->update_lock);
+		return -ENOMEM;
+	}
+	hwmon->sensor_list = new_sensor_list;
+
+	for (i = 0; i < n_sensors; i++) {
+		unsigned int handle;
+
+		handle = MCDI_ARRAY_DWORD(outbuf,
+					  DYNAMIC_SENSORS_LIST_OUT_HANDLES, i);
+		efx_mcdi_add_dynamic_sensor(efx, handle, i);
+	}
+
+	mutex_unlock(&hwmon->update_lock);
+
+	return 0;
+}
+#endif
+
+static int efx_mcdi_read_dynamic_sensor_list_info(struct efx_nic *efx,
+						  unsigned int *handle_list,
+						  unsigned int num_handles)
+{
+	MCDI_DECLARE_BUF(outbuf,
+			 MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_LEN(EFX_DYNAMIC_SENSOR_INFO_READ_MAX_HANDLES));
+	MCDI_DECLARE_BUF(inbuf,
+			 MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_LEN(EFX_DYNAMIC_SENSOR_INFO_READ_MAX_HANDLES));
+	struct efx_dynamic_sensor_description *sensor = NULL;
+	static int type_idx[EFX_HWMON_TYPES_COUNT] = {0};
+	unsigned int handle;
+	int i, rc = 0;
+	size_t outlen;
+
+	for (i = 0; i < num_handles; i++) {
+		MCDI_SET_ARRAY_DWORD(inbuf,
+				     DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_HANDLES, i,
+				     handle_list[i]);
+	}
+	rc = efx_mcdi_rpc(efx, MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS, inbuf,
+			  MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_LEN(i),
+			  outbuf, sizeof(outbuf), &outlen);
+	if (rc)
+		return rc;
+	i = 0;
+	/*
+	 * outlen determines the number of handles returned if any handles
+	 * are dropped by FW
+	 */
+	if (outlen % MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_SENSORS_LEN) {
+		WARN_ON(1);
+		return -EINVAL;
+	}
+	while (outlen) {
+		efx_dword_t *str_ptr = MCDI_ARRAY_STRUCT_PTR(outbuf,
+				DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_SENSORS,
+				i);
+
+		handle = MCDI_DWORD(str_ptr, DYNAMIC_SENSORS_DESCRIPTION_HANDLE);
+		sensor = efx_mcdi_get_dynamic_sensor(efx, handle);
+		/* check if sensor was dropped */
+		if (!sensor) {
+			i++;
+			outlen -= MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_SENSORS_LEN;
+			continue;
+		}
+		memcpy(sensor->name,
+		       MCDI_PTR(str_ptr, DYNAMIC_SENSORS_DESCRIPTION_NAME),
+		       MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_NAME_LEN);
+		sensor->type = MCDI_DWORD(str_ptr, DYNAMIC_SENSORS_DESCRIPTION_TYPE);
+
+		sensor->limits[EFX_SENSOR_LIMIT_WARNING_LO] =
+			MCDI_DWORD(str_ptr, DYN_SENSOR_LIMIT_LO_WARNING);
+		efx_mcdi_mon_add_attr(efx, sensor->idx,
+				      sensor->type,
+				      sensor->limits[EFX_SENSOR_LIMIT_WARNING_LO],
+				      type_idx[sensor->type],
+				      EFX_HWMON_MIN, true);
+
+		sensor->limits[EFX_SENSOR_LIMIT_WARNING_HI] =
+			MCDI_DWORD(str_ptr, DYN_SENSOR_LIMIT_HI_WARNING);
+		efx_mcdi_mon_add_attr(efx, sensor->idx,
+				      sensor->type,
+				      sensor->limits[EFX_SENSOR_LIMIT_WARNING_HI],
+				      type_idx[sensor->type],
+				      EFX_HWMON_MAX, true);
+
+		pci_dbg(efx->pci_dev,
+			"Adding sensor %s, type %d, limits(%u, %u)\n",
+			sensor->name, sensor->type,
+			sensor->limits[EFX_SENSOR_LIMIT_WARNING_LO],
+			sensor->limits[EFX_SENSOR_LIMIT_WARNING_HI]);
+
+		efx_mcdi_mon_add_attr(efx, sensor->idx, sensor->type, 0,
+				      type_idx[sensor->type], EFX_HWMON_LABEL,
+				      true);
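+		/* Each dynamic sensor thus gets four attributes: min, max,
+		 * label and input; type_idx counts channels per hwmon type
+		 * so that file indices stay contiguous within each type.
+		 */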
+		efx_mcdi_mon_add_attr(efx, sensor->idx, sensor->type, 0,
+				      type_idx[sensor->type], EFX_HWMON_INPUT,
+				      true);
+		type_idx[sensor->type]++;
+		i++;
+		outlen -= MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_SENSORS_LEN;
+	}
+
+	return 0;
+}
+
+/*
+ * This function is assumed to be called only after @has_dynamic_sensors
+ * has returned true or a DYNAMIC_SENSOR_LIST event has been received
+ */
+static int efx_mcdi_read_dynamic_sensor_info(struct efx_nic *efx)
+{
+	/*
+	 * limiting maximum sensors per read to 4 to avoid
+	 * -Werror=frame-larger-than=
+	 */
+	unsigned int handles[EFX_DYNAMIC_SENSOR_INFO_READ_MAX_HANDLES];
+	struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
+	struct efx_dynamic_sensor_description *sensor;
+	unsigned int j, i = 0;
+	int rc = 0;
+
+	sensor = (struct efx_dynamic_sensor_description *)hwmon->sensor_list;
+	mutex_lock(&hwmon->update_lock);
+	for (j = 0; j < hwmon->n_dynamic_sensors; j++) {
+		handles[i] = sensor[j].handle;
+		i++;
+		if ((i == EFX_DYNAMIC_SENSOR_INFO_READ_MAX_HANDLES) ||
+		    (j == (hwmon->n_dynamic_sensors - 1))) {
+			rc = efx_mcdi_read_dynamic_sensor_list_info(efx,
+								    handles, i);
+			if (!rc)
+				i = 0;
+			else
+				break;
+		}
+	}
+	mutex_unlock(&hwmon->update_lock);
+
+	return rc;
+}
+
+static int efx_mcdi_read_sensor_info(struct efx_nic *efx, unsigned int n_pages)
+{
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_SENSOR_INFO_OUT_LENMAX);
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_SENSOR_INFO_EXT_IN_LEN);
+	u8 n_temp, n_cool, n_in, n_curr, n_power;
+	u32 type, mask = 0, page = 0;
+	size_t outlen;
+	int i, j, rc;
+
+	n_temp = n_curr = n_cool = n_in = n_power = 0;
+	for (i = 0, j = -1, type = -1; ; i++) {
+		enum hwmon_sensor_types hwmon_type;
+		u16 min1, max1, min2, max2;
+		u8 file_index = 0;
+
+		/* Find next sensor type or exit if there is none */
+		do {
+			type++;
+
+			if ((type % 32) == 0) {
+				page = type / 32;
+				j = -1;
+				if (page == n_pages)
+					return 0;
+
+				MCDI_SET_DWORD(inbuf, SENSOR_INFO_EXT_IN_PAGE,
+					       page);
+				rc = efx_mcdi_rpc(efx, MC_CMD_SENSOR_INFO,
+						  inbuf, sizeof(inbuf),
+						  outbuf, sizeof(outbuf),
+						  &outlen);
+				if (rc)
+					return rc;
+				if (outlen < MC_CMD_SENSOR_INFO_OUT_LENMIN)
+					return -EIO;
+
+				mask = (MCDI_DWORD(outbuf,
+						   SENSOR_INFO_OUT_MASK) &
+					~(1 << MC_CMD_SENSOR_PAGE0_NEXT));
+
+				/* Check again for short response */
+				if (outlen <
+				    MC_CMD_SENSOR_INFO_OUT_LEN(hweight32(mask)))
+					return -EIO;
+			}
+		} while (!(mask & (1 << type % 32)));
+		j++;
+
+		if (type < ARRAY_SIZE(efx_mcdi_sensor_type)) {
+			hwmon_type = efx_mcdi_sensor_type[type].hwmon_type;
+
+			/* Skip sensors specific to a different port */
+			if (hwmon_type != hwmon_chip &&
+			    efx_mcdi_sensor_type[type].port >= 0 &&
+			    efx_mcdi_sensor_type[type].port !=
+			    efx_port_num(efx))
+				continue;
+		} else {
+			hwmon_type = hwmon_chip;
+		}
+
+		switch (hwmon_type) {
+		case hwmon_temp:
+			file_index = ++n_temp; /* 1-based */
+			break;
+		case hwmon_fan:
+			file_index = ++n_cool; /* 1-based */
+			break;
+		default:
+			file_index = n_in++; /* 0-based */
+			break;
+		case hwmon_curr:
+			file_index = ++n_curr; /* 1-based */
+			break;
+		case hwmon_power:
+			file_index = ++n_power; /* 1-based */
+			break;
+		}
+
+		min1 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
+					SENSOR_INFO_ENTRY, j, MIN1);
+		max1 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
+					SENSOR_INFO_ENTRY, j, MAX1);
+		min2 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
+					SENSOR_INFO_ENTRY, j, MIN2);
+		max2 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
+					SENSOR_INFO_ENTRY, j, MAX2);
+
+		efx_mcdi_mon_add_attr(efx, i, type, 0, file_index,
+				      EFX_HWMON_INPUT, false);
+		if (min1 != max1) {
+			if (hwmon_type != hwmon_power) {
+				efx_mcdi_mon_add_attr(efx, i, type, min1,
+						      file_index,
+						      EFX_HWMON_MIN, false);
+			}
+
+			efx_mcdi_mon_add_attr(efx, i, type, max1, file_index,
+					      EFX_HWMON_MAX, false);
+		}
+		if (min2 != max2) {
+			/* Assume max2 is critical value.
+			 * But we have no good way to expose min2.
+			 */
+			efx_mcdi_mon_add_attr(efx, i, type, max2,
+					      file_index,
+					      EFX_HWMON_CRIT, false);
+		}
+
+		efx_mcdi_mon_add_attr(efx, i, type, 0, file_index,
+				      EFX_HWMON_ALARM, false);
+
+		if (type < ARRAY_SIZE(efx_mcdi_sensor_type) &&
+		    efx_mcdi_sensor_type[type].label) {
+			efx_mcdi_mon_add_attr(efx, i, type, 0, file_index,
+					      EFX_HWMON_LABEL, false);
+		}
+	}
+
+	return 0;
+}
+
+static int efx_mcdi_get_num_sensors(struct efx_nic *efx,
+				    unsigned int *n_sensors,
+				    unsigned int *n_pages)
+{
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_SENSOR_INFO_OUT_LENMAX);
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_SENSOR_INFO_EXT_IN_LEN);
+	unsigned int page = 0;
+	size_t outlen;
+	u32 mask = 0;
+	int rc;
+
+	do {
+		MCDI_SET_DWORD(inbuf, SENSOR_INFO_EXT_IN_PAGE, page);
+
+		rc = efx_mcdi_rpc(efx, MC_CMD_SENSOR_INFO, inbuf, sizeof(inbuf),
+				  outbuf, sizeof(outbuf), &outlen);
+		if (rc)
+			return rc;
+		if (outlen < MC_CMD_SENSOR_INFO_OUT_LENMIN)
+			return -EIO;
+
+		mask = MCDI_DWORD(outbuf, SENSOR_INFO_OUT_MASK);
+		*n_sensors += hweight32(mask &
+					~(1 << MC_CMD_SENSOR_PAGE0_NEXT));
+		++page;
+	} while (mask & (1 << MC_CMD_SENSOR_PAGE0_NEXT));
+	*n_pages = page;
+
+	return 0;
+}
+
+static int efx_mcdi_mon_update(struct efx_nic *efx)
+{
+	struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_READ_SENSORS_EXT_IN_LEN);
+	int rc;
+
+	MCDI_SET_QWORD(inbuf, READ_SENSORS_EXT_IN_DMA_ADDR,
+		       hwmon->dma_buf.dma_addr);
+	MCDI_SET_DWORD(inbuf, READ_SENSORS_EXT_IN_LENGTH, hwmon->dma_buf.len);
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_READ_SENSORS,
+			  inbuf, sizeof(inbuf), NULL, 0, NULL);
+	if (rc == 0)
+		hwmon->last_update = jiffies;
+	return rc;
+}
+
+static int efx_mcdi_mon_get_entry(struct device *dev, unsigned int index,
+				  efx_dword_t *entry)
+{
+	struct efx_nic *efx = dev_get_drvdata(dev);
+	struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
+	int rc;
+
+	BUILD_BUG_ON(MC_CMD_READ_SENSORS_OUT_LEN != 0);
+
+	mutex_lock(&hwmon->update_lock);
+
+	/* Use cached value if last update was < 1 s ago */
+	if (time_before(jiffies, hwmon->last_update + HZ))
+		rc = 0;
+	else
+		rc = efx_mcdi_mon_update(efx);
+
+	/* Copy out the requested entry */
+	*entry = ((efx_dword_t *)hwmon->dma_buf.addr)[index];
+
+	mutex_unlock(&hwmon->update_lock);
+
+	return rc;
+}
+
+static int efx_mcdi_mon_get_dynamic_entry(struct device *dev,
+					   unsigned int index,
+					   efx_dword_t **entry)
+{
+	struct efx_nic *efx = dev_get_drvdata(dev);
+	struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
+	int rc;
+
+	BUILD_BUG_ON(MC_CMD_READ_SENSORS_OUT_LEN != 0);
+
+	mutex_lock(&hwmon->update_lock);
+
+	/* Use cached value if last update was < 1 s ago */
+	if (time_before(jiffies, hwmon->last_update + HZ))
+		rc = 0;
+	else
+		rc = efx_mcdi_dynamic_sensor_reading_update(efx);
+
+	/*
+	 * Copy out the requested entry. Entries for dynamic sensors are
+	 * composed of three efx_dword_t values: handle, value, state.
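+	 * E.g. (illustrative): the entry for the sensor at idx 2 occupies
+	 * dwords 6..8 of the DMA buffer: handle, then value, then state.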
+ */ + *entry = &((efx_dword_t *)hwmon->dma_buf.addr)[index * 3]; + + mutex_unlock(&hwmon->update_lock); + + return rc; +} + +static int efx_mcdi_mon_get_value(struct device *dev, unsigned int index, + enum hwmon_sensor_types hwmon_type, + unsigned int *value) +{ + struct efx_nic *efx = dev_get_drvdata(dev); + efx_dword_t *dyn_entry; + unsigned int state; + efx_dword_t entry; + int rc; + + if (efx_nic_has_dynamic_sensors(efx)) { + rc = efx_mcdi_mon_get_dynamic_entry(dev, index, &dyn_entry); + if (rc) + return rc; + + state = MCDI_DWORD(dyn_entry, DYNAMIC_SENSORS_READING_STATE); + if (state == MC_CMD_SENSOR_STATE_NO_READING) + return -EBUSY; + + *value = MCDI_DWORD(dyn_entry, DYNAMIC_SENSORS_READING_VALUE); + } else { + rc = efx_mcdi_mon_get_entry(dev, index, &entry); + if (rc) + return rc; + state = EFX_DWORD_FIELD(entry, + MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE); + if (state == MC_CMD_SENSOR_STATE_NO_READING) + return -EBUSY; + *value = EFX_DWORD_FIELD(entry, + MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE); + } + + switch (hwmon_type) { + case hwmon_temp: + /* Convert temperature from degrees to milli-degrees Celsius */ + *value *= 1000; + break; + case hwmon_power: + /* Convert power from watts to microwatts */ + *value *= 1000000; + break; + default: + /* No conversion needed */ + break; + } + return 0; +} + +static unsigned int +efx_mcdi_mon_get_limit(struct efx_mcdi_mon_attribute *mon_attr) +{ + unsigned int value; + + value = mon_attr->limit_value; + + switch (mon_attr->hwmon_type) { + case hwmon_temp: + /* Convert temperature from degrees to milli-degrees Celsius */ + value *= 1000; + break; + case hwmon_power: + /* Convert power from watts to microwatts */ + value *= 1000000; + break; + default: + /* No conversion needed */ + break; + } + return value; +} + +static int efx_mcdi_mon_get_state(struct device *dev, unsigned int index, + unsigned int *value) +{ + struct efx_nic *efx = dev_get_drvdata(dev); + efx_dword_t entry, *dyn_entry; + int rc; + + if (efx_nic_has_dynamic_sensors(efx)) { + rc = efx_mcdi_mon_get_dynamic_entry(dev, index, &dyn_entry); + if (rc) + return rc; + rc = MCDI_DWORD(dyn_entry, DYNAMIC_SENSORS_READING_STATE); + } else { + rc = efx_mcdi_mon_get_entry(dev, index, &entry); + if (rc) + return rc; + rc = EFX_DWORD_FIELD(entry, + MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE); + } + return rc; +} + +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_HWMON_DEVICE_REGISTER_WITH_INFO) +static ssize_t efx_mcdi_mon_show_name(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%s\n", KBUILD_MODNAME); +} + +static ssize_t efx_mcdi_mon_show_value(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct efx_mcdi_mon_attribute *mon_attr = + container_of(attr, struct efx_mcdi_mon_attribute, dev_attr); + unsigned int value = 0; + int rc = efx_mcdi_mon_get_value(dev, mon_attr->index, + mon_attr->hwmon_type, &value); + + if (rc) + return rc; + return scnprintf(buf, PAGE_SIZE, "%u\n", value); +} + +static ssize_t efx_mcdi_mon_show_limit(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct efx_mcdi_mon_attribute *mon_attr = + container_of(attr, struct efx_mcdi_mon_attribute, dev_attr); + unsigned int value = efx_mcdi_mon_get_limit(mon_attr); + + return scnprintf(buf, PAGE_SIZE, "%u\n", value); +} + +static ssize_t efx_mcdi_mon_show_alarm(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct efx_mcdi_mon_attribute *mon_attr = + container_of(attr, struct 
efx_mcdi_mon_attribute, dev_attr);
+	unsigned int state = 0;
+	int rc;
+
+	rc = efx_mcdi_mon_get_state(dev, mon_attr->index, &state);
+	if (rc)
+		return rc;
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", state != MC_CMD_SENSOR_STATE_OK);
+}
+
+static ssize_t efx_mcdi_mon_show_label(struct device *dev,
+				       struct device_attribute *attr,
+				       char *buf)
+{
+	struct efx_mcdi_mon_attribute *mon_attr =
+		container_of(attr, struct efx_mcdi_mon_attribute, dev_attr);
+	return scnprintf(buf, PAGE_SIZE, "%s\n",
+			 efx_mcdi_sensor_type[mon_attr->type].label);
+}
+
+static int efx_mcdi_mon_create_files(struct device *dev,
+				     struct efx_mcdi_mon *hwmon)
+{
+	int rc = 0, i;
+
+	for (i = 0; i < hwmon->n_attrs; i++) {
+		struct efx_mcdi_mon_attribute *attr = &hwmon->attrs[i];
+		const char *hwmon_prefix;
+
+		sysfs_attr_init(&attr->dev_attr.attr);
+		attr->dev_attr.attr.mode = S_IRUGO;
+
+		switch (attr->hwmon_type) {
+		case hwmon_temp:
+			hwmon_prefix = "temp";
+			break;
+		case hwmon_fan:
+			/* This is likely to be a heatsink, but there
+			 * is no convention for representing cooling
+			 * devices other than fans.
+			 */
+			hwmon_prefix = "fan";
+			break;
+		case hwmon_in:
+			hwmon_prefix = "in";
+			break;
+		case hwmon_curr:
+			hwmon_prefix = "curr";
+			break;
+		case hwmon_power:
+			hwmon_prefix = "power";
+			break;
+		case hwmon_chip:
+			continue;
+		default:
+			dev_warn(dev, "Unknown HW monitor type %d\n",
+				 attr->hwmon_type);
+			continue;
+		}
+
+		switch (attr->hwmon_attribute) {
+		case EFX_HWMON_INPUT:
+			attr->dev_attr.show = efx_mcdi_mon_show_value;
+			snprintf(attr->name, sizeof(attr->name), "%s%hhu_input",
+				 hwmon_prefix, attr->file_index);
+			break;
+		case EFX_HWMON_MIN:
+			attr->dev_attr.show = efx_mcdi_mon_show_limit;
+			snprintf(attr->name, sizeof(attr->name), "%s%hhu_min",
+				 hwmon_prefix, attr->file_index);
+			break;
+		case EFX_HWMON_MAX:
+			attr->dev_attr.show = efx_mcdi_mon_show_limit;
+			snprintf(attr->name, sizeof(attr->name), "%s%hhu_max",
+				 hwmon_prefix, attr->file_index);
+			break;
+		case EFX_HWMON_CRIT:
+			attr->dev_attr.show = efx_mcdi_mon_show_limit;
+			snprintf(attr->name, sizeof(attr->name), "%s%hhu_crit",
+				 hwmon_prefix, attr->file_index);
+			break;
+		case EFX_HWMON_ALARM:
+			attr->dev_attr.show = efx_mcdi_mon_show_alarm;
+			snprintf(attr->name, sizeof(attr->name), "%s%hhu_alarm",
+				 hwmon_prefix, attr->file_index);
+			break;
+		case EFX_HWMON_LABEL:
+			attr->dev_attr.show = efx_mcdi_mon_show_label;
+			snprintf(attr->name, sizeof(attr->name), "%s%hhu_label",
+				 hwmon_prefix, attr->file_index);
+			break;
+		case EFX_HWMON_NAME:
+			attr->dev_attr.show = efx_mcdi_mon_show_name;
+			snprintf(attr->name, sizeof(attr->name), "name");
+			break;
+		default:
+			dev_warn(dev, "Unknown HW monitor attribute %d\n",
+				 attr->hwmon_attribute);
+			continue;
+		}
+		attr->dev_attr.attr.name = attr->name;
+		rc = device_create_file(dev, &attr->dev_attr);
+		if (rc) {
+			attr->name[0] = '\0';
+			break;
+		}
+	}
+	return rc;
+}
+
+static void efx_mcdi_mon_remove_files(struct device *dev,
+				      struct efx_mcdi_mon *hwmon)
+{
+	unsigned int i;
+
+	for (i = 0; (i < hwmon->n_attrs) && hwmon->attrs[i].name[0]; i++)
+		device_remove_file(dev, &hwmon->attrs[i].dev_attr);
+}
+
+#define efx_hwmon_chip_info_p NULL
+#else
+/* These defines must match the efx_attrib_map array below.
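+ * Rows are indexed by hwmon sensor type and columns by
+ * enum efx_hwmon_attribute; a zero entry means the attribute is not
+ * exposed for that sensor type.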
+ */
+#define EFX_HWMON_TEMP_CONFIG (HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX | \
+			       HWMON_T_CRIT | HWMON_T_ALARM | HWMON_T_LABEL)
+#define EFX_HWMON_IN_CONFIG (HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | \
+			     HWMON_I_CRIT | HWMON_I_ALARM | HWMON_I_LABEL)
+#define EFX_HWMON_CURR_CONFIG (HWMON_C_INPUT | HWMON_C_MIN | HWMON_C_MAX | \
+			       HWMON_C_CRIT | HWMON_C_ALARM | HWMON_C_LABEL)
+#define EFX_HWMON_POWER_CONFIG (HWMON_P_INPUT | HWMON_P_LABEL)
+#define EFX_HWMON_FAN_CONFIG (HWMON_F_ALARM | HWMON_F_LABEL)
+
+static const u32
+efx_attrib_map[EFX_HWMON_TYPES_COUNT][EFX_HWMON_ATTRIBUTE_COUNT] = {
+	[hwmon_temp] = { HWMON_T_INPUT, HWMON_T_MIN, HWMON_T_MAX, HWMON_T_CRIT,
+			 HWMON_T_ALARM, HWMON_T_LABEL, 0 },
+	[hwmon_in] = { HWMON_I_INPUT, HWMON_I_MIN, HWMON_I_MAX, HWMON_I_CRIT,
+		       HWMON_I_ALARM, HWMON_I_LABEL, 0 },
+	[hwmon_curr] = { HWMON_C_INPUT, HWMON_C_MIN, HWMON_C_MAX, HWMON_C_CRIT,
+			 HWMON_C_ALARM, HWMON_C_LABEL, 0 },
+	[hwmon_power] = { HWMON_P_INPUT, 0, 0, 0, 0, HWMON_P_LABEL, 0 },
+	[hwmon_fan] = { 0, 0, 0, 0, HWMON_F_ALARM, HWMON_F_LABEL, 0 },
+};
+
+static int efx_mcdi_mon_create_files(struct device *dev,
+				     struct efx_mcdi_mon *hwmon)
+{
+	return 0;
+}
+
+static void efx_mcdi_mon_remove_files(struct device *dev,
+				      struct efx_mcdi_mon *hwmon)
+{
+}
+
+static struct efx_mcdi_mon_attribute *
+efx_hwmon_get_attribute(const struct efx_nic *efx,
+			enum hwmon_sensor_types type, u32 attr, int channel)
+{
+	struct efx_mcdi_mon *hwmon = efx_mcdi_mon((struct efx_nic *) efx);
+	struct efx_mcdi_mon_attribute *attribute;
+	enum efx_hwmon_attribute hwmon_attribute;
+	int i;
+
+	if (type >= EFX_HWMON_TYPES_COUNT)
+		return NULL;
+
+	for (i = 0; i < EFX_HWMON_ATTRIBUTE_COUNT; i++) {
+		if (efx_attrib_map[type][i] == BIT(attr)) {
+			hwmon_attribute = i;
+			break;
+		}
+	}
+	if (i == EFX_HWMON_ATTRIBUTE_COUNT)
+		return NULL;
+
+	for (i = 0; i < hwmon->n_attrs; i++) {
+		attribute = &hwmon->attrs[i];
+		if ((attribute->hwmon_type == type) &&
+		    (attribute->hwmon_attribute == hwmon_attribute) &&
+		    (attribute->file_index == channel))
+			return attribute;
+	}
+	return NULL;
+}
+
+static int efx_hwmon_read(struct device *dev,
+			  enum hwmon_sensor_types type,
+			  u32 attr, int channel, long *val)
+{
+	const struct efx_nic *efx = dev_get_drvdata(dev);
+	struct efx_mcdi_mon_attribute *mon_attr =
+		efx_hwmon_get_attribute(efx, type, attr, channel);
+	int rc;
+	unsigned int value = 0;
+
+	*val = 0;
+	if (!mon_attr)
+		return -EOPNOTSUPP;
+
+	switch (mon_attr->hwmon_attribute) {
+	case EFX_HWMON_INPUT:
+		rc = efx_mcdi_mon_get_value(dev, mon_attr->index,
+					    mon_attr->hwmon_type, &value);
+		if (rc < 0)
+			return rc;
+		break;
+	case EFX_HWMON_MIN:
+	case EFX_HWMON_MAX:
+	case EFX_HWMON_CRIT:
+		value = efx_mcdi_mon_get_limit(mon_attr);
+		break;
+	case EFX_HWMON_ALARM:
+		rc = efx_mcdi_mon_get_state(dev, mon_attr->index, &value);
+		if (rc)
+			return rc;
+		value = (value != MC_CMD_SENSOR_STATE_OK);
+		break;
+#ifndef EFX_HAVE_HWMON_READ_STRING
+	case EFX_HWMON_LABEL:
+		return -EOPNOTSUPP;
+#endif
+	default:
+		WARN_ONCE(1, "Unhandled HW sensor read\n");
+		return -EOPNOTSUPP;
+	}
+	*val = value;
+	return 0;
+}
+
+#ifdef EFX_HAVE_HWMON_READ_STRING
+static int efx_hwmon_read_string(struct device *dev,
+				 enum hwmon_sensor_types type,
+				 u32 attr, int channel,
+#ifdef EFX_HAVE_HWMON_READ_STRING_CONST
+				 const char **str
+#else
+				 char **str
+#endif
+				 )
+{
+	const struct efx_nic *efx = dev_get_drvdata(dev);
+	struct efx_nic *efx_temp = dev_get_drvdata(dev);
+	struct efx_dynamic_sensor_description *sensor;
+	struct efx_mcdi_mon_attribute *mon_attr =
efx_hwmon_get_attribute(efx, type, attr, channel); + + if (!mon_attr) + return 1; + + if (mon_attr->is_dynamic) { + sensor = efx_mcdi_get_dynamic_sensor(efx_temp, mon_attr->index + 1); + if (!sensor) { + WARN(1, "%s: sensor not found\n", __func__); + *str = NULL; + } else { + *str = sensor->name; + } + } else { + *str = (char *) efx_mcdi_sensor_type[mon_attr->type].label; + } + return 0; +} +#endif + +static umode_t efx_hwmon_is_visible(const void *data, + enum hwmon_sensor_types type, + u32 attr, int channel) +{ + const struct efx_nic *efx = data; + struct efx_mcdi_mon_attribute *mon_attr = efx_hwmon_get_attribute( + efx, type, attr, channel); + + if (mon_attr) + return S_IRUGO; + else + return 0; +} + +static const u32 efx_temp_config[] = { + EFX_HWMON_TEMP_CONFIG, + EFX_HWMON_TEMP_CONFIG, + EFX_HWMON_TEMP_CONFIG, + EFX_HWMON_TEMP_CONFIG, + EFX_HWMON_TEMP_CONFIG, + EFX_HWMON_TEMP_CONFIG, + EFX_HWMON_TEMP_CONFIG, + EFX_HWMON_TEMP_CONFIG, + EFX_HWMON_TEMP_CONFIG, + EFX_HWMON_TEMP_CONFIG, + EFX_HWMON_TEMP_CONFIG, + EFX_HWMON_TEMP_CONFIG, + EFX_HWMON_TEMP_CONFIG, + EFX_HWMON_TEMP_CONFIG, + EFX_HWMON_TEMP_CONFIG, + EFX_HWMON_TEMP_CONFIG, + EFX_HWMON_TEMP_CONFIG, + EFX_HWMON_TEMP_CONFIG, + EFX_HWMON_TEMP_CONFIG, + EFX_HWMON_TEMP_CONFIG, + 0 +}; + +static const struct hwmon_channel_info efx_temp = { + .type = hwmon_temp, + .config = efx_temp_config, +}; + +static const u32 efx_in_config[] = { + EFX_HWMON_IN_CONFIG, + EFX_HWMON_IN_CONFIG, + EFX_HWMON_IN_CONFIG, + EFX_HWMON_IN_CONFIG, + EFX_HWMON_IN_CONFIG, + EFX_HWMON_IN_CONFIG, + EFX_HWMON_IN_CONFIG, + EFX_HWMON_IN_CONFIG, + EFX_HWMON_IN_CONFIG, + EFX_HWMON_IN_CONFIG, + EFX_HWMON_IN_CONFIG, + EFX_HWMON_IN_CONFIG, + EFX_HWMON_IN_CONFIG, + EFX_HWMON_IN_CONFIG, + EFX_HWMON_IN_CONFIG, + EFX_HWMON_IN_CONFIG, + EFX_HWMON_IN_CONFIG, + EFX_HWMON_IN_CONFIG, + EFX_HWMON_IN_CONFIG, + EFX_HWMON_IN_CONFIG, + 0 +}; + +static const struct hwmon_channel_info efx_in = { + .type = hwmon_in, + .config = efx_in_config, +}; + +static const u32 efx_curr_config[] = { + EFX_HWMON_CURR_CONFIG, + EFX_HWMON_CURR_CONFIG, + EFX_HWMON_CURR_CONFIG, + EFX_HWMON_CURR_CONFIG, + EFX_HWMON_CURR_CONFIG, + EFX_HWMON_CURR_CONFIG, + EFX_HWMON_CURR_CONFIG, + EFX_HWMON_CURR_CONFIG, + EFX_HWMON_CURR_CONFIG, + EFX_HWMON_CURR_CONFIG, + EFX_HWMON_CURR_CONFIG, + EFX_HWMON_CURR_CONFIG, + EFX_HWMON_CURR_CONFIG, + EFX_HWMON_CURR_CONFIG, + EFX_HWMON_CURR_CONFIG, + EFX_HWMON_CURR_CONFIG, + EFX_HWMON_CURR_CONFIG, + EFX_HWMON_CURR_CONFIG, + EFX_HWMON_CURR_CONFIG, + EFX_HWMON_CURR_CONFIG, + 0 +}; + +static const struct hwmon_channel_info efx_curr = { + .type = hwmon_curr, + .config = efx_curr_config, +}; + +static const u32 efx_power_config[] = { + EFX_HWMON_POWER_CONFIG, + EFX_HWMON_POWER_CONFIG, + EFX_HWMON_POWER_CONFIG, + EFX_HWMON_POWER_CONFIG, + EFX_HWMON_POWER_CONFIG, + EFX_HWMON_POWER_CONFIG, + EFX_HWMON_POWER_CONFIG, + EFX_HWMON_POWER_CONFIG, + EFX_HWMON_POWER_CONFIG, + EFX_HWMON_POWER_CONFIG, + EFX_HWMON_POWER_CONFIG, + EFX_HWMON_POWER_CONFIG, + EFX_HWMON_POWER_CONFIG, + EFX_HWMON_POWER_CONFIG, + EFX_HWMON_POWER_CONFIG, + EFX_HWMON_POWER_CONFIG, + EFX_HWMON_POWER_CONFIG, + EFX_HWMON_POWER_CONFIG, + EFX_HWMON_POWER_CONFIG, + EFX_HWMON_POWER_CONFIG, + 0 +}; + +static const struct hwmon_channel_info efx_power = { + .type = hwmon_power, + .config = efx_power_config, +}; + +static const u32 efx_fan_config[] = { + EFX_HWMON_FAN_CONFIG, + EFX_HWMON_FAN_CONFIG, + EFX_HWMON_FAN_CONFIG, + EFX_HWMON_FAN_CONFIG, + EFX_HWMON_FAN_CONFIG, + 0 +}; + +static const struct hwmon_channel_info efx_fan = { + 
.type = hwmon_fan, + .config = efx_fan_config, +}; + +static const struct hwmon_channel_info *efx_hwmon_info[] = { + &efx_temp, + &efx_in, + &efx_curr, + &efx_power, + &efx_fan, + NULL +}; + +static const struct hwmon_ops efx_hwmon_ops = { + .is_visible = efx_hwmon_is_visible, + .read = efx_hwmon_read, +#if defined(EFX_HAVE_HWMON_READ_STRING) + .read_string = efx_hwmon_read_string, +#endif +}; + +static const struct hwmon_chip_info efx_hwmon_chip_info = { + .ops = &efx_hwmon_ops, + .info = efx_hwmon_info, +}; + +#define efx_hwmon_chip_info_p &efx_hwmon_chip_info +#endif + +static int efx_mcdi_hwmon_probe(struct efx_nic *efx, unsigned int n_sensors, + unsigned int n_pages, bool has_dynamic_sensors) +{ + unsigned int sensor_entry_len = MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_LEN; + struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx); + unsigned int n_attrs; + int rc; + + if (has_dynamic_sensors) + sensor_entry_len = MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_LEN; + + rc = efx_nic_alloc_buffer(efx, &hwmon->dma_buf, + n_sensors * sensor_entry_len, GFP_KERNEL); + if (rc) + return rc; + + + /* Allocate space for the maximum possible number of + * attributes for this set of sensors: name of the driver plus + * value, min, max, crit, alarm and label for each sensor. + */ + n_attrs = 1 + 6 * n_sensors; + hwmon->attrs = kcalloc(n_attrs, sizeof(*hwmon->attrs), GFP_KERNEL); + if (!hwmon->attrs) + return -ENOMEM; + + efx_mcdi_mon_add_attr(efx, 0, 0, 0, 0, EFX_HWMON_NAME, false); + + if (has_dynamic_sensors) + rc = efx_mcdi_read_dynamic_sensor_info(efx); + else + rc = efx_mcdi_read_sensor_info(efx, n_pages); + + if (rc) + return rc; + + hwmon->device = hwmon_device_register_with_info(&efx->pci_dev->dev, + efx->name, efx, + efx_hwmon_chip_info_p, + NULL); + if (IS_ERR(hwmon->device)) + return PTR_ERR(hwmon->device); + rc = efx_mcdi_mon_create_files(&efx->pci_dev->dev, hwmon); + + return rc; +} + +int efx_mcdi_mon_probe(struct efx_nic *efx) +{ + struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx); + unsigned int n_pages, n_sensors; + bool has_dynamic_sensors = efx_nic_has_dynamic_sensors(efx); + int rc; + + /* Do not probe twice */ + if (hwmon->dma_buf.addr) + return 0; + + if (!has_dynamic_sensors && efx->type->has_dynamic_sensors) { + netif_err(efx, hw, efx->net_dev, + "Expected dynamic sensor feature not supported by FW\n"); + return 0; + } + + /* Find out how many sensors are present */ + n_sensors = 0; + mutex_init(&hwmon->update_lock); + + if (has_dynamic_sensors) { +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_RHASHTABLE_LOOKUP_FAST) + rhashtable_init(&hwmon->sensor_table, &sensor_entry_params); + rc = efx_mcdi_read_dynamic_sensor_list(efx); + n_sensors = hwmon->n_dynamic_sensors; +#else + netif_err(efx, hw, efx->net_dev, + "Kernel is too old to support dynamic sensors\n"); + rc = -ENOTSUPP; +#endif + } else { + rc = efx_mcdi_get_num_sensors(efx, &n_sensors, &n_pages); + } + + if (rc) + return rc; + + if (!n_sensors) + return 0; + + rc = efx_mcdi_hwmon_probe(efx, n_sensors, n_pages, + has_dynamic_sensors); + if (rc) { + efx_mcdi_mon_remove(efx); + } else { + mutex_lock(&hwmon->update_lock); + if (has_dynamic_sensors) + efx_mcdi_dynamic_sensor_reading_update(efx); + else + efx_mcdi_mon_update(efx); + mutex_unlock(&hwmon->update_lock); + } + + return rc; +} + +static void efx_mcdi_hwmon_remove(struct efx_nic *efx) +{ + struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx); + + mutex_lock(&hwmon->update_lock); + + efx_mcdi_mon_remove_files(&efx->pci_dev->dev, hwmon); + if (!IS_ERR_OR_NULL(hwmon->device)) + 
hwmon_device_unregister(hwmon->device);
+	kfree(hwmon->attrs);
+	hwmon->attrs = NULL;
+	hwmon->n_attrs = 0;
+	kfree(hwmon->sensor_list);
+	hwmon->sensor_list = NULL;
+	hwmon->n_dynamic_sensors = 0;
+
+	efx_nic_free_buffer(efx, &hwmon->dma_buf);
+	mutex_unlock(&hwmon->update_lock);
+}
+
+void efx_mcdi_mon_remove(struct efx_nic *efx)
+{
+	struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
+
+	if (!hwmon || !hwmon->dma_buf.addr)
+		return;
+	efx_mcdi_hwmon_remove(efx);
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_RHASHTABLE_LOOKUP_FAST)
+	if (efx_nic_has_dynamic_sensors(efx))
+		rhashtable_free_and_destroy(&hwmon->sensor_table,
+					    NULL,
+					    NULL);
+#endif
+}
+
+#endif /* CONFIG_SFC_MCDI_MON */
diff --git a/src/drivers/net/ethernet/sfc/mcdi_pcol.h b/src/drivers/net/ethernet/sfc/mcdi_pcol.h
new file mode 100644
index 0000000..2149403
--- /dev/null
+++ b/src/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -0,0 +1,33895 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2009-2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+
+#ifndef MCDI_PCOL_H
+#define MCDI_PCOL_H
+
+/* Values to be written into FMCR_CZ_RESET_STATE_REG to control boot. */
+/* Power-on reset state */
+#define MC_FW_STATE_POR (1)
+/* If this is set in MC_RESET_STATE_REG then it should be
+ * possible to jump into IMEM without loading code from flash. */
+#define MC_FW_WARM_BOOT_OK (2)
+/* The MC main image has started to boot. */
+#define MC_FW_STATE_BOOTING (4)
+/* The Scheduler has started. */
+#define MC_FW_STATE_SCHED (8)
+/* If this is set in MC_RESET_STATE_REG then it should be
+ * possible to jump into IMEM without loading code from flash.
+ * Unlike a warm boot, assume DMEM has been reloaded, so that
+ * the MC persistent data must be reinitialised. */
+#define MC_FW_TEPID_BOOT_OK (16)
+/* We have entered the main firmware via recovery mode. This
+ * means that MC persistent data must be reinitialised, but that
+ * we shouldn't touch PCIe config. */
+#define MC_FW_RECOVERY_MODE_PCIE_INIT_OK (32)
+/* BIST state has been initialized */
+#define MC_FW_BIST_INIT_OK (128)
+
+/* Siena MC shared memory offsets */
+/* The 'doorbell' addresses are hard-wired to alert the MC when written */
+#define MC_SMEM_P0_DOORBELL_OFST 0x000
+#define MC_SMEM_P1_DOORBELL_OFST 0x004
+/* The rest of these are firmware-defined */
+#define MC_SMEM_P0_PDU_OFST 0x008
+#define MC_SMEM_P1_PDU_OFST 0x108
+#define MC_SMEM_PDU_LEN 0x100
+#define MC_SMEM_P0_PTP_TIME_OFST 0x7f0
+#define MC_SMEM_P0_STATUS_OFST 0x7f8
+#define MC_SMEM_P1_STATUS_OFST 0x7fc
+
+/* Values to be written to the per-port status dword in shared
+ * memory on reboot and assert */
+#define MC_STATUS_DWORD_REBOOT (0xb007b007)
+#define MC_STATUS_DWORD_ASSERT (0xdeaddead)
+
+/* Check whether an mcfw version (in host order) belongs to a bootloader */
+#define MC_FW_VERSION_IS_BOOTLOADER(_v) (((_v) >> 16) == 0xb007)
+
+/* The current version of the MCDI protocol.
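+ */
+
+/* Illustrative only: MC_FW_VERSION_IS_BOOTLOADER() keys on the 0xb007
+ * ("boot") signature in the top 16 bits of the version word, so for
+ * example MC_FW_VERSION_IS_BOOTLOADER(0xb0070001) is true while
+ * MC_FW_VERSION_IS_BOOTLOADER(0x05031004) is false.
+ */
+
+/* The current version of the MCDI protocol.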
+ * + * Note that the ROM burnt into the card only talks V0, so at the very + * least every driver must support version 0 and MCDI_PCOL_VERSION + */ +#define MCDI_PCOL_VERSION 2 + +/* Unused commands: 0x23, 0x27, 0x30, 0x31 */ + +/* MCDI version 1 + * + * Each MCDI request starts with an MCDI_HEADER, which is a 32bit + * structure, filled in by the client. + * + * 0 7 8 16 20 22 23 24 31 + * | CODE | R | LEN | SEQ | Rsvd | E | R | XFLAGS | + * | | | + * | | \--- Response + * | \------- Error + * \------------------------------ Resync (always set) + * + * The client writes its request into MC shared memory, and rings the + * doorbell. Each request is completed either by the MC writing + * back into shared memory, or by writing out an event. + * + * All MCDI commands support completion by shared memory response. Each + * request may also contain additional data (accounted for by HEADER.LEN), + * and some responses may also contain additional data (again, accounted + * for by HEADER.LEN). + * + * Some MCDI commands support completion by event, in which any associated + * response data is included in the event. + * + * The protocol requires one response to be delivered for every request; a + * request should not be sent unless the response for the previous request + * has been received (either by polling shared memory, or by receiving + * an event). + */ + +/** Request/Response structure */ +#define MCDI_HEADER_OFST 0 +#define MCDI_HEADER_CODE_LBN 0 +#define MCDI_HEADER_CODE_WIDTH 7 +#define MCDI_HEADER_RESYNC_LBN 7 +#define MCDI_HEADER_RESYNC_WIDTH 1 +#define MCDI_HEADER_DATALEN_LBN 8 +#define MCDI_HEADER_DATALEN_WIDTH 8 +#define MCDI_HEADER_SEQ_LBN 16 +#define MCDI_HEADER_SEQ_WIDTH 4 +#define MCDI_HEADER_RSVD_LBN 20 +#define MCDI_HEADER_RSVD_WIDTH 1 +#define MCDI_HEADER_NOT_EPOCH_LBN 21 +#define MCDI_HEADER_NOT_EPOCH_WIDTH 1 +#define MCDI_HEADER_ERROR_LBN 22 +#define MCDI_HEADER_ERROR_WIDTH 1 +#define MCDI_HEADER_RESPONSE_LBN 23 +#define MCDI_HEADER_RESPONSE_WIDTH 1 +#define MCDI_HEADER_XFLAGS_LBN 24 +#define MCDI_HEADER_XFLAGS_WIDTH 8 +/* Request response using event */ +#define MCDI_HEADER_XFLAGS_EVREQ 0x01 +/* Request (and signal) early doorbell return */ +#define MCDI_HEADER_XFLAGS_DBRET 0x02 + +/* Maximum number of payload bytes */ +#define MCDI_CTL_SDU_LEN_MAX_V1 0xfc +#define MCDI_CTL_SDU_LEN_MAX_V2 0x400 + +#define MCDI_CTL_SDU_LEN_MAX MCDI_CTL_SDU_LEN_MAX_V2 + + +/* The MC can generate events for two reasons: + * - To advance a shared memory request if XFLAGS_EVREQ was set + * - As a notification (link state, i2c event), controlled + * via MC_CMD_LOG_CTRL + * + * Both events share a common structure: + * + * 0 32 33 36 44 52 60 + * | Data | Cont | Level | Src | Code | Rsvd | + * | + * \ There is another event pending in this notification + * + * If Code==CMDDONE, then the fields are further interpreted as: + * + * - LEVEL==INFO Command succeeded + * - LEVEL==ERR Command failed + * + * 0 8 16 24 32 + * | Seq | Datalen | Errno | Rsvd | + * + * These fields are taken directly out of the standard MCDI header, i.e., + * LEVEL==ERR, Datalen == 0 => Reboot + * + * Events can be squirted out of the UART (using LOG_CTRL) without a + * MCDI header. An event can be distinguished from a MCDI response by + * examining the first byte which is 0xc0. This corresponds to the + * non-existent MCDI command MC_CMD_DEBUG_LOG. + * + * 0 7 8 + * | command | Resync | = 0xc0 + * + * Since the event is written in big-endian byte order, this works + * providing bits 56-63 of the event are 0xc0. 
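+ */
+
+/* Illustrative sketch only (the driver itself builds headers with helpers
+ * such as EFX_POPULATE_DWORD from bitfield.h): assembling a version 1
+ * request header from the MCDI_HEADER_* fields defined above.
+ */
+static inline u32 efx_example_mcdi_v1_header(u8 code, u8 datalen, u8 seq,
+					     u8 xflags)
+{
+	return (((u32)code & 0x7f) << MCDI_HEADER_CODE_LBN) |
+	       (1u << MCDI_HEADER_RESYNC_LBN) |	/* resync is always set */
+	       ((u32)datalen << MCDI_HEADER_DATALEN_LBN) |
+	       (((u32)seq & 0xf) << MCDI_HEADER_SEQ_LBN) |
+	       ((u32)xflags << MCDI_HEADER_XFLAGS_LBN);
+}
+
+/* The MCDI event format, continued: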
+ * + * 56 60 63 + * | Rsvd | Code | = 0xc0 + * + * Which means for convenience the event code is 0xc for all MC + * generated events. + */ +#define FSE_AZ_EV_CODE_MCDI_EVRESPONSE 0xc + + + +#define MC_CMD_ERR_CODE_OFST 0 +#define MC_CMD_ERR_PROXY_PENDING_HANDLE_OFST 4 + +/* We define 8 "escape" commands to allow + for command number space extension */ + +#define MC_CMD_CMD_SPACE_ESCAPE_0 0x78 +#define MC_CMD_CMD_SPACE_ESCAPE_1 0x79 +#define MC_CMD_CMD_SPACE_ESCAPE_2 0x7A +#define MC_CMD_CMD_SPACE_ESCAPE_3 0x7B +#define MC_CMD_CMD_SPACE_ESCAPE_4 0x7C +#define MC_CMD_CMD_SPACE_ESCAPE_5 0x7D +#define MC_CMD_CMD_SPACE_ESCAPE_6 0x7E +#define MC_CMD_CMD_SPACE_ESCAPE_7 0x7F + +/* Vectors in the boot ROM */ +/* Point to the copycode entry point. */ +#define SIENA_MC_BOOTROM_COPYCODE_VEC (0x800 - 3 * 0x4) +#define HUNT_MC_BOOTROM_COPYCODE_VEC (0x8000 - 3 * 0x4) +#define MEDFORD_MC_BOOTROM_COPYCODE_VEC (0x10000 - 3 * 0x4) +/* Points to the recovery mode entry point. Misnamed but kept for compatibility. */ +#define SIENA_MC_BOOTROM_NOFLASH_VEC (0x800 - 2 * 0x4) +#define HUNT_MC_BOOTROM_NOFLASH_VEC (0x8000 - 2 * 0x4) +#define MEDFORD_MC_BOOTROM_NOFLASH_VEC (0x10000 - 2 * 0x4) +/* Points to the recovery mode entry point. Same as above, but the right name. */ +#define SIENA_MC_BOOTROM_RECOVERY_VEC (0x800 - 2 * 0x4) +#define HUNT_MC_BOOTROM_RECOVERY_VEC (0x8000 - 2 * 0x4) +#define MEDFORD_MC_BOOTROM_RECOVERY_VEC (0x10000 - 2 * 0x4) + +/* Points to noflash mode entry point. */ +#define MEDFORD_MC_BOOTROM_REAL_NOFLASH_VEC (0x10000 - 4 * 0x4) + +/* The command set exported by the boot ROM (MCDI v0) */ +#define MC_CMD_GET_VERSION_V0_SUPPORTED_FUNCS { \ + (1 << MC_CMD_READ32) | \ + (1 << MC_CMD_WRITE32) | \ + (1 << MC_CMD_COPYCODE) | \ + (1 << MC_CMD_GET_VERSION), \ + 0, 0, 0 } + +#define MC_CMD_SENSOR_INFO_OUT_OFFSET_OFST(_x) \ + (MC_CMD_SENSOR_ENTRY_OFST + (_x)) + +#define MC_CMD_DBI_WRITE_IN_ADDRESS_OFST(n) \ + (MC_CMD_DBI_WRITE_IN_DBIWROP_OFST + \ + MC_CMD_DBIWROP_TYPEDEF_ADDRESS_OFST + \ + (n) * MC_CMD_DBIWROP_TYPEDEF_LEN) + +#define MC_CMD_DBI_WRITE_IN_BYTE_MASK_OFST(n) \ + (MC_CMD_DBI_WRITE_IN_DBIWROP_OFST + \ + MC_CMD_DBIWROP_TYPEDEF_BYTE_MASK_OFST + \ + (n) * MC_CMD_DBIWROP_TYPEDEF_LEN) + +#define MC_CMD_DBI_WRITE_IN_VALUE_OFST(n) \ + (MC_CMD_DBI_WRITE_IN_DBIWROP_OFST + \ + MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST + \ + (n) * MC_CMD_DBIWROP_TYPEDEF_LEN) + +/* This may be ORed with an EVB_PORT_ID_xxx constant to pass a non-default + * stack ID (which must be in the range 1-255) along with an EVB port ID. + */ +#define EVB_STACK_ID(n) (((n) & 0xff) << 16) + + +/* Version 2 adds an optional argument to error returns: the errno value + * may be followed by the (0-based) number of the first argument that + * could not be processed. + */ +#define MC_CMD_ERR_ARG_OFST 4 + +/* MC_CMD_ERR enum: Public MCDI error codes. Error codes that correspond to + * POSIX errnos should use the same numeric values that linux does. Error codes + * specific to Solarflare firmware should use values in the range 0x1000 - + * 0x10ff. The range 0x2000 - 0x20ff is reserved for private error codes (see + * MC_CMD_ERR_PRIV below). + */ +/* enum: Operation not permitted. 
*/
+#define MC_CMD_ERR_EPERM 0x1
+/* enum: Non-existent command target */
+#define MC_CMD_ERR_ENOENT 0x2
+/* enum: assert() has killed the MC */
+#define MC_CMD_ERR_EINTR 0x4
+/* enum: I/O failure */
+#define MC_CMD_ERR_EIO 0x5
+/* enum: Already exists */
+#define MC_CMD_ERR_EEXIST 0x6
+/* enum: Try again */
+#define MC_CMD_ERR_EAGAIN 0xb
+/* enum: Out of memory */
+#define MC_CMD_ERR_ENOMEM 0xc
+/* enum: Caller does not hold required locks */
+#define MC_CMD_ERR_EACCES 0xd
+/* enum: Resource is currently unavailable (e.g. lock contention) */
+#define MC_CMD_ERR_EBUSY 0x10
+/* enum: No such device */
+#define MC_CMD_ERR_ENODEV 0x13
+/* enum: Invalid argument to target */
+#define MC_CMD_ERR_EINVAL 0x16
+/* enum: No space */
+#define MC_CMD_ERR_ENOSPC 0x1c
+/* enum: Read-only */
+#define MC_CMD_ERR_EROFS 0x1e
+/* enum: Broken pipe */
+#define MC_CMD_ERR_EPIPE 0x20
+/* enum: Out of range */
+#define MC_CMD_ERR_ERANGE 0x22
+/* enum: Non-recursive resource is already acquired */
+#define MC_CMD_ERR_EDEADLK 0x23
+/* enum: Operation not implemented */
+#define MC_CMD_ERR_ENOSYS 0x26
+/* enum: Operation timed out */
+#define MC_CMD_ERR_ETIME 0x3e
+/* enum: Link has been severed */
+#define MC_CMD_ERR_ENOLINK 0x43
+/* enum: Protocol error */
+#define MC_CMD_ERR_EPROTO 0x47
+/* enum: Bad message */
+#define MC_CMD_ERR_EBADMSG 0x4a
+/* enum: Operation not supported */
+#define MC_CMD_ERR_ENOTSUP 0x5f
+/* enum: Address not available */
+#define MC_CMD_ERR_EADDRNOTAVAIL 0x63
+/* enum: Not connected */
+#define MC_CMD_ERR_ENOTCONN 0x6b
+/* enum: Operation already in progress */
+#define MC_CMD_ERR_EALREADY 0x72
+/* enum: Stale handle. The handle references a resource that no longer exists.
+ */
+#define MC_CMD_ERR_ESTALE 0x74
+/* enum: Resource allocation failed. */
+#define MC_CMD_ERR_ALLOC_FAIL 0x1000
+/* enum: V-adaptor not found. */
+#define MC_CMD_ERR_NO_VADAPTOR 0x1001
+/* enum: EVB port not found. */
+#define MC_CMD_ERR_NO_EVB_PORT 0x1002
+/* enum: V-switch not found. */
+#define MC_CMD_ERR_NO_VSWITCH 0x1003
+/* enum: Too many VLAN tags. */
+#define MC_CMD_ERR_VLAN_LIMIT 0x1004
+/* enum: Bad PCI function number. */
+#define MC_CMD_ERR_BAD_PCI_FUNC 0x1005
+/* enum: Invalid VLAN mode. */
+#define MC_CMD_ERR_BAD_VLAN_MODE 0x1006
+/* enum: Invalid v-switch type. */
+#define MC_CMD_ERR_BAD_VSWITCH_TYPE 0x1007
+/* enum: Invalid v-port type. */
+#define MC_CMD_ERR_BAD_VPORT_TYPE 0x1008
+/* enum: MAC address exists. */
+#define MC_CMD_ERR_MAC_EXIST 0x1009
+/* enum: Slave core not present */
+#define MC_CMD_ERR_SLAVE_NOT_PRESENT 0x100a
+/* enum: The datapath is disabled. */
+#define MC_CMD_ERR_DATAPATH_DISABLED 0x100b
+/* enum: The requesting client is not a function */
+#define MC_CMD_ERR_CLIENT_NOT_FN 0x100c
+/* enum: The requested operation might require the command to be passed between
+ * MCs, and the transport doesn't support that. Should only ever be seen over
+ * the UART.
+ */
+#define MC_CMD_ERR_TRANSPORT_NOPROXY 0x100d
+/* enum: VLAN tag(s) exists */
+#define MC_CMD_ERR_VLAN_EXIST 0x100e
+/* enum: No MAC address assigned to an EVB port */
+#define MC_CMD_ERR_NO_MAC_ADDR 0x100f
+/* enum: Notifies the driver that the request has been relayed to an admin
+ * function for authorization. The driver should wait for a PROXY_RESPONSE
+ * event and then resend its request. This error code is followed by a 32-bit
+ * handle that helps matching it with the respective PROXY_RESPONSE event.
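+ */
+
+/* Illustrative sketch only: how a driver might fold these MCDI error
+ * codes back into Linux errnos. The driver's real, complete mapping
+ * lives in mcdi.c; this fragment just shows the intended pattern.
+ */
+static inline int efx_example_mcdi_errno(unsigned int mcdi_err)
+{
+	switch (mcdi_err) {
+	case 0:
+		return 0;
+	case MC_CMD_ERR_EPERM:
+		return -EPERM;
+	case MC_CMD_ERR_ENOENT:
+		return -ENOENT;
+	case MC_CMD_ERR_EAGAIN:
+		return -EAGAIN;
+	case MC_CMD_ERR_EBUSY:
+		return -EBUSY;
+	case MC_CMD_ERR_EINVAL:
+		return -EINVAL;
+	case MC_CMD_ERR_ENOTSUP:
+		return -EOPNOTSUPP;
+	default:
+		return -EPROTO;
+	}
+}
+
+/* enum: Request relayed to an admin function for authorization; see the
+ * full description above.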
+ */
+#define MC_CMD_ERR_PROXY_PENDING 0x1010
+/* enum: The request cannot be passed for authorization because another request
+ * from the same function is currently being authorized. The driver should try
+ * again later.
+ */
+#define MC_CMD_ERR_PROXY_INPROGRESS 0x1011
+/* enum: Returned by MC_CMD_PROXY_COMPLETE if the caller is not the function
+ * that has enabled proxying or BLOCK_INDEX points to a function that doesn't
+ * await an authorization.
+ */
+#define MC_CMD_ERR_PROXY_UNEXPECTED 0x1012
+/* enum: This code is currently only used internally in FW. Its meaning is that
+ * an operation failed due to lack of SR-IOV privilege. Normally it is
+ * translated to EPERM by send_cmd_err(), but it may also be used to trigger
+ * some special mechanism for handling such a case, e.g. to relay the failed
+ * request to a designated admin function for authorization.
+ */
+#define MC_CMD_ERR_NO_PRIVILEGE 0x1013
+/* enum: Workaround 26807 could not be turned on/off because some functions
+ * have already installed filters. See the comment at
+ * MC_CMD_WORKAROUND_BUG26807. May also be returned for other operations such
+ * as sub-variant switching.
+ */
+#define MC_CMD_ERR_FILTERS_PRESENT 0x1014
+/* enum: The clock whose frequency you've attempted to set doesn't exist on
+ * this NIC.
+ */
+#define MC_CMD_ERR_NO_CLOCK 0x1015
+/* enum: Returned by MC_CMD_TESTASSERT if the action that should have caused an
+ * assertion failed to do so.
+ */
+#define MC_CMD_ERR_UNREACHABLE 0x1016
+/* enum: This command needs to be processed in the background but there were no
+ * resources to do so. Send it again after a command has completed.
+ */
+#define MC_CMD_ERR_QUEUE_FULL 0x1017
+/* enum: The operation could not be completed because the PCIe link has gone
+ * away. This error code is never expected to be returned over the TLP
+ * transport.
+ */
+#define MC_CMD_ERR_NO_PCIE 0x1018
+/* enum: The operation could not be completed because the datapath has gone
+ * away. This is distinct from MC_CMD_ERR_DATAPATH_DISABLED in that the
+ * datapath absence may be temporary.
+ */
+#define MC_CMD_ERR_NO_DATAPATH 0x1019
+/* enum: The operation could not complete because some VIs are allocated */
+#define MC_CMD_ERR_VIS_PRESENT 0x101a
+/* enum: The operation could not complete because some PIO buffers are
+ * allocated
+ */
+#define MC_CMD_ERR_PIOBUFS_PRESENT 0x101b
+
+/* MC_CMD_RESOURCE_SPECIFIER enum */
+/* enum: Any */
+#define MC_CMD_RESOURCE_INSTANCE_ANY 0xffffffff
+#define MC_CMD_RESOURCE_INSTANCE_NONE 0xfffffffe /* enum */
+
+/* MC_CMD_FPGA_FLASH_INDEX enum */
+#define MC_CMD_FPGA_FLASH_PRIMARY 0x0 /* enum */
+#define MC_CMD_FPGA_FLASH_SECONDARY 0x1 /* enum */
+
+/* MC_CMD_EXTERNAL_MAE_LINK_MODE enum */
+/* enum: Legacy mode as described in XN-200039-TC. */
+#define MC_CMD_EXTERNAL_MAE_LINK_MODE_LEGACY 0x0
+/* enum: Switchdev mode as described in XN-200039-TC. */
+#define MC_CMD_EXTERNAL_MAE_LINK_MODE_SWITCHDEV 0x1
+/* enum: Bootstrap mode as described in XN-200039-TC. */
+#define MC_CMD_EXTERNAL_MAE_LINK_MODE_BOOTSTRAP 0x2
+/* enum: Link-mode change is in-progress as described in XN-200039-TC. */
+#define MC_CMD_EXTERNAL_MAE_LINK_MODE_PENDING 0xf
+
+/* PCIE_INTERFACE enum: From EF100 onwards, SFC products can have multiple PCIe
+ * interfaces. There is a need to refer to interfaces explicitly from drivers
+ * (for example, a management driver on one interface administering a function
+ * on another interface). This enumeration provides stable identifiers to all
+ * interfaces present on a product. Product documentation will specify which
+ * interfaces exist and their associated identifier. In general, drivers
+ * should not assign special meanings to specific values. Instead, behaviour
+ * should be determined by NIC configuration, which will identify interfaces
+ * where appropriate.
+ */
+/* enum: Primary host interfaces. Typically (i.e. for all known SFC products)
+ * the interface exposed on the edge connector (or form factor equivalent).
+ */
+#define PCIE_INTERFACE_HOST_PRIMARY 0x0
+/* enum: Riverhead and Keystone products have a second PCIe interface to which
+ * an on-NIC ARM module is expected to be connected.
+ */
+#define PCIE_INTERFACE_NIC_EMBEDDED 0x1
+/* enum: For MCDI commands issued over a PCIe interface, this value is
+ * translated into the interface over which the command was issued. Not
+ * meaningful for other MCDI transports.
+ */
+#define PCIE_INTERFACE_CALLER 0xffffffff
+
+/* MC_CLIENT_ID_SPECIFIER enum */
+/* enum: Equivalent to the caller's client ID */
+#define MC_CMD_CLIENT_ID_SELF 0xffffffff
+
+/* MAE_FIELD_SUPPORT_STATUS enum */
+/* enum: The NIC does not support this field. The driver must ensure that any
+ * mask associated with this field in a match rule is zeroed. The NIC may
+ * either reject requests with an invalid mask for such a field, or may assume
+ * that the mask is zero. (This category only exists to describe behaviour for
+ * fields that a newer driver might know about but that older firmware does
+ * not. It is recommended that firmware report MAE_FIELD_SUPPORTED_MATCH_NEVER
+ * for all match fields defined at the time of its compilation. If a driver
+ * sees a field support status value that it does not recognise, it must treat
+ * that field as though the field was reported as
+ * MAE_FIELD_SUPPORTED_MATCH_NEVER, and must never set a non-zero mask value
+ * for this field.)
+ */
+#define MAE_FIELD_UNSUPPORTED 0x0
+/* enum: The NIC supports this field, but cannot use it in a match rule. The
+ * driver must ensure that any mask for such a field in a match rule is zeroed.
+ * The NIC will reject requests with an invalid mask for such a field.
+ */
+#define MAE_FIELD_SUPPORTED_MATCH_NEVER 0x1
+/* enum: The NIC supports this field, and must use it in all match rules. The
+ * driver must ensure that any mask for such a field is all ones. The NIC will
+ * reject requests with an invalid mask for such a field.
+ */
+#define MAE_FIELD_SUPPORTED_MATCH_ALWAYS 0x2
+/* enum: The NIC supports this field, and may optionally use it in match rules.
+ * The driver must ensure that any mask for such a field is either all zeroes
+ * or all ones. The NIC will reject requests with an invalid mask for such a
+ * field.
+ */
+#define MAE_FIELD_SUPPORTED_MATCH_OPTIONAL 0x3
+/* enum: The NIC supports this field, and may optionally use it in match rules.
+ * The driver must ensure that any mask for such a field is either all zeroes
+ * or a consecutive set of ones followed by all zeroes (starting from MSB).
+ * The NIC will reject requests with an invalid mask for such a field.
+ */
+#define MAE_FIELD_SUPPORTED_MATCH_PREFIX 0x4
+/* enum: The NIC supports this field, and may optionally use it in match rules.
+ * The driver may provide an arbitrary mask for such a field.
+ */
+#define MAE_FIELD_SUPPORTED_MATCH_MASK 0x5
+
+/* MAE_CT_VNI_MODE enum: Controls the layout of the VNI input to the conntrack
+ * lookup. (Values are not arbitrary - constrained by table access ABI.)
+ */
+/* enum: The VNI input to the conntrack lookup will be zero.
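+ */
+
+/* Illustrative only: checking that a mask satisfies the
+ * MAE_FIELD_SUPPORTED_MATCH_PREFIX constraint above (all zeroes, or a
+ * run of ones starting from the MSB).
+ */
+static inline bool efx_example_mask_is_prefix(u32 mask)
+{
+	/* Adding the lowest set bit to a valid prefix mask must carry
+	 * out of bit 31.
+	 */
+	return mask == 0 || (u64)mask + (mask & -mask) == 0x100000000ull;
+}
+
+/* enum: The VNI input to the conntrack lookup will be zero.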
*/ +#define MAE_CT_VNI_MODE_ZERO 0x0 +/* enum: The VNI input to the conntrack lookup will be the VNI (VXLAN/Geneve) + * or VSID (NVGRE) field from the packet. + */ +#define MAE_CT_VNI_MODE_VNI 0x1 +/* enum: The VNI input to the conntrack lookup will be the VLAN ID from the + * outermost VLAN tag (in bottom 12 bits; top 12 bits zero). + */ +#define MAE_CT_VNI_MODE_1VLAN 0x2 +/* enum: The VNI input to the conntrack lookup will be the VLAN IDs from both + * VLAN tags (outermost in bottom 12 bits, innermost in top 12 bits). + */ +#define MAE_CT_VNI_MODE_2VLAN 0x3 + +/* MAE_FIELD enum: NB: this enum shares namespace with the support status enum. + */ +/* enum: Source mport upon entering the MAE. */ +#define MAE_FIELD_INGRESS_PORT 0x0 +#define MAE_FIELD_MARK 0x1 /* enum */ +/* enum: Table ID used in action rule. Initially zero, can be changed in action + * rule response. + */ +#define MAE_FIELD_RECIRC_ID 0x2 +#define MAE_FIELD_IS_IP_FRAG 0x3 /* enum */ +#define MAE_FIELD_DO_CT 0x4 /* enum */ +#define MAE_FIELD_CT_HIT 0x5 /* enum */ +/* enum: Undefined unless CT_HIT=1. */ +#define MAE_FIELD_CT_MARK 0x6 +/* enum: Undefined unless DO_CT=1. */ +#define MAE_FIELD_CT_DOMAIN 0x7 +/* enum: Undefined unless CT_HIT=1. */ +#define MAE_FIELD_CT_PRIVATE_FLAGS 0x8 +/* enum: 1 if the packet ingressed the NIC from one of the MACs, else 0. */ +#define MAE_FIELD_IS_FROM_NETWORK 0x9 +/* enum: 1 if the packet has 1 or more VLAN tags, else 0. */ +#define MAE_FIELD_HAS_OVLAN 0xa +/* enum: 1 if the packet has 2 or more VLAN tags, else 0. */ +#define MAE_FIELD_HAS_IVLAN 0xb +/* enum: 1 if the outer packet has 1 or more VLAN tags, else 0; only present + * when encap + */ +#define MAE_FIELD_ENC_HAS_OVLAN 0xc +/* enum: 1 if the outer packet has 2 or more VLAN tags, else 0; only present + * when encap + */ +#define MAE_FIELD_ENC_HAS_IVLAN 0xd +/* enum: Packet is IP fragment */ +#define MAE_FIELD_ENC_IP_FRAG 0xe +#define MAE_FIELD_ETHER_TYPE 0x21 /* enum */ +#define MAE_FIELD_VLAN0_TCI 0x22 /* enum */ +#define MAE_FIELD_VLAN0_PROTO 0x23 /* enum */ +#define MAE_FIELD_VLAN1_TCI 0x24 /* enum */ +#define MAE_FIELD_VLAN1_PROTO 0x25 /* enum */ +/* enum: Inner when encap */ +#define MAE_FIELD_ETH_SADDR 0x28 +/* enum: Inner when encap */ +#define MAE_FIELD_ETH_DADDR 0x29 +/* enum: Inner when encap. NB: IPv4 and IPv6 fields are mutually exclusive. */ +#define MAE_FIELD_SRC_IP4 0x2a +/* enum: Inner when encap */ +#define MAE_FIELD_SRC_IP6 0x2b +/* enum: Inner when encap */ +#define MAE_FIELD_DST_IP4 0x2c +/* enum: Inner when encap */ +#define MAE_FIELD_DST_IP6 0x2d +/* enum: Inner when encap */ +#define MAE_FIELD_IP_PROTO 0x2e +/* enum: Inner when encap */ +#define MAE_FIELD_IP_TOS 0x2f +/* enum: Inner when encap */ +#define MAE_FIELD_IP_TTL 0x30 +/* enum: Inner when encap TODO: how this is defined? The raw flags + + * frag_offset from the packet, or some derived value more amenable to ternary + * matching? TODO: there was a proposal for driver-allocation fields. The + * driver would provide some instruction for how to extract given field values, + * and would be given a field id in return. It could then use that field id in + * its matches. This feels like it would be extremely hard to implement in + * hardware, but I mention it for completeness. 
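+ */
+
+/* Illustrative only: deriving the 24-bit conntrack VNI input for each
+ * MAE_CT_VNI_MODE above, assuming hypothetical packet-field inputs.
+ */
+static inline u32 efx_example_ct_vni_input(unsigned int mode, u32 vni,
+					   u16 ovlan_vid, u16 ivlan_vid)
+{
+	switch (mode) {
+	case MAE_CT_VNI_MODE_VNI:
+		return vni & 0xffffff;
+	case MAE_CT_VNI_MODE_1VLAN:
+		return ovlan_vid & 0xfff;
+	case MAE_CT_VNI_MODE_2VLAN:
+		return (ovlan_vid & 0xfff) |
+		       ((u32)(ivlan_vid & 0xfff) << 12);
+	case MAE_CT_VNI_MODE_ZERO:
+	default:
+		return 0;
+	}
+}
+
+/* enum: Inner when encap.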
+ */
+#define MAE_FIELD_IP_FLAGS 0x31
+/* enum: Ports (UDP, TCP) Inner when encap */
+#define MAE_FIELD_L4_SPORT 0x32
+/* enum: Ports (UDP, TCP) Inner when encap */
+#define MAE_FIELD_L4_DPORT 0x33
+/* enum: Inner when encap */
+#define MAE_FIELD_TCP_FLAGS 0x34
+/* enum: TCP packet with any of SYN, FIN or RST flag set */
+#define MAE_FIELD_TCP_SYN_FIN_RST 0x35
+/* enum: Packet is IP fragment with fragment offset 0 */
+#define MAE_FIELD_IP_FIRST_FRAG 0x36
+/* enum: The type of encapsulation used for this packet. Value as per
+ * ENCAP_TYPE_*.
+ */
+#define MAE_FIELD_ENCAP_TYPE 0x3f
+/* enum: The ID of the outer rule that marked this packet as encapsulated.
+ * Useful for implicitly matching on outer fields.
+ */
+#define MAE_FIELD_OUTER_RULE_ID 0x40
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_ETHER_TYPE 0x41
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_VLAN0_TCI 0x42
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_VLAN0_PROTO 0x43
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_VLAN1_TCI 0x44
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_VLAN1_PROTO 0x45
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_ETH_SADDR 0x48
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_ETH_DADDR 0x49
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_SRC_IP4 0x4a
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_SRC_IP6 0x4b
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_DST_IP4 0x4c
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_DST_IP6 0x4d
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_IP_PROTO 0x4e
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_IP_TOS 0x4f
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_IP_TTL 0x50
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_IP_FLAGS 0x51
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_L4_SPORT 0x52
+/* enum: Outer; only present when encap */
+#define MAE_FIELD_ENC_L4_DPORT 0x53
+/* enum: VNI (when VXLAN or GENEVE) VSID (when NVGRE) Bottom 24 bits of Key
+ * (when L2GRE) Outer; only present when encap
+ */
+#define MAE_FIELD_ENC_VNET_ID 0x54
+
+/* MAE_MCDI_ENCAP_TYPE enum: Encapsulation type. Defines how the payload will
+ * be parsed to an inner frame. Other values are reserved. Unknown values
+ * should be treated the same as NONE. (Values are not arbitrary - constrained
+ * by table access ABI.)
+ */
+#define MAE_MCDI_ENCAP_TYPE_NONE 0x0 /* enum */
+/* enum: Don't assume enum aligns with support bitmask... */
+#define MAE_MCDI_ENCAP_TYPE_VXLAN 0x1
+#define MAE_MCDI_ENCAP_TYPE_NVGRE 0x2 /* enum */
+#define MAE_MCDI_ENCAP_TYPE_GENEVE 0x3 /* enum */
+#define MAE_MCDI_ENCAP_TYPE_L2GRE 0x4 /* enum */
+
+/* MAE_MPORT_END enum: Selects which end of the logical link identified by an
+ * MPORT_SELECTOR is targeted by an operation.
+ */
+/* enum: Selects the port on the MAE virtual switch */
+#define MAE_MPORT_END_MAE 0x1
+/* enum: Selects the virtual NIC plugged into the MAE switch */
+#define MAE_MPORT_END_VNIC 0x2
+
+/* MAE_COUNTER_TYPE enum: The datapath maintains several sets of counters, each
+ * being associated with a different table. Note that the same counter ID may
+ * be allocated by different counter blocks, so e.g. AR counter 42 is different
+ * from CT counter 42. Generation counts are also type-specific. This value is
+ * also present in the header of streaming counter packets, in the IDENTIFIER
+ * field (see packetiser packet format definitions). Also note that LACP
+ * counter IDs are not allocated individually, instead the counter IDs are
+ * directly tied to the LACP balance table indices. These in turn are allocated
+ * in large contiguous blocks as a LAG config. Calling MAE_COUNTER_ALLOC/FREE
+ * with an LACP counter type will return EPERM.
+ */
+/* enum: Action Rule counters - can be referenced in AR response. */
+#define MAE_COUNTER_TYPE_AR 0x0
+/* enum: Conntrack counters - can be referenced in CT response. */
+#define MAE_COUNTER_TYPE_CT 0x1
+/* enum: Outer Rule counters - can be referenced in OR response. */
+#define MAE_COUNTER_TYPE_OR 0x2
+/* enum: LACP counters - linked to LACP balance table entries. */
+#define MAE_COUNTER_TYPE_LACP 0x3
+
+/* MAE_COUNTER_ID enum: ID of allocated counter or counter list. */
+/* enum: A counter ID that is guaranteed never to represent a real counter or
+ * counter list.
+ */
+#define MAE_COUNTER_ID_NULL 0xffffffff
+
+/* TABLE_ID enum: Unique IDs for tables. The 32-bit ID values have been
+ * structured with bits [31:24] reserved (0), [23:16] indicating which major
+ * block the table belongs to (0=VNIC TX, none currently; 1=MAE; 2=VNIC RX),
+ * [15:8] a unique ID within the block, and [7:0] reserved for future
+ * variations of the same table. (All of the tables currently defined within
+ * the streaming engines are listed here, but this does not imply that they are
+ * all supported - MC_CMD_TABLE_LIST returns the list of actually supported
+ * tables.) The DPU offload engines' enumerators follow a deliberate pattern:
+ * 0x01010000 + is_dpu_net * 0x10000 + is_wr_or_tx * 0x8000 + is_lite_pipe *
+ * 0x1000 + oe_engine_type * 0x100 + oe_instance_within_pipe * 0x10
+ */
+/* enum: Outer_Rule_Table in the MAE - refer to SF-123102-TC. */
+#define TABLE_ID_OUTER_RULE_TABLE 0x10000
+/* enum: Outer_Rule_No_CT_Table in the MAE - refer to SF-123102-TC. */
+#define TABLE_ID_OUTER_RULE_NO_CT_TABLE 0x10100
+/* enum: Mgmt_Filter_Table in the MAE - refer to SF-123102-TC. */
+#define TABLE_ID_MGMT_FILTER_TABLE 0x10200
+/* enum: Conntrack_Table in the MAE - refer to SF-123102-TC. */
+#define TABLE_ID_CONNTRACK_TABLE 0x10300
+/* enum: Action_Rule_Table in the MAE - refer to SF-123102-TC. */
+#define TABLE_ID_ACTION_RULE_TABLE 0x10400
+/* enum: Mgroup_Default_Action_Set_Table in the MAE - refer to SF-123102-TC. */
+#define TABLE_ID_MGROUP_DEFAULT_ACTION_SET_TABLE 0x10500
+/* enum: Encap_Hdr_Part1_Table in the MAE - refer to SF-123102-TC. */
+#define TABLE_ID_ENCAP_HDR_PART1_TABLE 0x10600
+/* enum: Encap_Hdr_Part2_Table in the MAE - refer to SF-123102-TC. */
+#define TABLE_ID_ENCAP_HDR_PART2_TABLE 0x10700
+/* enum: Replace_Src_MAC_Table in the MAE - refer to SF-123102-TC. */
+#define TABLE_ID_REPLACE_SRC_MAC_TABLE 0x10800
+/* enum: Replace_Dst_MAC_Table in the MAE - refer to SF-123102-TC. */
+#define TABLE_ID_REPLACE_DST_MAC_TABLE 0x10900
+/* enum: Dst_Mport_VC_Table in the MAE - refer to SF-123102-TC. */
+#define TABLE_ID_DST_MPORT_VC_TABLE 0x10a00
+/* enum: LACP_LAG_Config_Table in the MAE - refer to SF-123102-TC. */
+#define TABLE_ID_LACP_LAG_CONFIG_TABLE 0x10b00
+/* enum: LACP_Balance_Table in the MAE - refer to SF-123102-TC. */
+#define TABLE_ID_LACP_BALANCE_TABLE 0x10c00
+/* enum: Dst_Mport_Host_Chan_Table in the MAE - refer to SF-123102-TC.
*/ +#define TABLE_ID_DST_MPORT_HOST_CHAN_TABLE 0x10d00 +/* enum: VNIC_Rx_Encap_Table in VNIC Rx - refer to SF-123102-TC. */ +#define TABLE_ID_VNIC_RX_ENCAP_TABLE 0x20000 +/* enum: Steering_Table in VNIC Rx - refer to SF-123102-TC. */ +#define TABLE_ID_STEERING_TABLE 0x20100 +/* enum: RSS_Context_Table in VNIC Rx - refer to SF-123102-TC. */ +#define TABLE_ID_RSS_CONTEXT_TABLE 0x20200 +/* enum: Indirection_Table in VNIC Rx - refer to SF-123102-TC. */ +#define TABLE_ID_INDIRECTION_TABLE 0x20300 +/* enum: DPU.host read pipe first CRC offload engine profiles - refer to + * XN-200147-AN. + */ +#define TABLE_ID_DPU_HOST_RD_CRC0_OE_PROFILE 0x1010000 +/* enum: DPU.host read pipe second CRC offload engine profiles - refer to + * XN-200147-AN. + */ +#define TABLE_ID_DPU_HOST_RD_CRC1_OE_PROFILE 0x1010010 +/* enum: DPU.host write pipe first CRC offload engine profiles - refer to + * XN-200147-AN. + */ +#define TABLE_ID_DPU_HOST_WR_CRC0_OE_PROFILE 0x1018000 +/* enum: DPU.host write pipe second CRC offload engine profiles - refer to + * XN-200147-AN. + */ +#define TABLE_ID_DPU_HOST_WR_CRC1_OE_PROFILE 0x1018010 +/* enum: DPU.net 'full' receive pipe CRC offload engine profiles - refer to + * XN-200147-AN. + */ +#define TABLE_ID_DPU_NET_RX_CRC0_OE_PROFILE 0x1020000 +/* enum: DPU.net 'full' receive pipe first checksum offload engine profiles - + * refer to XN-200147-AN. + */ +#define TABLE_ID_DPU_NET_RX_CSUM0_OE_PROFILE 0x1020100 +/* enum: DPU.net 'full' receive pipe second checksum offload engine profiles - + * refer to XN-200147-AN. + */ +#define TABLE_ID_DPU_NET_RX_CSUM1_OE_PROFILE 0x1020110 +/* enum: DPU.net 'full' receive pipe AES-GCM offload engine profiles - refer to + * XN-200147-AN. + */ +#define TABLE_ID_DPU_NET_RX_AES_GCM0_OE_PROFILE 0x1020200 +/* enum: DPU.net 'lite' receive pipe CRC offload engine profiles - refer to + * XN-200147-AN. + */ +#define TABLE_ID_DPU_NET_RXLITE_CRC0_OE_PROFILE 0x1021000 +/* enum: DPU.net 'lite' receive pipe checksum offload engine profiles - refer + * to XN-200147-AN. + */ +#define TABLE_ID_DPU_NET_RXLITE_CSUM0_OE_PROFILE 0x1021100 +/* enum: DPU.net 'full' transmit pipe CRC offload engine profiles - refer to + * XN-200147-AN. + */ +#define TABLE_ID_DPU_NET_TX_CRC0_OE_PROFILE 0x1028000 +/* enum: DPU.net 'full' transmit pipe first checksum offload engine profiles - + * refer to XN-200147-AN. + */ +#define TABLE_ID_DPU_NET_TX_CSUM0_OE_PROFILE 0x1028100 +/* enum: DPU.net 'full' transmit pipe second checksum offload engine profiles - + * refer to XN-200147-AN. + */ +#define TABLE_ID_DPU_NET_TX_CSUM1_OE_PROFILE 0x1028110 +/* enum: DPU.net 'full' transmit pipe AES-GCM offload engine profiles - refer + * to XN-200147-AN. + */ +#define TABLE_ID_DPU_NET_TX_AES_GCM0_OE_PROFILE 0x1028200 +/* enum: DPU.net 'lite' transmit pipe CRC offload engine profiles - refer to + * XN-200147-AN. + */ +#define TABLE_ID_DPU_NET_TXLITE_CRC0_OE_PROFILE 0x1029000 +/* enum: DPU.net 'lite' transmit pipe checksum offload engine profiles - refer + * to XN-200147-AN. 
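+ */
+
+/* Illustrative only: composing a DPU offload engine TABLE_ID following
+ * the pattern given in the TABLE_ID description above; e.g.
+ * (true, false, true, 0, 0) yields 0x1021000, the DPU.net 'lite'
+ * receive pipe CRC0 profile table.
+ */
+static inline u32 efx_example_dpu_oe_table_id(bool is_dpu_net,
+					      bool is_wr_or_tx,
+					      bool is_lite_pipe,
+					      u8 oe_engine_type,
+					      u8 oe_instance_within_pipe)
+{
+	return 0x01010000 + is_dpu_net * 0x10000 + is_wr_or_tx * 0x8000 +
+	       is_lite_pipe * 0x1000 + oe_engine_type * 0x100 +
+	       oe_instance_within_pipe * 0x10;
+}
+
+/* enum: DPU.net 'lite' transmit pipe checksum offload engine profiles -
+ * refer to XN-200147-AN.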
+ */ +#define TABLE_ID_DPU_NET_TXLITE_CSUM0_OE_PROFILE 0x1029100 + +/* TABLE_COMPRESSED_VLAN enum: Compressed VLAN TPID as used by some field + * types; can be calculated by (((ether_type_msb >> 2) & 0x4) ^ 0x4) | + * (ether_type_msb & 0x3); + */ +#define TABLE_COMPRESSED_VLAN_TPID_8100 0x5 /* enum */ +#define TABLE_COMPRESSED_VLAN_TPID_88A8 0x4 /* enum */ +#define TABLE_COMPRESSED_VLAN_TPID_9100 0x1 /* enum */ +#define TABLE_COMPRESSED_VLAN_TPID_9200 0x2 /* enum */ +#define TABLE_COMPRESSED_VLAN_TPID_9300 0x3 /* enum */ + +/* TABLE_NAT_DIR enum: NAT direction. */ +#define TABLE_NAT_DIR_SOURCE 0x0 /* enum */ +#define TABLE_NAT_DIR_DEST 0x1 /* enum */ + +/* TABLE_RSS_KEY_MODE enum: Defines how the value for Toeplitz hashing for RSS + * is constructed as a concatenation (indicated here by "++") of packet header + * fields. + */ +/* enum: IP src addr ++ IP dst addr */ +#define TABLE_RSS_KEY_MODE_SA_DA 0x0 +/* enum: IP src addr ++ IP dst addr ++ TCP/UDP src port ++ TCP/UDP dst port */ +#define TABLE_RSS_KEY_MODE_SA_DA_SP_DP 0x1 +/* enum: IP src addr */ +#define TABLE_RSS_KEY_MODE_SA 0x2 +/* enum: IP dst addr */ +#define TABLE_RSS_KEY_MODE_DA 0x3 +/* enum: IP src addr ++ TCP/UDP src port */ +#define TABLE_RSS_KEY_MODE_SA_SP 0x4 +/* enum: IP dest addr ++ TCP dest port */ +#define TABLE_RSS_KEY_MODE_DA_DP 0x5 +/* enum: Nothing (produces input of 0, resulting in output hash of 0) */ +#define TABLE_RSS_KEY_MODE_NONE 0x7 + +/* TABLE_RSS_SPREAD_MODE enum: RSS spreading mode. */ +/* enum: RSS uses Indirection_Table lookup. */ +#define TABLE_RSS_SPREAD_MODE_INDIRECTION 0x0 +/* enum: RSS uses even spreading calculation. */ +#define TABLE_RSS_SPREAD_MODE_EVEN 0x1 + +/* CRC_VARIANT enum: Operation for the DPU CRC engine to perform. */ +/* enum: Calculate a 32-bit CRC. */ +#define CRC_VARIANT_CRC32 0x1 +/* enum: Calculate a 64-bit CRC. */ +#define CRC_VARIANT_CRC64 0x2 + +/* DPU_CSUM_OP enum: Operation for the DPU checksum engine to perform. */ +/* enum: Calculate the checksum for a TCP payload, output result on OPR bus. */ +#define DPU_CSUM_OP_CALC_TCP 0x0 +/* enum: Calculate the checksum for a UDP payload, output result on OPR bus. */ +#define DPU_CSUM_OP_CALC_UDP 0x1 +/* enum: Calculate the checksum for a TCP payload, output match/not match value + * on OPR bus. + */ +#define DPU_CSUM_OP_VALIDATE_TCP 0x2 +/* enum: Calculate the checksum for a UDP payload, output match/not match value + * on OPR bus. + */ +#define DPU_CSUM_OP_VALIDATE_UDP 0x3 + +/* GCM_OP_CODE enum: Operation for the DPU AES-GCM engine to perform. */ +/* enum: Encrypt/decrypt a stream of data. */ +#define GCM_OP_CODE_BULK_CRYPT 0x0 +/* enum: Calculate the authentication tag for a stream of data. */ +#define GCM_OP_CODE_BULK_AUTH 0x1 +/* enum: Encrypt/decrypt an IPsec packet. */ +#define GCM_OP_CODE_IPSEC_CRYPT 0x2 +/* enum: Calculate the authentication tag of an IPsec packet. */ +#define GCM_OP_CODE_IPSEC_AUTH 0x3 + +/* AES_KEY_LEN enum: Key size for AES crypto operations */ +/* enum: 128 bit key size. */ +#define AES_KEY_LEN_AES_KEY_128 0x0 +/* enum: 256 bit key size. */ +#define AES_KEY_LEN_AES_KEY_256 0x1 + +/* TABLE_FIELD_ID enum: Unique IDs for fields. Related concepts have been + * loosely grouped together into blocks with gaps for expansion, but the values + * are arbitrary. Field IDs are not specific to particular tables, and in some + * cases this sharing means that they are not used with the exact names of the + * corresponding table definitions in SF-123102-TC; however, the mapping should + * still be clear. 
The intent is that a list of fields, with their associated + * bit widths and semantics version code, unambiguously defines the semantics + * of the fields in a key or response. (Again, this list includes all of the + * fields currently defined within the streaming engines, but only a subset may + * actually be used by the supported list of tables.) + */ +/* enum: May appear multiple times within a key or response, and indicates that + * the field is unused and should be set to 0 (or masked out if permitted by + * the MASK_VALUE for this field). + */ +#define TABLE_FIELD_ID_UNUSED 0x0 +/* enum: Source m-port (a full m-port label). */ +#define TABLE_FIELD_ID_SRC_MPORT 0x1 +/* enum: Destination m-port (a full m-port label). */ +#define TABLE_FIELD_ID_DST_MPORT 0x2 +/* enum: Source m-group ID. */ +#define TABLE_FIELD_ID_SRC_MGROUP_ID 0x3 +/* enum: Physical network port ID (or m-port ID; same thing, for physical + * network ports). + */ +#define TABLE_FIELD_ID_NETWORK_PORT_ID 0x4 +/* enum: True if packet arrived via network port, false if it arrived via host. + */ +#define TABLE_FIELD_ID_IS_FROM_NETWORK 0x5 +/* enum: Full virtual channel from capsule header. */ +#define TABLE_FIELD_ID_CH_VC 0x6 +/* enum: Low bits of virtual channel from capsule header. */ +#define TABLE_FIELD_ID_CH_VC_LOW 0x7 +/* enum: User mark value in metadata and packet prefix. */ +#define TABLE_FIELD_ID_USER_MARK 0x8 +/* enum: User flag value in metadata and packet prefix. */ +#define TABLE_FIELD_ID_USER_FLAG 0x9 +/* enum: Counter ID associated with a response. All-bits-1 is a null value to + * suppress counting. + */ +#define TABLE_FIELD_ID_COUNTER_ID 0xa +/* enum: Discriminator which may be set by plugins in some lookup keys; this + * allows plugins to make a reinterpretation of packet fields in these keys + * without clashing with the normal interpretation. + */ +#define TABLE_FIELD_ID_DISCRIM 0xb +/* enum: Destination MAC address. The mapping from bytes in a frame to the + * 48-bit value for this field is in network order, i.e. a MAC address of + * AA:BB:CC:DD:EE:FF becomes a 48-bit value of 0xAABBCCDDEEFF. + */ +#define TABLE_FIELD_ID_DST_MAC 0x14 +/* enum: Source MAC address (see notes for DST_MAC). */ +#define TABLE_FIELD_ID_SRC_MAC 0x15 +/* enum: Outer VLAN tag TPID, compressed to an enumeration. */ +#define TABLE_FIELD_ID_OVLAN_TPID_COMPRESSED 0x16 +/* enum: Full outer VLAN tag TCI (16 bits). */ +#define TABLE_FIELD_ID_OVLAN 0x17 +/* enum: Outer VLAN ID (least significant 12 bits of full 16-bit TCI) only. */ +#define TABLE_FIELD_ID_OVLAN_VID 0x18 +/* enum: Inner VLAN tag TPID, compressed to an enumeration. */ +#define TABLE_FIELD_ID_IVLAN_TPID_COMPRESSED 0x19 +/* enum: Full inner VLAN tag TCI (16 bits). */ +#define TABLE_FIELD_ID_IVLAN 0x1a +/* enum: Inner VLAN ID (least significant 12 bits of full 16-bit TCI) only. */ +#define TABLE_FIELD_ID_IVLAN_VID 0x1b +/* enum: Ethertype. */ +#define TABLE_FIELD_ID_ETHER_TYPE 0x1c +/* enum: Source IP address, either IPv4 or IPv6. The mapping from bytes in a + * frame to the 128-bit value for this field is in network order, with IPv4 + * addresses assumed to have 12 bytes of trailing zeroes. i.e. the IPv6 address + * [2345::6789:ABCD] is 0x2345000000000000000000006789ABCD; the IPv4 address + * 192.168.1.2 is 0xC0A80102000000000000000000000000. + */ +#define TABLE_FIELD_ID_SRC_IP 0x1d +/* enum: Destination IP address (see notes for SRC_IP). */ +#define TABLE_FIELD_ID_DST_IP 0x1e +/* enum: IPv4 Type-of-Service or IPv6 Traffic Class field. 
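+ */
+
+/* Illustrative only: packing a MAC address into the 48-bit DST_MAC or
+ * SRC_MAC field value in network order, per the DST_MAC description
+ * above (AA:BB:CC:DD:EE:FF becomes 0xAABBCCDDEEFF).
+ */
+static inline u64 efx_example_table_mac_field(const u8 mac[6])
+{
+	u64 value = 0;
+	unsigned int i;
+
+	for (i = 0; i < 6; i++)
+		value = (value << 8) | mac[i];
+	return value;
+}
+
+/* enum: IPv4 Type-of-Service or IPv6 Traffic Class field.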
*/ +#define TABLE_FIELD_ID_IP_TOS 0x1f +/* enum: IP Protocol. */ +#define TABLE_FIELD_ID_IP_PROTO 0x20 +/* enum: Layer 4 source port. */ +#define TABLE_FIELD_ID_SRC_PORT 0x21 +/* enum: Layer 4 destination port. */ +#define TABLE_FIELD_ID_DST_PORT 0x22 +/* enum: TCP flags. */ +#define TABLE_FIELD_ID_TCP_FLAGS 0x23 +/* enum: Virtual Network Identifier (VXLAN) or Virtual Session ID (NVGRE). */ +#define TABLE_FIELD_ID_VNI 0x24 +/* enum: True if packet has any tunnel encapsulation header. */ +#define TABLE_FIELD_ID_HAS_ENCAP 0x32 +/* enum: True if encap header has an outer VLAN tag. */ +#define TABLE_FIELD_ID_HAS_ENC_OVLAN 0x33 +/* enum: True if encap header has an inner VLAN tag. */ +#define TABLE_FIELD_ID_HAS_ENC_IVLAN 0x34 +/* enum: True if encap header is some sort of IP. */ +#define TABLE_FIELD_ID_HAS_ENC_IP 0x35 +/* enum: True if encap header is specifically IPv4. */ +#define TABLE_FIELD_ID_HAS_ENC_IP4 0x36 +/* enum: True if encap header is UDP. */ +#define TABLE_FIELD_ID_HAS_ENC_UDP 0x37 +/* enum: True if only/inner frame has an outer VLAN tag. */ +#define TABLE_FIELD_ID_HAS_OVLAN 0x38 +/* enum: True if only/inner frame has an inner VLAN tag. */ +#define TABLE_FIELD_ID_HAS_IVLAN 0x39 +/* enum: True if only/inner frame is some sort of IP. */ +#define TABLE_FIELD_ID_HAS_IP 0x3a +/* enum: True if only/inner frame has a recognised L4 IP protocol (TCP or UDP). + */ +#define TABLE_FIELD_ID_HAS_L4 0x3b +/* enum: True if only/inner frame is an IP fragment. */ +#define TABLE_FIELD_ID_IP_FRAG 0x3c +/* enum: True if only/inner frame is the first IP fragment (fragment offset 0). + */ +#define TABLE_FIELD_ID_IP_FIRST_FRAG 0x3d +/* enum: True if only/inner frame has an IP Time-To-Live of <= 1. (Note: the + * implementation calls this "ip_ttl_is_one" but does in fact match packets + * with TTL=0 - which we shouldn't be seeing! - as well.) + */ +#define TABLE_FIELD_ID_IP_TTL_LE_ONE 0x3e +/* enum: True if only/inner frame has any of TCP SYN, FIN or RST flags set. */ +#define TABLE_FIELD_ID_TCP_INTERESTING_FLAGS 0x3f +/* enum: Plugin channel selection. */ +#define TABLE_FIELD_ID_RDP_PL_CHAN 0x50 +/* enum: Enable update of CH_ROUTE_RDP_C_PL route bit. */ +#define TABLE_FIELD_ID_RDP_C_PL_EN 0x51 +/* enum: New value of CH_ROUTE_RDP_C_PL route bit. */ +#define TABLE_FIELD_ID_RDP_C_PL 0x52 +/* enum: Enable update of CH_ROUTE_RDP_D_PL route bit. */ +#define TABLE_FIELD_ID_RDP_D_PL_EN 0x53 +/* enum: New value of CH_ROUTE_RDP_D_PL route bit. */ +#define TABLE_FIELD_ID_RDP_D_PL 0x54 +/* enum: Enable update of CH_ROUTE_RDP_OUT_HOST_CHAN route bit. */ +#define TABLE_FIELD_ID_RDP_OUT_HOST_CHAN_EN 0x55 +/* enum: New value of CH_ROUTE_RDP_OUT_HOST_CHAN route bit. */ +#define TABLE_FIELD_ID_RDP_OUT_HOST_CHAN 0x56 +/* enum: Recirculation ID for lookup sequences with two action rule lookups. */ +#define TABLE_FIELD_ID_RECIRC_ID 0x64 +/* enum: Domain ID passed to conntrack and action rule lookups. */ +#define TABLE_FIELD_ID_DOMAIN 0x65 +/* enum: Construction mode for encap_tunnel_id - see MAE_CT_VNI_MODE enum. */ +#define TABLE_FIELD_ID_CT_VNI_MODE 0x66 +/* enum: True to inhibit conntrack lookup if TCP SYN, FIN or RST flag is set. + */ +#define TABLE_FIELD_ID_CT_TCP_FLAGS_INHIBIT 0x67 +/* enum: True to do conntrack lookups for IPv4 TCP packets. */ +#define TABLE_FIELD_ID_DO_CT_IP4_TCP 0x68 +/* enum: True to do conntrack lookups for IPv4 UDP packets. */ +#define TABLE_FIELD_ID_DO_CT_IP4_UDP 0x69 +/* enum: True to do conntrack lookups for IPv6 TCP packets. 
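+ */
+
+/* Illustrative only: compressing a VLAN TPID to the TABLE_COMPRESSED_VLAN
+ * enumeration using the formula given with that enum above; e.g.
+ * 0x8100 -> 0x5 and 0x88A8 -> 0x4.
+ */
+static inline u8 efx_example_compress_tpid(u16 tpid)
+{
+	u8 ether_type_msb = tpid >> 8;
+
+	return (((ether_type_msb >> 2) & 0x4) ^ 0x4) | (ether_type_msb & 0x3);
+}
+
+/* enum: True to do conntrack lookups for IPv6 TCP packets.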
+ */
+#define TABLE_FIELD_ID_DO_CT_IP6_TCP 0x6a
+/* enum: True to do conntrack lookups for IPv6 UDP packets. */
+#define TABLE_FIELD_ID_DO_CT_IP6_UDP 0x6b
+/* enum: Outer rule identifier. */
+#define TABLE_FIELD_ID_OUTER_RULE_ID 0x6c
+/* enum: Encapsulation type - see MAE_MCDI_ENCAP_TYPE enum. */
+#define TABLE_FIELD_ID_ENCAP_TYPE 0x6d
+/* enum: Encap tunnel ID for conntrack lookups from VNI, VLAN tag(s), or 0,
+ * depending on CT_VNI_MODE.
+ */
+#define TABLE_FIELD_ID_ENCAP_TUNNEL_ID 0x78
+/* enum: A conntrack entry identifier, passed to plugins. */
+#define TABLE_FIELD_ID_CT_ENTRY_ID 0x79
+/* enum: Either source or destination NAT replacement port. */
+#define TABLE_FIELD_ID_NAT_PORT 0x7a
+/* enum: Either source or destination NAT replacement IPv4 address. Note that
+ * this is specifically an IPv4 address (IPv6 is not supported for NAT), with
+ * byte mapped to a 32-bit value in network order, i.e. the IPv4 address
+ * 192.168.1.2 is the value 0xC0A80102.
+ */
+#define TABLE_FIELD_ID_NAT_IP 0x7b
+/* enum: NAT direction: 0=>source, 1=>destination. */
+#define TABLE_FIELD_ID_NAT_DIR 0x7c
+/* enum: Conntrack mark value, passed to action rule lookup. Note that this is
+ * not related to the "user mark" in the metadata / packet prefix.
+ */
+#define TABLE_FIELD_ID_CT_MARK 0x7d
+/* enum: Private flags for conntrack, passed to action rule lookup. */
+#define TABLE_FIELD_ID_CT_PRIV_FLAGS 0x7e
+/* enum: True if the conntrack lookup resulted in a hit. */
+#define TABLE_FIELD_ID_CT_HIT 0x7f
+/* enum: True to suppress delivery when source and destination m-ports match.
+ */
+#define TABLE_FIELD_ID_SUPPRESS_SELF_DELIVERY 0x8c
+/* enum: True to perform tunnel decapsulation. */
+#define TABLE_FIELD_ID_DO_DECAP 0x8d
+/* enum: True to copy outer frame DSCP to inner on decap. */
+#define TABLE_FIELD_ID_DECAP_DSCP_COPY 0x8e
+/* enum: True to map outer frame ECN to inner on decap, by RFC 6040 rules. */
+#define TABLE_FIELD_ID_DECAP_ECN_RFC6040 0x8f
+/* enum: True to replace DSCP field. */
+#define TABLE_FIELD_ID_DO_REPLACE_DSCP 0x90
+/* enum: True to replace ECN field. */
+#define TABLE_FIELD_ID_DO_REPLACE_ECN 0x91
+/* enum: True to decrement IP Time-To-Live. */
+#define TABLE_FIELD_ID_DO_DECR_IP_TTL 0x92
+/* enum: True to replace source MAC address. */
+#define TABLE_FIELD_ID_DO_SRC_MAC 0x93
+/* enum: True to replace destination MAC address. */
+#define TABLE_FIELD_ID_DO_DST_MAC 0x94
+/* enum: Number of VLAN tags to pop. Valid values are 0, 1, or 2. */
+#define TABLE_FIELD_ID_DO_VLAN_POP 0x95
+/* enum: Number of VLAN tags to push. Valid values are 0, 1, or 2. */
+#define TABLE_FIELD_ID_DO_VLAN_PUSH 0x96
+/* enum: True to count this packet. */
+#define TABLE_FIELD_ID_DO_COUNT 0x97
+/* enum: True to perform tunnel encapsulation. */
+#define TABLE_FIELD_ID_DO_ENCAP 0x98
+/* enum: True to copy inner frame DSCP to outer on encap. */
+#define TABLE_FIELD_ID_ENCAP_DSCP_COPY 0x99
+/* enum: True to copy inner frame ECN to outer on encap. */
+#define TABLE_FIELD_ID_ENCAP_ECN_COPY 0x9a
+/* enum: True to deliver the packet (otherwise it is dropped). */
+#define TABLE_FIELD_ID_DO_DELIVER 0x9b
+/* enum: True to set the user flag in the metadata. */
+#define TABLE_FIELD_ID_DO_FLAG 0x9c
+/* enum: True to update the user mark in the metadata. */
+#define TABLE_FIELD_ID_DO_MARK 0x9d
+/* enum: True to override the capsule virtual channel for network deliveries.
+ */
+#define TABLE_FIELD_ID_DO_SET_NET_CHAN 0x9e
+/* enum: True to override the reported source m-port for host deliveries.
*/ +#define TABLE_FIELD_ID_DO_SET_SRC_MPORT 0x9f +/* enum: Encap header ID for DO_ENCAP, indexing Encap_Hdr_Part1/2_Table. */ +#define TABLE_FIELD_ID_ENCAP_HDR_ID 0xaa +/* enum: New DSCP value for DO_REPLACE_DSCP. */ +#define TABLE_FIELD_ID_DSCP_VALUE 0xab +/* enum: If DO_REPLACE_ECN is set, the new value for the ECN field. If + * DO_REPLACE_ECN is not set, ECN_CONTROL[0] and ECN_CONTROL[1] are set to + * request remapping of ECT0 and ECT1 ECN codepoints respectively to CE. + */ +#define TABLE_FIELD_ID_ECN_CONTROL 0xac +/* enum: Source MAC ID for DO_SRC_MAC, indexing Replace_Src_MAC_Table. */ +#define TABLE_FIELD_ID_SRC_MAC_ID 0xad +/* enum: Destination MAC ID for DO_DST_MAC, indexing Replace_Dst_MAC_Table. */ +#define TABLE_FIELD_ID_DST_MAC_ID 0xae +/* enum: Parameter for either DO_SET_NET_CHAN (only bottom 6 bits used in this + * case) or DO_SET_SRC_MPORT. + */ +#define TABLE_FIELD_ID_REPORTED_SRC_MPORT_OR_NET_CHAN 0xaf +/* enum: 64-byte chunk of added encapsulation header. */ +#define TABLE_FIELD_ID_CHUNK64 0xb4 +/* enum: 32-byte chunk of added encapsulation header. */ +#define TABLE_FIELD_ID_CHUNK32 0xb5 +/* enum: 16-byte chunk of added encapsulation header. */ +#define TABLE_FIELD_ID_CHUNK16 0xb6 +/* enum: 8-byte chunk of added encapsulation header. */ +#define TABLE_FIELD_ID_CHUNK8 0xb7 +/* enum: 4-byte chunk of added encapsulation header. */ +#define TABLE_FIELD_ID_CHUNK4 0xb8 +/* enum: 2-byte chunk of added encapsulation header. */ +#define TABLE_FIELD_ID_CHUNK2 0xb9 +/* enum: Added encapsulation header length in words. */ +#define TABLE_FIELD_ID_HDR_LEN_W 0xba +/* enum: Static value for layer 2/3 LACP hash of the encapsulation header. */ +#define TABLE_FIELD_ID_ENC_LACP_HASH_L23 0xbb +/* enum: Static value for layer 4 LACP hash of the encapsulation header. */ +#define TABLE_FIELD_ID_ENC_LACP_HASH_L4 0xbc +/* enum: True to use the static ENC_LACP_HASH values for the encap header + * instead of the calculated values for the inner frame when delivering a newly + * encapsulated packet to a LAG m-port. + */ +#define TABLE_FIELD_ID_USE_ENC_LACP_HASHES 0xbd +/* enum: True to trigger conntrack from first action rule lookup (AR=>CT=>AR + * sequence). + */ +#define TABLE_FIELD_ID_DO_CT 0xc8 +/* enum: True to perform NAT using parameters from conntrack lookup response. + */ +#define TABLE_FIELD_ID_DO_NAT 0xc9 +/* enum: True to trigger recirculated action rule lookup (AR=>AR sequence). */ +#define TABLE_FIELD_ID_DO_RECIRC 0xca +/* enum: Next action set payload ID for replay. The null value is all-1-bits. + */ +#define TABLE_FIELD_ID_NEXT_ACTION_SET_PAYLOAD 0xcb +/* enum: Next action set row ID for replay. The null value is all-1-bits. */ +#define TABLE_FIELD_ID_NEXT_ACTION_SET_ROW 0xcc +/* enum: Action set payload ID for additional delivery to management CPU. The + * null value is all-1-bits. + */ +#define TABLE_FIELD_ID_MC_ACTION_SET_PAYLOAD 0xcd +/* enum: Action set row ID for additional delivery to management CPU. The null + * value is all-1-bits. + */ +#define TABLE_FIELD_ID_MC_ACTION_SET_ROW 0xce +/* enum: True to include layer 4 in LACP hash on delivery to a LAG m-port. */ +#define TABLE_FIELD_ID_LACP_INC_L4 0xdc +/* enum: True to request that LACP is performed by a plugin. */ +#define TABLE_FIELD_ID_LACP_PLUGIN 0xdd +/* enum: LACP_Balance_Table base address divided by 64. */ +#define TABLE_FIELD_ID_BAL_TBL_BASE_DIV64 0xde +/* enum: Length of balance table region: 0=>64, 1=>128, 2=>256. */ +#define TABLE_FIELD_ID_BAL_TBL_LEN_ID 0xdf +/* enum: LACP LAG ID (i.e. 
the low 3 bits of LACP LAG mport ID), indexing + * LACP_LAG_Config_Table. Refer to SF-123102-TC. + */ +#define TABLE_FIELD_ID_LACP_LAG_ID 0xe0 +/* enum: Address in LACP_Balance_Table. The balance table is partitioned + * between LAGs according to the settings in LACP_LAG_Config_Table and then + * indexed by the LACP hash, providing the mapping to destination mports. Refer + * to SF-123102-TC. + */ +#define TABLE_FIELD_ID_BAL_TBL_ADDR 0xe1 +/* enum: UDP port to match for UDP-based encapsulations; required to be 0 for + * other encapsulation types. + */ +#define TABLE_FIELD_ID_UDP_PORT 0xe6 +/* enum: True to perform RSS based on outer fields rather than inner fields. */ +#define TABLE_FIELD_ID_RSS_ON_OUTER 0xe7 +/* enum: True to perform steering table lookup on outer fields rather than + * inner fields. + */ +#define TABLE_FIELD_ID_STEER_ON_OUTER 0xe8 +/* enum: Destination queue ID for host delivery. */ +#define TABLE_FIELD_ID_DST_QID 0xf0 +/* enum: True to drop this packet. */ +#define TABLE_FIELD_ID_DROP 0xf1 +/* enum: True to strip outer VLAN tag from this packet. */ +#define TABLE_FIELD_ID_VLAN_STRIP 0xf2 +/* enum: True to override the user mark field with the supplied USER_MARK, or + * false to bitwise-OR the USER_MARK into it. + */ +#define TABLE_FIELD_ID_MARK_OVERRIDE 0xf3 +/* enum: True to override the user flag field with the supplied USER_FLAG, or + * false to bitwise-OR the USER_FLAG into it. + */ +#define TABLE_FIELD_ID_FLAG_OVERRIDE 0xf4 +/* enum: RSS context ID, indexing the RSS_Context_Table. */ +#define TABLE_FIELD_ID_RSS_CTX_ID 0xfa +/* enum: True to enable RSS. */ +#define TABLE_FIELD_ID_RSS_EN 0xfb +/* enum: Toeplitz hash key. */ +#define TABLE_FIELD_ID_KEY 0xfc +/* enum: Key mode for IPv4 TCP packets - see TABLE_RSS_KEY_MODE enum. */ +#define TABLE_FIELD_ID_TCP_V4_KEY_MODE 0xfd +/* enum: Key mode for IPv6 TCP packets - see TABLE_RSS_KEY_MODE enum. */ +#define TABLE_FIELD_ID_TCP_V6_KEY_MODE 0xfe +/* enum: Key mode for IPv4 UDP packets - see TABLE_RSS_KEY_MODE enum. */ +#define TABLE_FIELD_ID_UDP_V4_KEY_MODE 0xff +/* enum: Key mode for IPv6 UDP packets - see TABLE_RSS_KEY_MODE enum. */ +#define TABLE_FIELD_ID_UDP_V6_KEY_MODE 0x100 +/* enum: Key mode for other IPv4 packets - see TABLE_RSS_KEY_MODE enum. */ +#define TABLE_FIELD_ID_OTHER_V4_KEY_MODE 0x101 +/* enum: Key mode for other IPv6 packets - see TABLE_RSS_KEY_MODE enum. */ +#define TABLE_FIELD_ID_OTHER_V6_KEY_MODE 0x102 +/* enum: Spreading mode - 0=>indirection; 1=>even. */ +#define TABLE_FIELD_ID_SPREAD_MODE 0x103 +/* enum: For indirection spreading mode, the base address of a region within + * the Indirection_Table. For even spreading mode, the number of queues to + * spread across (only values 1-255 are valid for this mode). + */ +#define TABLE_FIELD_ID_INDIR_TBL_BASE 0x104 +/* enum: For indirection spreading mode, identifies the length of a region + * within the Indirection_Table, where length = 32 << len_id. Must be set to 0 + * for even spreading mode. + */ +#define TABLE_FIELD_ID_INDIR_TBL_LEN_ID 0x105 +/* enum: An offset to be applied to the base destination queue ID. */ +#define TABLE_FIELD_ID_INDIR_OFFSET 0x106 +/* enum: DPU offload engine profile ID to address. */ +#define TABLE_FIELD_ID_OE_PROFILE 0x3e8 +/* enum: Width of the CRC to calculate - see CRC_VARIANT enum. */ +#define TABLE_FIELD_ID_CRC_VARIANT 0x3f2 +/* enum: If set, reflect the bits of each input byte, bit 7 is LSB, bit 0 is + * MSB. If clear, bit 7 is MSB, bit 0 is LSB. 
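+ */
+
+/* Illustrative only: the standard Ethernet CRC-32 expressed in these
+ * CRC_* fields would be CRC_VARIANT=CRC32, CRC_REFIN=1, CRC_REFOUT=1,
+ * CRC_INVOUT=1 and the normal-form polynomial CRC_POLY=0x04C11DB7.
+ */
+
+/* enum: If set, reflect the bits of each input byte, bit 7 is LSB,
+ * bit 0 is MSB. If clear, bit 7 is MSB, bit 0 is LSB.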
+ */
+#define TABLE_FIELD_ID_CRC_REFIN 0x3f3
+/* enum: If set, reflect the bits of each output byte, bit 7 is LSB, bit 0 is
+ * MSB. If clear, bit 7 is MSB, bit 0 is LSB.
+ */
+#define TABLE_FIELD_ID_CRC_REFOUT 0x3f4
+/* enum: If set, invert every bit of the output value. */
+#define TABLE_FIELD_ID_CRC_INVOUT 0x3f5
+/* enum: The CRC polynomial to use for checksumming, in normal form. See
+ * https://en.wikipedia.org/wiki/Cyclic_redundancy_check#Specification for a
+ * description of normal form.
+ */
+#define TABLE_FIELD_ID_CRC_POLY 0x3f6
+/* enum: Operation for the checksum engine to perform - see DPU_CSUM_OP enum.
+ */
+#define TABLE_FIELD_ID_CSUM_OP 0x410
+/* enum: Byte offset of checksum relative to region_start (for VALIDATE_*
+ * operations only).
+ */
+#define TABLE_FIELD_ID_CSUM_OFFSET 0x411
+/* enum: Indicates there is additional data on OPR bus that needs to be
+ * incorporated into the payload checksum.
+ */
+#define TABLE_FIELD_ID_CSUM_OPR_ADDITIONAL_DATA 0x412
+/* enum: Log2 data size of additional data on OPR bus. */
+#define TABLE_FIELD_ID_CSUM_OPR_DATA_SIZE_LOG2 0x413
+/* enum: 4 byte offset of where to find the additional data on the OPR bus. */
+#define TABLE_FIELD_ID_CSUM_OPR_4B_OFF 0x414
+/* enum: Operation type for the AES-GCM core - see GCM_OP_CODE enum. */
+#define TABLE_FIELD_ID_GCM_OP_CODE 0x41a
+/* enum: Key length - see AES_KEY_LEN enum. */
+#define TABLE_FIELD_ID_GCM_KEY_LEN 0x41b
+/* enum: OPR 4 byte offset for ICV or GHASH output (only in BULK_* mode) or
+ * IPSEC decrypt output.
+ */
+#define TABLE_FIELD_ID_GCM_OPR_4B_OFFSET 0x41c
+/* enum: If OP_CODE is BULK_*, indicates Emit GHASH (Fragment mode). Else,
+ * indicates IPSEC-ESN mode.
+ */
+#define TABLE_FIELD_ID_GCM_EMIT_GHASH_ISESN 0x41d
+/* enum: Replay Protection Enable. */
+#define TABLE_FIELD_ID_GCM_REPLAY_PROTECT_EN 0x41e
+/* enum: IPSEC Encrypt ESP trailer NEXT_HEADER byte. */
+#define TABLE_FIELD_ID_GCM_NEXT_HDR 0x41f
+/* enum: Replay Window Size. */
+#define TABLE_FIELD_ID_GCM_REPLAY_WIN_SIZE 0x420
+
+/* MCDI_EVENT structuredef: The structure of an MCDI_EVENT on Siena/EF10/EF100
+ * platforms
+ */
+#define MCDI_EVENT_LEN 8
+#define MCDI_EVENT_CONT_LBN 32
+#define MCDI_EVENT_CONT_WIDTH 1
+#define MCDI_EVENT_LEVEL_LBN 33
+#define MCDI_EVENT_LEVEL_WIDTH 3
+/* enum: Info. */
+#define MCDI_EVENT_LEVEL_INFO 0x0
+/* enum: Warning. */
+#define MCDI_EVENT_LEVEL_WARN 0x1
+/* enum: Error. */
+#define MCDI_EVENT_LEVEL_ERR 0x2
+/* enum: Fatal.
*/ +#define MCDI_EVENT_LEVEL_FATAL 0x3 +#define MCDI_EVENT_DATA_OFST 0 +#define MCDI_EVENT_DATA_LEN 4 +#define MCDI_EVENT_CMDDONE_SEQ_OFST 0 +#define MCDI_EVENT_CMDDONE_SEQ_LBN 0 +#define MCDI_EVENT_CMDDONE_SEQ_WIDTH 8 +#define MCDI_EVENT_CMDDONE_DATALEN_OFST 0 +#define MCDI_EVENT_CMDDONE_DATALEN_LBN 8 +#define MCDI_EVENT_CMDDONE_DATALEN_WIDTH 8 +#define MCDI_EVENT_CMDDONE_ERRNO_OFST 0 +#define MCDI_EVENT_CMDDONE_ERRNO_LBN 16 +#define MCDI_EVENT_CMDDONE_ERRNO_WIDTH 8 +#define MCDI_EVENT_LINKCHANGE_LP_CAP_OFST 0 +#define MCDI_EVENT_LINKCHANGE_LP_CAP_LBN 0 +#define MCDI_EVENT_LINKCHANGE_LP_CAP_WIDTH 16 +#define MCDI_EVENT_LINKCHANGE_SPEED_OFST 0 +#define MCDI_EVENT_LINKCHANGE_SPEED_LBN 16 +#define MCDI_EVENT_LINKCHANGE_SPEED_WIDTH 4 +/* enum: Link is down or link speed could not be determined */ +#define MCDI_EVENT_LINKCHANGE_SPEED_UNKNOWN 0x0 +/* enum: 100Mbs */ +#define MCDI_EVENT_LINKCHANGE_SPEED_100M 0x1 +/* enum: 1Gbs */ +#define MCDI_EVENT_LINKCHANGE_SPEED_1G 0x2 +/* enum: 10Gbs */ +#define MCDI_EVENT_LINKCHANGE_SPEED_10G 0x3 +/* enum: 40Gbs */ +#define MCDI_EVENT_LINKCHANGE_SPEED_40G 0x4 +/* enum: 25Gbs */ +#define MCDI_EVENT_LINKCHANGE_SPEED_25G 0x5 +/* enum: 50Gbs */ +#define MCDI_EVENT_LINKCHANGE_SPEED_50G 0x6 +/* enum: 100Gbs */ +#define MCDI_EVENT_LINKCHANGE_SPEED_100G 0x7 +#define MCDI_EVENT_LINKCHANGE_FCNTL_OFST 0 +#define MCDI_EVENT_LINKCHANGE_FCNTL_LBN 20 +#define MCDI_EVENT_LINKCHANGE_FCNTL_WIDTH 4 +#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_OFST 0 +#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_LBN 24 +#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_WIDTH 8 +#define MCDI_EVENT_SENSOREVT_MONITOR_OFST 0 +#define MCDI_EVENT_SENSOREVT_MONITOR_LBN 0 +#define MCDI_EVENT_SENSOREVT_MONITOR_WIDTH 8 +#define MCDI_EVENT_SENSOREVT_STATE_OFST 0 +#define MCDI_EVENT_SENSOREVT_STATE_LBN 8 +#define MCDI_EVENT_SENSOREVT_STATE_WIDTH 8 +#define MCDI_EVENT_SENSOREVT_VALUE_OFST 0 +#define MCDI_EVENT_SENSOREVT_VALUE_LBN 16 +#define MCDI_EVENT_SENSOREVT_VALUE_WIDTH 16 +#define MCDI_EVENT_FWALERT_DATA_OFST 0 +#define MCDI_EVENT_FWALERT_DATA_LBN 8 +#define MCDI_EVENT_FWALERT_DATA_WIDTH 24 +#define MCDI_EVENT_FWALERT_REASON_OFST 0 +#define MCDI_EVENT_FWALERT_REASON_LBN 0 +#define MCDI_EVENT_FWALERT_REASON_WIDTH 8 +/* enum: SRAM Access. */ +#define MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS 0x1 +#define MCDI_EVENT_FLR_VF_OFST 0 +#define MCDI_EVENT_FLR_VF_LBN 0 +#define MCDI_EVENT_FLR_VF_WIDTH 8 +#define MCDI_EVENT_TX_ERR_TXQ_OFST 0 +#define MCDI_EVENT_TX_ERR_TXQ_LBN 0 +#define MCDI_EVENT_TX_ERR_TXQ_WIDTH 12 +#define MCDI_EVENT_TX_ERR_TYPE_OFST 0 +#define MCDI_EVENT_TX_ERR_TYPE_LBN 12 +#define MCDI_EVENT_TX_ERR_TYPE_WIDTH 4 +/* enum: Descriptor loader reported failure. Specific to EF10-family NICs. */ +#define MCDI_EVENT_TX_ERR_DL_FAIL 0x1 +/* enum: Descriptor ring empty and no EOP seen for packet. Specific to + * EF10-family NICs + */ +#define MCDI_EVENT_TX_ERR_NO_EOP 0x2 +/* enum: Overlength packet. Specific to EF10-family NICs. */ +#define MCDI_EVENT_TX_ERR_2BIG 0x3 +/* enum: Malformed option descriptor. Specific to EF10-family NICs. */ +#define MCDI_EVENT_TX_BAD_OPTDESC 0x5 +/* enum: Option descriptor part way through a packet. Specific to EF10-family + * NICs. + */ +#define MCDI_EVENT_TX_OPT_IN_PKT 0x8 +/* enum: DMA or PIO data access error. 
Specific to EF10-family NICs */ +#define MCDI_EVENT_TX_ERR_BAD_DMA_OR_PIO 0x9 +#define MCDI_EVENT_TX_ERR_INFO_OFST 0 +#define MCDI_EVENT_TX_ERR_INFO_LBN 16 +#define MCDI_EVENT_TX_ERR_INFO_WIDTH 16 +#define MCDI_EVENT_TX_FLUSH_TO_DRIVER_OFST 0 +#define MCDI_EVENT_TX_FLUSH_TO_DRIVER_LBN 12 +#define MCDI_EVENT_TX_FLUSH_TO_DRIVER_WIDTH 1 +#define MCDI_EVENT_TX_FLUSH_TXQ_OFST 0 +#define MCDI_EVENT_TX_FLUSH_TXQ_LBN 0 +#define MCDI_EVENT_TX_FLUSH_TXQ_WIDTH 12 +#define MCDI_EVENT_PTP_ERR_TYPE_OFST 0 +#define MCDI_EVENT_PTP_ERR_TYPE_LBN 0 +#define MCDI_EVENT_PTP_ERR_TYPE_WIDTH 8 +/* enum: PLL lost lock */ +#define MCDI_EVENT_PTP_ERR_PLL_LOST 0x1 +/* enum: Filter overflow (PDMA) */ +#define MCDI_EVENT_PTP_ERR_FILTER 0x2 +/* enum: FIFO overflow (FPGA) */ +#define MCDI_EVENT_PTP_ERR_FIFO 0x3 +/* enum: Merge queue overflow */ +#define MCDI_EVENT_PTP_ERR_QUEUE 0x4 +#define MCDI_EVENT_AOE_ERR_TYPE_OFST 0 +#define MCDI_EVENT_AOE_ERR_TYPE_LBN 0 +#define MCDI_EVENT_AOE_ERR_TYPE_WIDTH 8 +/* enum: AOE failed to load - no valid image? */ +#define MCDI_EVENT_AOE_NO_LOAD 0x1 +/* enum: AOE FC reported an exception */ +#define MCDI_EVENT_AOE_FC_ASSERT 0x2 +/* enum: AOE FC watchdogged */ +#define MCDI_EVENT_AOE_FC_WATCHDOG 0x3 +/* enum: AOE FC failed to start */ +#define MCDI_EVENT_AOE_FC_NO_START 0x4 +/* enum: Generic AOE fault - likely to have been reported via other means too + * but intended for use by aoex driver. + */ +#define MCDI_EVENT_AOE_FAULT 0x5 +/* enum: Results of reprogramming the CPLD (status in AOE_ERR_DATA) */ +#define MCDI_EVENT_AOE_CPLD_REPROGRAMMED 0x6 +/* enum: AOE loaded successfully */ +#define MCDI_EVENT_AOE_LOAD 0x7 +/* enum: AOE DMA operation completed (LSB of HOST_HANDLE in AOE_ERR_DATA) */ +#define MCDI_EVENT_AOE_DMA 0x8 +/* enum: AOE byteblaster connected/disconnected (Connection status in + * AOE_ERR_DATA) + */ +#define MCDI_EVENT_AOE_BYTEBLASTER 0x9 +/* enum: DDR ECC status update */ +#define MCDI_EVENT_AOE_DDR_ECC_STATUS 0xa +/* enum: PTP status update */ +#define MCDI_EVENT_AOE_PTP_STATUS 0xb +/* enum: FPGA header incorrect */ +#define MCDI_EVENT_AOE_FPGA_LOAD_HEADER_ERR 0xc +/* enum: FPGA Powered Off due to error in powering up FPGA */ +#define MCDI_EVENT_AOE_FPGA_POWER_OFF 0xd +/* enum: AOE FPGA load failed due to MC to MUM communication failure */ +#define MCDI_EVENT_AOE_FPGA_LOAD_FAILED 0xe +/* enum: Notify that invalid flash type detected */ +#define MCDI_EVENT_AOE_INVALID_FPGA_FLASH_TYPE 0xf +/* enum: Notify that the attempt to run FPGA Controller firmware timed out */ +#define MCDI_EVENT_AOE_FC_RUN_TIMEDOUT 0x10 +/* enum: Failure to probe one or more FPGA boot flash chips */ +#define MCDI_EVENT_AOE_FPGA_BOOT_FLASH_INVALID 0x11 +/* enum: FPGA boot-flash contains an invalid image header */ +#define MCDI_EVENT_AOE_FPGA_BOOT_FLASH_HDR_INVALID 0x12 +/* enum: Failed to program clocks required by the FPGA */ +#define MCDI_EVENT_AOE_FPGA_CLOCKS_PROGRAM_FAILED 0x13 +/* enum: Notify that FPGA Controller is alive to serve MCDI requests */ +#define MCDI_EVENT_AOE_FC_RUNNING 0x14 +#define MCDI_EVENT_AOE_ERR_DATA_OFST 0 +#define MCDI_EVENT_AOE_ERR_DATA_LBN 8 +#define MCDI_EVENT_AOE_ERR_DATA_WIDTH 8 +#define MCDI_EVENT_AOE_ERR_FC_ASSERT_INFO_OFST 0 +#define MCDI_EVENT_AOE_ERR_FC_ASSERT_INFO_LBN 8 +#define MCDI_EVENT_AOE_ERR_FC_ASSERT_INFO_WIDTH 8 +/* enum: FC Assert happened, but the register information is not available */ +#define MCDI_EVENT_AOE_ERR_FC_ASSERT_SEEN 0x0 +/* enum: The register information for FC Assert is ready for reading by driver + */ +#define 
MCDI_EVENT_AOE_ERR_FC_ASSERT_DATA_READY 0x1 +#define MCDI_EVENT_AOE_ERR_CODE_FPGA_HEADER_VERIFY_FAILED_OFST 0 +#define MCDI_EVENT_AOE_ERR_CODE_FPGA_HEADER_VERIFY_FAILED_LBN 8 +#define MCDI_EVENT_AOE_ERR_CODE_FPGA_HEADER_VERIFY_FAILED_WIDTH 8 +/* enum: Reading from NV failed */ +#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_NV_READ_FAIL 0x0 +/* enum: Invalid Magic Number if FPGA header */ +#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_MAGIC_FAIL 0x1 +/* enum: Invalid Silicon type detected in header */ +#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_SILICON_TYPE 0x2 +/* enum: Unsupported VRatio */ +#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_VRATIO 0x3 +/* enum: Unsupported DDR Type */ +#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_TYPE 0x4 +/* enum: DDR Voltage out of supported range */ +#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_VOLTAGE 0x5 +/* enum: Unsupported DDR speed */ +#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_SPEED 0x6 +/* enum: Unsupported DDR size */ +#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_SIZE 0x7 +/* enum: Unsupported DDR rank */ +#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_RANK 0x8 +#define MCDI_EVENT_AOE_ERR_CODE_INVALID_FPGA_FLASH_TYPE_INFO_OFST 0 +#define MCDI_EVENT_AOE_ERR_CODE_INVALID_FPGA_FLASH_TYPE_INFO_LBN 8 +#define MCDI_EVENT_AOE_ERR_CODE_INVALID_FPGA_FLASH_TYPE_INFO_WIDTH 8 +/* enum: Primary boot flash */ +#define MCDI_EVENT_AOE_FLASH_TYPE_BOOT_PRIMARY 0x0 +/* enum: Secondary boot flash */ +#define MCDI_EVENT_AOE_FLASH_TYPE_BOOT_SECONDARY 0x1 +#define MCDI_EVENT_AOE_ERR_CODE_FPGA_POWER_OFF_OFST 0 +#define MCDI_EVENT_AOE_ERR_CODE_FPGA_POWER_OFF_LBN 8 +#define MCDI_EVENT_AOE_ERR_CODE_FPGA_POWER_OFF_WIDTH 8 +#define MCDI_EVENT_AOE_ERR_CODE_FPGA_LOAD_FAILED_OFST 0 +#define MCDI_EVENT_AOE_ERR_CODE_FPGA_LOAD_FAILED_LBN 8 +#define MCDI_EVENT_AOE_ERR_CODE_FPGA_LOAD_FAILED_WIDTH 8 +#define MCDI_EVENT_RX_ERR_RXQ_OFST 0 +#define MCDI_EVENT_RX_ERR_RXQ_LBN 0 +#define MCDI_EVENT_RX_ERR_RXQ_WIDTH 12 +#define MCDI_EVENT_RX_ERR_TYPE_OFST 0 +#define MCDI_EVENT_RX_ERR_TYPE_LBN 12 +#define MCDI_EVENT_RX_ERR_TYPE_WIDTH 4 +#define MCDI_EVENT_RX_ERR_INFO_OFST 0 +#define MCDI_EVENT_RX_ERR_INFO_LBN 16 +#define MCDI_EVENT_RX_ERR_INFO_WIDTH 16 +#define MCDI_EVENT_RX_FLUSH_TO_DRIVER_OFST 0 +#define MCDI_EVENT_RX_FLUSH_TO_DRIVER_LBN 12 +#define MCDI_EVENT_RX_FLUSH_TO_DRIVER_WIDTH 1 +#define MCDI_EVENT_RX_FLUSH_RXQ_OFST 0 +#define MCDI_EVENT_RX_FLUSH_RXQ_LBN 0 +#define MCDI_EVENT_RX_FLUSH_RXQ_WIDTH 12 +#define MCDI_EVENT_MC_REBOOT_COUNT_OFST 0 +#define MCDI_EVENT_MC_REBOOT_COUNT_LBN 0 +#define MCDI_EVENT_MC_REBOOT_COUNT_WIDTH 16 +#define MCDI_EVENT_MUM_ERR_TYPE_OFST 0 +#define MCDI_EVENT_MUM_ERR_TYPE_LBN 0 +#define MCDI_EVENT_MUM_ERR_TYPE_WIDTH 8 +/* enum: MUM failed to load - no valid image? */ +#define MCDI_EVENT_MUM_NO_LOAD 0x1 +/* enum: MUM f/w reported an exception */ +#define MCDI_EVENT_MUM_ASSERT 0x2 +/* enum: MUM not kicking watchdog */ +#define MCDI_EVENT_MUM_WATCHDOG 0x3 +#define MCDI_EVENT_MUM_ERR_DATA_OFST 0 +#define MCDI_EVENT_MUM_ERR_DATA_LBN 8 +#define MCDI_EVENT_MUM_ERR_DATA_WIDTH 8 +#define MCDI_EVENT_DBRET_SEQ_OFST 0 +#define MCDI_EVENT_DBRET_SEQ_LBN 0 +#define MCDI_EVENT_DBRET_SEQ_WIDTH 8 +#define MCDI_EVENT_SUC_ERR_TYPE_OFST 0 +#define MCDI_EVENT_SUC_ERR_TYPE_LBN 0 +#define MCDI_EVENT_SUC_ERR_TYPE_WIDTH 8 +/* enum: Corrupted or bad SUC application. */ +#define MCDI_EVENT_SUC_BAD_APP 0x1 +/* enum: SUC application reported an assert. */ +#define MCDI_EVENT_SUC_ASSERT 0x2 +/* enum: SUC application reported an exception. */ +#define MCDI_EVENT_SUC_EXCEPTION 0x3 +/* enum: SUC watchdog timer expired. 
*/ +#define MCDI_EVENT_SUC_WATCHDOG 0x4 +#define MCDI_EVENT_SUC_ERR_ADDRESS_OFST 0 +#define MCDI_EVENT_SUC_ERR_ADDRESS_LBN 8 +#define MCDI_EVENT_SUC_ERR_ADDRESS_WIDTH 24 +#define MCDI_EVENT_SUC_ERR_DATA_OFST 0 +#define MCDI_EVENT_SUC_ERR_DATA_LBN 8 +#define MCDI_EVENT_SUC_ERR_DATA_WIDTH 24 +#define MCDI_EVENT_LINKCHANGE_V2_LP_CAP_OFST 0 +#define MCDI_EVENT_LINKCHANGE_V2_LP_CAP_LBN 0 +#define MCDI_EVENT_LINKCHANGE_V2_LP_CAP_WIDTH 24 +#define MCDI_EVENT_LINKCHANGE_V2_SPEED_OFST 0 +#define MCDI_EVENT_LINKCHANGE_V2_SPEED_LBN 24 +#define MCDI_EVENT_LINKCHANGE_V2_SPEED_WIDTH 4 +/* Enum values, see field(s): */ +/* MCDI_EVENT/LINKCHANGE_SPEED */ +#define MCDI_EVENT_LINKCHANGE_V2_FLAGS_LINK_UP_OFST 0 +#define MCDI_EVENT_LINKCHANGE_V2_FLAGS_LINK_UP_LBN 28 +#define MCDI_EVENT_LINKCHANGE_V2_FLAGS_LINK_UP_WIDTH 1 +#define MCDI_EVENT_LINKCHANGE_V2_FCNTL_OFST 0 +#define MCDI_EVENT_LINKCHANGE_V2_FCNTL_LBN 29 +#define MCDI_EVENT_LINKCHANGE_V2_FCNTL_WIDTH 3 +/* Enum values, see field(s): */ +/* MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */ +#define MCDI_EVENT_MODULECHANGE_LD_CAP_OFST 0 +#define MCDI_EVENT_MODULECHANGE_LD_CAP_LBN 0 +#define MCDI_EVENT_MODULECHANGE_LD_CAP_WIDTH 30 +#define MCDI_EVENT_MODULECHANGE_SEQ_OFST 0 +#define MCDI_EVENT_MODULECHANGE_SEQ_LBN 30 +#define MCDI_EVENT_MODULECHANGE_SEQ_WIDTH 2 +#define MCDI_EVENT_DESC_PROXY_VIRTQ_VI_ID_OFST 0 +#define MCDI_EVENT_DESC_PROXY_VIRTQ_VI_ID_LBN 0 +#define MCDI_EVENT_DESC_PROXY_VIRTQ_VI_ID_WIDTH 16 +#define MCDI_EVENT_DESC_PROXY_VIRTQ_ID_OFST 0 +#define MCDI_EVENT_DESC_PROXY_VIRTQ_ID_LBN 16 +#define MCDI_EVENT_DESC_PROXY_VIRTQ_ID_WIDTH 16 +#define MCDI_EVENT_DATA_LBN 0 +#define MCDI_EVENT_DATA_WIDTH 32 +/* Alias for PTP_DATA. */ +#define MCDI_EVENT_SRC_LBN 36 +#define MCDI_EVENT_SRC_WIDTH 8 +/* Data associated with PTP events which doesn't fit into the main DATA field + */ +#define MCDI_EVENT_PTP_DATA_LBN 36 +#define MCDI_EVENT_PTP_DATA_WIDTH 8 +/* EF100 specific. Defined by QDMA. The phase bit, changes each time round the + * event ring + */ +#define MCDI_EVENT_EV_EVQ_PHASE_LBN 59 +#define MCDI_EVENT_EV_EVQ_PHASE_WIDTH 1 +#define MCDI_EVENT_EV_CODE_LBN 60 +#define MCDI_EVENT_EV_CODE_WIDTH 4 +#define MCDI_EVENT_CODE_LBN 44 +#define MCDI_EVENT_CODE_WIDTH 8 +/* enum: Event generated by host software */ +#define MCDI_EVENT_SW_EVENT 0x0 +/* enum: Bad assert. */ +#define MCDI_EVENT_CODE_BADSSERT 0x1 +/* enum: PM Notice. */ +#define MCDI_EVENT_CODE_PMNOTICE 0x2 +/* enum: Command done. */ +#define MCDI_EVENT_CODE_CMDDONE 0x3 +/* enum: Link change. */ +#define MCDI_EVENT_CODE_LINKCHANGE 0x4 +/* enum: Sensor Event. */ +#define MCDI_EVENT_CODE_SENSOREVT 0x5 +/* enum: Schedule error. */ +#define MCDI_EVENT_CODE_SCHEDERR 0x6 +/* enum: Reboot. */ +#define MCDI_EVENT_CODE_REBOOT 0x7 +/* enum: Mac stats DMA. */ +#define MCDI_EVENT_CODE_MAC_STATS_DMA 0x8 +/* enum: Firmware alert. */ +#define MCDI_EVENT_CODE_FWALERT 0x9 +/* enum: Function level reset. 
*/ +#define MCDI_EVENT_CODE_FLR 0xa +/* enum: Transmit error */ +#define MCDI_EVENT_CODE_TX_ERR 0xb +/* enum: Tx flush has completed */ +#define MCDI_EVENT_CODE_TX_FLUSH 0xc +/* enum: PTP packet received timestamp */ +#define MCDI_EVENT_CODE_PTP_RX 0xd +/* enum: PTP NIC failure */ +#define MCDI_EVENT_CODE_PTP_FAULT 0xe +/* enum: PTP PPS event */ +#define MCDI_EVENT_CODE_PTP_PPS 0xf +/* enum: Rx flush has completed */ +#define MCDI_EVENT_CODE_RX_FLUSH 0x10 +/* enum: Receive error */ +#define MCDI_EVENT_CODE_RX_ERR 0x11 +/* enum: AOE fault */ +#define MCDI_EVENT_CODE_AOE 0x12 +/* enum: Network port calibration failed (VCAL). */ +#define MCDI_EVENT_CODE_VCAL_FAIL 0x13 +/* enum: HW PPS event */ +#define MCDI_EVENT_CODE_HW_PPS 0x14 +/* enum: The MC has rebooted (huntington and later, siena uses CODE_REBOOT and + * a different format) + */ +#define MCDI_EVENT_CODE_MC_REBOOT 0x15 +/* enum: the MC has detected a parity error */ +#define MCDI_EVENT_CODE_PAR_ERR 0x16 +/* enum: the MC has detected a correctable error */ +#define MCDI_EVENT_CODE_ECC_CORR_ERR 0x17 +/* enum: the MC has detected an uncorrectable error */ +#define MCDI_EVENT_CODE_ECC_FATAL_ERR 0x18 +/* enum: The MC has entered offline BIST mode */ +#define MCDI_EVENT_CODE_MC_BIST 0x19 +/* enum: PTP tick event providing current NIC time */ +#define MCDI_EVENT_CODE_PTP_TIME 0x1a +/* enum: MUM fault */ +#define MCDI_EVENT_CODE_MUM 0x1b +/* enum: notify the designated PF of a new authorization request */ +#define MCDI_EVENT_CODE_PROXY_REQUEST 0x1c +/* enum: notify a function that awaits an authorization that its request has + * been processed and it may now resend the command + */ +#define MCDI_EVENT_CODE_PROXY_RESPONSE 0x1d +/* enum: MCDI command accepted. New commands can be issued but this command is + * not done yet. + */ +#define MCDI_EVENT_CODE_DBRET 0x1e +/* enum: The MC has detected a fault on the SUC */ +#define MCDI_EVENT_CODE_SUC 0x1f +/* enum: Link change. This event is sent instead of LINKCHANGE if + * WANT_V2_LINKCHANGES was set on driver attach. + */ +#define MCDI_EVENT_CODE_LINKCHANGE_V2 0x20 +/* enum: This event is sent if WANT_V2_LINKCHANGES was set on driver attach + * when the local device capabilities changes. This will usually correspond to + * a module change. + */ +#define MCDI_EVENT_CODE_MODULECHANGE 0x21 +/* enum: Notification that the sensors have been added and/or removed from the + * sensor table. This event includes the new sensor table generation count, if + * this does not match the driver's local copy it is expected to call + * DYNAMIC_SENSORS_LIST to refresh it. + */ +#define MCDI_EVENT_CODE_DYNAMIC_SENSORS_CHANGE 0x22 +/* enum: Notification that a sensor has changed state as a result of a reading + * crossing a threshold. This is sent as two events, the first event contains + * the handle and the sensor's state (in the SRC field), and the second + * contains the value. + */ +#define MCDI_EVENT_CODE_DYNAMIC_SENSORS_STATE_CHANGE 0x23 +/* enum: Notification that a descriptor proxy function configuration has been + * pushed to "live" status (visible to host). SRC field contains the handle of + * the affected descriptor proxy function. DATA field contains the generation + * count of configuration set applied. See MC_CMD_DESC_PROXY_FUNC_CONFIG_SET / + * MC_CMD_DESC_PROXY_FUNC_CONFIG_COMMIT and SF-122927-TC for details. + */ +#define MCDI_EVENT_CODE_DESC_PROXY_FUNC_CONFIG_COMMITTED 0x24 +/* enum: Notification that a descriptor proxy function has been reset. 
SRC + * field contains the handle of the affected descriptor proxy function. See + * SF-122927-TC for details. + */ +#define MCDI_EVENT_CODE_DESC_PROXY_FUNC_RESET 0x25 +/* enum: Notification that a driver attached to a descriptor proxy function. + * SRC field contains the handle of the affected descriptor proxy function. For + * Virtio proxy functions this message consists of two MCDI events, where the + * first event's (CONT=1) DATA field carries negotiated virtio feature bits 0 + * to 31 and the second (CONT=0) carries bits 32 to 63. For EF100 proxy + * functions event length and meaning of DATA field is not yet defined. See + * SF-122927-TC for details. + */ +#define MCDI_EVENT_CODE_DESC_PROXY_FUNC_DRIVER_ATTACH 0x26 +/* enum: Notification that the mport journal has changed since it was last read + * and updates can be read using the MC_CMD_MAE_MPORT_READ_JOURNAL command. The + * firmware may moderate the events so that an event is not sent for every + * change to the journal. + */ +#define MCDI_EVENT_CODE_MPORT_JOURNAL_CHANGE 0x27 +/* enum: Notification that a source queue is enabled and attached to its proxy + * sink queue. SRC field contains the handle of the affected descriptor proxy + * function. DATA field contains the relative source queue number and absolute + * VI ID. + */ +#define MCDI_EVENT_CODE_DESC_PROXY_FUNC_QUEUE_START 0x28 +/* enum: Artificial event generated by host and posted via MC for test + * purposes. + */ +#define MCDI_EVENT_CODE_TESTGEN 0xfa +#define MCDI_EVENT_CMDDONE_DATA_OFST 0 +#define MCDI_EVENT_CMDDONE_DATA_LEN 4 +#define MCDI_EVENT_CMDDONE_DATA_LBN 0 +#define MCDI_EVENT_CMDDONE_DATA_WIDTH 32 +#define MCDI_EVENT_LINKCHANGE_DATA_OFST 0 +#define MCDI_EVENT_LINKCHANGE_DATA_LEN 4 +#define MCDI_EVENT_LINKCHANGE_DATA_LBN 0 +#define MCDI_EVENT_LINKCHANGE_DATA_WIDTH 32 +#define MCDI_EVENT_SENSOREVT_DATA_OFST 0 +#define MCDI_EVENT_SENSOREVT_DATA_LEN 4 +#define MCDI_EVENT_SENSOREVT_DATA_LBN 0 +#define MCDI_EVENT_SENSOREVT_DATA_WIDTH 32 +#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_OFST 0 +#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_LEN 4 +#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_LBN 0 +#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_WIDTH 32 +#define MCDI_EVENT_TX_ERR_DATA_OFST 0 +#define MCDI_EVENT_TX_ERR_DATA_LEN 4 +#define MCDI_EVENT_TX_ERR_DATA_LBN 0 +#define MCDI_EVENT_TX_ERR_DATA_WIDTH 32 +/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the seconds field of + * timestamp + */ +#define MCDI_EVENT_PTP_SECONDS_OFST 0 +#define MCDI_EVENT_PTP_SECONDS_LEN 4 +#define MCDI_EVENT_PTP_SECONDS_LBN 0 +#define MCDI_EVENT_PTP_SECONDS_WIDTH 32 +/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the major field of + * timestamp + */ +#define MCDI_EVENT_PTP_MAJOR_OFST 0 +#define MCDI_EVENT_PTP_MAJOR_LEN 4 +#define MCDI_EVENT_PTP_MAJOR_LBN 0 +#define MCDI_EVENT_PTP_MAJOR_WIDTH 32 +/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the nanoseconds field + * of timestamp + */ +#define MCDI_EVENT_PTP_NANOSECONDS_OFST 0 +#define MCDI_EVENT_PTP_NANOSECONDS_LEN 4 +#define MCDI_EVENT_PTP_NANOSECONDS_LBN 0 +#define MCDI_EVENT_PTP_NANOSECONDS_WIDTH 32 +/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the minor field of + * timestamp + */ +#define MCDI_EVENT_PTP_MINOR_OFST 0 +#define MCDI_EVENT_PTP_MINOR_LEN 4 +#define MCDI_EVENT_PTP_MINOR_LBN 0 +#define MCDI_EVENT_PTP_MINOR_WIDTH 32 +/* For CODE_PTP_RX events, the lowest four bytes of sourceUUID from PTP packet + */ +#define MCDI_EVENT_PTP_UUID_OFST 0 +#define MCDI_EVENT_PTP_UUID_LEN 4 +#define 
MCDI_EVENT_PTP_UUID_LBN 0 +#define MCDI_EVENT_PTP_UUID_WIDTH 32 +#define MCDI_EVENT_RX_ERR_DATA_OFST 0 +#define MCDI_EVENT_RX_ERR_DATA_LEN 4 +#define MCDI_EVENT_RX_ERR_DATA_LBN 0 +#define MCDI_EVENT_RX_ERR_DATA_WIDTH 32 +#define MCDI_EVENT_PAR_ERR_DATA_OFST 0 +#define MCDI_EVENT_PAR_ERR_DATA_LEN 4 +#define MCDI_EVENT_PAR_ERR_DATA_LBN 0 +#define MCDI_EVENT_PAR_ERR_DATA_WIDTH 32 +#define MCDI_EVENT_ECC_CORR_ERR_DATA_OFST 0 +#define MCDI_EVENT_ECC_CORR_ERR_DATA_LEN 4 +#define MCDI_EVENT_ECC_CORR_ERR_DATA_LBN 0 +#define MCDI_EVENT_ECC_CORR_ERR_DATA_WIDTH 32 +#define MCDI_EVENT_ECC_FATAL_ERR_DATA_OFST 0 +#define MCDI_EVENT_ECC_FATAL_ERR_DATA_LEN 4 +#define MCDI_EVENT_ECC_FATAL_ERR_DATA_LBN 0 +#define MCDI_EVENT_ECC_FATAL_ERR_DATA_WIDTH 32 +/* For CODE_PTP_TIME events, the major value of the PTP clock */ +#define MCDI_EVENT_PTP_TIME_MAJOR_OFST 0 +#define MCDI_EVENT_PTP_TIME_MAJOR_LEN 4 +#define MCDI_EVENT_PTP_TIME_MAJOR_LBN 0 +#define MCDI_EVENT_PTP_TIME_MAJOR_WIDTH 32 +/* For CODE_PTP_TIME events, bits 19-26 of the minor value of the PTP clock */ +#define MCDI_EVENT_PTP_TIME_MINOR_26_19_LBN 36 +#define MCDI_EVENT_PTP_TIME_MINOR_26_19_WIDTH 8 +/* For CODE_PTP_TIME events, most significant bits of the minor value of the + * PTP clock. This is a more generic equivalent of PTP_TIME_MINOR_26_19. + */ +#define MCDI_EVENT_PTP_TIME_MINOR_MS_8BITS_LBN 36 +#define MCDI_EVENT_PTP_TIME_MINOR_MS_8BITS_WIDTH 8 +/* For CODE_PTP_TIME events where report sync status is enabled, indicates + * whether the NIC clock has ever been set + */ +#define MCDI_EVENT_PTP_TIME_NIC_CLOCK_VALID_LBN 36 +#define MCDI_EVENT_PTP_TIME_NIC_CLOCK_VALID_WIDTH 1 +/* For CODE_PTP_TIME events where report sync status is enabled, indicates + * whether the NIC and System clocks are in sync + */ +#define MCDI_EVENT_PTP_TIME_HOST_NIC_IN_SYNC_LBN 37 +#define MCDI_EVENT_PTP_TIME_HOST_NIC_IN_SYNC_WIDTH 1 +/* For CODE_PTP_TIME events where report sync status is enabled, bits 21-26 of + * the minor value of the PTP clock + */ +#define MCDI_EVENT_PTP_TIME_MINOR_26_21_LBN 38 +#define MCDI_EVENT_PTP_TIME_MINOR_26_21_WIDTH 6 +/* For CODE_PTP_TIME events, most significant bits of the minor value of the + * PTP clock. This is a more generic equivalent of PTP_TIME_MINOR_26_21. + */ +#define MCDI_EVENT_PTP_TIME_MINOR_MS_6BITS_LBN 38 +#define MCDI_EVENT_PTP_TIME_MINOR_MS_6BITS_WIDTH 6 +#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_OFST 0 +#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_LEN 4 +#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_LBN 0 +#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_WIDTH 32 +#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_OFST 0 +#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_LEN 4 +#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_LBN 0 +#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_WIDTH 32 +/* Zero means that the request has been completed or authorized, and the driver + * should resend it. A non-zero value means that the authorization has been + * denied, and gives the reason. Typically it will be EPERM. 
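+ *
+ * A minimal handling sketch (assuming the event is held in an
+ * efx_qword_t):
+ *
+ *   rc = EFX_QWORD_FIELD(*event, MCDI_EVENT_PROXY_RESPONSE_RC);
+ *   if (rc == 0)
+ *           resend the original request
+ *   else
+ *           complete the request with -rc (typically -EPERM)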
+ */ +#define MCDI_EVENT_PROXY_RESPONSE_RC_LBN 36 +#define MCDI_EVENT_PROXY_RESPONSE_RC_WIDTH 8 +#define MCDI_EVENT_DBRET_DATA_OFST 0 +#define MCDI_EVENT_DBRET_DATA_LEN 4 +#define MCDI_EVENT_DBRET_DATA_LBN 0 +#define MCDI_EVENT_DBRET_DATA_WIDTH 32 +#define MCDI_EVENT_LINKCHANGE_V2_DATA_OFST 0 +#define MCDI_EVENT_LINKCHANGE_V2_DATA_LEN 4 +#define MCDI_EVENT_LINKCHANGE_V2_DATA_LBN 0 +#define MCDI_EVENT_LINKCHANGE_V2_DATA_WIDTH 32 +#define MCDI_EVENT_MODULECHANGE_DATA_OFST 0 +#define MCDI_EVENT_MODULECHANGE_DATA_LEN 4 +#define MCDI_EVENT_MODULECHANGE_DATA_LBN 0 +#define MCDI_EVENT_MODULECHANGE_DATA_WIDTH 32 +/* The new generation count after a sensor has been added or deleted. */ +#define MCDI_EVENT_DYNAMIC_SENSORS_GENERATION_OFST 0 +#define MCDI_EVENT_DYNAMIC_SENSORS_GENERATION_LEN 4 +#define MCDI_EVENT_DYNAMIC_SENSORS_GENERATION_LBN 0 +#define MCDI_EVENT_DYNAMIC_SENSORS_GENERATION_WIDTH 32 +/* The handle of a dynamic sensor. */ +#define MCDI_EVENT_DYNAMIC_SENSORS_HANDLE_OFST 0 +#define MCDI_EVENT_DYNAMIC_SENSORS_HANDLE_LEN 4 +#define MCDI_EVENT_DYNAMIC_SENSORS_HANDLE_LBN 0 +#define MCDI_EVENT_DYNAMIC_SENSORS_HANDLE_WIDTH 32 +/* The current values of a sensor. */ +#define MCDI_EVENT_DYNAMIC_SENSORS_VALUE_OFST 0 +#define MCDI_EVENT_DYNAMIC_SENSORS_VALUE_LEN 4 +#define MCDI_EVENT_DYNAMIC_SENSORS_VALUE_LBN 0 +#define MCDI_EVENT_DYNAMIC_SENSORS_VALUE_WIDTH 32 +/* The current state of a sensor. */ +#define MCDI_EVENT_DYNAMIC_SENSORS_STATE_LBN 36 +#define MCDI_EVENT_DYNAMIC_SENSORS_STATE_WIDTH 8 +#define MCDI_EVENT_DESC_PROXY_DATA_OFST 0 +#define MCDI_EVENT_DESC_PROXY_DATA_LEN 4 +#define MCDI_EVENT_DESC_PROXY_DATA_LBN 0 +#define MCDI_EVENT_DESC_PROXY_DATA_WIDTH 32 +/* Generation count of applied configuration set */ +#define MCDI_EVENT_DESC_PROXY_GENERATION_OFST 0 +#define MCDI_EVENT_DESC_PROXY_GENERATION_LEN 4 +#define MCDI_EVENT_DESC_PROXY_GENERATION_LBN 0 +#define MCDI_EVENT_DESC_PROXY_GENERATION_WIDTH 32 +/* Virtio features negotiated with the host driver. First event (CONT=1) + * carries bits 0 to 31. Second event (CONT=0) carries bits 32 to 63. + */ +#define MCDI_EVENT_DESC_PROXY_VIRTIO_FEATURES_OFST 0 +#define MCDI_EVENT_DESC_PROXY_VIRTIO_FEATURES_LEN 4 +#define MCDI_EVENT_DESC_PROXY_VIRTIO_FEATURES_LBN 0 +#define MCDI_EVENT_DESC_PROXY_VIRTIO_FEATURES_WIDTH 32 + +/* FCDI_EVENT structuredef */ +#define FCDI_EVENT_LEN 8 +#define FCDI_EVENT_CONT_LBN 32 +#define FCDI_EVENT_CONT_WIDTH 1 +#define FCDI_EVENT_LEVEL_LBN 33 +#define FCDI_EVENT_LEVEL_WIDTH 3 +/* enum: Info. */ +#define FCDI_EVENT_LEVEL_INFO 0x0 +/* enum: Warning. */ +#define FCDI_EVENT_LEVEL_WARN 0x1 +/* enum: Error. */ +#define FCDI_EVENT_LEVEL_ERR 0x2 +/* enum: Fatal. */ +#define FCDI_EVENT_LEVEL_FATAL 0x3 +#define FCDI_EVENT_DATA_OFST 0 +#define FCDI_EVENT_DATA_LEN 4 +#define FCDI_EVENT_LINK_STATE_STATUS_OFST 0 +#define FCDI_EVENT_LINK_STATE_STATUS_LBN 0 +#define FCDI_EVENT_LINK_STATE_STATUS_WIDTH 1 +#define FCDI_EVENT_LINK_DOWN 0x0 /* enum */ +#define FCDI_EVENT_LINK_UP 0x1 /* enum */ +#define FCDI_EVENT_DATA_LBN 0 +#define FCDI_EVENT_DATA_WIDTH 32 +#define FCDI_EVENT_SRC_LBN 36 +#define FCDI_EVENT_SRC_WIDTH 8 +#define FCDI_EVENT_EV_CODE_LBN 60 +#define FCDI_EVENT_EV_CODE_WIDTH 4 +#define FCDI_EVENT_CODE_LBN 44 +#define FCDI_EVENT_CODE_WIDTH 8 +/* enum: The FC was rebooted. */ +#define FCDI_EVENT_CODE_REBOOT 0x1 +/* enum: Bad assert. */ +#define FCDI_EVENT_CODE_ASSERT 0x2 +/* enum: DDR3 test result. */ +#define FCDI_EVENT_CODE_DDR_TEST_RESULT 0x3 +/* enum: Link status. 
*/
+#define FCDI_EVENT_CODE_LINK_STATE 0x4
+/* enum: A timed read is ready to be serviced. */
+#define FCDI_EVENT_CODE_TIMED_READ 0x5
+/* enum: One or more PPS IN events */
+#define FCDI_EVENT_CODE_PPS_IN 0x6
+/* enum: Tick event from PTP clock */
+#define FCDI_EVENT_CODE_PTP_TICK 0x7
+/* enum: ECC error counters */
+#define FCDI_EVENT_CODE_DDR_ECC_STATUS 0x8
+/* enum: Current status of PTP */
+#define FCDI_EVENT_CODE_PTP_STATUS 0x9
+/* enum: Port id config to map MC-FC port idx */
+#define FCDI_EVENT_CODE_PORT_CONFIG 0xa
+/* enum: Boot result or error code */
+#define FCDI_EVENT_CODE_BOOT_RESULT 0xb
+#define FCDI_EVENT_REBOOT_SRC_LBN 36
+#define FCDI_EVENT_REBOOT_SRC_WIDTH 8
+#define FCDI_EVENT_REBOOT_FC_FW 0x0 /* enum */
+#define FCDI_EVENT_REBOOT_FC_BOOTLOADER 0x1 /* enum */
+#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_OFST 0
+#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LEN 4
+#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LBN 0
+#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_WIDTH 32
+#define FCDI_EVENT_ASSERT_TYPE_LBN 36
+#define FCDI_EVENT_ASSERT_TYPE_WIDTH 8
+#define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_LBN 36
+#define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_WIDTH 8
+#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_OFST 0
+#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_LEN 4
+#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_LBN 0
+#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_WIDTH 32
+#define FCDI_EVENT_LINK_STATE_DATA_OFST 0
+#define FCDI_EVENT_LINK_STATE_DATA_LEN 4
+#define FCDI_EVENT_LINK_STATE_DATA_LBN 0
+#define FCDI_EVENT_LINK_STATE_DATA_WIDTH 32
+#define FCDI_EVENT_PTP_STATE_OFST 0
+#define FCDI_EVENT_PTP_STATE_LEN 4
+#define FCDI_EVENT_PTP_UNDEFINED 0x0 /* enum */
+#define FCDI_EVENT_PTP_SETUP_FAILED 0x1 /* enum */
+#define FCDI_EVENT_PTP_OPERATIONAL 0x2 /* enum */
+#define FCDI_EVENT_PTP_STATE_LBN 0
+#define FCDI_EVENT_PTP_STATE_WIDTH 32
+#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_LBN 36
+#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_WIDTH 8
+#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_OFST 0
+#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_LEN 4
+#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_LBN 0
+#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_WIDTH 32
+/* Index of MC port being referred to */
+#define FCDI_EVENT_PORT_CONFIG_SRC_LBN 36
+#define FCDI_EVENT_PORT_CONFIG_SRC_WIDTH 8
+/* FC Port index that matches the MC port index in SRC */
+#define FCDI_EVENT_PORT_CONFIG_DATA_OFST 0
+#define FCDI_EVENT_PORT_CONFIG_DATA_LEN 4
+#define FCDI_EVENT_PORT_CONFIG_DATA_LBN 0
+#define FCDI_EVENT_PORT_CONFIG_DATA_WIDTH 32
+#define FCDI_EVENT_BOOT_RESULT_OFST 0
+#define FCDI_EVENT_BOOT_RESULT_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_AOE/MC_CMD_AOE_OUT_INFO/FC_BOOT_RESULT */
+#define FCDI_EVENT_BOOT_RESULT_LBN 0
+#define FCDI_EVENT_BOOT_RESULT_WIDTH 32
+
+/* FCDI_EXTENDED_EVENT_PPS structuredef: Extended FCDI event to send PPS events
+ * to the MC. Note that this structure is overlaid over a normal FCDI event
+ * such that bits 32-63 containing event code, level, source etc remain the
+ * same. In this case the data field of the header is defined to be the
+ * number of timestamps.
+ */
+#define FCDI_EXTENDED_EVENT_PPS_LENMIN 16
+#define FCDI_EXTENDED_EVENT_PPS_LENMAX 248
+#define FCDI_EXTENDED_EVENT_PPS_LENMAX_MCDI2 1016
+#define FCDI_EXTENDED_EVENT_PPS_LEN(num) (8+8*(num))
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_NUM(len) (((len)-8)/8)
+/* Number of timestamps following */
+#define FCDI_EXTENDED_EVENT_PPS_COUNT_OFST 0
+#define FCDI_EXTENDED_EVENT_PPS_COUNT_LEN 4
+#define FCDI_EXTENDED_EVENT_PPS_COUNT_LBN 0
+#define FCDI_EXTENDED_EVENT_PPS_COUNT_WIDTH 32
+/* Seconds field of a timestamp record */
+#define FCDI_EXTENDED_EVENT_PPS_SECONDS_OFST 8
+#define FCDI_EXTENDED_EVENT_PPS_SECONDS_LEN 4
+#define FCDI_EXTENDED_EVENT_PPS_SECONDS_LBN 64
+#define FCDI_EXTENDED_EVENT_PPS_SECONDS_WIDTH 32
+/* Nanoseconds field of a timestamp record */
+#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_OFST 12
+#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LEN 4
+#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LBN 96
+#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_WIDTH 32
+/* Timestamp records comprising the event */
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_OFST 8
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LEN 8
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LO_OFST 8
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LO_LEN 4
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LO_LBN 64
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LO_WIDTH 32
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_HI_OFST 12
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_HI_LEN 4
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_HI_LBN 96
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_HI_WIDTH 32
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MINNUM 1
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MAXNUM 30
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MAXNUM_MCDI2 126
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LBN 64
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_WIDTH 64
+
+/* MUM_EVENT structuredef */
+#define MUM_EVENT_LEN 8
+#define MUM_EVENT_CONT_LBN 32
+#define MUM_EVENT_CONT_WIDTH 1
+#define MUM_EVENT_LEVEL_LBN 33
+#define MUM_EVENT_LEVEL_WIDTH 3
+/* enum: Info. */
+#define MUM_EVENT_LEVEL_INFO 0x0
+/* enum: Warning. */
+#define MUM_EVENT_LEVEL_WARN 0x1
+/* enum: Error. */
+#define MUM_EVENT_LEVEL_ERR 0x2
+/* enum: Fatal.
*/ +#define MUM_EVENT_LEVEL_FATAL 0x3 +#define MUM_EVENT_DATA_OFST 0 +#define MUM_EVENT_DATA_LEN 4 +#define MUM_EVENT_SENSOR_ID_OFST 0 +#define MUM_EVENT_SENSOR_ID_LBN 0 +#define MUM_EVENT_SENSOR_ID_WIDTH 8 +/* Enum values, see field(s): */ +/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */ +#define MUM_EVENT_SENSOR_STATE_OFST 0 +#define MUM_EVENT_SENSOR_STATE_LBN 8 +#define MUM_EVENT_SENSOR_STATE_WIDTH 8 +#define MUM_EVENT_PORT_PHY_READY_OFST 0 +#define MUM_EVENT_PORT_PHY_READY_LBN 0 +#define MUM_EVENT_PORT_PHY_READY_WIDTH 1 +#define MUM_EVENT_PORT_PHY_LINK_UP_OFST 0 +#define MUM_EVENT_PORT_PHY_LINK_UP_LBN 1 +#define MUM_EVENT_PORT_PHY_LINK_UP_WIDTH 1 +#define MUM_EVENT_PORT_PHY_TX_LOL_OFST 0 +#define MUM_EVENT_PORT_PHY_TX_LOL_LBN 2 +#define MUM_EVENT_PORT_PHY_TX_LOL_WIDTH 1 +#define MUM_EVENT_PORT_PHY_RX_LOL_OFST 0 +#define MUM_EVENT_PORT_PHY_RX_LOL_LBN 3 +#define MUM_EVENT_PORT_PHY_RX_LOL_WIDTH 1 +#define MUM_EVENT_PORT_PHY_TX_LOS_OFST 0 +#define MUM_EVENT_PORT_PHY_TX_LOS_LBN 4 +#define MUM_EVENT_PORT_PHY_TX_LOS_WIDTH 1 +#define MUM_EVENT_PORT_PHY_RX_LOS_OFST 0 +#define MUM_EVENT_PORT_PHY_RX_LOS_LBN 5 +#define MUM_EVENT_PORT_PHY_RX_LOS_WIDTH 1 +#define MUM_EVENT_PORT_PHY_TX_FAULT_OFST 0 +#define MUM_EVENT_PORT_PHY_TX_FAULT_LBN 6 +#define MUM_EVENT_PORT_PHY_TX_FAULT_WIDTH 1 +#define MUM_EVENT_DATA_LBN 0 +#define MUM_EVENT_DATA_WIDTH 32 +#define MUM_EVENT_SRC_LBN 36 +#define MUM_EVENT_SRC_WIDTH 8 +#define MUM_EVENT_EV_CODE_LBN 60 +#define MUM_EVENT_EV_CODE_WIDTH 4 +#define MUM_EVENT_CODE_LBN 44 +#define MUM_EVENT_CODE_WIDTH 8 +/* enum: The MUM was rebooted. */ +#define MUM_EVENT_CODE_REBOOT 0x1 +/* enum: Bad assert. */ +#define MUM_EVENT_CODE_ASSERT 0x2 +/* enum: Sensor failure. */ +#define MUM_EVENT_CODE_SENSOR 0x3 +/* enum: Link fault has been asserted, or has cleared. 
*/ +#define MUM_EVENT_CODE_QSFP_LASI_INTERRUPT 0x4 +#define MUM_EVENT_SENSOR_DATA_OFST 0 +#define MUM_EVENT_SENSOR_DATA_LEN 4 +#define MUM_EVENT_SENSOR_DATA_LBN 0 +#define MUM_EVENT_SENSOR_DATA_WIDTH 32 +#define MUM_EVENT_PORT_PHY_FLAGS_OFST 0 +#define MUM_EVENT_PORT_PHY_FLAGS_LEN 4 +#define MUM_EVENT_PORT_PHY_FLAGS_LBN 0 +#define MUM_EVENT_PORT_PHY_FLAGS_WIDTH 32 +#define MUM_EVENT_PORT_PHY_COPPER_LEN_OFST 0 +#define MUM_EVENT_PORT_PHY_COPPER_LEN_LEN 4 +#define MUM_EVENT_PORT_PHY_COPPER_LEN_LBN 0 +#define MUM_EVENT_PORT_PHY_COPPER_LEN_WIDTH 32 +#define MUM_EVENT_PORT_PHY_CAPS_OFST 0 +#define MUM_EVENT_PORT_PHY_CAPS_LEN 4 +#define MUM_EVENT_PORT_PHY_CAPS_LBN 0 +#define MUM_EVENT_PORT_PHY_CAPS_WIDTH 32 +#define MUM_EVENT_PORT_PHY_TECH_OFST 0 +#define MUM_EVENT_PORT_PHY_TECH_LEN 4 +#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_UNKNOWN 0x0 /* enum */ +#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_OPTICAL 0x1 /* enum */ +#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE 0x2 /* enum */ +#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE_EQUALIZED 0x3 /* enum */ +#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_ACTIVE_LIMITING 0x4 /* enum */ +#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_ACTIVE_LINEAR 0x5 /* enum */ +#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_BASE_T 0x6 /* enum */ +#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_LOOPBACK_PASSIVE 0x7 /* enum */ +#define MUM_EVENT_PORT_PHY_TECH_LBN 0 +#define MUM_EVENT_PORT_PHY_TECH_WIDTH 32 +#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_LBN 36 +#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_WIDTH 4 +#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_FLAGS 0x0 /* enum */ +#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_COPPER_LEN 0x1 /* enum */ +#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_CAPS 0x2 /* enum */ +#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_TECH 0x3 /* enum */ +#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_MAX 0x4 /* enum */ +#define MUM_EVENT_PORT_PHY_SRC_PORT_NO_LBN 40 +#define MUM_EVENT_PORT_PHY_SRC_PORT_NO_WIDTH 4 + + +/***********************************/ +/* MC_CMD_READ32 + * Read multiple 32byte words from MC memory. Note - this command really + * belongs to INSECURE category but is required by shmboot. The command handler + * has additional checks to reject insecure calls. + */ +#define MC_CMD_READ32 0x1 +#undef MC_CMD_0x1_PRIVILEGE_CTG + +#define MC_CMD_0x1_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_READ32_IN msgrequest */ +#define MC_CMD_READ32_IN_LEN 8 +#define MC_CMD_READ32_IN_ADDR_OFST 0 +#define MC_CMD_READ32_IN_ADDR_LEN 4 +#define MC_CMD_READ32_IN_NUMWORDS_OFST 4 +#define MC_CMD_READ32_IN_NUMWORDS_LEN 4 + +/* MC_CMD_READ32_OUT msgresponse */ +#define MC_CMD_READ32_OUT_LENMIN 4 +#define MC_CMD_READ32_OUT_LENMAX 252 +#define MC_CMD_READ32_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_READ32_OUT_LEN(num) (0+4*(num)) +#define MC_CMD_READ32_OUT_BUFFER_NUM(len) (((len)-0)/4) +#define MC_CMD_READ32_OUT_BUFFER_OFST 0 +#define MC_CMD_READ32_OUT_BUFFER_LEN 4 +#define MC_CMD_READ32_OUT_BUFFER_MINNUM 1 +#define MC_CMD_READ32_OUT_BUFFER_MAXNUM 63 +#define MC_CMD_READ32_OUT_BUFFER_MAXNUM_MCDI2 255 + + +/***********************************/ +/* MC_CMD_WRITE32 + * Write multiple 32byte words to MC memory. 
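+ *
+ * The request is variable-length; callers size it with the LEN()/NUM()
+ * helpers defined below, e.g. for a (hypothetical) write of 8 words:
+ *
+ *   inlen = MC_CMD_WRITE32_IN_LEN(8);   i.e. 4 + 4*8 = 36 bytes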
+ */ +#define MC_CMD_WRITE32 0x2 +#undef MC_CMD_0x2_PRIVILEGE_CTG + +#define MC_CMD_0x2_PRIVILEGE_CTG SRIOV_CTG_INSECURE + +/* MC_CMD_WRITE32_IN msgrequest */ +#define MC_CMD_WRITE32_IN_LENMIN 8 +#define MC_CMD_WRITE32_IN_LENMAX 252 +#define MC_CMD_WRITE32_IN_LENMAX_MCDI2 1020 +#define MC_CMD_WRITE32_IN_LEN(num) (4+4*(num)) +#define MC_CMD_WRITE32_IN_BUFFER_NUM(len) (((len)-4)/4) +#define MC_CMD_WRITE32_IN_ADDR_OFST 0 +#define MC_CMD_WRITE32_IN_ADDR_LEN 4 +#define MC_CMD_WRITE32_IN_BUFFER_OFST 4 +#define MC_CMD_WRITE32_IN_BUFFER_LEN 4 +#define MC_CMD_WRITE32_IN_BUFFER_MINNUM 1 +#define MC_CMD_WRITE32_IN_BUFFER_MAXNUM 62 +#define MC_CMD_WRITE32_IN_BUFFER_MAXNUM_MCDI2 254 + +/* MC_CMD_WRITE32_OUT msgresponse */ +#define MC_CMD_WRITE32_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_COPYCODE + * Copy MC code between two locations and jump. Note - this command really + * belongs to INSECURE category but is required by shmboot. The command handler + * has additional checks to reject insecure calls. + */ +#define MC_CMD_COPYCODE 0x3 +#undef MC_CMD_0x3_PRIVILEGE_CTG + +#define MC_CMD_0x3_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND + +/* MC_CMD_COPYCODE_IN msgrequest */ +#define MC_CMD_COPYCODE_IN_LEN 16 +/* Source address + * + * The main image should be entered via a copy of a single word from and to a + * magic address, which controls various aspects of the boot. The magic address + * is a bitfield, with each bit as documented below. + */ +#define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0 +#define MC_CMD_COPYCODE_IN_SRC_ADDR_LEN 4 +/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT (see below) */ +#define MC_CMD_COPYCODE_HUNT_NO_MAGIC_ADDR 0x10000 +/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT and + * BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED (see below) + */ +#define MC_CMD_COPYCODE_HUNT_NO_DATAPATH_MAGIC_ADDR 0x1d0d0 +/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT, + * BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED and BOOT_MAGIC_IGNORE_CONFIG (see + * below) + */ +#define MC_CMD_COPYCODE_HUNT_IGNORE_CONFIG_MAGIC_ADDR 0x1badc +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_OFST 0 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_LBN 17 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_WIDTH 1 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_OFST 0 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_LBN 2 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_WIDTH 1 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_OFST 0 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_LBN 3 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_WIDTH 1 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_OFST 0 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_LBN 4 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_WIDTH 1 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_OFST 0 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_LBN 5 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_WIDTH 1 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_OFST 0 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_LBN 6 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_WIDTH 1 +/* Destination address */ +#define MC_CMD_COPYCODE_IN_DEST_ADDR_OFST 4 +#define MC_CMD_COPYCODE_IN_DEST_ADDR_LEN 4 +#define MC_CMD_COPYCODE_IN_NUMWORDS_OFST 8 +#define MC_CMD_COPYCODE_IN_NUMWORDS_LEN 4 +/* Address of where to jump after copy. 
*/ +#define MC_CMD_COPYCODE_IN_JUMP_OFST 12 +#define MC_CMD_COPYCODE_IN_JUMP_LEN 4 +/* enum: Control should return to the caller rather than jumping */ +#define MC_CMD_COPYCODE_JUMP_NONE 0x1 + +/* MC_CMD_COPYCODE_OUT msgresponse */ +#define MC_CMD_COPYCODE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_SET_FUNC + * Select function for function-specific commands. + */ +#define MC_CMD_SET_FUNC 0x4 +#undef MC_CMD_0x4_PRIVILEGE_CTG + +#define MC_CMD_0x4_PRIVILEGE_CTG SRIOV_CTG_INSECURE + +/* MC_CMD_SET_FUNC_IN msgrequest */ +#define MC_CMD_SET_FUNC_IN_LEN 4 +/* Set function */ +#define MC_CMD_SET_FUNC_IN_FUNC_OFST 0 +#define MC_CMD_SET_FUNC_IN_FUNC_LEN 4 + +/* MC_CMD_SET_FUNC_OUT msgresponse */ +#define MC_CMD_SET_FUNC_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_BOOT_STATUS + * Get the instruction address from which the MC booted. + */ +#define MC_CMD_GET_BOOT_STATUS 0x5 +#undef MC_CMD_0x5_PRIVILEGE_CTG + +#define MC_CMD_0x5_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_BOOT_STATUS_IN msgrequest */ +#define MC_CMD_GET_BOOT_STATUS_IN_LEN 0 + +/* MC_CMD_GET_BOOT_STATUS_OUT msgresponse */ +#define MC_CMD_GET_BOOT_STATUS_OUT_LEN 8 +/* ?? */ +#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_OFST 0 +#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_LEN 4 +/* enum: indicates that the MC wasn't flash booted */ +#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_NULL 0xdeadbeef +#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_OFST 4 +#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_LEN 4 +#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_OFST 4 +#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_LBN 0 +#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_WIDTH 1 +#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_PRIMARY_OFST 4 +#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_PRIMARY_LBN 1 +#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_PRIMARY_WIDTH 1 +#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_BACKUP_OFST 4 +#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_BACKUP_LBN 2 +#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_BACKUP_WIDTH 1 + + +/***********************************/ +/* MC_CMD_GET_ASSERTS + * Get (and optionally clear) the current assertion status. Only + * OUT.GLOBAL_FLAGS is guaranteed to exist in the completion payload. The other + * fields will only be present if OUT.GLOBAL_FLAGS != NO_FAILS + */ +#define MC_CMD_GET_ASSERTS 0x6 +#undef MC_CMD_0x6_PRIVILEGE_CTG + +#define MC_CMD_0x6_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_GET_ASSERTS_IN msgrequest */ +#define MC_CMD_GET_ASSERTS_IN_LEN 4 +/* Set to clear assertion */ +#define MC_CMD_GET_ASSERTS_IN_CLEAR_OFST 0 +#define MC_CMD_GET_ASSERTS_IN_CLEAR_LEN 4 + +/* MC_CMD_GET_ASSERTS_OUT msgresponse */ +#define MC_CMD_GET_ASSERTS_OUT_LEN 140 +/* Assertion status flag. */ +#define MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_OFST 0 +#define MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_LEN 4 +/* enum: No assertions have failed. */ +#define MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 0x1 +/* enum: A system-level assertion has failed. */ +#define MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL 0x2 +/* enum: A thread-level assertion has failed. */ +#define MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL 0x3 +/* enum: The system was reset by the watchdog. 
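+ *
+ * (Whichever flag is reported, a caller typically reads the flags word
+ * first, e.g. with the MCDI_DWORD() accessor from mcdi.h:
+ *
+ *   flags = MCDI_DWORD(outbuf, GET_ASSERTS_OUT_GLOBAL_FLAGS);
+ *
+ * and only interprets the remaining fields when flags != NO_FAILS.)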
*/ +#define MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED 0x4 +/* enum: An illegal address trap stopped the system (huntington and later) */ +#define MC_CMD_GET_ASSERTS_FLAGS_ADDR_TRAP 0x5 +/* Failing PC value */ +#define MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_OFST 4 +#define MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_LEN 4 +/* Saved GP regs */ +#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST 8 +#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_LEN 4 +#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM 31 +/* enum: A magic value hinting that the value in this register at the time of + * the failure has likely been lost. + */ +#define MC_CMD_GET_ASSERTS_REG_NO_DATA 0xda7a1057 +/* Failing thread address */ +#define MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_OFST 132 +#define MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_LEN 4 +#define MC_CMD_GET_ASSERTS_OUT_RESERVED_OFST 136 +#define MC_CMD_GET_ASSERTS_OUT_RESERVED_LEN 4 + +/* MC_CMD_GET_ASSERTS_OUT_V2 msgresponse: Extended response for MicroBlaze CPUs + * found on Riverhead designs + */ +#define MC_CMD_GET_ASSERTS_OUT_V2_LEN 240 +/* Assertion status flag. */ +#define MC_CMD_GET_ASSERTS_OUT_V2_GLOBAL_FLAGS_OFST 0 +#define MC_CMD_GET_ASSERTS_OUT_V2_GLOBAL_FLAGS_LEN 4 +/* enum: No assertions have failed. */ +/* MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 0x1 */ +/* enum: A system-level assertion has failed. */ +/* MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL 0x2 */ +/* enum: A thread-level assertion has failed. */ +/* MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL 0x3 */ +/* enum: The system was reset by the watchdog. */ +/* MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED 0x4 */ +/* enum: An illegal address trap stopped the system (huntington and later) */ +/* MC_CMD_GET_ASSERTS_FLAGS_ADDR_TRAP 0x5 */ +/* Failing PC value */ +#define MC_CMD_GET_ASSERTS_OUT_V2_SAVED_PC_OFFS_OFST 4 +#define MC_CMD_GET_ASSERTS_OUT_V2_SAVED_PC_OFFS_LEN 4 +/* Saved GP regs */ +#define MC_CMD_GET_ASSERTS_OUT_V2_GP_REGS_OFFS_OFST 8 +#define MC_CMD_GET_ASSERTS_OUT_V2_GP_REGS_OFFS_LEN 4 +#define MC_CMD_GET_ASSERTS_OUT_V2_GP_REGS_OFFS_NUM 31 +/* enum: A magic value hinting that the value in this register at the time of + * the failure has likely been lost. + */ +/* MC_CMD_GET_ASSERTS_REG_NO_DATA 0xda7a1057 */ +/* Failing thread address */ +#define MC_CMD_GET_ASSERTS_OUT_V2_THREAD_OFFS_OFST 132 +#define MC_CMD_GET_ASSERTS_OUT_V2_THREAD_OFFS_LEN 4 +#define MC_CMD_GET_ASSERTS_OUT_V2_RESERVED_OFST 136 +#define MC_CMD_GET_ASSERTS_OUT_V2_RESERVED_LEN 4 +/* Saved Special Function Registers */ +#define MC_CMD_GET_ASSERTS_OUT_V2_SF_REGS_OFFS_OFST 136 +#define MC_CMD_GET_ASSERTS_OUT_V2_SF_REGS_OFFS_LEN 4 +#define MC_CMD_GET_ASSERTS_OUT_V2_SF_REGS_OFFS_NUM 26 + +/* MC_CMD_GET_ASSERTS_OUT_V3 msgresponse: Extended response with asserted + * firmware version information + */ +#define MC_CMD_GET_ASSERTS_OUT_V3_LEN 360 +/* Assertion status flag. */ +#define MC_CMD_GET_ASSERTS_OUT_V3_GLOBAL_FLAGS_OFST 0 +#define MC_CMD_GET_ASSERTS_OUT_V3_GLOBAL_FLAGS_LEN 4 +/* enum: No assertions have failed. */ +/* MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 0x1 */ +/* enum: A system-level assertion has failed. */ +/* MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL 0x2 */ +/* enum: A thread-level assertion has failed. */ +/* MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL 0x3 */ +/* enum: The system was reset by the watchdog. 
*/ +/* MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED 0x4 */ +/* enum: An illegal address trap stopped the system (huntington and later) */ +/* MC_CMD_GET_ASSERTS_FLAGS_ADDR_TRAP 0x5 */ +/* Failing PC value */ +#define MC_CMD_GET_ASSERTS_OUT_V3_SAVED_PC_OFFS_OFST 4 +#define MC_CMD_GET_ASSERTS_OUT_V3_SAVED_PC_OFFS_LEN 4 +/* Saved GP regs */ +#define MC_CMD_GET_ASSERTS_OUT_V3_GP_REGS_OFFS_OFST 8 +#define MC_CMD_GET_ASSERTS_OUT_V3_GP_REGS_OFFS_LEN 4 +#define MC_CMD_GET_ASSERTS_OUT_V3_GP_REGS_OFFS_NUM 31 +/* enum: A magic value hinting that the value in this register at the time of + * the failure has likely been lost. + */ +/* MC_CMD_GET_ASSERTS_REG_NO_DATA 0xda7a1057 */ +/* Failing thread address */ +#define MC_CMD_GET_ASSERTS_OUT_V3_THREAD_OFFS_OFST 132 +#define MC_CMD_GET_ASSERTS_OUT_V3_THREAD_OFFS_LEN 4 +#define MC_CMD_GET_ASSERTS_OUT_V3_RESERVED_OFST 136 +#define MC_CMD_GET_ASSERTS_OUT_V3_RESERVED_LEN 4 +/* Saved Special Function Registers */ +#define MC_CMD_GET_ASSERTS_OUT_V3_SF_REGS_OFFS_OFST 136 +#define MC_CMD_GET_ASSERTS_OUT_V3_SF_REGS_OFFS_LEN 4 +#define MC_CMD_GET_ASSERTS_OUT_V3_SF_REGS_OFFS_NUM 26 +/* MC firmware unique build ID (as binary SHA-1 value) */ +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_ID_OFST 240 +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_ID_LEN 20 +/* MC firmware build date (as Unix timestamp) */ +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_OFST 260 +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_LEN 8 +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_LO_OFST 260 +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_LO_LEN 4 +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_LO_LBN 2080 +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_LO_WIDTH 32 +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_HI_OFST 264 +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_HI_LEN 4 +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_HI_LBN 2112 +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_HI_WIDTH 32 +/* MC firmware version number */ +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_OFST 268 +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_LEN 8 +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_LO_OFST 268 +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_LO_LEN 4 +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_LO_LBN 2144 +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_LO_WIDTH 32 +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_HI_OFST 272 +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_HI_LEN 4 +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_HI_LBN 2176 +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_HI_WIDTH 32 +/* MC firmware security level */ +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_SECURITY_LEVEL_OFST 276 +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_SECURITY_LEVEL_LEN 4 +/* MC firmware extra version info (as null-terminated US-ASCII string) */ +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_EXTRA_INFO_OFST 280 +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_EXTRA_INFO_LEN 16 +/* MC firmware build name (as null-terminated US-ASCII string) */ +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_NAME_OFST 296 +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_NAME_LEN 64 + + +/***********************************/ +/* MC_CMD_LOG_CTRL + * Configure the output stream for log events such as link state changes, + * sensor notifications and MCDI completions + */ +#define MC_CMD_LOG_CTRL 0x7 +#undef MC_CMD_0x7_PRIVILEGE_CTG + +#define MC_CMD_0x7_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_LOG_CTRL_IN msgrequest */ +#define 
MC_CMD_LOG_CTRL_IN_LEN 8 +/* Log destination */ +#define MC_CMD_LOG_CTRL_IN_LOG_DEST_OFST 0 +#define MC_CMD_LOG_CTRL_IN_LOG_DEST_LEN 4 +/* enum: UART. */ +#define MC_CMD_LOG_CTRL_IN_LOG_DEST_UART 0x1 +/* enum: Event queue. */ +#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ 0x2 +/* Legacy argument. Must be zero. */ +#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_OFST 4 +#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_LEN 4 + +/* MC_CMD_LOG_CTRL_OUT msgresponse */ +#define MC_CMD_LOG_CTRL_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_VERSION + * Get version information about adapter components. + */ +#define MC_CMD_GET_VERSION 0x8 +#undef MC_CMD_0x8_PRIVILEGE_CTG + +#define MC_CMD_0x8_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_VERSION_IN msgrequest */ +#define MC_CMD_GET_VERSION_IN_LEN 0 + +/* MC_CMD_GET_VERSION_EXT_IN msgrequest: Asks for the extended version */ +#define MC_CMD_GET_VERSION_EXT_IN_LEN 4 +/* placeholder, set to 0 */ +#define MC_CMD_GET_VERSION_EXT_IN_EXT_FLAGS_OFST 0 +#define MC_CMD_GET_VERSION_EXT_IN_EXT_FLAGS_LEN 4 + +/* MC_CMD_GET_VERSION_V0_OUT msgresponse: deprecated version format */ +#define MC_CMD_GET_VERSION_V0_OUT_LEN 4 +#define MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 +#define MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 +/* enum: Reserved version number to indicate "any" version. */ +#define MC_CMD_GET_VERSION_OUT_FIRMWARE_ANY 0xffffffff +/* enum: Bootrom version value for Siena. */ +#define MC_CMD_GET_VERSION_OUT_FIRMWARE_SIENA_BOOTROM 0xb0070000 +/* enum: Bootrom version value for Huntington. */ +#define MC_CMD_GET_VERSION_OUT_FIRMWARE_HUNT_BOOTROM 0xb0070001 +/* enum: Bootrom version value for Medford2. */ +#define MC_CMD_GET_VERSION_OUT_FIRMWARE_MEDFORD2_BOOTROM 0xb0070002 + +/* MC_CMD_GET_VERSION_OUT msgresponse */ +#define MC_CMD_GET_VERSION_OUT_LEN 32 +/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */ +/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */ +/* Enum values, see field(s): */ +/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */ +#define MC_CMD_GET_VERSION_OUT_PCOL_OFST 4 +#define MC_CMD_GET_VERSION_OUT_PCOL_LEN 4 +/* 128bit mask of functions supported by the current firmware */ +#define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_OFST 8 +#define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_LEN 16 +#define MC_CMD_GET_VERSION_OUT_VERSION_OFST 24 +#define MC_CMD_GET_VERSION_OUT_VERSION_LEN 8 +#define MC_CMD_GET_VERSION_OUT_VERSION_LO_OFST 24 +#define MC_CMD_GET_VERSION_OUT_VERSION_LO_LEN 4 +#define MC_CMD_GET_VERSION_OUT_VERSION_LO_LBN 192 +#define MC_CMD_GET_VERSION_OUT_VERSION_LO_WIDTH 32 +#define MC_CMD_GET_VERSION_OUT_VERSION_HI_OFST 28 +#define MC_CMD_GET_VERSION_OUT_VERSION_HI_LEN 4 +#define MC_CMD_GET_VERSION_OUT_VERSION_HI_LBN 224 +#define MC_CMD_GET_VERSION_OUT_VERSION_HI_WIDTH 32 + +/* MC_CMD_GET_VERSION_EXT_OUT msgresponse */ +#define MC_CMD_GET_VERSION_EXT_OUT_LEN 48 +/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */ +/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */ +/* Enum values, see field(s): */ +/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */ +#define MC_CMD_GET_VERSION_EXT_OUT_PCOL_OFST 4 +#define MC_CMD_GET_VERSION_EXT_OUT_PCOL_LEN 4 +/* 128bit mask of functions supported by the current firmware */ +#define MC_CMD_GET_VERSION_EXT_OUT_SUPPORTED_FUNCS_OFST 8 +#define MC_CMD_GET_VERSION_EXT_OUT_SUPPORTED_FUNCS_LEN 16 +#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_OFST 24 +#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_LEN 8 +#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_LO_OFST 24 +#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_LO_LEN 4 +#define 
MC_CMD_GET_VERSION_EXT_OUT_VERSION_LO_LBN 192 +#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_LO_WIDTH 32 +#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_HI_OFST 28 +#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_HI_LEN 4 +#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_HI_LBN 224 +#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_HI_WIDTH 32 +/* extra info */ +#define MC_CMD_GET_VERSION_EXT_OUT_EXTRA_OFST 32 +#define MC_CMD_GET_VERSION_EXT_OUT_EXTRA_LEN 16 + +/* MC_CMD_GET_VERSION_V2_OUT msgresponse: Extended response providing version + * information for all adapter components. For Riverhead based designs, base MC + * firmware version fields refer to NMC firmware, while CMC firmware data is in + * dedicated CMC fields. Flags indicate which data is present in the response + * (depending on which components exist on a particular adapter) + */ +#define MC_CMD_GET_VERSION_V2_OUT_LEN 304 +/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */ +/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */ +/* Enum values, see field(s): */ +/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */ +#define MC_CMD_GET_VERSION_V2_OUT_PCOL_OFST 4 +#define MC_CMD_GET_VERSION_V2_OUT_PCOL_LEN 4 +/* 128bit mask of functions supported by the current firmware */ +#define MC_CMD_GET_VERSION_V2_OUT_SUPPORTED_FUNCS_OFST 8 +#define MC_CMD_GET_VERSION_V2_OUT_SUPPORTED_FUNCS_LEN 16 +#define MC_CMD_GET_VERSION_V2_OUT_VERSION_OFST 24 +#define MC_CMD_GET_VERSION_V2_OUT_VERSION_LEN 8 +#define MC_CMD_GET_VERSION_V2_OUT_VERSION_LO_OFST 24 +#define MC_CMD_GET_VERSION_V2_OUT_VERSION_LO_LEN 4 +#define MC_CMD_GET_VERSION_V2_OUT_VERSION_LO_LBN 192 +#define MC_CMD_GET_VERSION_V2_OUT_VERSION_LO_WIDTH 32 +#define MC_CMD_GET_VERSION_V2_OUT_VERSION_HI_OFST 28 +#define MC_CMD_GET_VERSION_V2_OUT_VERSION_HI_LEN 4 +#define MC_CMD_GET_VERSION_V2_OUT_VERSION_HI_LBN 224 +#define MC_CMD_GET_VERSION_V2_OUT_VERSION_HI_WIDTH 32 +/* extra info */ +#define MC_CMD_GET_VERSION_V2_OUT_EXTRA_OFST 32 +#define MC_CMD_GET_VERSION_V2_OUT_EXTRA_LEN 16 +/* Flags indicating which extended fields are valid */ +#define MC_CMD_GET_VERSION_V2_OUT_FLAGS_OFST 48 +#define MC_CMD_GET_VERSION_V2_OUT_FLAGS_LEN 4 +#define MC_CMD_GET_VERSION_V2_OUT_MCFW_EXT_INFO_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V2_OUT_MCFW_EXT_INFO_PRESENT_LBN 0 +#define MC_CMD_GET_VERSION_V2_OUT_MCFW_EXT_INFO_PRESENT_WIDTH 1 +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_EXT_INFO_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_EXT_INFO_PRESENT_LBN 1 +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_EXT_INFO_PRESENT_WIDTH 1 +#define MC_CMD_GET_VERSION_V2_OUT_CMC_EXT_INFO_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V2_OUT_CMC_EXT_INFO_PRESENT_LBN 2 +#define MC_CMD_GET_VERSION_V2_OUT_CMC_EXT_INFO_PRESENT_WIDTH 1 +#define MC_CMD_GET_VERSION_V2_OUT_FPGA_EXT_INFO_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V2_OUT_FPGA_EXT_INFO_PRESENT_LBN 3 +#define MC_CMD_GET_VERSION_V2_OUT_FPGA_EXT_INFO_PRESENT_WIDTH 1 +#define MC_CMD_GET_VERSION_V2_OUT_BOARD_EXT_INFO_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V2_OUT_BOARD_EXT_INFO_PRESENT_LBN 4 +#define MC_CMD_GET_VERSION_V2_OUT_BOARD_EXT_INFO_PRESENT_WIDTH 1 +#define MC_CMD_GET_VERSION_V2_OUT_DATAPATH_HW_VERSION_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V2_OUT_DATAPATH_HW_VERSION_PRESENT_LBN 5 +#define MC_CMD_GET_VERSION_V2_OUT_DATAPATH_HW_VERSION_PRESENT_WIDTH 1 +#define MC_CMD_GET_VERSION_V2_OUT_DATAPATH_FW_VERSION_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V2_OUT_DATAPATH_FW_VERSION_PRESENT_LBN 6 +#define MC_CMD_GET_VERSION_V2_OUT_DATAPATH_FW_VERSION_PRESENT_WIDTH 1 +#define 
MC_CMD_GET_VERSION_V2_OUT_SOC_BOOT_VERSION_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V2_OUT_SOC_BOOT_VERSION_PRESENT_LBN 7 +#define MC_CMD_GET_VERSION_V2_OUT_SOC_BOOT_VERSION_PRESENT_WIDTH 1 +#define MC_CMD_GET_VERSION_V2_OUT_SOC_UBOOT_VERSION_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V2_OUT_SOC_UBOOT_VERSION_PRESENT_LBN 8 +#define MC_CMD_GET_VERSION_V2_OUT_SOC_UBOOT_VERSION_PRESENT_WIDTH 1 +#define MC_CMD_GET_VERSION_V2_OUT_SOC_MAIN_ROOTFS_VERSION_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V2_OUT_SOC_MAIN_ROOTFS_VERSION_PRESENT_LBN 9 +#define MC_CMD_GET_VERSION_V2_OUT_SOC_MAIN_ROOTFS_VERSION_PRESENT_WIDTH 1 +#define MC_CMD_GET_VERSION_V2_OUT_SOC_RECOVERY_BUILDROOT_VERSION_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V2_OUT_SOC_RECOVERY_BUILDROOT_VERSION_PRESENT_LBN 10 +#define MC_CMD_GET_VERSION_V2_OUT_SOC_RECOVERY_BUILDROOT_VERSION_PRESENT_WIDTH 1 +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_VERSION_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_VERSION_PRESENT_LBN 11 +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_VERSION_PRESENT_WIDTH 1 +#define MC_CMD_GET_VERSION_V2_OUT_BOARD_VERSION_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V2_OUT_BOARD_VERSION_PRESENT_LBN 12 +#define MC_CMD_GET_VERSION_V2_OUT_BOARD_VERSION_PRESENT_WIDTH 1 +#define MC_CMD_GET_VERSION_V2_OUT_BUNDLE_VERSION_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V2_OUT_BUNDLE_VERSION_PRESENT_LBN 13 +#define MC_CMD_GET_VERSION_V2_OUT_BUNDLE_VERSION_PRESENT_WIDTH 1 +/* MC firmware unique build ID (as binary SHA-1 value) */ +#define MC_CMD_GET_VERSION_V2_OUT_MCFW_BUILD_ID_OFST 52 +#define MC_CMD_GET_VERSION_V2_OUT_MCFW_BUILD_ID_LEN 20 +/* MC firmware security level */ +#define MC_CMD_GET_VERSION_V2_OUT_MCFW_SECURITY_LEVEL_OFST 72 +#define MC_CMD_GET_VERSION_V2_OUT_MCFW_SECURITY_LEVEL_LEN 4 +/* MC firmware build name (as null-terminated US-ASCII string) */ +#define MC_CMD_GET_VERSION_V2_OUT_MCFW_BUILD_NAME_OFST 76 +#define MC_CMD_GET_VERSION_V2_OUT_MCFW_BUILD_NAME_LEN 64 +/* The SUC firmware version as four numbers - a.b.c.d */ +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_VERSION_OFST 140 +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_VERSION_LEN 4 +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_VERSION_NUM 4 +/* SUC firmware build date (as 64-bit Unix timestamp) */ +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_OFST 156 +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_LEN 8 +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_LO_OFST 156 +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_LO_LEN 4 +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_LO_LBN 1248 +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_LO_WIDTH 32 +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_HI_OFST 160 +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_HI_LEN 4 +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_HI_LBN 1280 +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_HI_WIDTH 32 +/* The ID of the SUC chip. This is specific to the platform but typically + * indicates family, memory sizes etc. See SF-116728-SW for further details. 
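+ *
+ * Illustrative only, not part of the MCDI protocol definition: a
+ * driver-side sketch of consuming this V2 response, assuming the usual
+ * sfc helpers from mcdi.h (MCDI_DECLARE_BUF, MCDI_DWORD, MCDI_QWORD,
+ * efx_mcdi_rpc). Extended fields should only be read once the matching
+ * PRESENT flag has been checked:
+ *
+ *        MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_VERSION_V2_OUT_LEN);
+ *        size_t outlen;
+ *        u32 flags;
+ *        u64 date;
+ *        int rc;
+ *
+ *        rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0,
+ *                          outbuf, sizeof(outbuf), &outlen);
+ *        if (rc)
+ *                return rc;
+ *        if (outlen < MC_CMD_GET_VERSION_V2_OUT_LEN)
+ *                return -EIO;
+ *        flags = MCDI_DWORD(outbuf, GET_VERSION_V2_OUT_FLAGS);
+ *        if (flags & BIT(MC_CMD_GET_VERSION_V2_OUT_SUCFW_EXT_INFO_PRESENT_LBN))
+ *                date = MCDI_QWORD(outbuf, GET_VERSION_V2_OUT_SUCFW_BUILD_DATE);
+ *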
+ */ +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_CHIP_ID_OFST 164 +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_CHIP_ID_LEN 4 +/* The CMC firmware version as four numbers - a.b.c.d */ +#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_VERSION_OFST 168 +#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_VERSION_LEN 4 +#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_VERSION_NUM 4 +/* CMC firmware build date (as 64-bit Unix timestamp) */ +#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_OFST 184 +#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_LEN 8 +#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_LO_OFST 184 +#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_LO_LEN 4 +#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_LO_LBN 1472 +#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_LO_WIDTH 32 +#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_HI_OFST 188 +#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_HI_LEN 4 +#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_HI_LBN 1504 +#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_HI_WIDTH 32 +/* FPGA version as three numbers. On Riverhead based systems this field uses + * the same encoding as hardware version ID registers (MC_FPGA_BUILD_HWRD_REG): + * FPGA_VERSION[0]: x => Image H{x} FPGA_VERSION[1]: Revision letter (0 => A, 1 + * => B, ...) FPGA_VERSION[2]: Sub-revision number + */ +#define MC_CMD_GET_VERSION_V2_OUT_FPGA_VERSION_OFST 192 +#define MC_CMD_GET_VERSION_V2_OUT_FPGA_VERSION_LEN 4 +#define MC_CMD_GET_VERSION_V2_OUT_FPGA_VERSION_NUM 3 +/* Extra FPGA revision information (as null-terminated US-ASCII string) */ +#define MC_CMD_GET_VERSION_V2_OUT_FPGA_EXTRA_OFST 204 +#define MC_CMD_GET_VERSION_V2_OUT_FPGA_EXTRA_LEN 16 +/* Board name / adapter model (as null-terminated US-ASCII string) */ +#define MC_CMD_GET_VERSION_V2_OUT_BOARD_NAME_OFST 220 +#define MC_CMD_GET_VERSION_V2_OUT_BOARD_NAME_LEN 16 +/* Board revision number */ +#define MC_CMD_GET_VERSION_V2_OUT_BOARD_REVISION_OFST 236 +#define MC_CMD_GET_VERSION_V2_OUT_BOARD_REVISION_LEN 4 +/* Board serial number (as null-terminated US-ASCII string) */ +#define MC_CMD_GET_VERSION_V2_OUT_BOARD_SERIAL_OFST 240 +#define MC_CMD_GET_VERSION_V2_OUT_BOARD_SERIAL_LEN 64 + +/* MC_CMD_GET_VERSION_V3_OUT msgresponse: Extended response providing version + * information for all adapter components. For Riverhead based designs, base MC + * firmware version fields refer to NMC firmware, while CMC firmware data is in + * dedicated CMC fields. 
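+ *
+ * (Aside, illustrative rather than normative: the successive V2..V5
+ * layouts extend the same MC_CMD_GET_VERSION response, so a caller can
+ * size its buffer for the newest layout it understands and use the
+ * returned length to see which extension the firmware actually
+ * supplied, e.g.
+ *
+ *        MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_VERSION_V5_OUT_LEN);
+ *        unsigned int ver = 0;
+ *        size_t outlen;
+ *
+ *        efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0,
+ *                     outbuf, sizeof(outbuf), &outlen);
+ *        if (outlen >= MC_CMD_GET_VERSION_V5_OUT_LEN)
+ *                ver = 5;
+ *        else if (outlen >= MC_CMD_GET_VERSION_V3_OUT_LEN)
+ *                ver = 3;
+ *        else if (outlen >= MC_CMD_GET_VERSION_V2_OUT_LEN)
+ *                ver = 2;
+ *
+ * with error handling omitted for brevity.)
+ *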
Flags indicate which data is present in the response + * (depending on which components exist on a particular adapter) + */ +#define MC_CMD_GET_VERSION_V3_OUT_LEN 328 +/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */ +/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */ +/* Enum values, see field(s): */ +/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */ +#define MC_CMD_GET_VERSION_V3_OUT_PCOL_OFST 4 +#define MC_CMD_GET_VERSION_V3_OUT_PCOL_LEN 4 +/* 128bit mask of functions supported by the current firmware */ +#define MC_CMD_GET_VERSION_V3_OUT_SUPPORTED_FUNCS_OFST 8 +#define MC_CMD_GET_VERSION_V3_OUT_SUPPORTED_FUNCS_LEN 16 +#define MC_CMD_GET_VERSION_V3_OUT_VERSION_OFST 24 +#define MC_CMD_GET_VERSION_V3_OUT_VERSION_LEN 8 +#define MC_CMD_GET_VERSION_V3_OUT_VERSION_LO_OFST 24 +#define MC_CMD_GET_VERSION_V3_OUT_VERSION_LO_LEN 4 +#define MC_CMD_GET_VERSION_V3_OUT_VERSION_LO_LBN 192 +#define MC_CMD_GET_VERSION_V3_OUT_VERSION_LO_WIDTH 32 +#define MC_CMD_GET_VERSION_V3_OUT_VERSION_HI_OFST 28 +#define MC_CMD_GET_VERSION_V3_OUT_VERSION_HI_LEN 4 +#define MC_CMD_GET_VERSION_V3_OUT_VERSION_HI_LBN 224 +#define MC_CMD_GET_VERSION_V3_OUT_VERSION_HI_WIDTH 32 +/* extra info */ +#define MC_CMD_GET_VERSION_V3_OUT_EXTRA_OFST 32 +#define MC_CMD_GET_VERSION_V3_OUT_EXTRA_LEN 16 +/* Flags indicating which extended fields are valid */ +#define MC_CMD_GET_VERSION_V3_OUT_FLAGS_OFST 48 +#define MC_CMD_GET_VERSION_V3_OUT_FLAGS_LEN 4 +#define MC_CMD_GET_VERSION_V3_OUT_MCFW_EXT_INFO_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V3_OUT_MCFW_EXT_INFO_PRESENT_LBN 0 +#define MC_CMD_GET_VERSION_V3_OUT_MCFW_EXT_INFO_PRESENT_WIDTH 1 +#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_EXT_INFO_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_EXT_INFO_PRESENT_LBN 1 +#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_EXT_INFO_PRESENT_WIDTH 1 +#define MC_CMD_GET_VERSION_V3_OUT_CMC_EXT_INFO_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V3_OUT_CMC_EXT_INFO_PRESENT_LBN 2 +#define MC_CMD_GET_VERSION_V3_OUT_CMC_EXT_INFO_PRESENT_WIDTH 1 +#define MC_CMD_GET_VERSION_V3_OUT_FPGA_EXT_INFO_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V3_OUT_FPGA_EXT_INFO_PRESENT_LBN 3 +#define MC_CMD_GET_VERSION_V3_OUT_FPGA_EXT_INFO_PRESENT_WIDTH 1 +#define MC_CMD_GET_VERSION_V3_OUT_BOARD_EXT_INFO_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V3_OUT_BOARD_EXT_INFO_PRESENT_LBN 4 +#define MC_CMD_GET_VERSION_V3_OUT_BOARD_EXT_INFO_PRESENT_WIDTH 1 +#define MC_CMD_GET_VERSION_V3_OUT_DATAPATH_HW_VERSION_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V3_OUT_DATAPATH_HW_VERSION_PRESENT_LBN 5 +#define MC_CMD_GET_VERSION_V3_OUT_DATAPATH_HW_VERSION_PRESENT_WIDTH 1 +#define MC_CMD_GET_VERSION_V3_OUT_DATAPATH_FW_VERSION_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V3_OUT_DATAPATH_FW_VERSION_PRESENT_LBN 6 +#define MC_CMD_GET_VERSION_V3_OUT_DATAPATH_FW_VERSION_PRESENT_WIDTH 1 +#define MC_CMD_GET_VERSION_V3_OUT_SOC_BOOT_VERSION_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V3_OUT_SOC_BOOT_VERSION_PRESENT_LBN 7 +#define MC_CMD_GET_VERSION_V3_OUT_SOC_BOOT_VERSION_PRESENT_WIDTH 1 +#define MC_CMD_GET_VERSION_V3_OUT_SOC_UBOOT_VERSION_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V3_OUT_SOC_UBOOT_VERSION_PRESENT_LBN 8 +#define MC_CMD_GET_VERSION_V3_OUT_SOC_UBOOT_VERSION_PRESENT_WIDTH 1 +#define MC_CMD_GET_VERSION_V3_OUT_SOC_MAIN_ROOTFS_VERSION_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V3_OUT_SOC_MAIN_ROOTFS_VERSION_PRESENT_LBN 9 +#define MC_CMD_GET_VERSION_V3_OUT_SOC_MAIN_ROOTFS_VERSION_PRESENT_WIDTH 1 +#define MC_CMD_GET_VERSION_V3_OUT_SOC_RECOVERY_BUILDROOT_VERSION_PRESENT_OFST 48 +#define 
MC_CMD_GET_VERSION_V3_OUT_SOC_RECOVERY_BUILDROOT_VERSION_PRESENT_LBN 10 +#define MC_CMD_GET_VERSION_V3_OUT_SOC_RECOVERY_BUILDROOT_VERSION_PRESENT_WIDTH 1 +#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_VERSION_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_VERSION_PRESENT_LBN 11 +#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_VERSION_PRESENT_WIDTH 1 +#define MC_CMD_GET_VERSION_V3_OUT_BOARD_VERSION_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V3_OUT_BOARD_VERSION_PRESENT_LBN 12 +#define MC_CMD_GET_VERSION_V3_OUT_BOARD_VERSION_PRESENT_WIDTH 1 +#define MC_CMD_GET_VERSION_V3_OUT_BUNDLE_VERSION_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V3_OUT_BUNDLE_VERSION_PRESENT_LBN 13 +#define MC_CMD_GET_VERSION_V3_OUT_BUNDLE_VERSION_PRESENT_WIDTH 1 +/* MC firmware unique build ID (as binary SHA-1 value) */ +#define MC_CMD_GET_VERSION_V3_OUT_MCFW_BUILD_ID_OFST 52 +#define MC_CMD_GET_VERSION_V3_OUT_MCFW_BUILD_ID_LEN 20 +/* MC firmware security level */ +#define MC_CMD_GET_VERSION_V3_OUT_MCFW_SECURITY_LEVEL_OFST 72 +#define MC_CMD_GET_VERSION_V3_OUT_MCFW_SECURITY_LEVEL_LEN 4 +/* MC firmware build name (as null-terminated US-ASCII string) */ +#define MC_CMD_GET_VERSION_V3_OUT_MCFW_BUILD_NAME_OFST 76 +#define MC_CMD_GET_VERSION_V3_OUT_MCFW_BUILD_NAME_LEN 64 +/* The SUC firmware version as four numbers - a.b.c.d */ +#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_VERSION_OFST 140 +#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_VERSION_LEN 4 +#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_VERSION_NUM 4 +/* SUC firmware build date (as 64-bit Unix timestamp) */ +#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_BUILD_DATE_OFST 156 +#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_BUILD_DATE_LEN 8 +#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_BUILD_DATE_LO_OFST 156 +#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_BUILD_DATE_LO_LEN 4 +#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_BUILD_DATE_LO_LBN 1248 +#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_BUILD_DATE_LO_WIDTH 32 +#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_BUILD_DATE_HI_OFST 160 +#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_BUILD_DATE_HI_LEN 4 +#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_BUILD_DATE_HI_LBN 1280 +#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_BUILD_DATE_HI_WIDTH 32 +/* The ID of the SUC chip. This is specific to the platform but typically + * indicates family, memory sizes etc. See SF-116728-SW for further details. + */ +#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_CHIP_ID_OFST 164 +#define MC_CMD_GET_VERSION_V3_OUT_SUCFW_CHIP_ID_LEN 4 +/* The CMC firmware version as four numbers - a.b.c.d */ +#define MC_CMD_GET_VERSION_V3_OUT_CMCFW_VERSION_OFST 168 +#define MC_CMD_GET_VERSION_V3_OUT_CMCFW_VERSION_LEN 4 +#define MC_CMD_GET_VERSION_V3_OUT_CMCFW_VERSION_NUM 4 +/* CMC firmware build date (as 64-bit Unix timestamp) */ +#define MC_CMD_GET_VERSION_V3_OUT_CMCFW_BUILD_DATE_OFST 184 +#define MC_CMD_GET_VERSION_V3_OUT_CMCFW_BUILD_DATE_LEN 8 +#define MC_CMD_GET_VERSION_V3_OUT_CMCFW_BUILD_DATE_LO_OFST 184 +#define MC_CMD_GET_VERSION_V3_OUT_CMCFW_BUILD_DATE_LO_LEN 4 +#define MC_CMD_GET_VERSION_V3_OUT_CMCFW_BUILD_DATE_LO_LBN 1472 +#define MC_CMD_GET_VERSION_V3_OUT_CMCFW_BUILD_DATE_LO_WIDTH 32 +#define MC_CMD_GET_VERSION_V3_OUT_CMCFW_BUILD_DATE_HI_OFST 188 +#define MC_CMD_GET_VERSION_V3_OUT_CMCFW_BUILD_DATE_HI_LEN 4 +#define MC_CMD_GET_VERSION_V3_OUT_CMCFW_BUILD_DATE_HI_LBN 1504 +#define MC_CMD_GET_VERSION_V3_OUT_CMCFW_BUILD_DATE_HI_WIDTH 32 +/* FPGA version as three numbers. 
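+ *
+ * Illustrative only: formatting this triple with the H{x}/revision-letter
+ * encoding that the rest of this comment describes, where e.g. {2, 1, 3}
+ * becomes "H2B.3" (fmt_fpga_ver is a hypothetical helper, not part of
+ * the driver):
+ *
+ *        static void fmt_fpga_ver(const u32 v[3], char *buf, size_t len)
+ *        {
+ *                snprintf(buf, len, "H%u%c.%u", v[0], 'A' + v[1], v[2]);
+ *        }
+ *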
On Riverhead based systems this field uses + * the same encoding as hardware version ID registers (MC_FPGA_BUILD_HWRD_REG): + * FPGA_VERSION[0]: x => Image H{x} FPGA_VERSION[1]: Revision letter (0 => A, 1 + * => B, ...) FPGA_VERSION[2]: Sub-revision number + */ +#define MC_CMD_GET_VERSION_V3_OUT_FPGA_VERSION_OFST 192 +#define MC_CMD_GET_VERSION_V3_OUT_FPGA_VERSION_LEN 4 +#define MC_CMD_GET_VERSION_V3_OUT_FPGA_VERSION_NUM 3 +/* Extra FPGA revision information (as null-terminated US-ASCII string) */ +#define MC_CMD_GET_VERSION_V3_OUT_FPGA_EXTRA_OFST 204 +#define MC_CMD_GET_VERSION_V3_OUT_FPGA_EXTRA_LEN 16 +/* Board name / adapter model (as null-terminated US-ASCII string) */ +#define MC_CMD_GET_VERSION_V3_OUT_BOARD_NAME_OFST 220 +#define MC_CMD_GET_VERSION_V3_OUT_BOARD_NAME_LEN 16 +/* Board revision number */ +#define MC_CMD_GET_VERSION_V3_OUT_BOARD_REVISION_OFST 236 +#define MC_CMD_GET_VERSION_V3_OUT_BOARD_REVISION_LEN 4 +/* Board serial number (as null-terminated US-ASCII string) */ +#define MC_CMD_GET_VERSION_V3_OUT_BOARD_SERIAL_OFST 240 +#define MC_CMD_GET_VERSION_V3_OUT_BOARD_SERIAL_LEN 64 +/* The version of the datapath hardware design as three numbers - a.b.c */ +#define MC_CMD_GET_VERSION_V3_OUT_DATAPATH_HW_VERSION_OFST 304 +#define MC_CMD_GET_VERSION_V3_OUT_DATAPATH_HW_VERSION_LEN 4 +#define MC_CMD_GET_VERSION_V3_OUT_DATAPATH_HW_VERSION_NUM 3 +/* The version of the firmware library used to control the datapath as three + * numbers - a.b.c + */ +#define MC_CMD_GET_VERSION_V3_OUT_DATAPATH_FW_VERSION_OFST 316 +#define MC_CMD_GET_VERSION_V3_OUT_DATAPATH_FW_VERSION_LEN 4 +#define MC_CMD_GET_VERSION_V3_OUT_DATAPATH_FW_VERSION_NUM 3 + +/* MC_CMD_GET_VERSION_V4_OUT msgresponse: Extended response providing SoC + * version information + */ +#define MC_CMD_GET_VERSION_V4_OUT_LEN 392 +/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */ +/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */ +/* Enum values, see field(s): */ +/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */ +#define MC_CMD_GET_VERSION_V4_OUT_PCOL_OFST 4 +#define MC_CMD_GET_VERSION_V4_OUT_PCOL_LEN 4 +/* 128bit mask of functions supported by the current firmware */ +#define MC_CMD_GET_VERSION_V4_OUT_SUPPORTED_FUNCS_OFST 8 +#define MC_CMD_GET_VERSION_V4_OUT_SUPPORTED_FUNCS_LEN 16 +#define MC_CMD_GET_VERSION_V4_OUT_VERSION_OFST 24 +#define MC_CMD_GET_VERSION_V4_OUT_VERSION_LEN 8 +#define MC_CMD_GET_VERSION_V4_OUT_VERSION_LO_OFST 24 +#define MC_CMD_GET_VERSION_V4_OUT_VERSION_LO_LEN 4 +#define MC_CMD_GET_VERSION_V4_OUT_VERSION_LO_LBN 192 +#define MC_CMD_GET_VERSION_V4_OUT_VERSION_LO_WIDTH 32 +#define MC_CMD_GET_VERSION_V4_OUT_VERSION_HI_OFST 28 +#define MC_CMD_GET_VERSION_V4_OUT_VERSION_HI_LEN 4 +#define MC_CMD_GET_VERSION_V4_OUT_VERSION_HI_LBN 224 +#define MC_CMD_GET_VERSION_V4_OUT_VERSION_HI_WIDTH 32 +/* extra info */ +#define MC_CMD_GET_VERSION_V4_OUT_EXTRA_OFST 32 +#define MC_CMD_GET_VERSION_V4_OUT_EXTRA_LEN 16 +/* Flags indicating which extended fields are valid */ +#define MC_CMD_GET_VERSION_V4_OUT_FLAGS_OFST 48 +#define MC_CMD_GET_VERSION_V4_OUT_FLAGS_LEN 4 +#define MC_CMD_GET_VERSION_V4_OUT_MCFW_EXT_INFO_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V4_OUT_MCFW_EXT_INFO_PRESENT_LBN 0 +#define MC_CMD_GET_VERSION_V4_OUT_MCFW_EXT_INFO_PRESENT_WIDTH 1 +#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_EXT_INFO_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_EXT_INFO_PRESENT_LBN 1 +#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_EXT_INFO_PRESENT_WIDTH 1 +#define MC_CMD_GET_VERSION_V4_OUT_CMC_EXT_INFO_PRESENT_OFST 48 +#define
MC_CMD_GET_VERSION_V4_OUT_CMC_EXT_INFO_PRESENT_LBN 2 +#define MC_CMD_GET_VERSION_V4_OUT_CMC_EXT_INFO_PRESENT_WIDTH 1 +#define MC_CMD_GET_VERSION_V4_OUT_FPGA_EXT_INFO_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V4_OUT_FPGA_EXT_INFO_PRESENT_LBN 3 +#define MC_CMD_GET_VERSION_V4_OUT_FPGA_EXT_INFO_PRESENT_WIDTH 1 +#define MC_CMD_GET_VERSION_V4_OUT_BOARD_EXT_INFO_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V4_OUT_BOARD_EXT_INFO_PRESENT_LBN 4 +#define MC_CMD_GET_VERSION_V4_OUT_BOARD_EXT_INFO_PRESENT_WIDTH 1 +#define MC_CMD_GET_VERSION_V4_OUT_DATAPATH_HW_VERSION_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V4_OUT_DATAPATH_HW_VERSION_PRESENT_LBN 5 +#define MC_CMD_GET_VERSION_V4_OUT_DATAPATH_HW_VERSION_PRESENT_WIDTH 1 +#define MC_CMD_GET_VERSION_V4_OUT_DATAPATH_FW_VERSION_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V4_OUT_DATAPATH_FW_VERSION_PRESENT_LBN 6 +#define MC_CMD_GET_VERSION_V4_OUT_DATAPATH_FW_VERSION_PRESENT_WIDTH 1 +#define MC_CMD_GET_VERSION_V4_OUT_SOC_BOOT_VERSION_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V4_OUT_SOC_BOOT_VERSION_PRESENT_LBN 7 +#define MC_CMD_GET_VERSION_V4_OUT_SOC_BOOT_VERSION_PRESENT_WIDTH 1 +#define MC_CMD_GET_VERSION_V4_OUT_SOC_UBOOT_VERSION_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V4_OUT_SOC_UBOOT_VERSION_PRESENT_LBN 8 +#define MC_CMD_GET_VERSION_V4_OUT_SOC_UBOOT_VERSION_PRESENT_WIDTH 1 +#define MC_CMD_GET_VERSION_V4_OUT_SOC_MAIN_ROOTFS_VERSION_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V4_OUT_SOC_MAIN_ROOTFS_VERSION_PRESENT_LBN 9 +#define MC_CMD_GET_VERSION_V4_OUT_SOC_MAIN_ROOTFS_VERSION_PRESENT_WIDTH 1 +#define MC_CMD_GET_VERSION_V4_OUT_SOC_RECOVERY_BUILDROOT_VERSION_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V4_OUT_SOC_RECOVERY_BUILDROOT_VERSION_PRESENT_LBN 10 +#define MC_CMD_GET_VERSION_V4_OUT_SOC_RECOVERY_BUILDROOT_VERSION_PRESENT_WIDTH 1 +#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_VERSION_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_VERSION_PRESENT_LBN 11 +#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_VERSION_PRESENT_WIDTH 1 +#define MC_CMD_GET_VERSION_V4_OUT_BOARD_VERSION_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V4_OUT_BOARD_VERSION_PRESENT_LBN 12 +#define MC_CMD_GET_VERSION_V4_OUT_BOARD_VERSION_PRESENT_WIDTH 1 +#define MC_CMD_GET_VERSION_V4_OUT_BUNDLE_VERSION_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V4_OUT_BUNDLE_VERSION_PRESENT_LBN 13 +#define MC_CMD_GET_VERSION_V4_OUT_BUNDLE_VERSION_PRESENT_WIDTH 1 +/* MC firmware unique build ID (as binary SHA-1 value) */ +#define MC_CMD_GET_VERSION_V4_OUT_MCFW_BUILD_ID_OFST 52 +#define MC_CMD_GET_VERSION_V4_OUT_MCFW_BUILD_ID_LEN 20 +/* MC firmware security level */ +#define MC_CMD_GET_VERSION_V4_OUT_MCFW_SECURITY_LEVEL_OFST 72 +#define MC_CMD_GET_VERSION_V4_OUT_MCFW_SECURITY_LEVEL_LEN 4 +/* MC firmware build name (as null-terminated US-ASCII string) */ +#define MC_CMD_GET_VERSION_V4_OUT_MCFW_BUILD_NAME_OFST 76 +#define MC_CMD_GET_VERSION_V4_OUT_MCFW_BUILD_NAME_LEN 64 +/* The SUC firmware version as four numbers - a.b.c.d */ +#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_VERSION_OFST 140 +#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_VERSION_LEN 4 +#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_VERSION_NUM 4 +/* SUC firmware build date (as 64-bit Unix timestamp) */ +#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_BUILD_DATE_OFST 156 +#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_BUILD_DATE_LEN 8 +#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_BUILD_DATE_LO_OFST 156 +#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_BUILD_DATE_LO_LEN 4 +#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_BUILD_DATE_LO_LBN 1248 +#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_BUILD_DATE_LO_WIDTH 
32 +#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_BUILD_DATE_HI_OFST 160 +#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_BUILD_DATE_HI_LEN 4 +#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_BUILD_DATE_HI_LBN 1280 +#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_BUILD_DATE_HI_WIDTH 32 +/* The ID of the SUC chip. This is specific to the platform but typically + * indicates family, memory sizes etc. See SF-116728-SW for further details. + */ +#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_CHIP_ID_OFST 164 +#define MC_CMD_GET_VERSION_V4_OUT_SUCFW_CHIP_ID_LEN 4 +/* The CMC firmware version as four numbers - a.b.c.d */ +#define MC_CMD_GET_VERSION_V4_OUT_CMCFW_VERSION_OFST 168 +#define MC_CMD_GET_VERSION_V4_OUT_CMCFW_VERSION_LEN 4 +#define MC_CMD_GET_VERSION_V4_OUT_CMCFW_VERSION_NUM 4 +/* CMC firmware build date (as 64-bit Unix timestamp) */ +#define MC_CMD_GET_VERSION_V4_OUT_CMCFW_BUILD_DATE_OFST 184 +#define MC_CMD_GET_VERSION_V4_OUT_CMCFW_BUILD_DATE_LEN 8 +#define MC_CMD_GET_VERSION_V4_OUT_CMCFW_BUILD_DATE_LO_OFST 184 +#define MC_CMD_GET_VERSION_V4_OUT_CMCFW_BUILD_DATE_LO_LEN 4 +#define MC_CMD_GET_VERSION_V4_OUT_CMCFW_BUILD_DATE_LO_LBN 1472 +#define MC_CMD_GET_VERSION_V4_OUT_CMCFW_BUILD_DATE_LO_WIDTH 32 +#define MC_CMD_GET_VERSION_V4_OUT_CMCFW_BUILD_DATE_HI_OFST 188 +#define MC_CMD_GET_VERSION_V4_OUT_CMCFW_BUILD_DATE_HI_LEN 4 +#define MC_CMD_GET_VERSION_V4_OUT_CMCFW_BUILD_DATE_HI_LBN 1504 +#define MC_CMD_GET_VERSION_V4_OUT_CMCFW_BUILD_DATE_HI_WIDTH 32 +/* FPGA version as three numbers. On Riverhead based systems this field uses + * the same encoding as hardware version ID registers (MC_FPGA_BUILD_HWRD_REG): + * FPGA_VERSION[0]: x => Image H{x} FPGA_VERSION[1]: Revision letter (0 => A, 1 + * => B, ...) FPGA_VERSION[2]: Sub-revision number + */ +#define MC_CMD_GET_VERSION_V4_OUT_FPGA_VERSION_OFST 192 +#define MC_CMD_GET_VERSION_V4_OUT_FPGA_VERSION_LEN 4 +#define MC_CMD_GET_VERSION_V4_OUT_FPGA_VERSION_NUM 3 +/* Extra FPGA revision information (as null-terminated US-ASCII string) */ +#define MC_CMD_GET_VERSION_V4_OUT_FPGA_EXTRA_OFST 204 +#define MC_CMD_GET_VERSION_V4_OUT_FPGA_EXTRA_LEN 16 +/* Board name / adapter model (as null-terminated US-ASCII string) */ +#define MC_CMD_GET_VERSION_V4_OUT_BOARD_NAME_OFST 220 +#define MC_CMD_GET_VERSION_V4_OUT_BOARD_NAME_LEN 16 +/* Board revision number */ +#define MC_CMD_GET_VERSION_V4_OUT_BOARD_REVISION_OFST 236 +#define MC_CMD_GET_VERSION_V4_OUT_BOARD_REVISION_LEN 4 +/* Board serial number (as null-terminated US-ASCII string) */ +#define MC_CMD_GET_VERSION_V4_OUT_BOARD_SERIAL_OFST 240 +#define MC_CMD_GET_VERSION_V4_OUT_BOARD_SERIAL_LEN 64 +/* The version of the datapath hardware design as three numbers - a.b.c */ +#define MC_CMD_GET_VERSION_V4_OUT_DATAPATH_HW_VERSION_OFST 304 +#define MC_CMD_GET_VERSION_V4_OUT_DATAPATH_HW_VERSION_LEN 4 +#define MC_CMD_GET_VERSION_V4_OUT_DATAPATH_HW_VERSION_NUM 3 +/* The version of the firmware library used to control the datapath as three + * numbers - a.b.c + */ +#define MC_CMD_GET_VERSION_V4_OUT_DATAPATH_FW_VERSION_OFST 316 +#define MC_CMD_GET_VERSION_V4_OUT_DATAPATH_FW_VERSION_LEN 4 +#define MC_CMD_GET_VERSION_V4_OUT_DATAPATH_FW_VERSION_NUM 3 +/* The SOC boot version as four numbers - a.b.c.d */ +#define MC_CMD_GET_VERSION_V4_OUT_SOC_BOOT_VERSION_OFST 328 +#define MC_CMD_GET_VERSION_V4_OUT_SOC_BOOT_VERSION_LEN 4 +#define MC_CMD_GET_VERSION_V4_OUT_SOC_BOOT_VERSION_NUM 4 +/* The SOC uboot version as four numbers - a.b.c.d */ +#define MC_CMD_GET_VERSION_V4_OUT_SOC_UBOOT_VERSION_OFST 344 +#define MC_CMD_GET_VERSION_V4_OUT_SOC_UBOOT_VERSION_LEN 4 +#define
MC_CMD_GET_VERSION_V4_OUT_SOC_UBOOT_VERSION_NUM 4 +/* The SOC main rootfs version as four numbers - a.b.c.d */ +#define MC_CMD_GET_VERSION_V4_OUT_SOC_MAIN_ROOTFS_VERSION_OFST 360 +#define MC_CMD_GET_VERSION_V4_OUT_SOC_MAIN_ROOTFS_VERSION_LEN 4 +#define MC_CMD_GET_VERSION_V4_OUT_SOC_MAIN_ROOTFS_VERSION_NUM 4 +/* The SOC recovery buildroot version as four numbers - a.b.c.d */ +#define MC_CMD_GET_VERSION_V4_OUT_SOC_RECOVERY_BUILDROOT_VERSION_OFST 376 +#define MC_CMD_GET_VERSION_V4_OUT_SOC_RECOVERY_BUILDROOT_VERSION_LEN 4 +#define MC_CMD_GET_VERSION_V4_OUT_SOC_RECOVERY_BUILDROOT_VERSION_NUM 4 + +/* MC_CMD_GET_VERSION_V5_OUT msgresponse: Extended response providing bundle + * and board version information + */ +#define MC_CMD_GET_VERSION_V5_OUT_LEN 424 +/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */ +/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */ +/* Enum values, see field(s): */ +/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */ +#define MC_CMD_GET_VERSION_V5_OUT_PCOL_OFST 4 +#define MC_CMD_GET_VERSION_V5_OUT_PCOL_LEN 4 +/* 128bit mask of functions supported by the current firmware */ +#define MC_CMD_GET_VERSION_V5_OUT_SUPPORTED_FUNCS_OFST 8 +#define MC_CMD_GET_VERSION_V5_OUT_SUPPORTED_FUNCS_LEN 16 +#define MC_CMD_GET_VERSION_V5_OUT_VERSION_OFST 24 +#define MC_CMD_GET_VERSION_V5_OUT_VERSION_LEN 8 +#define MC_CMD_GET_VERSION_V5_OUT_VERSION_LO_OFST 24 +#define MC_CMD_GET_VERSION_V5_OUT_VERSION_LO_LEN 4 +#define MC_CMD_GET_VERSION_V5_OUT_VERSION_LO_LBN 192 +#define MC_CMD_GET_VERSION_V5_OUT_VERSION_LO_WIDTH 32 +#define MC_CMD_GET_VERSION_V5_OUT_VERSION_HI_OFST 28 +#define MC_CMD_GET_VERSION_V5_OUT_VERSION_HI_LEN 4 +#define MC_CMD_GET_VERSION_V5_OUT_VERSION_HI_LBN 224 +#define MC_CMD_GET_VERSION_V5_OUT_VERSION_HI_WIDTH 32 +/* extra info */ +#define MC_CMD_GET_VERSION_V5_OUT_EXTRA_OFST 32 +#define MC_CMD_GET_VERSION_V5_OUT_EXTRA_LEN 16 +/* Flags indicating which extended fields are valid */ +#define MC_CMD_GET_VERSION_V5_OUT_FLAGS_OFST 48 +#define MC_CMD_GET_VERSION_V5_OUT_FLAGS_LEN 4 +#define MC_CMD_GET_VERSION_V5_OUT_MCFW_EXT_INFO_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V5_OUT_MCFW_EXT_INFO_PRESENT_LBN 0 +#define MC_CMD_GET_VERSION_V5_OUT_MCFW_EXT_INFO_PRESENT_WIDTH 1 +#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_EXT_INFO_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_EXT_INFO_PRESENT_LBN 1 +#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_EXT_INFO_PRESENT_WIDTH 1 +#define MC_CMD_GET_VERSION_V5_OUT_CMC_EXT_INFO_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V5_OUT_CMC_EXT_INFO_PRESENT_LBN 2 +#define MC_CMD_GET_VERSION_V5_OUT_CMC_EXT_INFO_PRESENT_WIDTH 1 +#define MC_CMD_GET_VERSION_V5_OUT_FPGA_EXT_INFO_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V5_OUT_FPGA_EXT_INFO_PRESENT_LBN 3 +#define MC_CMD_GET_VERSION_V5_OUT_FPGA_EXT_INFO_PRESENT_WIDTH 1 +#define MC_CMD_GET_VERSION_V5_OUT_BOARD_EXT_INFO_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V5_OUT_BOARD_EXT_INFO_PRESENT_LBN 4 +#define MC_CMD_GET_VERSION_V5_OUT_BOARD_EXT_INFO_PRESENT_WIDTH 1 +#define MC_CMD_GET_VERSION_V5_OUT_DATAPATH_HW_VERSION_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V5_OUT_DATAPATH_HW_VERSION_PRESENT_LBN 5 +#define MC_CMD_GET_VERSION_V5_OUT_DATAPATH_HW_VERSION_PRESENT_WIDTH 1 +#define MC_CMD_GET_VERSION_V5_OUT_DATAPATH_FW_VERSION_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V5_OUT_DATAPATH_FW_VERSION_PRESENT_LBN 6 +#define MC_CMD_GET_VERSION_V5_OUT_DATAPATH_FW_VERSION_PRESENT_WIDTH 1 +#define MC_CMD_GET_VERSION_V5_OUT_SOC_BOOT_VERSION_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V5_OUT_SOC_BOOT_VERSION_PRESENT_LBN 7 +#define 
MC_CMD_GET_VERSION_V5_OUT_SOC_BOOT_VERSION_PRESENT_WIDTH 1 +#define MC_CMD_GET_VERSION_V5_OUT_SOC_UBOOT_VERSION_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V5_OUT_SOC_UBOOT_VERSION_PRESENT_LBN 8 +#define MC_CMD_GET_VERSION_V5_OUT_SOC_UBOOT_VERSION_PRESENT_WIDTH 1 +#define MC_CMD_GET_VERSION_V5_OUT_SOC_MAIN_ROOTFS_VERSION_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V5_OUT_SOC_MAIN_ROOTFS_VERSION_PRESENT_LBN 9 +#define MC_CMD_GET_VERSION_V5_OUT_SOC_MAIN_ROOTFS_VERSION_PRESENT_WIDTH 1 +#define MC_CMD_GET_VERSION_V5_OUT_SOC_RECOVERY_BUILDROOT_VERSION_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V5_OUT_SOC_RECOVERY_BUILDROOT_VERSION_PRESENT_LBN 10 +#define MC_CMD_GET_VERSION_V5_OUT_SOC_RECOVERY_BUILDROOT_VERSION_PRESENT_WIDTH 1 +#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_VERSION_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_VERSION_PRESENT_LBN 11 +#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_VERSION_PRESENT_WIDTH 1 +#define MC_CMD_GET_VERSION_V5_OUT_BOARD_VERSION_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V5_OUT_BOARD_VERSION_PRESENT_LBN 12 +#define MC_CMD_GET_VERSION_V5_OUT_BOARD_VERSION_PRESENT_WIDTH 1 +#define MC_CMD_GET_VERSION_V5_OUT_BUNDLE_VERSION_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V5_OUT_BUNDLE_VERSION_PRESENT_LBN 13 +#define MC_CMD_GET_VERSION_V5_OUT_BUNDLE_VERSION_PRESENT_WIDTH 1 +/* MC firmware unique build ID (as binary SHA-1 value) */ +#define MC_CMD_GET_VERSION_V5_OUT_MCFW_BUILD_ID_OFST 52 +#define MC_CMD_GET_VERSION_V5_OUT_MCFW_BUILD_ID_LEN 20 +/* MC firmware security level */ +#define MC_CMD_GET_VERSION_V5_OUT_MCFW_SECURITY_LEVEL_OFST 72 +#define MC_CMD_GET_VERSION_V5_OUT_MCFW_SECURITY_LEVEL_LEN 4 +/* MC firmware build name (as null-terminated US-ASCII string) */ +#define MC_CMD_GET_VERSION_V5_OUT_MCFW_BUILD_NAME_OFST 76 +#define MC_CMD_GET_VERSION_V5_OUT_MCFW_BUILD_NAME_LEN 64 +/* The SUC firmware version as four numbers - a.b.c.d */ +#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_VERSION_OFST 140 +#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_VERSION_LEN 4 +#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_VERSION_NUM 4 +/* SUC firmware build date (as 64-bit Unix timestamp) */ +#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_BUILD_DATE_OFST 156 +#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_BUILD_DATE_LEN 8 +#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_BUILD_DATE_LO_OFST 156 +#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_BUILD_DATE_LO_LEN 4 +#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_BUILD_DATE_LO_LBN 1248 +#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_BUILD_DATE_LO_WIDTH 32 +#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_BUILD_DATE_HI_OFST 160 +#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_BUILD_DATE_HI_LEN 4 +#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_BUILD_DATE_HI_LBN 1280 +#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_BUILD_DATE_HI_WIDTH 32 +/* The ID of the SUC chip. This is specific to the platform but typically + * indicates family, memory sizes etc. See SF-116728-SW for further details. 
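+ *
+ * Illustrative only: the four-dword version arrays in this response
+ * (SUCFW_VERSION, CMCFW_VERSION, ...) carry one little-endian number per
+ * dword, so an "a.b.c.d" string can be built as in this sketch (assumes
+ * the MCDI_PTR helper from mcdi.h):
+ *
+ *        const __le32 *v = (const __le32 *)
+ *                MCDI_PTR(outbuf, GET_VERSION_V3_OUT_SUCFW_VERSION);
+ *        char buf[32];
+ *
+ *        snprintf(buf, sizeof(buf), "%u.%u.%u.%u",
+ *                 le32_to_cpu(v[0]), le32_to_cpu(v[1]),
+ *                 le32_to_cpu(v[2]), le32_to_cpu(v[3]));
+ *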
+ */ +#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_CHIP_ID_OFST 164 +#define MC_CMD_GET_VERSION_V5_OUT_SUCFW_CHIP_ID_LEN 4 +/* The CMC firmware version as four numbers - a.b.c.d */ +#define MC_CMD_GET_VERSION_V5_OUT_CMCFW_VERSION_OFST 168 +#define MC_CMD_GET_VERSION_V5_OUT_CMCFW_VERSION_LEN 4 +#define MC_CMD_GET_VERSION_V5_OUT_CMCFW_VERSION_NUM 4 +/* CMC firmware build date (as 64-bit Unix timestamp) */ +#define MC_CMD_GET_VERSION_V5_OUT_CMCFW_BUILD_DATE_OFST 184 +#define MC_CMD_GET_VERSION_V5_OUT_CMCFW_BUILD_DATE_LEN 8 +#define MC_CMD_GET_VERSION_V5_OUT_CMCFW_BUILD_DATE_LO_OFST 184 +#define MC_CMD_GET_VERSION_V5_OUT_CMCFW_BUILD_DATE_LO_LEN 4 +#define MC_CMD_GET_VERSION_V5_OUT_CMCFW_BUILD_DATE_LO_LBN 1472 +#define MC_CMD_GET_VERSION_V5_OUT_CMCFW_BUILD_DATE_LO_WIDTH 32 +#define MC_CMD_GET_VERSION_V5_OUT_CMCFW_BUILD_DATE_HI_OFST 188 +#define MC_CMD_GET_VERSION_V5_OUT_CMCFW_BUILD_DATE_HI_LEN 4 +#define MC_CMD_GET_VERSION_V5_OUT_CMCFW_BUILD_DATE_HI_LBN 1504 +#define MC_CMD_GET_VERSION_V5_OUT_CMCFW_BUILD_DATE_HI_WIDTH 32 +/* FPGA version as three numbers. On Riverhead based systems this field uses + * the same encoding as hardware version ID registers (MC_FPGA_BUILD_HWRD_REG): + * FPGA_VERSION[0]: x => Image H{x} FPGA_VERSION[1]: Revision letter (0 => A, 1 + * => B, ...) FPGA_VERSION[2]: Sub-revision number + */ +#define MC_CMD_GET_VERSION_V5_OUT_FPGA_VERSION_OFST 192 +#define MC_CMD_GET_VERSION_V5_OUT_FPGA_VERSION_LEN 4 +#define MC_CMD_GET_VERSION_V5_OUT_FPGA_VERSION_NUM 3 +/* Extra FPGA revision information (as null-terminated US-ASCII string) */ +#define MC_CMD_GET_VERSION_V5_OUT_FPGA_EXTRA_OFST 204 +#define MC_CMD_GET_VERSION_V5_OUT_FPGA_EXTRA_LEN 16 +/* Board name / adapter model (as null-terminated US-ASCII string) */ +#define MC_CMD_GET_VERSION_V5_OUT_BOARD_NAME_OFST 220 +#define MC_CMD_GET_VERSION_V5_OUT_BOARD_NAME_LEN 16 +/* Board revision number */ +#define MC_CMD_GET_VERSION_V5_OUT_BOARD_REVISION_OFST 236 +#define MC_CMD_GET_VERSION_V5_OUT_BOARD_REVISION_LEN 4 +/* Board serial number (as null-terminated US-ASCII string) */ +#define MC_CMD_GET_VERSION_V5_OUT_BOARD_SERIAL_OFST 240 +#define MC_CMD_GET_VERSION_V5_OUT_BOARD_SERIAL_LEN 64 +/* The version of the datapath hardware design as three numbers - a.b.c */ +#define MC_CMD_GET_VERSION_V5_OUT_DATAPATH_HW_VERSION_OFST 304 +#define MC_CMD_GET_VERSION_V5_OUT_DATAPATH_HW_VERSION_LEN 4 +#define MC_CMD_GET_VERSION_V5_OUT_DATAPATH_HW_VERSION_NUM 3 +/* The version of the firmware library used to control the datapath as three + * numbers - a.b.c + */ +#define MC_CMD_GET_VERSION_V5_OUT_DATAPATH_FW_VERSION_OFST 316 +#define MC_CMD_GET_VERSION_V5_OUT_DATAPATH_FW_VERSION_LEN 4 +#define MC_CMD_GET_VERSION_V5_OUT_DATAPATH_FW_VERSION_NUM 3 +/* The SOC boot version as four numbers - a.b.c.d */ +#define MC_CMD_GET_VERSION_V5_OUT_SOC_BOOT_VERSION_OFST 328 +#define MC_CMD_GET_VERSION_V5_OUT_SOC_BOOT_VERSION_LEN 4 +#define MC_CMD_GET_VERSION_V5_OUT_SOC_BOOT_VERSION_NUM 4 +/* The SOC uboot version as four numbers - a.b.c.d */ +#define MC_CMD_GET_VERSION_V5_OUT_SOC_UBOOT_VERSION_OFST 344 +#define MC_CMD_GET_VERSION_V5_OUT_SOC_UBOOT_VERSION_LEN 4 +#define MC_CMD_GET_VERSION_V5_OUT_SOC_UBOOT_VERSION_NUM 4 +/* The SOC main rootfs version as four numbers - a.b.c.d */ +#define MC_CMD_GET_VERSION_V5_OUT_SOC_MAIN_ROOTFS_VERSION_OFST 360 +#define MC_CMD_GET_VERSION_V5_OUT_SOC_MAIN_ROOTFS_VERSION_LEN 4 +#define MC_CMD_GET_VERSION_V5_OUT_SOC_MAIN_ROOTFS_VERSION_NUM 4 +/* The SOC recovery buildroot version as four numbers - a.b.c.d */ +#define
MC_CMD_GET_VERSION_V5_OUT_SOC_RECOVERY_BUILDROOT_VERSION_OFST 376 +#define MC_CMD_GET_VERSION_V5_OUT_SOC_RECOVERY_BUILDROOT_VERSION_LEN 4 +#define MC_CMD_GET_VERSION_V5_OUT_SOC_RECOVERY_BUILDROOT_VERSION_NUM 4 +/* Board version as four numbers - a.b.c.d. BOARD_VERSION[0] duplicates the + * BOARD_REVISION field + */ +#define MC_CMD_GET_VERSION_V5_OUT_BOARD_VERSION_OFST 392 +#define MC_CMD_GET_VERSION_V5_OUT_BOARD_VERSION_LEN 4 +#define MC_CMD_GET_VERSION_V5_OUT_BOARD_VERSION_NUM 4 +/* Bundle version as four numbers - a.b.c.d */ +#define MC_CMD_GET_VERSION_V5_OUT_BUNDLE_VERSION_OFST 408 +#define MC_CMD_GET_VERSION_V5_OUT_BUNDLE_VERSION_LEN 4 +#define MC_CMD_GET_VERSION_V5_OUT_BUNDLE_VERSION_NUM 4 + + +/***********************************/ +/* MC_CMD_PTP + * Perform PTP operation + */ +#define MC_CMD_PTP 0xb +#undef MC_CMD_0xb_PRIVILEGE_CTG + +#define MC_CMD_0xb_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_PTP_IN msgrequest */ +#define MC_CMD_PTP_IN_LEN 1 +/* PTP operation code */ +#define MC_CMD_PTP_IN_OP_OFST 0 +#define MC_CMD_PTP_IN_OP_LEN 1 +/* enum: Enable PTP packet timestamping operation. */ +#define MC_CMD_PTP_OP_ENABLE 0x1 +/* enum: Disable PTP packet timestamping operation. */ +#define MC_CMD_PTP_OP_DISABLE 0x2 +/* enum: Send a PTP packet. This operation is used on Siena and Huntington. + * From Medford onwards it is not supported: on those platforms PTP transmit + * timestamping is done using the fast path. + */ +#define MC_CMD_PTP_OP_TRANSMIT 0x3 +/* enum: Read the current NIC time. */ +#define MC_CMD_PTP_OP_READ_NIC_TIME 0x4 +/* enum: Get the current PTP status. Note that the clock frequency returned (in + * Hz) is rounded to the nearest MHz (e.g. 666000000 for 666666666). + */ +#define MC_CMD_PTP_OP_STATUS 0x5 +/* enum: Adjust the PTP NIC's time. */ +#define MC_CMD_PTP_OP_ADJUST 0x6 +/* enum: Synchronize host and NIC time. */ +#define MC_CMD_PTP_OP_SYNCHRONIZE 0x7 +/* enum: Basic manufacturing tests. Siena PTP adapters only. */ +#define MC_CMD_PTP_OP_MANFTEST_BASIC 0x8 +/* enum: Packet based manufacturing tests. Siena PTP adapters only. */ +#define MC_CMD_PTP_OP_MANFTEST_PACKET 0x9 +/* enum: Reset some of the PTP related statistics */ +#define MC_CMD_PTP_OP_RESET_STATS 0xa +/* enum: Debug operations to MC. */ +#define MC_CMD_PTP_OP_DEBUG 0xb +/* enum: Read an FPGA register. Siena PTP adapters only. */ +#define MC_CMD_PTP_OP_FPGAREAD 0xc +/* enum: Write an FPGA register. Siena PTP adapters only. */ +#define MC_CMD_PTP_OP_FPGAWRITE 0xd +/* enum: Apply an offset to the NIC clock */ +#define MC_CMD_PTP_OP_CLOCK_OFFSET_ADJUST 0xe +/* enum: Change the frequency correction applied to the NIC clock */ +#define MC_CMD_PTP_OP_CLOCK_FREQ_ADJUST 0xf +/* enum: Set the MC packet filter VLAN tags for received PTP packets. + * Deprecated for Huntington onwards. + */ +#define MC_CMD_PTP_OP_RX_SET_VLAN_FILTER 0x10 +/* enum: Set the MC packet filter UUID for received PTP packets. Deprecated for + * Huntington onwards. + */ +#define MC_CMD_PTP_OP_RX_SET_UUID_FILTER 0x11 +/* enum: Set the MC packet filter Domain for received PTP packets. Deprecated + * for Huntington onwards. + */ +#define MC_CMD_PTP_OP_RX_SET_DOMAIN_FILTER 0x12 +/* enum: Set the clock source. Required for snapper tests on Huntington and + * Medford. Not implemented for Siena or Medford2. + */ +#define MC_CMD_PTP_OP_SET_CLK_SRC 0x13 +/* enum: Reset value of Timer Reg. Not implemented. 
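+ *
+ * Illustrative only: every MC_CMD_PTP request starts with the OP code at
+ * offset 0 and (for all ops except PPS_ENABLE below) PERIPH_ID at offset
+ * 4, followed by any op-specific fields. A sketch of reading the NIC
+ * clock, assuming the MCDI helpers from mcdi.h:
+ *
+ *        MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_READ_NIC_TIME_LEN);
+ *        MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_READ_NIC_TIME_LEN);
+ *        int rc;
+ *
+ *        MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_READ_NIC_TIME);
+ *        MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+ *        rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+ *                          outbuf, sizeof(outbuf), NULL);
+ *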
*/ +#define MC_CMD_PTP_OP_RST_CLK 0x14 +/* enum: Enable the forwarding of PPS events to the host */ +#define MC_CMD_PTP_OP_PPS_ENABLE 0x15 +/* enum: Get the time format used by this NIC for PTP operations */ +#define MC_CMD_PTP_OP_GET_TIME_FORMAT 0x16 +/* enum: Get the clock attributes. NOTE - extended version of + * MC_CMD_PTP_OP_GET_TIME_FORMAT + */ +#define MC_CMD_PTP_OP_GET_ATTRIBUTES 0x16 +/* enum: Get corrections that should be applied to the various different + * timestamps + */ +#define MC_CMD_PTP_OP_GET_TIMESTAMP_CORRECTIONS 0x17 +/* enum: Subscribe to receive periodic time events indicating the current NIC + * time + */ +#define MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE 0x18 +/* enum: Unsubscribe to stop receiving time events */ +#define MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE 0x19 +/* enum: PPS based manufacturing tests. Requires PPS output to be looped to PPS + * input on the same NIC. Siena PTP adapters only. + */ +#define MC_CMD_PTP_OP_MANFTEST_PPS 0x1a +/* enum: Set the PTP sync status. Status is used by firmware to report to event + * subscribers. + */ +#define MC_CMD_PTP_OP_SET_SYNC_STATUS 0x1b +/* enum: Above this for future use. */ +#define MC_CMD_PTP_OP_MAX 0x1c + +/* MC_CMD_PTP_IN_ENABLE msgrequest */ +#define MC_CMD_PTP_IN_ENABLE_LEN 16 +#define MC_CMD_PTP_IN_CMD_OFST 0 +#define MC_CMD_PTP_IN_CMD_LEN 4 +#define MC_CMD_PTP_IN_PERIPH_ID_OFST 4 +#define MC_CMD_PTP_IN_PERIPH_ID_LEN 4 +/* Not used, initialize to 0. Events are always sent to function relative queue + * 0. + */ +#define MC_CMD_PTP_IN_ENABLE_QUEUE_OFST 8 +#define MC_CMD_PTP_IN_ENABLE_QUEUE_LEN 4 +/* PTP timestamping mode. Not used from Huntington onwards. */ +#define MC_CMD_PTP_IN_ENABLE_MODE_OFST 12 +#define MC_CMD_PTP_IN_ENABLE_MODE_LEN 4 +/* enum: PTP, version 1 */ +#define MC_CMD_PTP_MODE_V1 0x0 +/* enum: PTP, version 1, with VLAN headers - deprecated */ +#define MC_CMD_PTP_MODE_V1_VLAN 0x1 +/* enum: PTP, version 2 */ +#define MC_CMD_PTP_MODE_V2 0x2 +/* enum: PTP, version 2, with VLAN headers - deprecated */ +#define MC_CMD_PTP_MODE_V2_VLAN 0x3 +/* enum: PTP, version 2, with improved UUID filtering */ +#define MC_CMD_PTP_MODE_V2_ENHANCED 0x4 +/* enum: FCoE (seconds and microseconds) */ +#define MC_CMD_PTP_MODE_FCOE 0x5 + +/* MC_CMD_PTP_IN_DISABLE msgrequest */ +#define MC_CMD_PTP_IN_DISABLE_LEN 8 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ + +/* MC_CMD_PTP_IN_TRANSMIT msgrequest */ +#define MC_CMD_PTP_IN_TRANSMIT_LENMIN 13 +#define MC_CMD_PTP_IN_TRANSMIT_LENMAX 252 +#define MC_CMD_PTP_IN_TRANSMIT_LENMAX_MCDI2 1020 +#define MC_CMD_PTP_IN_TRANSMIT_LEN(num) (12+1*(num)) +#define MC_CMD_PTP_IN_TRANSMIT_PACKET_NUM(len) (((len)-12)/1) +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* Transmit packet length */ +#define MC_CMD_PTP_IN_TRANSMIT_LENGTH_OFST 8 +#define MC_CMD_PTP_IN_TRANSMIT_LENGTH_LEN 4 +/* Transmit packet data */ +#define MC_CMD_PTP_IN_TRANSMIT_PACKET_OFST 12 +#define MC_CMD_PTP_IN_TRANSMIT_PACKET_LEN 1 +#define MC_CMD_PTP_IN_TRANSMIT_PACKET_MINNUM 1 +#define MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM 240 +#define MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM_MCDI2 1008 + +/* MC_CMD_PTP_IN_READ_NIC_TIME msgrequest */ +#define MC_CMD_PTP_IN_READ_NIC_TIME_LEN 8 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ + +/* MC_CMD_PTP_IN_READ_NIC_TIME_V2 msgrequest
*/ +#define MC_CMD_PTP_IN_READ_NIC_TIME_V2_LEN 8 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ + +/* MC_CMD_PTP_IN_STATUS msgrequest */ +#define MC_CMD_PTP_IN_STATUS_LEN 8 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ + +/* MC_CMD_PTP_IN_ADJUST msgrequest */ +#define MC_CMD_PTP_IN_ADJUST_LEN 24 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* Frequency adjustment 40 bit fixed point ns */ +#define MC_CMD_PTP_IN_ADJUST_FREQ_OFST 8 +#define MC_CMD_PTP_IN_ADJUST_FREQ_LEN 8 +#define MC_CMD_PTP_IN_ADJUST_FREQ_LO_OFST 8 +#define MC_CMD_PTP_IN_ADJUST_FREQ_LO_LEN 4 +#define MC_CMD_PTP_IN_ADJUST_FREQ_LO_LBN 64 +#define MC_CMD_PTP_IN_ADJUST_FREQ_LO_WIDTH 32 +#define MC_CMD_PTP_IN_ADJUST_FREQ_HI_OFST 12 +#define MC_CMD_PTP_IN_ADJUST_FREQ_HI_LEN 4 +#define MC_CMD_PTP_IN_ADJUST_FREQ_HI_LBN 96 +#define MC_CMD_PTP_IN_ADJUST_FREQ_HI_WIDTH 32 +/* enum: Number of fractional bits in frequency adjustment */ +#define MC_CMD_PTP_IN_ADJUST_BITS 0x28 +/* enum: Number of fractional bits in frequency adjustment when FP44_FREQ_ADJ + * is indicated in the MC_CMD_PTP_OUT_GET_ATTRIBUTES command CAPABILITIES + * field. + */ +#define MC_CMD_PTP_IN_ADJUST_BITS_FP44 0x2c +/* Time adjustment in seconds */ +#define MC_CMD_PTP_IN_ADJUST_SECONDS_OFST 16 +#define MC_CMD_PTP_IN_ADJUST_SECONDS_LEN 4 +/* Time adjustment major value */ +#define MC_CMD_PTP_IN_ADJUST_MAJOR_OFST 16 +#define MC_CMD_PTP_IN_ADJUST_MAJOR_LEN 4 +/* Time adjustment in nanoseconds */ +#define MC_CMD_PTP_IN_ADJUST_NANOSECONDS_OFST 20 +#define MC_CMD_PTP_IN_ADJUST_NANOSECONDS_LEN 4 +/* Time adjustment minor value */ +#define MC_CMD_PTP_IN_ADJUST_MINOR_OFST 20 +#define MC_CMD_PTP_IN_ADJUST_MINOR_LEN 4 + +/* MC_CMD_PTP_IN_ADJUST_V2 msgrequest */ +#define MC_CMD_PTP_IN_ADJUST_V2_LEN 28 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* Frequency adjustment 40 bit fixed point ns */ +#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_OFST 8 +#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_LEN 8 +#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_LO_OFST 8 +#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_LO_LEN 4 +#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_LO_LBN 64 +#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_LO_WIDTH 32 +#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_HI_OFST 12 +#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_HI_LEN 4 +#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_HI_LBN 96 +#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_HI_WIDTH 32 +/* enum: Number of fractional bits in frequency adjustment */ +/* MC_CMD_PTP_IN_ADJUST_BITS 0x28 */ +/* enum: Number of fractional bits in frequency adjustment when FP44_FREQ_ADJ + * is indicated in the MC_CMD_PTP_OUT_GET_ATTRIBUTES command CAPABILITIES + * field. 
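+ *
+ * Illustrative only: FREQ is a signed fixed-point value with the number
+ * of fractional bits given above, so a correction expressed in parts per
+ * billion converts as in this sketch (64-bit arithmetic, div_s64 from
+ * linux/math64.h):
+ *
+ *        s64 freq = div_s64((s64)ppb << MC_CMD_PTP_IN_ADJUST_BITS,
+ *                           1000000000);
+ *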
+ */ +/* MC_CMD_PTP_IN_ADJUST_BITS_FP44 0x2c */ +/* Time adjustment in seconds */ +#define MC_CMD_PTP_IN_ADJUST_V2_SECONDS_OFST 16 +#define MC_CMD_PTP_IN_ADJUST_V2_SECONDS_LEN 4 +/* Time adjustment major value */ +#define MC_CMD_PTP_IN_ADJUST_V2_MAJOR_OFST 16 +#define MC_CMD_PTP_IN_ADJUST_V2_MAJOR_LEN 4 +/* Time adjustment in nanoseconds */ +#define MC_CMD_PTP_IN_ADJUST_V2_NANOSECONDS_OFST 20 +#define MC_CMD_PTP_IN_ADJUST_V2_NANOSECONDS_LEN 4 +/* Time adjustment minor value */ +#define MC_CMD_PTP_IN_ADJUST_V2_MINOR_OFST 20 +#define MC_CMD_PTP_IN_ADJUST_V2_MINOR_LEN 4 +/* Upper 32bits of major time offset adjustment */ +#define MC_CMD_PTP_IN_ADJUST_V2_MAJOR_HI_OFST 24 +#define MC_CMD_PTP_IN_ADJUST_V2_MAJOR_HI_LEN 4 + +/* MC_CMD_PTP_IN_SYNCHRONIZE msgrequest */ +#define MC_CMD_PTP_IN_SYNCHRONIZE_LEN 20 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* Number of time readings to capture */ +#define MC_CMD_PTP_IN_SYNCHRONIZE_NUMTIMESETS_OFST 8 +#define MC_CMD_PTP_IN_SYNCHRONIZE_NUMTIMESETS_LEN 4 +/* Host address in which to write "synchronization started" indication (64 + * bits) + */ +#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_OFST 12 +#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LEN 8 +#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LO_OFST 12 +#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LO_LEN 4 +#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LO_LBN 96 +#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LO_WIDTH 32 +#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_HI_OFST 16 +#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_HI_LEN 4 +#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_HI_LBN 128 +#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_HI_WIDTH 32 + +/* MC_CMD_PTP_IN_MANFTEST_BASIC msgrequest */ +#define MC_CMD_PTP_IN_MANFTEST_BASIC_LEN 8 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ + +/* MC_CMD_PTP_IN_MANFTEST_PACKET msgrequest */ +#define MC_CMD_PTP_IN_MANFTEST_PACKET_LEN 12 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* Enable or disable packet testing */ +#define MC_CMD_PTP_IN_MANFTEST_PACKET_TEST_ENABLE_OFST 8 +#define MC_CMD_PTP_IN_MANFTEST_PACKET_TEST_ENABLE_LEN 4 + +/* MC_CMD_PTP_IN_RESET_STATS msgrequest: Reset PTP statistics */ +#define MC_CMD_PTP_IN_RESET_STATS_LEN 8 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ + +/* MC_CMD_PTP_IN_DEBUG msgrequest */ +#define MC_CMD_PTP_IN_DEBUG_LEN 12 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* Debug operations */ +#define MC_CMD_PTP_IN_DEBUG_DEBUG_PARAM_OFST 8 +#define MC_CMD_PTP_IN_DEBUG_DEBUG_PARAM_LEN 4 + +/* MC_CMD_PTP_IN_FPGAREAD msgrequest */ +#define MC_CMD_PTP_IN_FPGAREAD_LEN 16 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +#define MC_CMD_PTP_IN_FPGAREAD_ADDR_OFST 8 +#define MC_CMD_PTP_IN_FPGAREAD_ADDR_LEN 4 +#define MC_CMD_PTP_IN_FPGAREAD_NUMBYTES_OFST 12 +#define MC_CMD_PTP_IN_FPGAREAD_NUMBYTES_LEN 4 + +/* MC_CMD_PTP_IN_FPGAWRITE msgrequest */ +#define MC_CMD_PTP_IN_FPGAWRITE_LENMIN 13 +#define MC_CMD_PTP_IN_FPGAWRITE_LENMAX 252 +#define MC_CMD_PTP_IN_FPGAWRITE_LENMAX_MCDI2 1020 +#define 
MC_CMD_PTP_IN_FPGAWRITE_LEN(num) (12+1*(num)) +#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_NUM(len) (((len)-12)/1) +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +#define MC_CMD_PTP_IN_FPGAWRITE_ADDR_OFST 8 +#define MC_CMD_PTP_IN_FPGAWRITE_ADDR_LEN 4 +#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_OFST 12 +#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_LEN 1 +#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_MINNUM 1 +#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_MAXNUM 240 +#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_MAXNUM_MCDI2 1008 + +/* MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST msgrequest */ +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_LEN 16 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* Time adjustment in seconds */ +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_SECONDS_OFST 8 +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_SECONDS_LEN 4 +/* Time adjustment major value */ +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MAJOR_OFST 8 +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MAJOR_LEN 4 +/* Time adjustment in nanoseconds */ +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_NANOSECONDS_OFST 12 +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_NANOSECONDS_LEN 4 +/* Time adjustment minor value */ +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MINOR_OFST 12 +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MINOR_LEN 4 + +/* MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2 msgrequest */ +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_LEN 20 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* Time adjustment in seconds */ +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_SECONDS_OFST 8 +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_SECONDS_LEN 4 +/* Time adjustment major value */ +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_OFST 8 +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_LEN 4 +/* Time adjustment in nanoseconds */ +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_NANOSECONDS_OFST 12 +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_NANOSECONDS_LEN 4 +/* Time adjustment minor value */ +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MINOR_OFST 12 +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MINOR_LEN 4 +/* Upper 32bits of major time offset adjustment */ +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_HI_OFST 16 +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_HI_LEN 4 + +/* MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST msgrequest */ +#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_LEN 16 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* Frequency adjustment 40 bit fixed point ns */ +#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_OFST 8 +#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LEN 8 +#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LO_OFST 8 +#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LO_LEN 4 +#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LO_LBN 64 +#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LO_WIDTH 32 +#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_HI_OFST 12 +#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_HI_LEN 4 +#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_HI_LBN 96 +#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_HI_WIDTH 32 +/* Enum values, see field(s): */ +/* MC_CMD_PTP/MC_CMD_PTP_IN_ADJUST/FREQ */ + +/* MC_CMD_PTP_IN_RX_SET_VLAN_FILTER msgrequest */ +#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_LEN 24 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* 
MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* Number of VLAN tags, 0 if not VLAN */ +#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_NUM_VLAN_TAGS_OFST 8 +#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_NUM_VLAN_TAGS_LEN 4 +/* Set of VLAN tags to filter against */ +#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_OFST 12 +#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_LEN 4 +#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_NUM 3 + +/* MC_CMD_PTP_IN_RX_SET_UUID_FILTER msgrequest */ +#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_LEN 20 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* 1 to enable UUID filtering, 0 to disable */ +#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_ENABLE_OFST 8 +#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_ENABLE_LEN 4 +/* UUID to filter against */ +#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_OFST 12 +#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LEN 8 +#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LO_OFST 12 +#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LO_LEN 4 +#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LO_LBN 96 +#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LO_WIDTH 32 +#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_HI_OFST 16 +#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_HI_LEN 4 +#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_HI_LBN 128 +#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_HI_WIDTH 32 + +/* MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER msgrequest */ +#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_LEN 16 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* 1 to enable Domain filtering, 0 to disable */ +#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_ENABLE_OFST 8 +#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_ENABLE_LEN 4 +/* Domain number to filter against */ +#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_DOMAIN_OFST 12 +#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_DOMAIN_LEN 4 + +/* MC_CMD_PTP_IN_SET_CLK_SRC msgrequest */ +#define MC_CMD_PTP_IN_SET_CLK_SRC_LEN 12 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* Set the clock source. */ +#define MC_CMD_PTP_IN_SET_CLK_SRC_CLK_OFST 8 +#define MC_CMD_PTP_IN_SET_CLK_SRC_CLK_LEN 4 +/* enum: Internal. */ +#define MC_CMD_PTP_CLK_SRC_INTERNAL 0x0 +/* enum: External. */ +#define MC_CMD_PTP_CLK_SRC_EXTERNAL 0x1 + +/* MC_CMD_PTP_IN_RST_CLK msgrequest: Reset value of Timer Reg. */ +#define MC_CMD_PTP_IN_RST_CLK_LEN 8 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ + +/* MC_CMD_PTP_IN_PPS_ENABLE msgrequest */ +#define MC_CMD_PTP_IN_PPS_ENABLE_LEN 12 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* Enable or disable */ +#define MC_CMD_PTP_IN_PPS_ENABLE_OP_OFST 4 +#define MC_CMD_PTP_IN_PPS_ENABLE_OP_LEN 4 +/* enum: Enable */ +#define MC_CMD_PTP_ENABLE_PPS 0x0 +/* enum: Disable */ +#define MC_CMD_PTP_DISABLE_PPS 0x1 +/* Not used, initialize to 0. Events are always sent to function relative queue + * 0. 
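+ *
+ * Illustrative only: note that unlike the other PTP requests, PPS_ENABLE
+ * has no PERIPH_ID dword; the enable/disable selector sits at offset 4.
+ * Sketch of enabling PPS event forwarding (MCDI helpers from mcdi.h):
+ *
+ *        MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_PPS_ENABLE_LEN);
+ *        int rc;
+ *
+ *        MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_PPS_ENABLE);
+ *        MCDI_SET_DWORD(inbuf, PTP_IN_PPS_ENABLE_OP, MC_CMD_PTP_ENABLE_PPS);
+ *        MCDI_SET_DWORD(inbuf, PTP_IN_PPS_ENABLE_QUEUE_ID, 0);
+ *        rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+ *                          NULL, 0, NULL);
+ *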
+ */ +#define MC_CMD_PTP_IN_PPS_ENABLE_QUEUE_ID_OFST 8 +#define MC_CMD_PTP_IN_PPS_ENABLE_QUEUE_ID_LEN 4 + +/* MC_CMD_PTP_IN_GET_TIME_FORMAT msgrequest */ +#define MC_CMD_PTP_IN_GET_TIME_FORMAT_LEN 8 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ + +/* MC_CMD_PTP_IN_GET_ATTRIBUTES msgrequest */ +#define MC_CMD_PTP_IN_GET_ATTRIBUTES_LEN 8 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ + +/* MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS msgrequest */ +#define MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS_LEN 8 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ + +/* MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE msgrequest */ +#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN 12 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* Original field containing queue ID. Now extended to include flags. */ +#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_OFST 8 +#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_LEN 4 +#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_OFST 8 +#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_LBN 0 +#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_WIDTH 16 +#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_OFST 8 +#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_LBN 31 +#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_WIDTH 1 + +/* MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE msgrequest */ +#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN 16 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* Unsubscribe options */ +#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL_OFST 8 +#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL_LEN 4 +/* enum: Unsubscribe a single queue */ +#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE 0x0 +/* enum: Unsubscribe all queues */ +#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_ALL 0x1 +/* Event queue ID */ +#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE_OFST 12 +#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE_LEN 4 + +/* MC_CMD_PTP_IN_MANFTEST_PPS msgrequest */ +#define MC_CMD_PTP_IN_MANFTEST_PPS_LEN 12 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* 1 to enable PPS test mode, 0 to disable and return result. */ +#define MC_CMD_PTP_IN_MANFTEST_PPS_TEST_ENABLE_OFST 8 +#define MC_CMD_PTP_IN_MANFTEST_PPS_TEST_ENABLE_LEN 4 + +/* MC_CMD_PTP_IN_SET_SYNC_STATUS msgrequest */ +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_LEN 24 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* NIC - Host System Clock Synchronization status */ +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_STATUS_OFST 8 +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_STATUS_LEN 4 +/* enum: Host System clock and NIC clock are not in sync */ +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_NOT_IN_SYNC 0x0 +/* enum: Host System clock and NIC clock are synchronized */ +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_IN_SYNC 0x1 +/* If synchronized, number of seconds until clocks should be considered to be + * no longer in sync. 
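+ *
+ * Illustrative only: a sketch of reporting "in sync for the next 60
+ * seconds" (MCDI_DECLARE_BUF zero-fills the buffer, which also covers
+ * the RESERVED dwords below):
+ *
+ *        MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_SET_SYNC_STATUS_LEN);
+ *        int rc;
+ *
+ *        MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_SET_SYNC_STATUS);
+ *        MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+ *        MCDI_SET_DWORD(inbuf, PTP_IN_SET_SYNC_STATUS_STATUS,
+ *                       MC_CMD_PTP_IN_SET_SYNC_STATUS_IN_SYNC);
+ *        MCDI_SET_DWORD(inbuf, PTP_IN_SET_SYNC_STATUS_TIMEOUT, 60);
+ *        rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+ *                          NULL, 0, NULL);
+ *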
+ */ +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_TIMEOUT_OFST 12 +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_TIMEOUT_LEN 4 +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED0_OFST 16 +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED0_LEN 4 +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_OFST 20 +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_LEN 4 + +/* MC_CMD_PTP_OUT msgresponse */ +#define MC_CMD_PTP_OUT_LEN 0 + +/* MC_CMD_PTP_OUT_TRANSMIT msgresponse */ +#define MC_CMD_PTP_OUT_TRANSMIT_LEN 8 +/* Value of seconds timestamp */ +#define MC_CMD_PTP_OUT_TRANSMIT_SECONDS_OFST 0 +#define MC_CMD_PTP_OUT_TRANSMIT_SECONDS_LEN 4 +/* Timestamp major value */ +#define MC_CMD_PTP_OUT_TRANSMIT_MAJOR_OFST 0 +#define MC_CMD_PTP_OUT_TRANSMIT_MAJOR_LEN 4 +/* Value of nanoseconds timestamp */ +#define MC_CMD_PTP_OUT_TRANSMIT_NANOSECONDS_OFST 4 +#define MC_CMD_PTP_OUT_TRANSMIT_NANOSECONDS_LEN 4 +/* Timestamp minor value */ +#define MC_CMD_PTP_OUT_TRANSMIT_MINOR_OFST 4 +#define MC_CMD_PTP_OUT_TRANSMIT_MINOR_LEN 4 + +/* MC_CMD_PTP_OUT_TIME_EVENT_SUBSCRIBE msgresponse */ +#define MC_CMD_PTP_OUT_TIME_EVENT_SUBSCRIBE_LEN 0 + +/* MC_CMD_PTP_OUT_TIME_EVENT_UNSUBSCRIBE msgresponse */ +#define MC_CMD_PTP_OUT_TIME_EVENT_UNSUBSCRIBE_LEN 0 + +/* MC_CMD_PTP_OUT_READ_NIC_TIME msgresponse */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_LEN 8 +/* Value of seconds timestamp */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_SECONDS_OFST 0 +#define MC_CMD_PTP_OUT_READ_NIC_TIME_SECONDS_LEN 4 +/* Timestamp major value */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_MAJOR_OFST 0 +#define MC_CMD_PTP_OUT_READ_NIC_TIME_MAJOR_LEN 4 +/* Value of nanoseconds timestamp */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_NANOSECONDS_OFST 4 +#define MC_CMD_PTP_OUT_READ_NIC_TIME_NANOSECONDS_LEN 4 +/* Timestamp minor value */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_MINOR_OFST 4 +#define MC_CMD_PTP_OUT_READ_NIC_TIME_MINOR_LEN 4 + +/* MC_CMD_PTP_OUT_READ_NIC_TIME_V2 msgresponse */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_LEN 12 +/* Value of seconds timestamp */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_SECONDS_OFST 0 +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_SECONDS_LEN 4 +/* Timestamp major value */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_OFST 0 +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_LEN 4 +/* Value of nanoseconds timestamp */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_NANOSECONDS_OFST 4 +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_NANOSECONDS_LEN 4 +/* Timestamp minor value */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MINOR_OFST 4 +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MINOR_LEN 4 +/* Upper 32bits of major timestamp value */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_HI_OFST 8 +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_HI_LEN 4 + +/* MC_CMD_PTP_OUT_STATUS msgresponse */ +#define MC_CMD_PTP_OUT_STATUS_LEN 64 +/* Frequency of NIC's hardware clock */ +#define MC_CMD_PTP_OUT_STATUS_CLOCK_FREQ_OFST 0 +#define MC_CMD_PTP_OUT_STATUS_CLOCK_FREQ_LEN 4 +/* Number of packets transmitted and timestamped */ +#define MC_CMD_PTP_OUT_STATUS_STATS_TX_OFST 4 +#define MC_CMD_PTP_OUT_STATUS_STATS_TX_LEN 4 +/* Number of packets received and timestamped */ +#define MC_CMD_PTP_OUT_STATUS_STATS_RX_OFST 8 +#define MC_CMD_PTP_OUT_STATUS_STATS_RX_LEN 4 +/* Number of packets timestamped by the FPGA */ +#define MC_CMD_PTP_OUT_STATUS_STATS_TS_OFST 12 +#define MC_CMD_PTP_OUT_STATUS_STATS_TS_LEN 4 +/* Number of packets filter matched */ +#define MC_CMD_PTP_OUT_STATUS_STATS_FM_OFST 16 +#define MC_CMD_PTP_OUT_STATUS_STATS_FM_LEN 4 +/* Number of packets not filter matched */ +#define 
MC_CMD_PTP_OUT_STATUS_STATS_NFM_OFST 20 +#define MC_CMD_PTP_OUT_STATUS_STATS_NFM_LEN 4 +/* Number of PPS overflows (noise on input?) */ +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFLOW_OFST 24 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFLOW_LEN 4 +/* Number of PPS bad periods */ +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_BAD_OFST 28 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_BAD_LEN 4 +/* Minimum period of PPS pulse in nanoseconds */ +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MIN_OFST 32 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MIN_LEN 4 +/* Maximum period of PPS pulse in nanoseconds */ +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MAX_OFST 36 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MAX_LEN 4 +/* Last period of PPS pulse in nanoseconds */ +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_LAST_OFST 40 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_LAST_LEN 4 +/* Mean period of PPS pulse in nanoseconds */ +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MEAN_OFST 44 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MEAN_LEN 4 +/* Minimum offset of PPS pulse in nanoseconds (signed) */ +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MIN_OFST 48 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MIN_LEN 4 +/* Maximum offset of PPS pulse in nanoseconds (signed) */ +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MAX_OFST 52 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MAX_LEN 4 +/* Last offset of PPS pulse in nanoseconds (signed) */ +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_LAST_OFST 56 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_LAST_LEN 4 +/* Mean offset of PPS pulse in nanoseconds (signed) */ +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MEAN_OFST 60 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MEAN_LEN 4 + +/* MC_CMD_PTP_OUT_SYNCHRONIZE msgresponse */ +#define MC_CMD_PTP_OUT_SYNCHRONIZE_LENMIN 20 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_LENMAX 240 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_LENMAX_MCDI2 1020 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_LEN(num) (0+20*(num)) +#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_NUM(len) (((len)-0)/20) +/* A set of host and NIC times */ +#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_OFST 0 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN 20 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MINNUM 1 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM 12 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM_MCDI2 51 +/* Host time immediately before NIC's hardware clock read */ +#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_OFST 0 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_LEN 4 +/* Value of seconds timestamp */ +#define MC_CMD_PTP_OUT_SYNCHRONIZE_SECONDS_OFST 4 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_SECONDS_LEN 4 +/* Timestamp major value */ +#define MC_CMD_PTP_OUT_SYNCHRONIZE_MAJOR_OFST 4 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_MAJOR_LEN 4 +/* Value of nanoseconds timestamp */ +#define MC_CMD_PTP_OUT_SYNCHRONIZE_NANOSECONDS_OFST 8 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_NANOSECONDS_LEN 4 +/* Timestamp minor value */ +#define MC_CMD_PTP_OUT_SYNCHRONIZE_MINOR_OFST 8 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_MINOR_LEN 4 +/* Host time immediately after NIC's hardware clock read */ +#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_OFST 12 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_LEN 4 +/* Number of nanoseconds waited after reading NIC's hardware clock */ +#define MC_CMD_PTP_OUT_SYNCHRONIZE_WAITNS_OFST 16 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_WAITNS_LEN 4 + +/* MC_CMD_PTP_OUT_MANFTEST_BASIC msgresponse */ +#define MC_CMD_PTP_OUT_MANFTEST_BASIC_LEN 8 +/* Results of testing */ +#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_RESULT_OFST 0 
+#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_RESULT_LEN 4 +/* enum: Successful test */ +#define MC_CMD_PTP_MANF_SUCCESS 0x0 +/* enum: FPGA load failed */ +#define MC_CMD_PTP_MANF_FPGA_LOAD 0x1 +/* enum: FPGA version invalid */ +#define MC_CMD_PTP_MANF_FPGA_VERSION 0x2 +/* enum: FPGA registers incorrect */ +#define MC_CMD_PTP_MANF_FPGA_REGISTERS 0x3 +/* enum: Oscillator possibly not working? */ +#define MC_CMD_PTP_MANF_OSCILLATOR 0x4 +/* enum: Timestamps not increasing */ +#define MC_CMD_PTP_MANF_TIMESTAMPS 0x5 +/* enum: Mismatched packet count */ +#define MC_CMD_PTP_MANF_PACKET_COUNT 0x6 +/* enum: Mismatched packet count (Siena filter and FPGA) */ +#define MC_CMD_PTP_MANF_FILTER_COUNT 0x7 +/* enum: Not enough packets to perform timestamp check */ +#define MC_CMD_PTP_MANF_PACKET_ENOUGH 0x8 +/* enum: Timestamp trigger GPIO not working */ +#define MC_CMD_PTP_MANF_GPIO_TRIGGER 0x9 +/* enum: Insufficient PPS events to perform checks */ +#define MC_CMD_PTP_MANF_PPS_ENOUGH 0xa +/* enum: PPS time event period not sufficiently close to 1s. */ +#define MC_CMD_PTP_MANF_PPS_PERIOD 0xb +/* enum: PPS time event ns reading not sufficiently close to zero. */ +#define MC_CMD_PTP_MANF_PPS_NS 0xc +/* enum: PTP peripheral registers incorrect */ +#define MC_CMD_PTP_MANF_REGISTERS 0xd +/* enum: Failed to read time from PTP peripheral */ +#define MC_CMD_PTP_MANF_CLOCK_READ 0xe +/* Presence of external oscillator */ +#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_EXTOSC_OFST 4 +#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_EXTOSC_LEN 4 + +/* MC_CMD_PTP_OUT_MANFTEST_PACKET msgresponse */ +#define MC_CMD_PTP_OUT_MANFTEST_PACKET_LEN 12 +/* Results of testing */ +#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_RESULT_OFST 0 +#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_RESULT_LEN 4 +/* Number of packets received by FPGA */ +#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FPGACOUNT_OFST 4 +#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FPGACOUNT_LEN 4 +/* Number of packets received by Siena filters */ +#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FILTERCOUNT_OFST 8 +#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FILTERCOUNT_LEN 4 + +/* MC_CMD_PTP_OUT_FPGAREAD msgresponse */ +#define MC_CMD_PTP_OUT_FPGAREAD_LENMIN 1 +#define MC_CMD_PTP_OUT_FPGAREAD_LENMAX 252 +#define MC_CMD_PTP_OUT_FPGAREAD_LENMAX_MCDI2 1020 +#define MC_CMD_PTP_OUT_FPGAREAD_LEN(num) (0+1*(num)) +#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_NUM(len) (((len)-0)/1) +#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_OFST 0 +#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_LEN 1 +#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MINNUM 1 +#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MAXNUM 252 +#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MAXNUM_MCDI2 1020 + +/* MC_CMD_PTP_OUT_GET_TIME_FORMAT msgresponse */ +#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_LEN 4 +/* Time format required/used by/for this NIC. Applies to all PTP MCDI + * operations that pass times between the host and firmware. If this operation + * is not supported (older firmware) a format of seconds and nanoseconds should + * be assumed. Note this enum is deprecated. Do not add to it - use the + * TIME_FORMAT field in MC_CMD_PTP_OUT_GET_ATTRIBUTES instead.
+ */ +#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_FORMAT_OFST 0 +#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_FORMAT_LEN 4 +/* enum: Times are in seconds and nanoseconds */ +#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_NANOSECONDS 0x0 +/* enum: Major register has units of 16 seconds per tick, minor 8 ns per tick */ +#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_16SECONDS_8NANOSECONDS 0x1 +/* enum: Major register has units of seconds, minor 2^-27s per tick */ +#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_27FRACTION 0x2 + +/* MC_CMD_PTP_OUT_GET_ATTRIBUTES msgresponse */ +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN 24 +/* Time format required/used by/for this NIC. Applies to all PTP MCDI + * operations that pass times between the host and firmware. If this operation + * is not supported (older firmware) a format of seconds and nanoseconds should + * be assumed. + */ +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT_OFST 0 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT_LEN 4 +/* enum: Times are in seconds and nanoseconds */ +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS 0x0 +/* enum: Major register has units of 16 seconds per tick, minor 8 ns per tick */ +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_16SECONDS_8NANOSECONDS 0x1 +/* enum: Major register has units of seconds, minor 2^-27s per tick */ +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION 0x2 +/* enum: Major register units are seconds, minor units are quarter nanoseconds + */ +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_QTR_NANOSECONDS 0x3 +/* Minimum acceptable value for a corrected synchronization timeset. When + * comparing host and NIC clock times, the MC returns a set of samples that + * contain the host start and end time, the MC time when the host start was + * detected and the time the MC waited between reading the time and detecting + * the host end. The corrected sync window is the difference between the host + * end and start times minus the time that the MC waited for host end. + */ +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN_OFST 4 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN_LEN 4 +/* Various PTP capabilities */ +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_OFST 8 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_LEN 4 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_OFST 8 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_LBN 0 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_WIDTH 1 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RX_TSTAMP_OOB_OFST 8 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RX_TSTAMP_OOB_LBN 1 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RX_TSTAMP_OOB_WIDTH 1 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_64BIT_SECONDS_OFST 8 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_64BIT_SECONDS_LBN 2 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_64BIT_SECONDS_WIDTH 1 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_FP44_FREQ_ADJ_OFST 8 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_FP44_FREQ_ADJ_LBN 3 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_FP44_FREQ_ADJ_WIDTH 1 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED0_OFST 12 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED0_LEN 4 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED1_OFST 16 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED1_LEN 4 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED2_OFST 20 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED2_LEN 4 + +/* MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2 msgresponse */ +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_LEN 40 +/* Time format required/used by/for this NIC.
Applies to all PTP MCDI + * operations that pass times between the host and firmware. If this operation + * is not supported (older firmware) a format of seconds and nanoseconds should + * be assumed. + */ +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_TIME_FORMAT_OFST 0 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_TIME_FORMAT_LEN 4 +/* enum: Times are in seconds and nanoseconds */ +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_SECONDS_NANOSECONDS 0x0 +/* enum: Major register has units of 16 seconds per tick, minor 8 ns per tick */ +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_16SECONDS_8NANOSECONDS 0x1 +/* enum: Major register has units of seconds, minor 2^-27s per tick */ +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_SECONDS_27FRACTION 0x2 +/* enum: Major register units are seconds, minor units are quarter nanoseconds + */ +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_SECONDS_QTR_NANOSECONDS 0x3 +/* Minimum acceptable value for a corrected synchronization timeset. When + * comparing host and NIC clock times, the MC returns a set of samples that + * contain the host start and end time, the MC time when the host start was + * detected and the time the MC waited between reading the time and detecting + * the host end. The corrected sync window is the difference between the host + * end and start times minus the time that the MC waited for host end. + */ +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_SYNC_WINDOW_MIN_OFST 4 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_SYNC_WINDOW_MIN_LEN 4 +/* Various PTP capabilities */ +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_CAPABILITIES_OFST 8 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_CAPABILITIES_LEN 4 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_REPORT_SYNC_STATUS_OFST 8 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_REPORT_SYNC_STATUS_LBN 0 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_REPORT_SYNC_STATUS_WIDTH 1 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_RX_TSTAMP_OOB_OFST 8 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_RX_TSTAMP_OOB_LBN 1 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_RX_TSTAMP_OOB_WIDTH 1 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_64BIT_SECONDS_OFST 8 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_64BIT_SECONDS_LBN 2 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_64BIT_SECONDS_WIDTH 1 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FP44_FREQ_ADJ_OFST 8 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FP44_FREQ_ADJ_LBN 3 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FP44_FREQ_ADJ_WIDTH 1 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_RESERVED0_OFST 12 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_RESERVED0_LEN 4 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_RESERVED1_OFST 16 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_RESERVED1_LEN 4 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_RESERVED2_OFST 20 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_RESERVED2_LEN 4 +/* Minimum supported value for the FREQ field in + * MC_CMD_PTP/MC_CMD_PTP_IN_ADJUST and + * MC_CMD_PTP/MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST message requests. If this message + * response is not supported a value of -0.1 ns should be assumed, which is + * equivalent to a -10% adjustment.
+ */ +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MIN_OFST 24 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MIN_LEN 8 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MIN_LO_OFST 24 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MIN_LO_LEN 4 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MIN_LO_LBN 192 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MIN_LO_WIDTH 32 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MIN_HI_OFST 28 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MIN_HI_LEN 4 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MIN_HI_LBN 224 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MIN_HI_WIDTH 32 +/* Maximum supported value for the FREQ field in + * MC_CMD_PTP/MC_CMD_PTP_IN_ADJUST and + * MC_CMD_PTP/MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST message requests. If this message + * response is not supported a value of 0.1 ns should be assumed, which is + * equivalent to a +10% adjustment. + */ +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MAX_OFST 32 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MAX_LEN 8 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MAX_LO_OFST 32 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MAX_LO_LEN 4 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MAX_LO_LBN 256 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MAX_LO_WIDTH 32 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MAX_HI_OFST 36 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MAX_HI_LEN 4 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MAX_HI_LBN 288 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_V2_FREQ_ADJ_MAX_HI_WIDTH 32 + +/* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS msgresponse */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN 16 +/* Uncorrected error on PTP transmit timestamps in NIC clock format */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT_OFST 0 +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT_LEN 4 +/* Uncorrected error on PTP receive timestamps in NIC clock format */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE_OFST 4 +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE_LEN 4 +/* Uncorrected error on PPS output in NIC clock format */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT_OFST 8 +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT_LEN 4 +/* Uncorrected error on PPS input in NIC clock format */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN_OFST 12 +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN_LEN 4 + +/* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2 msgresponse */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_LEN 24 +/* Uncorrected error on PTP transmit timestamps in NIC clock format */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_TX_OFST 0 +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_TX_LEN 4 +/* Uncorrected error on PTP receive timestamps in NIC clock format */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_RX_OFST 4 +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_RX_LEN 4 +/* Uncorrected error on PPS output in NIC clock format */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_OUT_OFST 8 +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_OUT_LEN 4 +/* Uncorrected error on PPS input in NIC clock format */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_IN_OFST 12 +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_IN_LEN 4 +/* Uncorrected error on non-PTP transmit timestamps in NIC clock format */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_TX_OFST 
16 +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_TX_LEN 4 +/* Uncorrected error on non-PTP receive timestamps in NIC clock format */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_RX_OFST 20 +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_RX_LEN 4 + +/* MC_CMD_PTP_OUT_MANFTEST_PPS msgresponse */ +#define MC_CMD_PTP_OUT_MANFTEST_PPS_LEN 4 +/* Results of testing */ +#define MC_CMD_PTP_OUT_MANFTEST_PPS_TEST_RESULT_OFST 0 +#define MC_CMD_PTP_OUT_MANFTEST_PPS_TEST_RESULT_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_PTP_OUT_MANFTEST_BASIC/TEST_RESULT */ + +/* MC_CMD_PTP_OUT_SET_SYNC_STATUS msgresponse */ +#define MC_CMD_PTP_OUT_SET_SYNC_STATUS_LEN 0 + + +/***********************************/ +/* MC_CMD_CSR_READ32 + * Read 32bit words from the indirect memory map. + * + * Note - this command originally belonged to the INSECURE category. But access + * is required to specific registers for customer diagnostics. The command + * handler has additional checks to reject insecure calls. + */ +#define MC_CMD_CSR_READ32 0xc +#undef MC_CMD_0xc_PRIVILEGE_CTG + +#define MC_CMD_0xc_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_CSR_READ32_IN msgrequest */ +#define MC_CMD_CSR_READ32_IN_LEN 12 +/* Address */ +#define MC_CMD_CSR_READ32_IN_ADDR_OFST 0 +#define MC_CMD_CSR_READ32_IN_ADDR_LEN 4 +#define MC_CMD_CSR_READ32_IN_STEP_OFST 4 +#define MC_CMD_CSR_READ32_IN_STEP_LEN 4 +#define MC_CMD_CSR_READ32_IN_NUMWORDS_OFST 8 +#define MC_CMD_CSR_READ32_IN_NUMWORDS_LEN 4 + +/* MC_CMD_CSR_READ32_OUT msgresponse */ +#define MC_CMD_CSR_READ32_OUT_LENMIN 4 +#define MC_CMD_CSR_READ32_OUT_LENMAX 252 +#define MC_CMD_CSR_READ32_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_CSR_READ32_OUT_LEN(num) (0+4*(num)) +#define MC_CMD_CSR_READ32_OUT_BUFFER_NUM(len) (((len)-0)/4) +/* The last dword is the status, not a value read */ +#define MC_CMD_CSR_READ32_OUT_BUFFER_OFST 0 +#define MC_CMD_CSR_READ32_OUT_BUFFER_LEN 4 +#define MC_CMD_CSR_READ32_OUT_BUFFER_MINNUM 1 +#define MC_CMD_CSR_READ32_OUT_BUFFER_MAXNUM 63 +#define MC_CMD_CSR_READ32_OUT_BUFFER_MAXNUM_MCDI2 255 + + +/***********************************/ +/* MC_CMD_CSR_WRITE32 + * Write 32bit dwords to the indirect memory map. + */ +#define MC_CMD_CSR_WRITE32 0xd +#undef MC_CMD_0xd_PRIVILEGE_CTG + +#define MC_CMD_0xd_PRIVILEGE_CTG SRIOV_CTG_INSECURE + +/* MC_CMD_CSR_WRITE32_IN msgrequest */ +#define MC_CMD_CSR_WRITE32_IN_LENMIN 12 +#define MC_CMD_CSR_WRITE32_IN_LENMAX 252 +#define MC_CMD_CSR_WRITE32_IN_LENMAX_MCDI2 1020 +#define MC_CMD_CSR_WRITE32_IN_LEN(num) (8+4*(num)) +#define MC_CMD_CSR_WRITE32_IN_BUFFER_NUM(len) (((len)-8)/4) +/* Address */ +#define MC_CMD_CSR_WRITE32_IN_ADDR_OFST 0 +#define MC_CMD_CSR_WRITE32_IN_ADDR_LEN 4 +#define MC_CMD_CSR_WRITE32_IN_STEP_OFST 4 +#define MC_CMD_CSR_WRITE32_IN_STEP_LEN 4 +#define MC_CMD_CSR_WRITE32_IN_BUFFER_OFST 8 +#define MC_CMD_CSR_WRITE32_IN_BUFFER_LEN 4 +#define MC_CMD_CSR_WRITE32_IN_BUFFER_MINNUM 1 +#define MC_CMD_CSR_WRITE32_IN_BUFFER_MAXNUM 61 +#define MC_CMD_CSR_WRITE32_IN_BUFFER_MAXNUM_MCDI2 253 + +/* MC_CMD_CSR_WRITE32_OUT msgresponse */ +#define MC_CMD_CSR_WRITE32_OUT_LEN 4 +#define MC_CMD_CSR_WRITE32_OUT_STATUS_OFST 0 +#define MC_CMD_CSR_WRITE32_OUT_STATUS_LEN 4 + + +/***********************************/ +/* MC_CMD_HP + * These commands are used for HP-related features. They are grouped under one + * MCDI command to avoid creating too many MCDI commands.
+ */ +#define MC_CMD_HP 0x54 +#undef MC_CMD_0x54_PRIVILEGE_CTG + +#define MC_CMD_0x54_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND + +/* MC_CMD_HP_IN msgrequest */ +#define MC_CMD_HP_IN_LEN 16 +/* HP OCSD sub-command. When address is not NULL, request activation of OCSD at + * the specified address with the specified interval. When address is NULL, + * INTERVAL is interpreted as a command: 0: stop OCSD / 1: Report OCSD current + * state / 2: (debug) Show temperature reported by one of the supported + * sensors. + */ +#define MC_CMD_HP_IN_SUBCMD_OFST 0 +#define MC_CMD_HP_IN_SUBCMD_LEN 4 +/* enum: OCSD (Option Card Sensor Data) sub-command. */ +#define MC_CMD_HP_IN_OCSD_SUBCMD 0x0 +/* enum: Last known valid HP sub-command. */ +#define MC_CMD_HP_IN_LAST_SUBCMD 0x0 +/* The address of the array of sensor fields. (Or NULL to use a sub-command.) + */ +#define MC_CMD_HP_IN_OCSD_ADDR_OFST 4 +#define MC_CMD_HP_IN_OCSD_ADDR_LEN 8 +#define MC_CMD_HP_IN_OCSD_ADDR_LO_OFST 4 +#define MC_CMD_HP_IN_OCSD_ADDR_LO_LEN 4 +#define MC_CMD_HP_IN_OCSD_ADDR_LO_LBN 32 +#define MC_CMD_HP_IN_OCSD_ADDR_LO_WIDTH 32 +#define MC_CMD_HP_IN_OCSD_ADDR_HI_OFST 8 +#define MC_CMD_HP_IN_OCSD_ADDR_HI_LEN 4 +#define MC_CMD_HP_IN_OCSD_ADDR_HI_LBN 64 +#define MC_CMD_HP_IN_OCSD_ADDR_HI_WIDTH 32 +/* The requested update interval, in seconds. (Or the sub-command if ADDR is + * NULL.) + */ +#define MC_CMD_HP_IN_OCSD_INTERVAL_OFST 12 +#define MC_CMD_HP_IN_OCSD_INTERVAL_LEN 4 + +/* MC_CMD_HP_OUT msgresponse */ +#define MC_CMD_HP_OUT_LEN 4 +#define MC_CMD_HP_OUT_OCSD_STATUS_OFST 0 +#define MC_CMD_HP_OUT_OCSD_STATUS_LEN 4 +/* enum: OCSD stopped for this card. */ +#define MC_CMD_HP_OUT_OCSD_STOPPED 0x1 +/* enum: OCSD was successfully started with the address provided. */ +#define MC_CMD_HP_OUT_OCSD_STARTED 0x2 +/* enum: OCSD was already started for this card. */ +#define MC_CMD_HP_OUT_OCSD_ALREADY_STARTED 0x3 + + +/***********************************/ +/* MC_CMD_STACKINFO + * Get stack information. + */ +#define MC_CMD_STACKINFO 0xf +#undef MC_CMD_0xf_PRIVILEGE_CTG + +#define MC_CMD_0xf_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_STACKINFO_IN msgrequest */ +#define MC_CMD_STACKINFO_IN_LEN 0 + +/* MC_CMD_STACKINFO_OUT msgresponse */ +#define MC_CMD_STACKINFO_OUT_LENMIN 12 +#define MC_CMD_STACKINFO_OUT_LENMAX 252 +#define MC_CMD_STACKINFO_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_STACKINFO_OUT_LEN(num) (0+12*(num)) +#define MC_CMD_STACKINFO_OUT_THREAD_INFO_NUM(len) (((len)-0)/12) +/* (thread ptr, stack size, free space) for each thread in system */ +#define MC_CMD_STACKINFO_OUT_THREAD_INFO_OFST 0 +#define MC_CMD_STACKINFO_OUT_THREAD_INFO_LEN 12 +#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MINNUM 1 +#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MAXNUM 21 +#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MAXNUM_MCDI2 85 + + +/***********************************/ +/* MC_CMD_MDIO_READ + * MDIO register read. + */ +#define MC_CMD_MDIO_READ 0x10 +#undef MC_CMD_0x10_PRIVILEGE_CTG + +#define MC_CMD_0x10_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_MDIO_READ_IN msgrequest */ +#define MC_CMD_MDIO_READ_IN_LEN 16 +/* Bus number; there are two MDIO buses: one for the internal PHY, and one for + * external devices. + */ +#define MC_CMD_MDIO_READ_IN_BUS_OFST 0 +#define MC_CMD_MDIO_READ_IN_BUS_LEN 4 +/* enum: Internal. */ +#define MC_CMD_MDIO_BUS_INTERNAL 0x0 +/* enum: External. */ +#define MC_CMD_MDIO_BUS_EXTERNAL 0x1 +/* Port address */ +#define MC_CMD_MDIO_READ_IN_PRTAD_OFST 4 +#define MC_CMD_MDIO_READ_IN_PRTAD_LEN 4 +/* Device Address or clause 22.
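+ *
+ * A minimal clause-45 read sketch for the whole MC_CMD_MDIO_READ request,
+ * assuming the MCDI helpers declared in mcdi.h (efx, rc, outlen and value
+ * are illustrative locals; the BUS/PRTAD/DEVAD/ADDR values are examples
+ * only):
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_MDIO_READ_IN_LEN);
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_MDIO_READ_OUT_LEN);
+ *
+ *	MCDI_SET_DWORD(inbuf, MDIO_READ_IN_BUS, MC_CMD_MDIO_BUS_EXTERNAL);
+ *	MCDI_SET_DWORD(inbuf, MDIO_READ_IN_PRTAD, 0);
+ *	MCDI_SET_DWORD(inbuf, MDIO_READ_IN_DEVAD, 1);
+ *	MCDI_SET_DWORD(inbuf, MDIO_READ_IN_ADDR, 0);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_READ, inbuf, sizeof(inbuf),
+ *			  outbuf, sizeof(outbuf), &outlen);
+ *	if (!rc && MCDI_DWORD(outbuf, MDIO_READ_OUT_STATUS) ==
+ *		   MC_CMD_MDIO_STATUS_GOOD)
+ *		value = MCDI_DWORD(outbuf, MDIO_READ_OUT_VALUE);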
*/ +#define MC_CMD_MDIO_READ_IN_DEVAD_OFST 8 +#define MC_CMD_MDIO_READ_IN_DEVAD_LEN 4 +/* enum: By default all the MCDI MDIO operations perform clause45 mode. If you + * want to use clause22 then set DEVAD = MC_CMD_MDIO_CLAUSE22. + */ +#define MC_CMD_MDIO_CLAUSE22 0x20 +/* Address */ +#define MC_CMD_MDIO_READ_IN_ADDR_OFST 12 +#define MC_CMD_MDIO_READ_IN_ADDR_LEN 4 + +/* MC_CMD_MDIO_READ_OUT msgresponse */ +#define MC_CMD_MDIO_READ_OUT_LEN 8 +/* Value */ +#define MC_CMD_MDIO_READ_OUT_VALUE_OFST 0 +#define MC_CMD_MDIO_READ_OUT_VALUE_LEN 4 +/* Status; the MDIO commands return the raw status bits from the MDIO block. A + * "good" transaction should have the DONE bit set and all other bits clear. + */ +#define MC_CMD_MDIO_READ_OUT_STATUS_OFST 4 +#define MC_CMD_MDIO_READ_OUT_STATUS_LEN 4 +/* enum: Good. */ +#define MC_CMD_MDIO_STATUS_GOOD 0x8 + + +/***********************************/ +/* MC_CMD_MDIO_WRITE + * MDIO register write. + */ +#define MC_CMD_MDIO_WRITE 0x11 +#undef MC_CMD_0x11_PRIVILEGE_CTG + +#define MC_CMD_0x11_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_MDIO_WRITE_IN msgrequest */ +#define MC_CMD_MDIO_WRITE_IN_LEN 20 +/* Bus number; there are two MDIO buses: one for the internal PHY, and one for + * external devices. + */ +#define MC_CMD_MDIO_WRITE_IN_BUS_OFST 0 +#define MC_CMD_MDIO_WRITE_IN_BUS_LEN 4 +/* enum: Internal. */ +/* MC_CMD_MDIO_BUS_INTERNAL 0x0 */ +/* enum: External. */ +/* MC_CMD_MDIO_BUS_EXTERNAL 0x1 */ +/* Port address */ +#define MC_CMD_MDIO_WRITE_IN_PRTAD_OFST 4 +#define MC_CMD_MDIO_WRITE_IN_PRTAD_LEN 4 +/* Device Address or clause 22. */ +#define MC_CMD_MDIO_WRITE_IN_DEVAD_OFST 8 +#define MC_CMD_MDIO_WRITE_IN_DEVAD_LEN 4 +/* enum: By default all the MCDI MDIO operations perform clause45 mode. If you + * want to use clause22 then set DEVAD = MC_CMD_MDIO_CLAUSE22. + */ +/* MC_CMD_MDIO_CLAUSE22 0x20 */ +/* Address */ +#define MC_CMD_MDIO_WRITE_IN_ADDR_OFST 12 +#define MC_CMD_MDIO_WRITE_IN_ADDR_LEN 4 +/* Value */ +#define MC_CMD_MDIO_WRITE_IN_VALUE_OFST 16 +#define MC_CMD_MDIO_WRITE_IN_VALUE_LEN 4 + +/* MC_CMD_MDIO_WRITE_OUT msgresponse */ +#define MC_CMD_MDIO_WRITE_OUT_LEN 4 +/* Status; the MDIO commands return the raw status bits from the MDIO block. A + * "good" transaction should have the DONE bit set and all other bits clear. + */ +#define MC_CMD_MDIO_WRITE_OUT_STATUS_OFST 0 +#define MC_CMD_MDIO_WRITE_OUT_STATUS_LEN 4 +/* enum: Good. */ +/* MC_CMD_MDIO_STATUS_GOOD 0x8 */ + + +/***********************************/ +/* MC_CMD_DBI_WRITE + * Write DBI register(s). + */ +#define MC_CMD_DBI_WRITE 0x12 +#undef MC_CMD_0x12_PRIVILEGE_CTG + +#define MC_CMD_0x12_PRIVILEGE_CTG SRIOV_CTG_INSECURE + +/* MC_CMD_DBI_WRITE_IN msgrequest */ +#define MC_CMD_DBI_WRITE_IN_LENMIN 12 +#define MC_CMD_DBI_WRITE_IN_LENMAX 252 +#define MC_CMD_DBI_WRITE_IN_LENMAX_MCDI2 1020 +#define MC_CMD_DBI_WRITE_IN_LEN(num) (0+12*(num)) +#define MC_CMD_DBI_WRITE_IN_DBIWROP_NUM(len) (((len)-0)/12) +/* Each write op consists of an address (offset 0), byte enable/VF/CS2 (offset + * 32) and value (offset 64). See MC_CMD_DBIWROP_TYPEDEF.
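+ *
+ * The request length scales with the number of ops: for example, two write
+ * ops give a payload of MC_CMD_DBI_WRITE_IN_LEN(2) == 24 bytes, and the
+ * minimum request (one op) is MC_CMD_DBI_WRITE_IN_LENMIN == 12 bytes.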
+ */ +#define MC_CMD_DBI_WRITE_IN_DBIWROP_OFST 0 +#define MC_CMD_DBI_WRITE_IN_DBIWROP_LEN 12 +#define MC_CMD_DBI_WRITE_IN_DBIWROP_MINNUM 1 +#define MC_CMD_DBI_WRITE_IN_DBIWROP_MAXNUM 21 +#define MC_CMD_DBI_WRITE_IN_DBIWROP_MAXNUM_MCDI2 85 + +/* MC_CMD_DBI_WRITE_OUT msgresponse */ +#define MC_CMD_DBI_WRITE_OUT_LEN 0 + +/* MC_CMD_DBIWROP_TYPEDEF structuredef */ +#define MC_CMD_DBIWROP_TYPEDEF_LEN 12 +#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_OFST 0 +#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LEN 4 +#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LBN 0 +#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_WIDTH 32 +#define MC_CMD_DBIWROP_TYPEDEF_PARMS_OFST 4 +#define MC_CMD_DBIWROP_TYPEDEF_PARMS_LEN 4 +#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_OFST 4 +#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_LBN 16 +#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_WIDTH 16 +#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_OFST 4 +#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_LBN 15 +#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_WIDTH 1 +#define MC_CMD_DBIWROP_TYPEDEF_CS2_OFST 4 +#define MC_CMD_DBIWROP_TYPEDEF_CS2_LBN 14 +#define MC_CMD_DBIWROP_TYPEDEF_CS2_WIDTH 1 +#define MC_CMD_DBIWROP_TYPEDEF_PARMS_LBN 32 +#define MC_CMD_DBIWROP_TYPEDEF_PARMS_WIDTH 32 +#define MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST 8 +#define MC_CMD_DBIWROP_TYPEDEF_VALUE_LEN 4 +#define MC_CMD_DBIWROP_TYPEDEF_VALUE_LBN 64 +#define MC_CMD_DBIWROP_TYPEDEF_VALUE_WIDTH 32 + + +/***********************************/ +/* MC_CMD_PORT_READ32 + * Read a 32-bit register from the indirect port register map. The port to + * access is implied by the Shared memory channel used. + */ +#define MC_CMD_PORT_READ32 0x14 + +/* MC_CMD_PORT_READ32_IN msgrequest */ +#define MC_CMD_PORT_READ32_IN_LEN 4 +/* Address */ +#define MC_CMD_PORT_READ32_IN_ADDR_OFST 0 +#define MC_CMD_PORT_READ32_IN_ADDR_LEN 4 + +/* MC_CMD_PORT_READ32_OUT msgresponse */ +#define MC_CMD_PORT_READ32_OUT_LEN 8 +/* Value */ +#define MC_CMD_PORT_READ32_OUT_VALUE_OFST 0 +#define MC_CMD_PORT_READ32_OUT_VALUE_LEN 4 +/* Status */ +#define MC_CMD_PORT_READ32_OUT_STATUS_OFST 4 +#define MC_CMD_PORT_READ32_OUT_STATUS_LEN 4 + + +/***********************************/ +/* MC_CMD_PORT_WRITE32 + * Write a 32-bit register to the indirect port register map. The port to + * access is implied by the Shared memory channel used. + */ +#define MC_CMD_PORT_WRITE32 0x15 + +/* MC_CMD_PORT_WRITE32_IN msgrequest */ +#define MC_CMD_PORT_WRITE32_IN_LEN 8 +/* Address */ +#define MC_CMD_PORT_WRITE32_IN_ADDR_OFST 0 +#define MC_CMD_PORT_WRITE32_IN_ADDR_LEN 4 +/* Value */ +#define MC_CMD_PORT_WRITE32_IN_VALUE_OFST 4 +#define MC_CMD_PORT_WRITE32_IN_VALUE_LEN 4 + +/* MC_CMD_PORT_WRITE32_OUT msgresponse */ +#define MC_CMD_PORT_WRITE32_OUT_LEN 4 +/* Status */ +#define MC_CMD_PORT_WRITE32_OUT_STATUS_OFST 0 +#define MC_CMD_PORT_WRITE32_OUT_STATUS_LEN 4 + + +/***********************************/ +/* MC_CMD_PORT_READ128 + * Read a 128-bit register from the indirect port register map. The port to + * access is implied by the Shared memory channel used. 
+ */ +#define MC_CMD_PORT_READ128 0x16 + +/* MC_CMD_PORT_READ128_IN msgrequest */ +#define MC_CMD_PORT_READ128_IN_LEN 4 +/* Address */ +#define MC_CMD_PORT_READ128_IN_ADDR_OFST 0 +#define MC_CMD_PORT_READ128_IN_ADDR_LEN 4 + +/* MC_CMD_PORT_READ128_OUT msgresponse */ +#define MC_CMD_PORT_READ128_OUT_LEN 20 +/* Value */ +#define MC_CMD_PORT_READ128_OUT_VALUE_OFST 0 +#define MC_CMD_PORT_READ128_OUT_VALUE_LEN 16 +/* Status */ +#define MC_CMD_PORT_READ128_OUT_STATUS_OFST 16 +#define MC_CMD_PORT_READ128_OUT_STATUS_LEN 4 + + +/***********************************/ +/* MC_CMD_PORT_WRITE128 + * Write a 128-bit register to the indirect port register map. The port to + * access is implied by the Shared memory channel used. + */ +#define MC_CMD_PORT_WRITE128 0x17 + +/* MC_CMD_PORT_WRITE128_IN msgrequest */ +#define MC_CMD_PORT_WRITE128_IN_LEN 20 +/* Address */ +#define MC_CMD_PORT_WRITE128_IN_ADDR_OFST 0 +#define MC_CMD_PORT_WRITE128_IN_ADDR_LEN 4 +/* Value */ +#define MC_CMD_PORT_WRITE128_IN_VALUE_OFST 4 +#define MC_CMD_PORT_WRITE128_IN_VALUE_LEN 16 + +/* MC_CMD_PORT_WRITE128_OUT msgresponse */ +#define MC_CMD_PORT_WRITE128_OUT_LEN 4 +/* Status */ +#define MC_CMD_PORT_WRITE128_OUT_STATUS_OFST 0 +#define MC_CMD_PORT_WRITE128_OUT_STATUS_LEN 4 + +/* MC_CMD_CAPABILITIES structuredef */ +#define MC_CMD_CAPABILITIES_LEN 4 +/* Small buf table. */ +#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_LBN 0 +#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_WIDTH 1 +/* Turbo mode (for Maranello). */ +#define MC_CMD_CAPABILITIES_TURBO_LBN 1 +#define MC_CMD_CAPABILITIES_TURBO_WIDTH 1 +/* Turbo mode active (for Maranello). */ +#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_LBN 2 +#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_WIDTH 1 +/* PTP offload. */ +#define MC_CMD_CAPABILITIES_PTP_LBN 3 +#define MC_CMD_CAPABILITIES_PTP_WIDTH 1 +/* AOE mode. */ +#define MC_CMD_CAPABILITIES_AOE_LBN 4 +#define MC_CMD_CAPABILITIES_AOE_WIDTH 1 +/* AOE mode active. */ +#define MC_CMD_CAPABILITIES_AOE_ACTIVE_LBN 5 +#define MC_CMD_CAPABILITIES_AOE_ACTIVE_WIDTH 1 +/* FC mode active. */ +#define MC_CMD_CAPABILITIES_FC_ACTIVE_LBN 6 +#define MC_CMD_CAPABILITIES_FC_ACTIVE_WIDTH 1 +#define MC_CMD_CAPABILITIES_RESERVED_LBN 7 +#define MC_CMD_CAPABILITIES_RESERVED_WIDTH 25 + + +/***********************************/ +/* MC_CMD_GET_BOARD_CFG + * Returns the MC firmware configuration structure. + */ +#define MC_CMD_GET_BOARD_CFG 0x18 +#undef MC_CMD_0x18_PRIVILEGE_CTG + +#define MC_CMD_0x18_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_BOARD_CFG_IN msgrequest */ +#define MC_CMD_GET_BOARD_CFG_IN_LEN 0 + +/* MC_CMD_GET_BOARD_CFG_OUT msgresponse */ +#define MC_CMD_GET_BOARD_CFG_OUT_LENMIN 96 +#define MC_CMD_GET_BOARD_CFG_OUT_LENMAX 136 +#define MC_CMD_GET_BOARD_CFG_OUT_LENMAX_MCDI2 136 +#define MC_CMD_GET_BOARD_CFG_OUT_LEN(num) (72+2*(num)) +#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_NUM(len) (((len)-72)/2) +#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_OFST 0 +#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_LEN 4 +#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_OFST 4 +#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_LEN 32 +/* Capabilities for Siena Port0 (see struct MC_CMD_CAPABILITIES). Unused on + * EF10 and later (use MC_CMD_GET_CAPABILITIES). + */ +#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT0_OFST 36 +#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT0_LEN 4 +/* Capabilities for Siena Port1 (see struct MC_CMD_CAPABILITIES). Unused on + * EF10 and later (use MC_CMD_GET_CAPABILITIES).
+ */ +#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT1_OFST 40 +#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT1_LEN 4 +/* Base MAC address for Siena Port0. Unused on EF10 and later (use + * MC_CMD_GET_MAC_ADDRESSES). + */ +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST 44 +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_LEN 6 +/* Base MAC address for Siena Port1. Unused on EF10 and later (use + * MC_CMD_GET_MAC_ADDRESSES). + */ +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST 50 +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_LEN 6 +/* Size of MAC address pool for Siena Port0. Unused on EF10 and later (use + * MC_CMD_GET_MAC_ADDRESSES). + */ +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT0_OFST 56 +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT0_LEN 4 +/* Size of MAC address pool for Siena Port1. Unused on EF10 and later (use + * MC_CMD_GET_MAC_ADDRESSES). + */ +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT1_OFST 60 +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT1_LEN 4 +/* Increment between addresses in MAC address pool for Siena Port0. Unused on + * EF10 and later (use MC_CMD_GET_MAC_ADDRESSES). + */ +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT0_OFST 64 +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT0_LEN 4 +/* Increment between addresses in MAC address pool for Siena Port1. Unused on + * EF10 and later (use MC_CMD_GET_MAC_ADDRESSES). + */ +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT1_OFST 68 +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT1_LEN 4 +/* Siena only. This field contains a 16-bit value for each of the types of + * NVRAM area. The values are defined in the firmware/mc/platform/.c file for a + * specific board type, but otherwise have no meaning to the MC; they are used + * by the driver to manage selection of appropriate firmware updates. Unused on + * EF10 and later (use MC_CMD_NVRAM_METADATA). 
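+ *
+ * The entry count is implied by the response length: for example, the
+ * minimum 96-byte response carries
+ * MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_NUM(96) == (96 - 72) / 2 == 12
+ * entries, matching MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MINNUM.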
+ */ +#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST 72 +#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN 2 +#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MINNUM 12 +#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM 32 +#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM_MCDI2 32 + + +/***********************************/ +/* MC_CMD_DBI_READX + * Read DBI register(s) -- extended functionality + */ +#define MC_CMD_DBI_READX 0x19 +#undef MC_CMD_0x19_PRIVILEGE_CTG + +#define MC_CMD_0x19_PRIVILEGE_CTG SRIOV_CTG_INSECURE + +/* MC_CMD_DBI_READX_IN msgrequest */ +#define MC_CMD_DBI_READX_IN_LENMIN 8 +#define MC_CMD_DBI_READX_IN_LENMAX 248 +#define MC_CMD_DBI_READX_IN_LENMAX_MCDI2 1016 +#define MC_CMD_DBI_READX_IN_LEN(num) (0+8*(num)) +#define MC_CMD_DBI_READX_IN_DBIRDOP_NUM(len) (((len)-0)/8) +/* Each read op consists of an address (offset 0) and parameters including + * VF/CS2 (offset 32). See MC_CMD_DBIRDOP_TYPEDEF. + */ +#define MC_CMD_DBI_READX_IN_DBIRDOP_OFST 0 +#define MC_CMD_DBI_READX_IN_DBIRDOP_LEN 8 +#define MC_CMD_DBI_READX_IN_DBIRDOP_LO_OFST 0 +#define MC_CMD_DBI_READX_IN_DBIRDOP_LO_LEN 4 +#define MC_CMD_DBI_READX_IN_DBIRDOP_LO_LBN 0 +#define MC_CMD_DBI_READX_IN_DBIRDOP_LO_WIDTH 32 +#define MC_CMD_DBI_READX_IN_DBIRDOP_HI_OFST 4 +#define MC_CMD_DBI_READX_IN_DBIRDOP_HI_LEN 4 +#define MC_CMD_DBI_READX_IN_DBIRDOP_HI_LBN 32 +#define MC_CMD_DBI_READX_IN_DBIRDOP_HI_WIDTH 32 +#define MC_CMD_DBI_READX_IN_DBIRDOP_MINNUM 1 +#define MC_CMD_DBI_READX_IN_DBIRDOP_MAXNUM 31 +#define MC_CMD_DBI_READX_IN_DBIRDOP_MAXNUM_MCDI2 127 + +/* MC_CMD_DBI_READX_OUT msgresponse */ +#define MC_CMD_DBI_READX_OUT_LENMIN 4 +#define MC_CMD_DBI_READX_OUT_LENMAX 252 +#define MC_CMD_DBI_READX_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_DBI_READX_OUT_LEN(num) (0+4*(num)) +#define MC_CMD_DBI_READX_OUT_VALUE_NUM(len) (((len)-0)/4) +/* Value */ +#define MC_CMD_DBI_READX_OUT_VALUE_OFST 0 +#define MC_CMD_DBI_READX_OUT_VALUE_LEN 4 +#define MC_CMD_DBI_READX_OUT_VALUE_MINNUM 1 +#define MC_CMD_DBI_READX_OUT_VALUE_MAXNUM 63 +#define MC_CMD_DBI_READX_OUT_VALUE_MAXNUM_MCDI2 255 + +/* MC_CMD_DBIRDOP_TYPEDEF structuredef */ +#define MC_CMD_DBIRDOP_TYPEDEF_LEN 8 +#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_OFST 0 +#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_LEN 4 +#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_LBN 0 +#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_WIDTH 32 +#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_OFST 4 +#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_LEN 4 +#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_OFST 4 +#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_LBN 16 +#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_WIDTH 16 +#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_OFST 4 +#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_LBN 15 +#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_WIDTH 1 +#define MC_CMD_DBIRDOP_TYPEDEF_CS2_OFST 4 +#define MC_CMD_DBIRDOP_TYPEDEF_CS2_LBN 14 +#define MC_CMD_DBIRDOP_TYPEDEF_CS2_WIDTH 1 +#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_LBN 32 +#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_WIDTH 32 + + +/***********************************/ +/* MC_CMD_SET_RAND_SEED + * Set the 16-byte seed for the MC pseudo-random generator. + */ +#define MC_CMD_SET_RAND_SEED 0x1a +#undef MC_CMD_0x1a_PRIVILEGE_CTG + +#define MC_CMD_0x1a_PRIVILEGE_CTG SRIOV_CTG_INSECURE + +/* MC_CMD_SET_RAND_SEED_IN msgrequest */ +#define MC_CMD_SET_RAND_SEED_IN_LEN 16 +/* Seed value. */ +#define MC_CMD_SET_RAND_SEED_IN_SEED_OFST 0 +#define MC_CMD_SET_RAND_SEED_IN_SEED_LEN 16 + +/* MC_CMD_SET_RAND_SEED_OUT msgresponse */ +#define MC_CMD_SET_RAND_SEED_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_LTSSM_HIST + * Retrieve the history of the LTSSM, if the build supports it.
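+ * (The LTSSM is the PCIe Link Training and Status State Machine.)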
+ */ +#define MC_CMD_LTSSM_HIST 0x1b + +/* MC_CMD_LTSSM_HIST_IN msgrequest */ +#define MC_CMD_LTSSM_HIST_IN_LEN 0 + +/* MC_CMD_LTSSM_HIST_OUT msgresponse */ +#define MC_CMD_LTSSM_HIST_OUT_LENMIN 0 +#define MC_CMD_LTSSM_HIST_OUT_LENMAX 252 +#define MC_CMD_LTSSM_HIST_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_LTSSM_HIST_OUT_LEN(num) (0+4*(num)) +#define MC_CMD_LTSSM_HIST_OUT_DATA_NUM(len) (((len)-0)/4) +/* variable number of LTSSM values, as bytes. The history is read-to-clear. */ +#define MC_CMD_LTSSM_HIST_OUT_DATA_OFST 0 +#define MC_CMD_LTSSM_HIST_OUT_DATA_LEN 4 +#define MC_CMD_LTSSM_HIST_OUT_DATA_MINNUM 0 +#define MC_CMD_LTSSM_HIST_OUT_DATA_MAXNUM 63 +#define MC_CMD_LTSSM_HIST_OUT_DATA_MAXNUM_MCDI2 255 + + +/***********************************/ +/* MC_CMD_DRV_ATTACH + * Inform MCPU that this port is managed on the host (i.e. driver active). For + * Huntington, also request the preferred datapath firmware to use if possible + * (it may not be possible for this request to be fulfilled; the driver must + * issue a subsequent MC_CMD_GET_CAPABILITIES command to determine which + * features are actually available). The FIRMWARE_ID field is ignored by older + * platforms. + */ +#define MC_CMD_DRV_ATTACH 0x1c +#undef MC_CMD_0x1c_PRIVILEGE_CTG + +#define MC_CMD_0x1c_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_DRV_ATTACH_IN msgrequest */ +#define MC_CMD_DRV_ATTACH_IN_LEN 12 +/* new state to set if UPDATE=1 */ +#define MC_CMD_DRV_ATTACH_IN_NEW_STATE_OFST 0 +#define MC_CMD_DRV_ATTACH_IN_NEW_STATE_LEN 4 +#define MC_CMD_DRV_ATTACH_OFST 0 +#define MC_CMD_DRV_ATTACH_LBN 0 +#define MC_CMD_DRV_ATTACH_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_ATTACH_OFST 0 +#define MC_CMD_DRV_ATTACH_IN_ATTACH_LBN 0 +#define MC_CMD_DRV_ATTACH_IN_ATTACH_WIDTH 1 +#define MC_CMD_DRV_PREBOOT_OFST 0 +#define MC_CMD_DRV_PREBOOT_LBN 1 +#define MC_CMD_DRV_PREBOOT_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_PREBOOT_OFST 0 +#define MC_CMD_DRV_ATTACH_IN_PREBOOT_LBN 1 +#define MC_CMD_DRV_ATTACH_IN_PREBOOT_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_SUBVARIANT_AWARE_OFST 0 +#define MC_CMD_DRV_ATTACH_IN_SUBVARIANT_AWARE_LBN 2 +#define MC_CMD_DRV_ATTACH_IN_SUBVARIANT_AWARE_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_WANT_VI_SPREADING_OFST 0 +#define MC_CMD_DRV_ATTACH_IN_WANT_VI_SPREADING_LBN 3 +#define MC_CMD_DRV_ATTACH_IN_WANT_VI_SPREADING_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_WANT_V2_LINKCHANGES_OFST 0 +#define MC_CMD_DRV_ATTACH_IN_WANT_V2_LINKCHANGES_LBN 4 +#define MC_CMD_DRV_ATTACH_IN_WANT_V2_LINKCHANGES_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_WANT_RX_VI_SPREADING_INHIBIT_OFST 0 +#define MC_CMD_DRV_ATTACH_IN_WANT_RX_VI_SPREADING_INHIBIT_LBN 5 +#define MC_CMD_DRV_ATTACH_IN_WANT_RX_VI_SPREADING_INHIBIT_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_WANT_TX_ONLY_SPREADING_OFST 0 +#define MC_CMD_DRV_ATTACH_IN_WANT_TX_ONLY_SPREADING_LBN 5 +#define MC_CMD_DRV_ATTACH_IN_WANT_TX_ONLY_SPREADING_WIDTH 1 +/* 1 to set new state, or 0 to just report the existing state */ +#define MC_CMD_DRV_ATTACH_IN_UPDATE_OFST 4 +#define MC_CMD_DRV_ATTACH_IN_UPDATE_LEN 4 +/* preferred datapath firmware (for Huntington; ignored for Siena) */ +#define MC_CMD_DRV_ATTACH_IN_FIRMWARE_ID_OFST 8 +#define MC_CMD_DRV_ATTACH_IN_FIRMWARE_ID_LEN 4 +/* enum: Prefer to use full featured firmware */ +#define MC_CMD_FW_FULL_FEATURED 0x0 +/* enum: Prefer to use firmware with fewer features but lower latency */ +#define MC_CMD_FW_LOW_LATENCY 0x1 +/* enum: Prefer to use firmware for SolarCapture packed stream mode */ +#define MC_CMD_FW_PACKED_STREAM 0x2 +/* enum: Prefer to use firmware with fewer features and simpler 
TX event + * batching but higher TX packet rate + */ +#define MC_CMD_FW_HIGH_TX_RATE 0x3 +/* enum: Reserved value */ +#define MC_CMD_FW_PACKED_STREAM_HASH_MODE_1 0x4 +/* enum: Prefer to use firmware with additional "rules engine" filtering + * support + */ +#define MC_CMD_FW_RULES_ENGINE 0x5 +/* enum: Prefer to use firmware with additional DPDK support */ +#define MC_CMD_FW_DPDK 0x6 +/* enum: Prefer to use "l3xudp" custom datapath firmware (see SF-119495-PD and + * bug69716) + */ +#define MC_CMD_FW_L3XUDP 0x7 +/* enum: Requests that the MC keep whatever datapath firmware is currently + * running. It's used for test purposes, where we want to be able to shmboot + * special test firmware variants. This option is only recognised in eftest + * (i.e. non-production) builds. + */ +#define MC_CMD_FW_KEEP_CURRENT_EFTEST_ONLY 0xfffffffe +/* enum: Only this option is allowed for non-admin functions */ +#define MC_CMD_FW_DONT_CARE 0xffffffff + +/* MC_CMD_DRV_ATTACH_IN_V2 msgrequest: Updated DRV_ATTACH to include driver + * version + */ +#define MC_CMD_DRV_ATTACH_IN_V2_LEN 32 +/* new state to set if UPDATE=1 */ +#define MC_CMD_DRV_ATTACH_IN_V2_NEW_STATE_OFST 0 +#define MC_CMD_DRV_ATTACH_IN_V2_NEW_STATE_LEN 4 +/* MC_CMD_DRV_ATTACH_OFST 0 */ +/* MC_CMD_DRV_ATTACH_LBN 0 */ +/* MC_CMD_DRV_ATTACH_WIDTH 1 */ +#define MC_CMD_DRV_ATTACH_IN_V2_ATTACH_OFST 0 +#define MC_CMD_DRV_ATTACH_IN_V2_ATTACH_LBN 0 +#define MC_CMD_DRV_ATTACH_IN_V2_ATTACH_WIDTH 1 +/* MC_CMD_DRV_PREBOOT_OFST 0 */ +/* MC_CMD_DRV_PREBOOT_LBN 1 */ +/* MC_CMD_DRV_PREBOOT_WIDTH 1 */ +#define MC_CMD_DRV_ATTACH_IN_V2_PREBOOT_OFST 0 +#define MC_CMD_DRV_ATTACH_IN_V2_PREBOOT_LBN 1 +#define MC_CMD_DRV_ATTACH_IN_V2_PREBOOT_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_V2_SUBVARIANT_AWARE_OFST 0 +#define MC_CMD_DRV_ATTACH_IN_V2_SUBVARIANT_AWARE_LBN 2 +#define MC_CMD_DRV_ATTACH_IN_V2_SUBVARIANT_AWARE_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_V2_WANT_VI_SPREADING_OFST 0 +#define MC_CMD_DRV_ATTACH_IN_V2_WANT_VI_SPREADING_LBN 3 +#define MC_CMD_DRV_ATTACH_IN_V2_WANT_VI_SPREADING_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_V2_WANT_V2_LINKCHANGES_OFST 0 +#define MC_CMD_DRV_ATTACH_IN_V2_WANT_V2_LINKCHANGES_LBN 4 +#define MC_CMD_DRV_ATTACH_IN_V2_WANT_V2_LINKCHANGES_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_V2_WANT_RX_VI_SPREADING_INHIBIT_OFST 0 +#define MC_CMD_DRV_ATTACH_IN_V2_WANT_RX_VI_SPREADING_INHIBIT_LBN 5 +#define MC_CMD_DRV_ATTACH_IN_V2_WANT_RX_VI_SPREADING_INHIBIT_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_V2_WANT_TX_ONLY_SPREADING_OFST 0 +#define MC_CMD_DRV_ATTACH_IN_V2_WANT_TX_ONLY_SPREADING_LBN 5 +#define MC_CMD_DRV_ATTACH_IN_V2_WANT_TX_ONLY_SPREADING_WIDTH 1 +/* 1 to set new state, or 0 to just report the existing state */ +#define MC_CMD_DRV_ATTACH_IN_V2_UPDATE_OFST 4 +#define MC_CMD_DRV_ATTACH_IN_V2_UPDATE_LEN 4 +/* preferred datapath firmware (for Huntington; ignored for Siena) */ +#define MC_CMD_DRV_ATTACH_IN_V2_FIRMWARE_ID_OFST 8 +#define MC_CMD_DRV_ATTACH_IN_V2_FIRMWARE_ID_LEN 4 +/* enum: Prefer to use full featured firmware */ +/* MC_CMD_FW_FULL_FEATURED 0x0 */ +/* enum: Prefer to use firmware with fewer features but lower latency */ +/* MC_CMD_FW_LOW_LATENCY 0x1 */ +/* enum: Prefer to use firmware for SolarCapture packed stream mode */ +/* MC_CMD_FW_PACKED_STREAM 0x2 */ +/* enum: Prefer to use firmware with fewer features and simpler TX event + * batching but higher TX packet rate + */ +/* MC_CMD_FW_HIGH_TX_RATE 0x3 */ +/* enum: Reserved value */ +/* MC_CMD_FW_PACKED_STREAM_HASH_MODE_1 0x4 */ +/* enum: Prefer to use firmware with additional "rules engine" filtering + * 
support + */ +/* MC_CMD_FW_RULES_ENGINE 0x5 */ +/* enum: Prefer to use firmware with additional DPDK support */ +/* MC_CMD_FW_DPDK 0x6 */ +/* enum: Prefer to use "l3xudp" custom datapath firmware (see SF-119495-PD and + * bug69716) + */ +/* MC_CMD_FW_L3XUDP 0x7 */ +/* enum: Requests that the MC keep whatever datapath firmware is currently + * running. It's used for test purposes, where we want to be able to shmboot + * special test firmware variants. This option is only recognised in eftest + * (i.e. non-production) builds. + */ +/* MC_CMD_FW_KEEP_CURRENT_EFTEST_ONLY 0xfffffffe */ +/* enum: Only this option is allowed for non-admin functions */ +/* MC_CMD_FW_DONT_CARE 0xffffffff */ +/* Version of the driver to be reported by management protocols (e.g. NC-SI) + * handled by the NIC. This is a zero-terminated ASCII string. + */ +#define MC_CMD_DRV_ATTACH_IN_V2_DRIVER_VERSION_OFST 12 +#define MC_CMD_DRV_ATTACH_IN_V2_DRIVER_VERSION_LEN 20 + +/* MC_CMD_DRV_ATTACH_OUT msgresponse */ +#define MC_CMD_DRV_ATTACH_OUT_LEN 4 +/* previous or existing state, see the bitmask at NEW_STATE */ +#define MC_CMD_DRV_ATTACH_OUT_OLD_STATE_OFST 0 +#define MC_CMD_DRV_ATTACH_OUT_OLD_STATE_LEN 4 + +/* MC_CMD_DRV_ATTACH_EXT_OUT msgresponse */ +#define MC_CMD_DRV_ATTACH_EXT_OUT_LEN 8 +/* previous or existing state, see the bitmask at NEW_STATE */ +#define MC_CMD_DRV_ATTACH_EXT_OUT_OLD_STATE_OFST 0 +#define MC_CMD_DRV_ATTACH_EXT_OUT_OLD_STATE_LEN 4 +/* Flags associated with this function */ +#define MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_OFST 4 +#define MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_LEN 4 +/* enum: Labels the lowest-numbered function visible to the OS */ +#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY 0x0 +/* enum: The function can control the link state of the physical port it is + * bound to. + */ +#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL 0x1 +/* enum: The function can perform privileged operations */ +#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED 0x2 +/* enum: The function does not have an active port associated with it. The port + * refers to the Sorrento external FPGA port. + */ +#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_NO_ACTIVE_PORT 0x3 +/* enum: If set, indicates that VI spreading is currently enabled. Will always + * indicate the current state, regardless of the value in the WANT_VI_SPREADING + * input. + */ +#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_VI_SPREADING_ENABLED 0x4 +/* enum: Used during development only. Should no longer be used. */ +#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_RX_VI_SPREADING_INHIBITED 0x5 +/* enum: If set, indicates that TX only spreading is enabled. Even-numbered + * TXQs will use one engine, and odd-numbered TXQs will use the other. This + * also has the effect that only even-numbered RXQs will receive traffic. + */ +#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TX_ONLY_VI_SPREADING_ENABLED 0x5 + + +/***********************************/ +/* MC_CMD_SHMUART + * Route UART output to circular buffer in shared memory instead. + */ +#define MC_CMD_SHMUART 0x1f + +/* MC_CMD_SHMUART_IN msgrequest */ +#define MC_CMD_SHMUART_IN_LEN 4 +/* ??? */ +#define MC_CMD_SHMUART_IN_FLAG_OFST 0 +#define MC_CMD_SHMUART_IN_FLAG_LEN 4 + +/* MC_CMD_SHMUART_OUT msgresponse */ +#define MC_CMD_SHMUART_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_PORT_RESET + * Generic per-port reset. There is no equivalent for per-board reset. Locks + * required: None; Return code: 0, ETIME. NOTE: This command is deprecated - + * use MC_CMD_ENTITY_RESET instead. 
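+ *
+ * A minimal sketch of the preferred replacement, assuming the MCDI helpers
+ * declared in mcdi.h (efx and rc are illustrative locals; the
+ * MC_CMD_ENTITY_RESET definitions appear below):
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_ENTITY_RESET_IN_LEN);
+ *
+ *	MCDI_POPULATE_DWORD_1(inbuf, ENTITY_RESET_IN_FLAG,
+ *			      ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET, 1);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_ENTITY_RESET, inbuf, sizeof(inbuf),
+ *			  NULL, 0, NULL);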
+ */ +#define MC_CMD_PORT_RESET 0x20 +#undef MC_CMD_0x20_PRIVILEGE_CTG + +#define MC_CMD_0x20_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_PORT_RESET_IN msgrequest */ +#define MC_CMD_PORT_RESET_IN_LEN 0 + +/* MC_CMD_PORT_RESET_OUT msgresponse */ +#define MC_CMD_PORT_RESET_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_ENTITY_RESET + * Generic per-resource reset. There is no equivalent for per-board reset. + * Locks required: None; Return code: 0, ETIME. NOTE: This command is an + * extended version of the deprecated MC_CMD_PORT_RESET with added fields. + */ +#define MC_CMD_ENTITY_RESET 0x20 +/* MC_CMD_0x20_PRIVILEGE_CTG SRIOV_CTG_GENERAL */ + +/* MC_CMD_ENTITY_RESET_IN msgrequest */ +#define MC_CMD_ENTITY_RESET_IN_LEN 4 +/* Optional flags field. Omitting this will perform a "legacy" reset action + * (TBD). + */ +#define MC_CMD_ENTITY_RESET_IN_FLAG_OFST 0 +#define MC_CMD_ENTITY_RESET_IN_FLAG_LEN 4 +#define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_OFST 0 +#define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_LBN 0 +#define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_WIDTH 1 + +/* MC_CMD_ENTITY_RESET_OUT msgresponse */ +#define MC_CMD_ENTITY_RESET_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_PCIE_CREDITS + * Read instantaneous and minimum flow control thresholds. + */ +#define MC_CMD_PCIE_CREDITS 0x21 + +/* MC_CMD_PCIE_CREDITS_IN msgrequest */ +#define MC_CMD_PCIE_CREDITS_IN_LEN 8 +/* poll period. 0 is disabled */ +#define MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_OFST 0 +#define MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_LEN 4 +/* wipe statistics */ +#define MC_CMD_PCIE_CREDITS_IN_WIPE_OFST 4 +#define MC_CMD_PCIE_CREDITS_IN_WIPE_LEN 4 + +/* MC_CMD_PCIE_CREDITS_OUT msgresponse */ +#define MC_CMD_PCIE_CREDITS_OUT_LEN 16 +#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_HDR_OFST 0 +#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_HDR_LEN 2 +#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_DATA_OFST 2 +#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_DATA_LEN 2 +#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_HDR_OFST 4 +#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_HDR_LEN 2 +#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_DATA_OFST 6 +#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_DATA_LEN 2 +#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_HDR_OFST 8 +#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_HDR_LEN 2 +#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_DATA_OFST 10 +#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_DATA_LEN 2 +#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_HDR_OFST 12 +#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_HDR_LEN 2 +#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_DATA_OFST 14 +#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_DATA_LEN 2 + + +/***********************************/ +/* MC_CMD_RXD_MONITOR + * Get histogram of RX queue fill level. 
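+ * Judging by their names, the RING_LT_<n>/CACHE_LT_<n> fields in the
+ * response are histogram buckets counting samples seen with a fill level
+ * below <n> entries, with RING_GE_256/CACHE_GE_32 catching the remainder.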
+ */ +#define MC_CMD_RXD_MONITOR 0x22 + +/* MC_CMD_RXD_MONITOR_IN msgrequest */ +#define MC_CMD_RXD_MONITOR_IN_LEN 12 +#define MC_CMD_RXD_MONITOR_IN_QID_OFST 0 +#define MC_CMD_RXD_MONITOR_IN_QID_LEN 4 +#define MC_CMD_RXD_MONITOR_IN_POLL_PERIOD_OFST 4 +#define MC_CMD_RXD_MONITOR_IN_POLL_PERIOD_LEN 4 +#define MC_CMD_RXD_MONITOR_IN_WIPE_OFST 8 +#define MC_CMD_RXD_MONITOR_IN_WIPE_LEN 4 + +/* MC_CMD_RXD_MONITOR_OUT msgresponse */ +#define MC_CMD_RXD_MONITOR_OUT_LEN 80 +#define MC_CMD_RXD_MONITOR_OUT_QID_OFST 0 +#define MC_CMD_RXD_MONITOR_OUT_QID_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_RING_FILL_OFST 4 +#define MC_CMD_RXD_MONITOR_OUT_RING_FILL_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_FILL_OFST 8 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_FILL_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_1_OFST 12 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_1_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_2_OFST 16 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_2_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_4_OFST 20 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_4_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_8_OFST 24 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_8_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_16_OFST 28 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_16_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_32_OFST 32 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_32_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_64_OFST 36 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_64_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_128_OFST 40 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_128_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_256_OFST 44 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_256_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_RING_GE_256_OFST 48 +#define MC_CMD_RXD_MONITOR_OUT_RING_GE_256_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_1_OFST 52 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_1_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_2_OFST 56 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_2_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_4_OFST 60 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_4_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_8_OFST 64 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_8_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_16_OFST 68 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_16_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_32_OFST 72 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_32_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_GE_32_OFST 76 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_GE_32_LEN 4 + + +/***********************************/ +/* MC_CMD_PUTS + * Copy the given ASCII string out onto UART and/or out of the network port. 
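+ *
+ * The request is a 12-byte header followed by the string bytes: for
+ * example, a 16-byte string gives MC_CMD_PUTS_IN_LEN(16) == 28 bytes, and
+ * the minimum request (one string byte) is MC_CMD_PUTS_IN_LENMIN == 13
+ * bytes.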
+ */ +#define MC_CMD_PUTS 0x23 +#undef MC_CMD_0x23_PRIVILEGE_CTG + +#define MC_CMD_0x23_PRIVILEGE_CTG SRIOV_CTG_INSECURE + +/* MC_CMD_PUTS_IN msgrequest */ +#define MC_CMD_PUTS_IN_LENMIN 13 +#define MC_CMD_PUTS_IN_LENMAX 252 +#define MC_CMD_PUTS_IN_LENMAX_MCDI2 1020 +#define MC_CMD_PUTS_IN_LEN(num) (12+1*(num)) +#define MC_CMD_PUTS_IN_STRING_NUM(len) (((len)-12)/1) +#define MC_CMD_PUTS_IN_DEST_OFST 0 +#define MC_CMD_PUTS_IN_DEST_LEN 4 +#define MC_CMD_PUTS_IN_UART_OFST 0 +#define MC_CMD_PUTS_IN_UART_LBN 0 +#define MC_CMD_PUTS_IN_UART_WIDTH 1 +#define MC_CMD_PUTS_IN_PORT_OFST 0 +#define MC_CMD_PUTS_IN_PORT_LBN 1 +#define MC_CMD_PUTS_IN_PORT_WIDTH 1 +#define MC_CMD_PUTS_IN_DHOST_OFST 4 +#define MC_CMD_PUTS_IN_DHOST_LEN 6 +#define MC_CMD_PUTS_IN_STRING_OFST 12 +#define MC_CMD_PUTS_IN_STRING_LEN 1 +#define MC_CMD_PUTS_IN_STRING_MINNUM 1 +#define MC_CMD_PUTS_IN_STRING_MAXNUM 240 +#define MC_CMD_PUTS_IN_STRING_MAXNUM_MCDI2 1008 + +/* MC_CMD_PUTS_OUT msgresponse */ +#define MC_CMD_PUTS_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_PHY_CFG + * Report PHY configuration. This guarantees to succeed even if the PHY is in a + * 'zombie' state. Locks required: None + */ +#define MC_CMD_GET_PHY_CFG 0x24 +#undef MC_CMD_0x24_PRIVILEGE_CTG + +#define MC_CMD_0x24_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_PHY_CFG_IN msgrequest */ +#define MC_CMD_GET_PHY_CFG_IN_LEN 0 + +/* MC_CMD_GET_PHY_CFG_IN_V2 msgrequest */ +#define MC_CMD_GET_PHY_CFG_IN_V2_LEN 8 +/* Target port to request PHY state for. Uses MAE_LINK_ENDPOINT_SELECTOR which + * identifies a real or virtual network port by MAE port and link end. See the + * structure definition for more details + */ +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_OFST 0 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_LEN 8 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_LO_OFST 0 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_LO_LEN 4 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_LO_LBN 0 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_LO_WIDTH 32 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_HI_OFST 4 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_HI_LEN 4 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_HI_LBN 32 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_HI_WIDTH 32 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_OFST 0 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_LEN 4 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FLAT_OFST 0 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FLAT_LEN 4 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_TYPE_OFST 3 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_TYPE_LEN 1 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_OFST 0 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_LEN 3 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_LBN 0 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_WIDTH 4 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_LBN 20 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_WIDTH 4 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_LBN 16 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_WIDTH 4 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_OFST 2 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_LEN 1 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_OFST 0 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_LEN 2 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_LINK_END_OFST 4 +#define 
MC_CMD_GET_PHY_CFG_IN_V2_TARGET_LINK_END_LEN 4 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_OFST 0 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_LEN 8 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_LO_OFST 0 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_LO_LEN 4 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_LO_LBN 0 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_LO_WIDTH 32 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_HI_OFST 4 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_HI_LEN 4 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_HI_LBN 32 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_HI_WIDTH 32 + +/* MC_CMD_GET_PHY_CFG_OUT msgresponse */ +#define MC_CMD_GET_PHY_CFG_OUT_LEN 72 +/* flags */ +#define MC_CMD_GET_PHY_CFG_OUT_FLAGS_OFST 0 +#define MC_CMD_GET_PHY_CFG_OUT_FLAGS_LEN 4 +#define MC_CMD_GET_PHY_CFG_OUT_PRESENT_OFST 0 +#define MC_CMD_GET_PHY_CFG_OUT_PRESENT_LBN 0 +#define MC_CMD_GET_PHY_CFG_OUT_PRESENT_WIDTH 1 +#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_OFST 0 +#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_LBN 1 +#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_WIDTH 1 +#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_OFST 0 +#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN 2 +#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_WIDTH 1 +#define MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_OFST 0 +#define MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_LBN 3 +#define MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_WIDTH 1 +#define MC_CMD_GET_PHY_CFG_OUT_POWEROFF_OFST 0 +#define MC_CMD_GET_PHY_CFG_OUT_POWEROFF_LBN 4 +#define MC_CMD_GET_PHY_CFG_OUT_POWEROFF_WIDTH 1 +#define MC_CMD_GET_PHY_CFG_OUT_TXDIS_OFST 0 +#define MC_CMD_GET_PHY_CFG_OUT_TXDIS_LBN 5 +#define MC_CMD_GET_PHY_CFG_OUT_TXDIS_WIDTH 1 +#define MC_CMD_GET_PHY_CFG_OUT_BIST_OFST 0 +#define MC_CMD_GET_PHY_CFG_OUT_BIST_LBN 6 +#define MC_CMD_GET_PHY_CFG_OUT_BIST_WIDTH 1 +/* ?? 
*/ +#define MC_CMD_GET_PHY_CFG_OUT_TYPE_OFST 4 +#define MC_CMD_GET_PHY_CFG_OUT_TYPE_LEN 4 +/* Bitmask of supported capabilities */ +#define MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_OFST 8 +#define MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_LEN 4 +#define MC_CMD_PHY_CAP_10HDX_OFST 8 +#define MC_CMD_PHY_CAP_10HDX_LBN 1 +#define MC_CMD_PHY_CAP_10HDX_WIDTH 1 +#define MC_CMD_PHY_CAP_10FDX_OFST 8 +#define MC_CMD_PHY_CAP_10FDX_LBN 2 +#define MC_CMD_PHY_CAP_10FDX_WIDTH 1 +#define MC_CMD_PHY_CAP_100HDX_OFST 8 +#define MC_CMD_PHY_CAP_100HDX_LBN 3 +#define MC_CMD_PHY_CAP_100HDX_WIDTH 1 +#define MC_CMD_PHY_CAP_100FDX_OFST 8 +#define MC_CMD_PHY_CAP_100FDX_LBN 4 +#define MC_CMD_PHY_CAP_100FDX_WIDTH 1 +#define MC_CMD_PHY_CAP_1000HDX_OFST 8 +#define MC_CMD_PHY_CAP_1000HDX_LBN 5 +#define MC_CMD_PHY_CAP_1000HDX_WIDTH 1 +#define MC_CMD_PHY_CAP_1000FDX_OFST 8 +#define MC_CMD_PHY_CAP_1000FDX_LBN 6 +#define MC_CMD_PHY_CAP_1000FDX_WIDTH 1 +#define MC_CMD_PHY_CAP_10000FDX_OFST 8 +#define MC_CMD_PHY_CAP_10000FDX_LBN 7 +#define MC_CMD_PHY_CAP_10000FDX_WIDTH 1 +#define MC_CMD_PHY_CAP_PAUSE_OFST 8 +#define MC_CMD_PHY_CAP_PAUSE_LBN 8 +#define MC_CMD_PHY_CAP_PAUSE_WIDTH 1 +#define MC_CMD_PHY_CAP_ASYM_OFST 8 +#define MC_CMD_PHY_CAP_ASYM_LBN 9 +#define MC_CMD_PHY_CAP_ASYM_WIDTH 1 +#define MC_CMD_PHY_CAP_AN_OFST 8 +#define MC_CMD_PHY_CAP_AN_LBN 10 +#define MC_CMD_PHY_CAP_AN_WIDTH 1 +#define MC_CMD_PHY_CAP_40000FDX_OFST 8 +#define MC_CMD_PHY_CAP_40000FDX_LBN 11 +#define MC_CMD_PHY_CAP_40000FDX_WIDTH 1 +#define MC_CMD_PHY_CAP_DDM_OFST 8 +#define MC_CMD_PHY_CAP_DDM_LBN 12 +#define MC_CMD_PHY_CAP_DDM_WIDTH 1 +#define MC_CMD_PHY_CAP_100000FDX_OFST 8 +#define MC_CMD_PHY_CAP_100000FDX_LBN 13 +#define MC_CMD_PHY_CAP_100000FDX_WIDTH 1 +#define MC_CMD_PHY_CAP_25000FDX_OFST 8 +#define MC_CMD_PHY_CAP_25000FDX_LBN 14 +#define MC_CMD_PHY_CAP_25000FDX_WIDTH 1 +#define MC_CMD_PHY_CAP_50000FDX_OFST 8 +#define MC_CMD_PHY_CAP_50000FDX_LBN 15 +#define MC_CMD_PHY_CAP_50000FDX_WIDTH 1 +#define MC_CMD_PHY_CAP_BASER_FEC_OFST 8 +#define MC_CMD_PHY_CAP_BASER_FEC_LBN 16 +#define MC_CMD_PHY_CAP_BASER_FEC_WIDTH 1 +#define MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_OFST 8 +#define MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_LBN 17 +#define MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_WIDTH 1 +#define MC_CMD_PHY_CAP_RS_FEC_OFST 8 +#define MC_CMD_PHY_CAP_RS_FEC_LBN 18 +#define MC_CMD_PHY_CAP_RS_FEC_WIDTH 1 +#define MC_CMD_PHY_CAP_RS_FEC_REQUESTED_OFST 8 +#define MC_CMD_PHY_CAP_RS_FEC_REQUESTED_LBN 19 +#define MC_CMD_PHY_CAP_RS_FEC_REQUESTED_WIDTH 1 +#define MC_CMD_PHY_CAP_25G_BASER_FEC_OFST 8 +#define MC_CMD_PHY_CAP_25G_BASER_FEC_LBN 20 +#define MC_CMD_PHY_CAP_25G_BASER_FEC_WIDTH 1 +#define MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_OFST 8 +#define MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN 21 +#define MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_WIDTH 1 +#define MC_CMD_PHY_CAP_200000FDX_OFST 8 +#define MC_CMD_PHY_CAP_200000FDX_LBN 22 +#define MC_CMD_PHY_CAP_200000FDX_WIDTH 1 +/* ?? */ +#define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_OFST 12 +#define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_LEN 4 +/* ?? */ +#define MC_CMD_GET_PHY_CFG_OUT_PRT_OFST 16 +#define MC_CMD_GET_PHY_CFG_OUT_PRT_LEN 4 +/* ?? */ +#define MC_CMD_GET_PHY_CFG_OUT_STATS_MASK_OFST 20 +#define MC_CMD_GET_PHY_CFG_OUT_STATS_MASK_LEN 4 +/* ?? */ +#define MC_CMD_GET_PHY_CFG_OUT_NAME_OFST 24 +#define MC_CMD_GET_PHY_CFG_OUT_NAME_LEN 20 +/* ?? */ +#define MC_CMD_GET_PHY_CFG_OUT_MEDIA_TYPE_OFST 44 +#define MC_CMD_GET_PHY_CFG_OUT_MEDIA_TYPE_LEN 4 +/* enum: Xaui. */ +#define MC_CMD_MEDIA_XAUI 0x1 +/* enum: CX4. */ +#define MC_CMD_MEDIA_CX4 0x2 +/* enum: KX4. 
+ */
+#define MC_CMD_MEDIA_KX4 0x3
+/* enum: XFP Far. */
+#define MC_CMD_MEDIA_XFP 0x4
+/* enum: SFP+. */
+#define MC_CMD_MEDIA_SFP_PLUS 0x5
+/* enum: 10GBaseT. */
+#define MC_CMD_MEDIA_BASE_T 0x6
+/* enum: QSFP+. */
+#define MC_CMD_MEDIA_QSFP_PLUS 0x7
+/* enum: DSFP. */
+#define MC_CMD_MEDIA_DSFP 0x8
+#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_OFST 48
+#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_LEN 4
+/* enum: Native clause 22 */
+#define MC_CMD_MMD_CLAUSE22 0x0
+#define MC_CMD_MMD_CLAUSE45_PMAPMD 0x1 /* enum */
+#define MC_CMD_MMD_CLAUSE45_WIS 0x2 /* enum */
+#define MC_CMD_MMD_CLAUSE45_PCS 0x3 /* enum */
+#define MC_CMD_MMD_CLAUSE45_PHYXS 0x4 /* enum */
+#define MC_CMD_MMD_CLAUSE45_DTEXS 0x5 /* enum */
+#define MC_CMD_MMD_CLAUSE45_TC 0x6 /* enum */
+#define MC_CMD_MMD_CLAUSE45_AN 0x7 /* enum */
+/* enum: Clause22 proxied over clause45 by PHY. */
+#define MC_CMD_MMD_CLAUSE45_C22EXT 0x1d
+#define MC_CMD_MMD_CLAUSE45_VEND1 0x1e /* enum */
+#define MC_CMD_MMD_CLAUSE45_VEND2 0x1f /* enum */
+#define MC_CMD_GET_PHY_CFG_OUT_REVISION_OFST 52
+#define MC_CMD_GET_PHY_CFG_OUT_REVISION_LEN 20
+
+
+/***********************************/
+/* MC_CMD_START_BIST
+ * Start a BIST test on the PHY. Locks required: PHY_LOCK if doing a PHY BIST.
+ * Return code: 0, EINVAL, EACCES (if PHY_LOCK is not held)
+ */
+#define MC_CMD_START_BIST 0x25
+#undef MC_CMD_0x25_PRIVILEGE_CTG
+
+#define MC_CMD_0x25_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_START_BIST_IN msgrequest */
+#define MC_CMD_START_BIST_IN_LEN 4
+/* Type of test. */
+#define MC_CMD_START_BIST_IN_TYPE_OFST 0
+#define MC_CMD_START_BIST_IN_TYPE_LEN 4
+/* enum: Run the PHY's short cable BIST. */
+#define MC_CMD_PHY_BIST_CABLE_SHORT 0x1
+/* enum: Run the PHY's long cable BIST. */
+#define MC_CMD_PHY_BIST_CABLE_LONG 0x2
+/* enum: Run BIST on the currently selected BPX Serdes (XAUI or XFI). */
+#define MC_CMD_BPX_SERDES_BIST 0x3
+/* enum: Run the MC loopback tests. */
+#define MC_CMD_MC_LOOPBACK_BIST 0x4
+/* enum: Run the PHY's standard BIST. */
+#define MC_CMD_PHY_BIST 0x5
+/* enum: Run MC RAM test. */
+#define MC_CMD_MC_MEM_BIST 0x6
+/* enum: Run Port RAM test. */
+#define MC_CMD_PORT_MEM_BIST 0x7
+/* enum: Run register test. */
+#define MC_CMD_REG_BIST 0x8
+
+/* MC_CMD_START_BIST_OUT msgresponse */
+#define MC_CMD_START_BIST_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_POLL_BIST
+ * Poll for BIST completion. Returns a single status code, and optionally some
+ * PHY-specific BIST output. The driver should only consume the BIST output
+ * after validating OUTLEN and MC_CMD_GET_PHY_CFG.TYPE. If a driver can't
+ * successfully parse the BIST output, it should still respect the pass/fail
+ * in OUT.RESULT. Locks required: PHY_LOCK if doing a PHY BIST. Return code:
+ * 0, EACCES (if PHY_LOCK is not held).
+ */
+#define MC_CMD_POLL_BIST 0x26
+#undef MC_CMD_0x26_PRIVILEGE_CTG
+
+#define MC_CMD_0x26_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_POLL_BIST_IN msgrequest */
+#define MC_CMD_POLL_BIST_IN_LEN 0
+
+/* MC_CMD_POLL_BIST_OUT msgresponse */
+#define MC_CMD_POLL_BIST_OUT_LEN 8
+/* result */
+#define MC_CMD_POLL_BIST_OUT_RESULT_OFST 0
+#define MC_CMD_POLL_BIST_OUT_RESULT_LEN 4
+/* enum: Running. */
+#define MC_CMD_POLL_BIST_RUNNING 0x1
+/* enum: Passed. */
+#define MC_CMD_POLL_BIST_PASSED 0x2
+/* enum: Failed. */
+#define MC_CMD_POLL_BIST_FAILED 0x3
+/* enum: Timed-out.
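+ *
+ * A typical driver sequence (editorial sketch, not part of the generated
+ * header; assumes efx_mcdi_rpc() and MCDI_DWORD() from mcdi.h) starts the
+ * test with MC_CMD_START_BIST and then polls until RESULT leaves the
+ * RUNNING state:
+ *
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_POLL_BIST_OUT_SFT9001_LEN);
+ *
+ *	do {
+ *		rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0,
+ *				  outbuf, sizeof(outbuf), &outlen);
+ *	} while (rc == 0 &&
+ *		 MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT) ==
+ *			MC_CMD_POLL_BIST_RUNNING);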
*/ +#define MC_CMD_POLL_BIST_TIMEOUT 0x4 +#define MC_CMD_POLL_BIST_OUT_PRIVATE_OFST 4 +#define MC_CMD_POLL_BIST_OUT_PRIVATE_LEN 4 + +/* MC_CMD_POLL_BIST_OUT_SFT9001 msgresponse */ +#define MC_CMD_POLL_BIST_OUT_SFT9001_LEN 36 +/* result */ +/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */ +/* MC_CMD_POLL_BIST_OUT_RESULT_LEN 4 */ +/* Enum values, see field(s): */ +/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */ +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_OFST 4 +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_LEN 4 +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_OFST 8 +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_LEN 4 +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_OFST 12 +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_LEN 4 +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_OFST 16 +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_LEN 4 +/* Status of each channel A */ +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_OFST 20 +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_LEN 4 +/* enum: Ok. */ +#define MC_CMD_POLL_BIST_SFT9001_PAIR_OK 0x1 +/* enum: Open. */ +#define MC_CMD_POLL_BIST_SFT9001_PAIR_OPEN 0x2 +/* enum: Intra-pair short. */ +#define MC_CMD_POLL_BIST_SFT9001_INTRA_PAIR_SHORT 0x3 +/* enum: Inter-pair short. */ +#define MC_CMD_POLL_BIST_SFT9001_INTER_PAIR_SHORT 0x4 +/* enum: Busy. */ +#define MC_CMD_POLL_BIST_SFT9001_PAIR_BUSY 0x9 +/* Status of each channel B */ +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_OFST 24 +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_LEN 4 +/* Enum values, see field(s): */ +/* CABLE_STATUS_A */ +/* Status of each channel C */ +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_OFST 28 +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_LEN 4 +/* Enum values, see field(s): */ +/* CABLE_STATUS_A */ +/* Status of each channel D */ +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_OFST 32 +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_LEN 4 +/* Enum values, see field(s): */ +/* CABLE_STATUS_A */ + +/* MC_CMD_POLL_BIST_OUT_MRSFP msgresponse */ +#define MC_CMD_POLL_BIST_OUT_MRSFP_LEN 8 +/* result */ +/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */ +/* MC_CMD_POLL_BIST_OUT_RESULT_LEN 4 */ +/* Enum values, see field(s): */ +/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */ +#define MC_CMD_POLL_BIST_OUT_MRSFP_TEST_OFST 4 +#define MC_CMD_POLL_BIST_OUT_MRSFP_TEST_LEN 4 +/* enum: Complete. */ +#define MC_CMD_POLL_BIST_MRSFP_TEST_COMPLETE 0x0 +/* enum: Bus switch off I2C write. */ +#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_WRITE 0x1 +/* enum: Bus switch off I2C no access IO exp. */ +#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_IO_EXP 0x2 +/* enum: Bus switch off I2C no access module. */ +#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_MODULE 0x3 +/* enum: IO exp I2C configure. */ +#define MC_CMD_POLL_BIST_MRSFP_TEST_IO_EXP_I2C_CONFIGURE 0x4 +/* enum: Bus switch I2C no cross talk. */ +#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_I2C_NO_CROSSTALK 0x5 +/* enum: Module presence. */ +#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_PRESENCE 0x6 +/* enum: Module ID I2C access. */ +#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_I2C_ACCESS 0x7 +/* enum: Module ID sane value. 
*/ +#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_SANE_VALUE 0x8 + +/* MC_CMD_POLL_BIST_OUT_MEM msgresponse */ +#define MC_CMD_POLL_BIST_OUT_MEM_LEN 36 +/* result */ +/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */ +/* MC_CMD_POLL_BIST_OUT_RESULT_LEN 4 */ +/* Enum values, see field(s): */ +/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */ +#define MC_CMD_POLL_BIST_OUT_MEM_TEST_OFST 4 +#define MC_CMD_POLL_BIST_OUT_MEM_TEST_LEN 4 +/* enum: Test has completed. */ +#define MC_CMD_POLL_BIST_MEM_COMPLETE 0x0 +/* enum: RAM test - walk ones. */ +#define MC_CMD_POLL_BIST_MEM_MEM_WALK_ONES 0x1 +/* enum: RAM test - walk zeros. */ +#define MC_CMD_POLL_BIST_MEM_MEM_WALK_ZEROS 0x2 +/* enum: RAM test - walking inversions zeros/ones. */ +#define MC_CMD_POLL_BIST_MEM_MEM_INV_ZERO_ONE 0x3 +/* enum: RAM test - walking inversions checkerboard. */ +#define MC_CMD_POLL_BIST_MEM_MEM_INV_CHKBOARD 0x4 +/* enum: Register test - set / clear individual bits. */ +#define MC_CMD_POLL_BIST_MEM_REG 0x5 +/* enum: ECC error detected. */ +#define MC_CMD_POLL_BIST_MEM_ECC 0x6 +/* Failure address, only valid if result is POLL_BIST_FAILED */ +#define MC_CMD_POLL_BIST_OUT_MEM_ADDR_OFST 8 +#define MC_CMD_POLL_BIST_OUT_MEM_ADDR_LEN 4 +/* Bus or address space to which the failure address corresponds */ +#define MC_CMD_POLL_BIST_OUT_MEM_BUS_OFST 12 +#define MC_CMD_POLL_BIST_OUT_MEM_BUS_LEN 4 +/* enum: MC MIPS bus. */ +#define MC_CMD_POLL_BIST_MEM_BUS_MC 0x0 +/* enum: CSR IREG bus. */ +#define MC_CMD_POLL_BIST_MEM_BUS_CSR 0x1 +/* enum: RX0 DPCPU bus. */ +#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_RX 0x2 +/* enum: TX0 DPCPU bus. */ +#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_TX0 0x3 +/* enum: TX1 DPCPU bus. */ +#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_TX1 0x4 +/* enum: RX0 DICPU bus. */ +#define MC_CMD_POLL_BIST_MEM_BUS_DICPU_RX 0x5 +/* enum: TX DICPU bus. */ +#define MC_CMD_POLL_BIST_MEM_BUS_DICPU_TX 0x6 +/* enum: RX1 DPCPU bus. */ +#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_RX1 0x7 +/* enum: RX1 DICPU bus. */ +#define MC_CMD_POLL_BIST_MEM_BUS_DICPU_RX1 0x8 +/* Pattern written to RAM / register */ +#define MC_CMD_POLL_BIST_OUT_MEM_EXPECT_OFST 16 +#define MC_CMD_POLL_BIST_OUT_MEM_EXPECT_LEN 4 +/* Actual value read from RAM / register */ +#define MC_CMD_POLL_BIST_OUT_MEM_ACTUAL_OFST 20 +#define MC_CMD_POLL_BIST_OUT_MEM_ACTUAL_LEN 4 +/* ECC error mask */ +#define MC_CMD_POLL_BIST_OUT_MEM_ECC_OFST 24 +#define MC_CMD_POLL_BIST_OUT_MEM_ECC_LEN 4 +/* ECC parity error mask */ +#define MC_CMD_POLL_BIST_OUT_MEM_ECC_PARITY_OFST 28 +#define MC_CMD_POLL_BIST_OUT_MEM_ECC_PARITY_LEN 4 +/* ECC fatal error mask */ +#define MC_CMD_POLL_BIST_OUT_MEM_ECC_FATAL_OFST 32 +#define MC_CMD_POLL_BIST_OUT_MEM_ECC_FATAL_LEN 4 + + +/***********************************/ +/* MC_CMD_FLUSH_RX_QUEUES + * Flush receive queue(s). If SRIOV is enabled (via MC_CMD_SRIOV), then RXQ + * flushes should be initiated via this MCDI operation, rather than via + * directly writing FLUSH_CMD. + * + * The flush is completed (either done/fail) asynchronously (after this command + * returns). The driver must still wait for flush done/failure events as usual. 
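+ *
+ * Usage sketch (editorial illustration, not part of the generated header;
+ * assumes the MCDI_* helpers from mcdi.h, with rxq0/rxq1 standing in for
+ * real queue indices): to flush two queues a driver might send
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_FLUSH_RX_QUEUES_IN_LEN(2));
+ *
+ *	MCDI_SET_ARRAY_DWORD(inbuf, FLUSH_RX_QUEUES_IN_QID_OFST, 0, rxq0);
+ *	MCDI_SET_ARRAY_DWORD(inbuf, FLUSH_RX_QUEUES_IN_QID_OFST, 1, rxq1);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, inbuf,
+ *			  MC_CMD_FLUSH_RX_QUEUES_IN_LEN(2), NULL, 0, NULL);
+ *
+ * and then wait for the flush done/fail events as described above.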
+ */ +#define MC_CMD_FLUSH_RX_QUEUES 0x27 + +/* MC_CMD_FLUSH_RX_QUEUES_IN msgrequest */ +#define MC_CMD_FLUSH_RX_QUEUES_IN_LENMIN 4 +#define MC_CMD_FLUSH_RX_QUEUES_IN_LENMAX 252 +#define MC_CMD_FLUSH_RX_QUEUES_IN_LENMAX_MCDI2 1020 +#define MC_CMD_FLUSH_RX_QUEUES_IN_LEN(num) (0+4*(num)) +#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_NUM(len) (((len)-0)/4) +#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_OFST 0 +#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_LEN 4 +#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MINNUM 1 +#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM 63 +#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM_MCDI2 255 + +/* MC_CMD_FLUSH_RX_QUEUES_OUT msgresponse */ +#define MC_CMD_FLUSH_RX_QUEUES_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_LOOPBACK_MODES + * Returns a bitmask of loopback modes available at each speed. + */ +#define MC_CMD_GET_LOOPBACK_MODES 0x28 +#undef MC_CMD_0x28_PRIVILEGE_CTG + +#define MC_CMD_0x28_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_LOOPBACK_MODES_IN msgrequest */ +#define MC_CMD_GET_LOOPBACK_MODES_IN_LEN 0 + +/* MC_CMD_GET_LOOPBACK_MODES_IN_V2 msgrequest */ +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_LEN 8 +/* Target port to request loopback modes for. Uses MAE_LINK_ENDPOINT_SELECTOR + * which identifies a real or virtual network port by MAE port and link end. + * See the structure definition for more details + */ +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_OFST 0 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_LO_OFST 0 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_LO_LEN 4 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_LO_LBN 0 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_LO_WIDTH 32 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_HI_OFST 4 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_HI_LEN 4 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_HI_LBN 32 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_HI_WIDTH 32 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_OFST 0 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_LEN 4 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FLAT_OFST 0 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FLAT_LEN 4 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_TYPE_OFST 3 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_TYPE_LEN 1 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_OFST 0 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_LEN 3 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_LBN 0 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_WIDTH 4 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_LBN 20 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_WIDTH 4 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_LBN 16 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_WIDTH 4 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_OFST 2 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_LEN 1 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_OFST 0 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_LEN 2 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_LINK_END_OFST 4 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_LINK_END_LEN 4 +#define 
MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_OFST 0 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_LO_OFST 0 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_LO_LEN 4 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_LO_LBN 0 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_LO_WIDTH 32 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_HI_OFST 4 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_HI_LEN 4 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_HI_LBN 32 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_HI_WIDTH 32 + +/* MC_CMD_GET_LOOPBACK_MODES_OUT msgresponse */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_LEN 40 +/* Supported loopbacks. */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_OFST 0 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LO_OFST 0 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LO_LEN 4 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LO_LBN 0 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LO_WIDTH 32 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_OFST 4 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_LEN 4 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_LBN 32 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_WIDTH 32 +/* enum: None. */ +#define MC_CMD_LOOPBACK_NONE 0x0 +/* enum: Data. */ +#define MC_CMD_LOOPBACK_DATA 0x1 +/* enum: GMAC. */ +#define MC_CMD_LOOPBACK_GMAC 0x2 +/* enum: XGMII. */ +#define MC_CMD_LOOPBACK_XGMII 0x3 +/* enum: XGXS. */ +#define MC_CMD_LOOPBACK_XGXS 0x4 +/* enum: XAUI. */ +#define MC_CMD_LOOPBACK_XAUI 0x5 +/* enum: GMII. */ +#define MC_CMD_LOOPBACK_GMII 0x6 +/* enum: SGMII. */ +#define MC_CMD_LOOPBACK_SGMII 0x7 +/* enum: XGBR. */ +#define MC_CMD_LOOPBACK_XGBR 0x8 +/* enum: XFI. */ +#define MC_CMD_LOOPBACK_XFI 0x9 +/* enum: XAUI Far. */ +#define MC_CMD_LOOPBACK_XAUI_FAR 0xa +/* enum: GMII Far. */ +#define MC_CMD_LOOPBACK_GMII_FAR 0xb +/* enum: SGMII Far. */ +#define MC_CMD_LOOPBACK_SGMII_FAR 0xc +/* enum: XFI Far. */ +#define MC_CMD_LOOPBACK_XFI_FAR 0xd +/* enum: GPhy. */ +#define MC_CMD_LOOPBACK_GPHY 0xe +/* enum: PhyXS. */ +#define MC_CMD_LOOPBACK_PHYXS 0xf +/* enum: PCS. */ +#define MC_CMD_LOOPBACK_PCS 0x10 +/* enum: PMA-PMD. */ +#define MC_CMD_LOOPBACK_PMAPMD 0x11 +/* enum: Cross-Port. */ +#define MC_CMD_LOOPBACK_XPORT 0x12 +/* enum: XGMII-Wireside. */ +#define MC_CMD_LOOPBACK_XGMII_WS 0x13 +/* enum: XAUI Wireside. */ +#define MC_CMD_LOOPBACK_XAUI_WS 0x14 +/* enum: XAUI Wireside Far. */ +#define MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15 +/* enum: XAUI Wireside near. */ +#define MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16 +/* enum: GMII Wireside. */ +#define MC_CMD_LOOPBACK_GMII_WS 0x17 +/* enum: XFI Wireside. */ +#define MC_CMD_LOOPBACK_XFI_WS 0x18 +/* enum: XFI Wireside Far. */ +#define MC_CMD_LOOPBACK_XFI_WS_FAR 0x19 +/* enum: PhyXS Wireside. */ +#define MC_CMD_LOOPBACK_PHYXS_WS 0x1a +/* enum: PMA lanes MAC-Serdes. */ +#define MC_CMD_LOOPBACK_PMA_INT 0x1b +/* enum: KR Serdes Parallel (Encoder). */ +#define MC_CMD_LOOPBACK_SD_NEAR 0x1c +/* enum: KR Serdes Serial. */ +#define MC_CMD_LOOPBACK_SD_FAR 0x1d +/* enum: PMA lanes MAC-Serdes Wireside. */ +#define MC_CMD_LOOPBACK_PMA_INT_WS 0x1e +/* enum: KR Serdes Parallel Wireside (Full PCS). */ +#define MC_CMD_LOOPBACK_SD_FEP2_WS 0x1f +/* enum: KR Serdes Parallel Wireside (Sym Aligner to TX). */ +#define MC_CMD_LOOPBACK_SD_FEP1_5_WS 0x20 +/* enum: KR Serdes Parallel Wireside (Deserializer to Serializer). */ +#define MC_CMD_LOOPBACK_SD_FEP_WS 0x21 +/* enum: KR Serdes Serial Wireside. 
*/ +#define MC_CMD_LOOPBACK_SD_FES_WS 0x22 +/* enum: Near side of AOE Siena side port */ +#define MC_CMD_LOOPBACK_AOE_INT_NEAR 0x23 +/* enum: Medford Wireside datapath loopback */ +#define MC_CMD_LOOPBACK_DATA_WS 0x24 +/* enum: Force link up without setting up any physical loopback (snapper use + * only) + */ +#define MC_CMD_LOOPBACK_FORCE_EXT_LINK 0x25 +/* Supported loopbacks. */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_OFST 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LO_OFST 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LO_LEN 4 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LO_LBN 64 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LO_WIDTH 32 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_HI_OFST 12 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_HI_LEN 4 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_HI_LBN 96 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_HI_WIDTH 32 +/* Enum values, see field(s): */ +/* 100M */ +/* Supported loopbacks. */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_OFST 16 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LO_OFST 16 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LO_LEN 4 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LO_LBN 128 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LO_WIDTH 32 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_HI_OFST 20 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_HI_LEN 4 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_HI_LBN 160 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_HI_WIDTH 32 +/* Enum values, see field(s): */ +/* 100M */ +/* Supported loopbacks. */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_OFST 24 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LO_OFST 24 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LO_LEN 4 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LO_LBN 192 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LO_WIDTH 32 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_HI_OFST 28 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_HI_LEN 4 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_HI_LBN 224 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_HI_WIDTH 32 +/* Enum values, see field(s): */ +/* 100M */ +/* Supported loopbacks. */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_OFST 32 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LO_OFST 32 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LO_LEN 4 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LO_LBN 256 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LO_WIDTH 32 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_HI_OFST 36 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_HI_LEN 4 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_HI_LBN 288 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_HI_WIDTH 32 +/* Enum values, see field(s): */ +/* 100M */ + +/* MC_CMD_GET_LOOPBACK_MODES_OUT_V2 msgresponse: Supported loopback modes for + * newer NICs with 25G/50G/100G support + */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_LEN 64 +/* Supported loopbacks. 
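+ *
+ * Each 64-bit mask below has one bit per loopback mode, indexed by the
+ * MC_CMD_LOOPBACK_* enum values. For example (editorial sketch, not part
+ * of the generated header; assumes MCDI_QWORD() from mcdi.h), XGMII
+ * loopback is available at 100M if
+ *
+ *	MCDI_QWORD(outbuf, GET_LOOPBACK_MODES_OUT_V2_100M) &
+ *		(1ULL << MC_CMD_LOOPBACK_XGMII)
+ *
+ * is non-zero.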
*/ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_OFST 0 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_LO_OFST 0 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_LO_LEN 4 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_LO_LBN 0 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_LO_WIDTH 32 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_HI_OFST 4 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_HI_LEN 4 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_HI_LBN 32 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_HI_WIDTH 32 +/* enum: None. */ +/* MC_CMD_LOOPBACK_NONE 0x0 */ +/* enum: Data. */ +/* MC_CMD_LOOPBACK_DATA 0x1 */ +/* enum: GMAC. */ +/* MC_CMD_LOOPBACK_GMAC 0x2 */ +/* enum: XGMII. */ +/* MC_CMD_LOOPBACK_XGMII 0x3 */ +/* enum: XGXS. */ +/* MC_CMD_LOOPBACK_XGXS 0x4 */ +/* enum: XAUI. */ +/* MC_CMD_LOOPBACK_XAUI 0x5 */ +/* enum: GMII. */ +/* MC_CMD_LOOPBACK_GMII 0x6 */ +/* enum: SGMII. */ +/* MC_CMD_LOOPBACK_SGMII 0x7 */ +/* enum: XGBR. */ +/* MC_CMD_LOOPBACK_XGBR 0x8 */ +/* enum: XFI. */ +/* MC_CMD_LOOPBACK_XFI 0x9 */ +/* enum: XAUI Far. */ +/* MC_CMD_LOOPBACK_XAUI_FAR 0xa */ +/* enum: GMII Far. */ +/* MC_CMD_LOOPBACK_GMII_FAR 0xb */ +/* enum: SGMII Far. */ +/* MC_CMD_LOOPBACK_SGMII_FAR 0xc */ +/* enum: XFI Far. */ +/* MC_CMD_LOOPBACK_XFI_FAR 0xd */ +/* enum: GPhy. */ +/* MC_CMD_LOOPBACK_GPHY 0xe */ +/* enum: PhyXS. */ +/* MC_CMD_LOOPBACK_PHYXS 0xf */ +/* enum: PCS. */ +/* MC_CMD_LOOPBACK_PCS 0x10 */ +/* enum: PMA-PMD. */ +/* MC_CMD_LOOPBACK_PMAPMD 0x11 */ +/* enum: Cross-Port. */ +/* MC_CMD_LOOPBACK_XPORT 0x12 */ +/* enum: XGMII-Wireside. */ +/* MC_CMD_LOOPBACK_XGMII_WS 0x13 */ +/* enum: XAUI Wireside. */ +/* MC_CMD_LOOPBACK_XAUI_WS 0x14 */ +/* enum: XAUI Wireside Far. */ +/* MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15 */ +/* enum: XAUI Wireside near. */ +/* MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16 */ +/* enum: GMII Wireside. */ +/* MC_CMD_LOOPBACK_GMII_WS 0x17 */ +/* enum: XFI Wireside. */ +/* MC_CMD_LOOPBACK_XFI_WS 0x18 */ +/* enum: XFI Wireside Far. */ +/* MC_CMD_LOOPBACK_XFI_WS_FAR 0x19 */ +/* enum: PhyXS Wireside. */ +/* MC_CMD_LOOPBACK_PHYXS_WS 0x1a */ +/* enum: PMA lanes MAC-Serdes. */ +/* MC_CMD_LOOPBACK_PMA_INT 0x1b */ +/* enum: KR Serdes Parallel (Encoder). */ +/* MC_CMD_LOOPBACK_SD_NEAR 0x1c */ +/* enum: KR Serdes Serial. */ +/* MC_CMD_LOOPBACK_SD_FAR 0x1d */ +/* enum: PMA lanes MAC-Serdes Wireside. */ +/* MC_CMD_LOOPBACK_PMA_INT_WS 0x1e */ +/* enum: KR Serdes Parallel Wireside (Full PCS). */ +/* MC_CMD_LOOPBACK_SD_FEP2_WS 0x1f */ +/* enum: KR Serdes Parallel Wireside (Sym Aligner to TX). */ +/* MC_CMD_LOOPBACK_SD_FEP1_5_WS 0x20 */ +/* enum: KR Serdes Parallel Wireside (Deserializer to Serializer). */ +/* MC_CMD_LOOPBACK_SD_FEP_WS 0x21 */ +/* enum: KR Serdes Serial Wireside. */ +/* MC_CMD_LOOPBACK_SD_FES_WS 0x22 */ +/* enum: Near side of AOE Siena side port */ +/* MC_CMD_LOOPBACK_AOE_INT_NEAR 0x23 */ +/* enum: Medford Wireside datapath loopback */ +/* MC_CMD_LOOPBACK_DATA_WS 0x24 */ +/* enum: Force link up without setting up any physical loopback (snapper use + * only) + */ +/* MC_CMD_LOOPBACK_FORCE_EXT_LINK 0x25 */ +/* Supported loopbacks. 
+ */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_OFST 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_LO_OFST 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_LO_LBN 64
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_HI_OFST 12
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_HI_LBN 96
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_HI_WIDTH 32
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_OFST 16
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_LO_OFST 16
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_LO_LBN 128
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_HI_OFST 20
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_HI_LBN 160
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_HI_WIDTH 32
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_OFST 24
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_LO_OFST 24
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_LO_LBN 192
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_HI_OFST 28
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_HI_LBN 224
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_HI_WIDTH 32
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_LO_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_LO_LBN 256
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_HI_OFST 36
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_HI_LBN 288
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_HI_WIDTH 32
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported 25G loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_OFST 40
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LO_OFST 40
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LO_LBN 320
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_HI_OFST 44
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_HI_LBN 352
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_HI_WIDTH 32
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported 50G loopbacks.
*/ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_OFST 48 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_LO_OFST 48 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_LO_LEN 4 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_LO_LBN 384 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_LO_WIDTH 32 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_HI_OFST 52 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_HI_LEN 4 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_HI_LBN 416 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_HI_WIDTH 32 +/* Enum values, see field(s): */ +/* 100M */ +/* Supported 100G loopbacks. */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_OFST 56 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_LO_OFST 56 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_LO_LEN 4 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_LO_LBN 448 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_LO_WIDTH 32 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_HI_OFST 60 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_HI_LEN 4 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_HI_LBN 480 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_HI_WIDTH 32 +/* Enum values, see field(s): */ +/* 100M */ + +/* MC_CMD_GET_LOOPBACK_MODES_OUT_V3 msgresponse: Supported loopback modes for + * newer NICs with 200G support + */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_LEN 72 +/* Supported loopbacks. */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_OFST 0 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_LO_OFST 0 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_LO_LEN 4 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_LO_LBN 0 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_LO_WIDTH 32 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_HI_OFST 4 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_HI_LEN 4 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_HI_LBN 32 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_HI_WIDTH 32 +/* enum: None. */ +/* MC_CMD_LOOPBACK_NONE 0x0 */ +/* enum: Data. */ +/* MC_CMD_LOOPBACK_DATA 0x1 */ +/* enum: GMAC. */ +/* MC_CMD_LOOPBACK_GMAC 0x2 */ +/* enum: XGMII. */ +/* MC_CMD_LOOPBACK_XGMII 0x3 */ +/* enum: XGXS. */ +/* MC_CMD_LOOPBACK_XGXS 0x4 */ +/* enum: XAUI. */ +/* MC_CMD_LOOPBACK_XAUI 0x5 */ +/* enum: GMII. */ +/* MC_CMD_LOOPBACK_GMII 0x6 */ +/* enum: SGMII. */ +/* MC_CMD_LOOPBACK_SGMII 0x7 */ +/* enum: XGBR. */ +/* MC_CMD_LOOPBACK_XGBR 0x8 */ +/* enum: XFI. */ +/* MC_CMD_LOOPBACK_XFI 0x9 */ +/* enum: XAUI Far. */ +/* MC_CMD_LOOPBACK_XAUI_FAR 0xa */ +/* enum: GMII Far. */ +/* MC_CMD_LOOPBACK_GMII_FAR 0xb */ +/* enum: SGMII Far. */ +/* MC_CMD_LOOPBACK_SGMII_FAR 0xc */ +/* enum: XFI Far. */ +/* MC_CMD_LOOPBACK_XFI_FAR 0xd */ +/* enum: GPhy. */ +/* MC_CMD_LOOPBACK_GPHY 0xe */ +/* enum: PhyXS. */ +/* MC_CMD_LOOPBACK_PHYXS 0xf */ +/* enum: PCS. */ +/* MC_CMD_LOOPBACK_PCS 0x10 */ +/* enum: PMA-PMD. */ +/* MC_CMD_LOOPBACK_PMAPMD 0x11 */ +/* enum: Cross-Port. */ +/* MC_CMD_LOOPBACK_XPORT 0x12 */ +/* enum: XGMII-Wireside. */ +/* MC_CMD_LOOPBACK_XGMII_WS 0x13 */ +/* enum: XAUI Wireside. */ +/* MC_CMD_LOOPBACK_XAUI_WS 0x14 */ +/* enum: XAUI Wireside Far. */ +/* MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15 */ +/* enum: XAUI Wireside near. */ +/* MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16 */ +/* enum: GMII Wireside. */ +/* MC_CMD_LOOPBACK_GMII_WS 0x17 */ +/* enum: XFI Wireside. */ +/* MC_CMD_LOOPBACK_XFI_WS 0x18 */ +/* enum: XFI Wireside Far. */ +/* MC_CMD_LOOPBACK_XFI_WS_FAR 0x19 */ +/* enum: PhyXS Wireside. 
*/ +/* MC_CMD_LOOPBACK_PHYXS_WS 0x1a */ +/* enum: PMA lanes MAC-Serdes. */ +/* MC_CMD_LOOPBACK_PMA_INT 0x1b */ +/* enum: KR Serdes Parallel (Encoder). */ +/* MC_CMD_LOOPBACK_SD_NEAR 0x1c */ +/* enum: KR Serdes Serial. */ +/* MC_CMD_LOOPBACK_SD_FAR 0x1d */ +/* enum: PMA lanes MAC-Serdes Wireside. */ +/* MC_CMD_LOOPBACK_PMA_INT_WS 0x1e */ +/* enum: KR Serdes Parallel Wireside (Full PCS). */ +/* MC_CMD_LOOPBACK_SD_FEP2_WS 0x1f */ +/* enum: KR Serdes Parallel Wireside (Sym Aligner to TX). */ +/* MC_CMD_LOOPBACK_SD_FEP1_5_WS 0x20 */ +/* enum: KR Serdes Parallel Wireside (Deserializer to Serializer). */ +/* MC_CMD_LOOPBACK_SD_FEP_WS 0x21 */ +/* enum: KR Serdes Serial Wireside. */ +/* MC_CMD_LOOPBACK_SD_FES_WS 0x22 */ +/* enum: Near side of AOE Siena side port */ +/* MC_CMD_LOOPBACK_AOE_INT_NEAR 0x23 */ +/* enum: Medford Wireside datapath loopback */ +/* MC_CMD_LOOPBACK_DATA_WS 0x24 */ +/* enum: Force link up without setting up any physical loopback (snapper use + * only) + */ +/* MC_CMD_LOOPBACK_FORCE_EXT_LINK 0x25 */ +/* Supported loopbacks. */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_OFST 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_LO_OFST 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_LO_LEN 4 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_LO_LBN 64 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_LO_WIDTH 32 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_HI_OFST 12 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_HI_LEN 4 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_HI_LBN 96 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_HI_WIDTH 32 +/* Enum values, see field(s): */ +/* 100M */ +/* Supported loopbacks. */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_OFST 16 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_LO_OFST 16 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_LO_LEN 4 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_LO_LBN 128 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_LO_WIDTH 32 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_HI_OFST 20 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_HI_LEN 4 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_HI_LBN 160 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_HI_WIDTH 32 +/* Enum values, see field(s): */ +/* 100M */ +/* Supported loopbacks. */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_OFST 24 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_LO_OFST 24 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_LO_LEN 4 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_LO_LBN 192 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_LO_WIDTH 32 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_HI_OFST 28 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_HI_LEN 4 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_HI_LBN 224 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_HI_WIDTH 32 +/* Enum values, see field(s): */ +/* 100M */ +/* Supported loopbacks. 
+ */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_LO_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_LO_LBN 256
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_HI_OFST 36
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_HI_LBN 288
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_HI_WIDTH 32
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported 25G loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_OFST 40
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_LO_OFST 40
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_LO_LBN 320
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_HI_OFST 44
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_HI_LBN 352
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_HI_WIDTH 32
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported 50G loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_OFST 48
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_LO_OFST 48
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_LO_LBN 384
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_HI_OFST 52
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_HI_LBN 416
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_HI_WIDTH 32
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported 100G loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_OFST 56
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_LO_OFST 56
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_LO_LBN 448
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_HI_OFST 60
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_HI_LBN 480
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_HI_WIDTH 32
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported 200G loopbacks.
*/ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_OFST 64 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_LO_OFST 64 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_LO_LEN 4 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_LO_LBN 512 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_LO_WIDTH 32 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_HI_OFST 68 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_HI_LEN 4 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_HI_LBN 544 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_HI_WIDTH 32 +/* Enum values, see field(s): */ +/* 100M */ + +/* AN_TYPE structuredef: Auto-negotiation types defined in IEEE802.3 */ +#define AN_TYPE_LEN 4 +#define AN_TYPE_TYPE_OFST 0 +#define AN_TYPE_TYPE_LEN 4 +/* enum: None, AN disabled or not supported */ +#define MC_CMD_AN_NONE 0x0 +/* enum: Clause 28 - BASE-T */ +#define MC_CMD_AN_CLAUSE28 0x1 +/* enum: Clause 37 - BASE-X */ +#define MC_CMD_AN_CLAUSE37 0x2 +/* enum: Clause 73 - BASE-R startup protocol for backplane and copper cable + * assemblies. Includes Clause 72/Clause 92 link-training. + */ +#define MC_CMD_AN_CLAUSE73 0x3 +#define AN_TYPE_TYPE_LBN 0 +#define AN_TYPE_TYPE_WIDTH 32 + +/* FEC_TYPE structuredef: Forward error correction types defined in IEEE802.3 + */ +#define FEC_TYPE_LEN 4 +#define FEC_TYPE_TYPE_OFST 0 +#define FEC_TYPE_TYPE_LEN 4 +/* enum: No FEC */ +#define MC_CMD_FEC_NONE 0x0 +/* enum: Clause 74 BASE-R FEC (a.k.a Firecode) */ +#define MC_CMD_FEC_BASER 0x1 +/* enum: Clause 91/Clause 108 Reed-Solomon FEC */ +#define MC_CMD_FEC_RS 0x2 +#define FEC_TYPE_TYPE_LBN 0 +#define FEC_TYPE_TYPE_WIDTH 32 + + +/***********************************/ +/* MC_CMD_GET_LINK + * Read the unified MAC/PHY link state. Locks required: None Return code: 0, + * ETIME. + */ +#define MC_CMD_GET_LINK 0x29 +#undef MC_CMD_0x29_PRIVILEGE_CTG + +#define MC_CMD_0x29_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_LINK_IN msgrequest */ +#define MC_CMD_GET_LINK_IN_LEN 0 + +/* MC_CMD_GET_LINK_IN_V2 msgrequest */ +#define MC_CMD_GET_LINK_IN_V2_LEN 8 +/* Target port to request link state for. Uses MAE_LINK_ENDPOINT_SELECTOR which + * identifies a real or virtual network port by MAE port and link end. See the + * structure definition for more details. 
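+ *
+ * Usage sketch for the basic (V1) form of this command (editorial
+ * illustration, not part of the generated header; assumes efx_mcdi_rpc()
+ * and MCDI_DWORD() from mcdi.h, with "speed" standing in for a driver
+ * variable):
+ *
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
+ *
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
+ *			  outbuf, sizeof(outbuf), NULL);
+ *	if (rc == 0)
+ *		speed = MCDI_DWORD(outbuf, GET_LINK_OUT_LINK_SPEED);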
+ */ +#define MC_CMD_GET_LINK_IN_V2_TARGET_OFST 0 +#define MC_CMD_GET_LINK_IN_V2_TARGET_LEN 8 +#define MC_CMD_GET_LINK_IN_V2_TARGET_LO_OFST 0 +#define MC_CMD_GET_LINK_IN_V2_TARGET_LO_LEN 4 +#define MC_CMD_GET_LINK_IN_V2_TARGET_LO_LBN 0 +#define MC_CMD_GET_LINK_IN_V2_TARGET_LO_WIDTH 32 +#define MC_CMD_GET_LINK_IN_V2_TARGET_HI_OFST 4 +#define MC_CMD_GET_LINK_IN_V2_TARGET_HI_LEN 4 +#define MC_CMD_GET_LINK_IN_V2_TARGET_HI_LBN 32 +#define MC_CMD_GET_LINK_IN_V2_TARGET_HI_WIDTH 32 +#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_OFST 0 +#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_LEN 4 +#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FLAT_OFST 0 +#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FLAT_LEN 4 +#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_TYPE_OFST 3 +#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_TYPE_LEN 1 +#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_OFST 0 +#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_LEN 3 +#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_LBN 0 +#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_WIDTH 4 +#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_LBN 20 +#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_WIDTH 4 +#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_LBN 16 +#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_WIDTH 4 +#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_OFST 2 +#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_LEN 1 +#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_OFST 0 +#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_LEN 2 +#define MC_CMD_GET_LINK_IN_V2_TARGET_LINK_END_OFST 4 +#define MC_CMD_GET_LINK_IN_V2_TARGET_LINK_END_LEN 4 +#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_OFST 0 +#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_LEN 8 +#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_LO_OFST 0 +#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_LO_LEN 4 +#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_LO_LBN 0 +#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_LO_WIDTH 32 +#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_HI_OFST 4 +#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_HI_LEN 4 +#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_HI_LBN 32 +#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_HI_WIDTH 32 + +/* MC_CMD_GET_LINK_OUT msgresponse */ +#define MC_CMD_GET_LINK_OUT_LEN 28 +/* Near-side advertised capabilities. Refer to + * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions. + */ +#define MC_CMD_GET_LINK_OUT_CAP_OFST 0 +#define MC_CMD_GET_LINK_OUT_CAP_LEN 4 +/* Link-partner advertised capabilities. Refer to + * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions. + */ +#define MC_CMD_GET_LINK_OUT_LP_CAP_OFST 4 +#define MC_CMD_GET_LINK_OUT_LP_CAP_LEN 4 +/* Autonegotiated speed in mbit/s. The link may still be down even if this + * reads non-zero. + */ +#define MC_CMD_GET_LINK_OUT_LINK_SPEED_OFST 8 +#define MC_CMD_GET_LINK_OUT_LINK_SPEED_LEN 4 +/* Current loopback setting. 
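+ * (one of the MC_CMD_LOOPBACK_* values above). The FLAGS word that follows
+ * is decoded bit by bit; for example (editorial sketch, not part of the
+ * generated header) the link-up bit can be tested with
+ *
+ *	flags = MCDI_DWORD(outbuf, GET_LINK_OUT_FLAGS);
+ *	link_up = !!(flags & (1 << MC_CMD_GET_LINK_OUT_LINK_UP_LBN));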
*/ +#define MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_OFST 12 +#define MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */ +#define MC_CMD_GET_LINK_OUT_FLAGS_OFST 16 +#define MC_CMD_GET_LINK_OUT_FLAGS_LEN 4 +#define MC_CMD_GET_LINK_OUT_LINK_UP_OFST 16 +#define MC_CMD_GET_LINK_OUT_LINK_UP_LBN 0 +#define MC_CMD_GET_LINK_OUT_LINK_UP_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_FULL_DUPLEX_OFST 16 +#define MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN 1 +#define MC_CMD_GET_LINK_OUT_FULL_DUPLEX_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_BPX_LINK_OFST 16 +#define MC_CMD_GET_LINK_OUT_BPX_LINK_LBN 2 +#define MC_CMD_GET_LINK_OUT_BPX_LINK_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_PHY_LINK_OFST 16 +#define MC_CMD_GET_LINK_OUT_PHY_LINK_LBN 3 +#define MC_CMD_GET_LINK_OUT_PHY_LINK_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_LINK_FAULT_RX_OFST 16 +#define MC_CMD_GET_LINK_OUT_LINK_FAULT_RX_LBN 6 +#define MC_CMD_GET_LINK_OUT_LINK_FAULT_RX_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_OFST 16 +#define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_LBN 7 +#define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_MODULE_UP_VALID_OFST 16 +#define MC_CMD_GET_LINK_OUT_MODULE_UP_VALID_LBN 8 +#define MC_CMD_GET_LINK_OUT_MODULE_UP_VALID_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_MODULE_UP_OFST 16 +#define MC_CMD_GET_LINK_OUT_MODULE_UP_LBN 9 +#define MC_CMD_GET_LINK_OUT_MODULE_UP_WIDTH 1 +/* This returns the negotiated flow control value. */ +#define MC_CMD_GET_LINK_OUT_FCNTL_OFST 20 +#define MC_CMD_GET_LINK_OUT_FCNTL_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */ +#define MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24 +#define MC_CMD_GET_LINK_OUT_MAC_FAULT_LEN 4 +#define MC_CMD_MAC_FAULT_XGMII_LOCAL_OFST 24 +#define MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0 +#define MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1 +#define MC_CMD_MAC_FAULT_XGMII_REMOTE_OFST 24 +#define MC_CMD_MAC_FAULT_XGMII_REMOTE_LBN 1 +#define MC_CMD_MAC_FAULT_XGMII_REMOTE_WIDTH 1 +#define MC_CMD_MAC_FAULT_SGMII_REMOTE_OFST 24 +#define MC_CMD_MAC_FAULT_SGMII_REMOTE_LBN 2 +#define MC_CMD_MAC_FAULT_SGMII_REMOTE_WIDTH 1 +#define MC_CMD_MAC_FAULT_PENDING_RECONFIG_OFST 24 +#define MC_CMD_MAC_FAULT_PENDING_RECONFIG_LBN 3 +#define MC_CMD_MAC_FAULT_PENDING_RECONFIG_WIDTH 1 + +/* MC_CMD_GET_LINK_OUT_V2 msgresponse: Extended link state information */ +#define MC_CMD_GET_LINK_OUT_V2_LEN 44 +/* Near-side advertised capabilities. Refer to + * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions. + */ +#define MC_CMD_GET_LINK_OUT_V2_CAP_OFST 0 +#define MC_CMD_GET_LINK_OUT_V2_CAP_LEN 4 +/* Link-partner advertised capabilities. Refer to + * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions. + */ +#define MC_CMD_GET_LINK_OUT_V2_LP_CAP_OFST 4 +#define MC_CMD_GET_LINK_OUT_V2_LP_CAP_LEN 4 +/* Autonegotiated speed in mbit/s. The link may still be down even if this + * reads non-zero. + */ +#define MC_CMD_GET_LINK_OUT_V2_LINK_SPEED_OFST 8 +#define MC_CMD_GET_LINK_OUT_V2_LINK_SPEED_LEN 4 +/* Current loopback setting. 
*/ +#define MC_CMD_GET_LINK_OUT_V2_LOOPBACK_MODE_OFST 12 +#define MC_CMD_GET_LINK_OUT_V2_LOOPBACK_MODE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */ +#define MC_CMD_GET_LINK_OUT_V2_FLAGS_OFST 16 +#define MC_CMD_GET_LINK_OUT_V2_FLAGS_LEN 4 +#define MC_CMD_GET_LINK_OUT_V2_LINK_UP_OFST 16 +#define MC_CMD_GET_LINK_OUT_V2_LINK_UP_LBN 0 +#define MC_CMD_GET_LINK_OUT_V2_LINK_UP_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_FULL_DUPLEX_OFST 16 +#define MC_CMD_GET_LINK_OUT_V2_FULL_DUPLEX_LBN 1 +#define MC_CMD_GET_LINK_OUT_V2_FULL_DUPLEX_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_BPX_LINK_OFST 16 +#define MC_CMD_GET_LINK_OUT_V2_BPX_LINK_LBN 2 +#define MC_CMD_GET_LINK_OUT_V2_BPX_LINK_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_PHY_LINK_OFST 16 +#define MC_CMD_GET_LINK_OUT_V2_PHY_LINK_LBN 3 +#define MC_CMD_GET_LINK_OUT_V2_PHY_LINK_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_RX_OFST 16 +#define MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_RX_LBN 6 +#define MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_RX_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_TX_OFST 16 +#define MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_TX_LBN 7 +#define MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_TX_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_MODULE_UP_VALID_OFST 16 +#define MC_CMD_GET_LINK_OUT_V2_MODULE_UP_VALID_LBN 8 +#define MC_CMD_GET_LINK_OUT_V2_MODULE_UP_VALID_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_MODULE_UP_OFST 16 +#define MC_CMD_GET_LINK_OUT_V2_MODULE_UP_LBN 9 +#define MC_CMD_GET_LINK_OUT_V2_MODULE_UP_WIDTH 1 +/* This returns the negotiated flow control value. */ +#define MC_CMD_GET_LINK_OUT_V2_FCNTL_OFST 20 +#define MC_CMD_GET_LINK_OUT_V2_FCNTL_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */ +#define MC_CMD_GET_LINK_OUT_V2_MAC_FAULT_OFST 24 +#define MC_CMD_GET_LINK_OUT_V2_MAC_FAULT_LEN 4 +/* MC_CMD_MAC_FAULT_XGMII_LOCAL_OFST 24 */ +/* MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0 */ +/* MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1 */ +/* MC_CMD_MAC_FAULT_XGMII_REMOTE_OFST 24 */ +/* MC_CMD_MAC_FAULT_XGMII_REMOTE_LBN 1 */ +/* MC_CMD_MAC_FAULT_XGMII_REMOTE_WIDTH 1 */ +/* MC_CMD_MAC_FAULT_SGMII_REMOTE_OFST 24 */ +/* MC_CMD_MAC_FAULT_SGMII_REMOTE_LBN 2 */ +/* MC_CMD_MAC_FAULT_SGMII_REMOTE_WIDTH 1 */ +/* MC_CMD_MAC_FAULT_PENDING_RECONFIG_OFST 24 */ +/* MC_CMD_MAC_FAULT_PENDING_RECONFIG_LBN 3 */ +/* MC_CMD_MAC_FAULT_PENDING_RECONFIG_WIDTH 1 */ +/* True local device capabilities (taking into account currently used PMD/MDI, + * e.g. plugged-in module). In general, subset of + * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP, but may include extra _FEC_REQUEST + * bits, if the PMD requires FEC. 0 if unknown (e.g. module unplugged). Equal + * to SUPPORTED_CAP for non-pluggable PMDs. Refer to + * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions. 
+ */ +#define MC_CMD_GET_LINK_OUT_V2_LD_CAP_OFST 28 +#define MC_CMD_GET_LINK_OUT_V2_LD_CAP_LEN 4 +/* Auto-negotiation type used on the link */ +#define MC_CMD_GET_LINK_OUT_V2_AN_TYPE_OFST 32 +#define MC_CMD_GET_LINK_OUT_V2_AN_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* AN_TYPE/TYPE */ +/* Forward error correction used on the link */ +#define MC_CMD_GET_LINK_OUT_V2_FEC_TYPE_OFST 36 +#define MC_CMD_GET_LINK_OUT_V2_FEC_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* FEC_TYPE/TYPE */ +#define MC_CMD_GET_LINK_OUT_V2_EXT_FLAGS_OFST 40 +#define MC_CMD_GET_LINK_OUT_V2_EXT_FLAGS_LEN 4 +#define MC_CMD_GET_LINK_OUT_V2_PMD_MDI_CONNECTED_OFST 40 +#define MC_CMD_GET_LINK_OUT_V2_PMD_MDI_CONNECTED_LBN 0 +#define MC_CMD_GET_LINK_OUT_V2_PMD_MDI_CONNECTED_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_PMD_READY_OFST 40 +#define MC_CMD_GET_LINK_OUT_V2_PMD_READY_LBN 1 +#define MC_CMD_GET_LINK_OUT_V2_PMD_READY_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_PMD_LINK_UP_OFST 40 +#define MC_CMD_GET_LINK_OUT_V2_PMD_LINK_UP_LBN 2 +#define MC_CMD_GET_LINK_OUT_V2_PMD_LINK_UP_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_PMA_LINK_UP_OFST 40 +#define MC_CMD_GET_LINK_OUT_V2_PMA_LINK_UP_LBN 3 +#define MC_CMD_GET_LINK_OUT_V2_PMA_LINK_UP_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_PCS_LOCK_OFST 40 +#define MC_CMD_GET_LINK_OUT_V2_PCS_LOCK_LBN 4 +#define MC_CMD_GET_LINK_OUT_V2_PCS_LOCK_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_ALIGN_LOCK_OFST 40 +#define MC_CMD_GET_LINK_OUT_V2_ALIGN_LOCK_LBN 5 +#define MC_CMD_GET_LINK_OUT_V2_ALIGN_LOCK_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_HI_BER_OFST 40 +#define MC_CMD_GET_LINK_OUT_V2_HI_BER_LBN 6 +#define MC_CMD_GET_LINK_OUT_V2_HI_BER_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_FEC_LOCK_OFST 40 +#define MC_CMD_GET_LINK_OUT_V2_FEC_LOCK_LBN 7 +#define MC_CMD_GET_LINK_OUT_V2_FEC_LOCK_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_AN_DONE_OFST 40 +#define MC_CMD_GET_LINK_OUT_V2_AN_DONE_LBN 8 +#define MC_CMD_GET_LINK_OUT_V2_AN_DONE_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_PORT_SHUTDOWN_OFST 40 +#define MC_CMD_GET_LINK_OUT_V2_PORT_SHUTDOWN_LBN 9 +#define MC_CMD_GET_LINK_OUT_V2_PORT_SHUTDOWN_WIDTH 1 + + +/***********************************/ +/* MC_CMD_SET_LINK + * Write the unified MAC/PHY link configuration. Locks required: None. Return + * code: 0, EINVAL, ETIME, EAGAIN + */ +#define MC_CMD_SET_LINK 0x2a +#undef MC_CMD_0x2a_PRIVILEGE_CTG + +#define MC_CMD_0x2a_PRIVILEGE_CTG SRIOV_CTG_LINK + +/* MC_CMD_SET_LINK_IN msgrequest */ +#define MC_CMD_SET_LINK_IN_LEN 16 +/* Near-side advertised capabilities. Refer to + * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions. + */ +#define MC_CMD_SET_LINK_IN_CAP_OFST 0 +#define MC_CMD_SET_LINK_IN_CAP_LEN 4 +/* Flags */ +#define MC_CMD_SET_LINK_IN_FLAGS_OFST 4 +#define MC_CMD_SET_LINK_IN_FLAGS_LEN 4 +#define MC_CMD_SET_LINK_IN_LOWPOWER_OFST 4 +#define MC_CMD_SET_LINK_IN_LOWPOWER_LBN 0 +#define MC_CMD_SET_LINK_IN_LOWPOWER_WIDTH 1 +#define MC_CMD_SET_LINK_IN_POWEROFF_OFST 4 +#define MC_CMD_SET_LINK_IN_POWEROFF_LBN 1 +#define MC_CMD_SET_LINK_IN_POWEROFF_WIDTH 1 +#define MC_CMD_SET_LINK_IN_TXDIS_OFST 4 +#define MC_CMD_SET_LINK_IN_TXDIS_LBN 2 +#define MC_CMD_SET_LINK_IN_TXDIS_WIDTH 1 +#define MC_CMD_SET_LINK_IN_LINKDOWN_OFST 4 +#define MC_CMD_SET_LINK_IN_LINKDOWN_LBN 3 +#define MC_CMD_SET_LINK_IN_LINKDOWN_WIDTH 1 +/* Loopback mode. 
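+ *
+ * For example (editorial sketch, not part of the generated header; assumes
+ * MCDI_SET_DWORD() from mcdi.h), a driver disabling loopback would set
+ *
+ *	MCDI_SET_DWORD(inbuf, SET_LINK_IN_LOOPBACK_MODE,
+ *		       MC_CMD_LOOPBACK_NONE);
+ *	MCDI_SET_DWORD(inbuf, SET_LINK_IN_LOOPBACK_SPEED, 0);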
*/ +#define MC_CMD_SET_LINK_IN_LOOPBACK_MODE_OFST 8 +#define MC_CMD_SET_LINK_IN_LOOPBACK_MODE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */ +/* A loopback speed of "0" is supported, and means (choose any available + * speed). + */ +#define MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_OFST 12 +#define MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_LEN 4 + +/* MC_CMD_SET_LINK_IN_V2 msgrequest: Updated SET_LINK to include sequence + * number to ensure this SET_LINK command corresponds to the latest + * MODULECHANGE event. + */ +#define MC_CMD_SET_LINK_IN_V2_LEN 17 +/* Near-side advertised capabilities. Refer to + * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions. + */ +#define MC_CMD_SET_LINK_IN_V2_CAP_OFST 0 +#define MC_CMD_SET_LINK_IN_V2_CAP_LEN 4 +/* Flags */ +#define MC_CMD_SET_LINK_IN_V2_FLAGS_OFST 4 +#define MC_CMD_SET_LINK_IN_V2_FLAGS_LEN 4 +#define MC_CMD_SET_LINK_IN_V2_LOWPOWER_OFST 4 +#define MC_CMD_SET_LINK_IN_V2_LOWPOWER_LBN 0 +#define MC_CMD_SET_LINK_IN_V2_LOWPOWER_WIDTH 1 +#define MC_CMD_SET_LINK_IN_V2_POWEROFF_OFST 4 +#define MC_CMD_SET_LINK_IN_V2_POWEROFF_LBN 1 +#define MC_CMD_SET_LINK_IN_V2_POWEROFF_WIDTH 1 +#define MC_CMD_SET_LINK_IN_V2_TXDIS_OFST 4 +#define MC_CMD_SET_LINK_IN_V2_TXDIS_LBN 2 +#define MC_CMD_SET_LINK_IN_V2_TXDIS_WIDTH 1 +#define MC_CMD_SET_LINK_IN_V2_LINKDOWN_OFST 4 +#define MC_CMD_SET_LINK_IN_V2_LINKDOWN_LBN 3 +#define MC_CMD_SET_LINK_IN_V2_LINKDOWN_WIDTH 1 +/* Loopback mode. */ +#define MC_CMD_SET_LINK_IN_V2_LOOPBACK_MODE_OFST 8 +#define MC_CMD_SET_LINK_IN_V2_LOOPBACK_MODE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */ +/* A loopback speed of "0" is supported, and means (choose any available + * speed). + */ +#define MC_CMD_SET_LINK_IN_V2_LOOPBACK_SPEED_OFST 12 +#define MC_CMD_SET_LINK_IN_V2_LOOPBACK_SPEED_LEN 4 +#define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_OFST 16 +#define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_LEN 1 +#define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_NUMBER_OFST 16 +#define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_NUMBER_LBN 0 +#define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_NUMBER_WIDTH 7 +#define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_IGNORE_OFST 16 +#define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_IGNORE_LBN 7 +#define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_IGNORE_WIDTH 1 + +/* MC_CMD_SET_LINK_IN_V3 msgrequest */ +#define MC_CMD_SET_LINK_IN_V3_LEN 28 +/* Near-side advertised capabilities. Refer to + * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions. + */ +#define MC_CMD_SET_LINK_IN_V3_CAP_OFST 0 +#define MC_CMD_SET_LINK_IN_V3_CAP_LEN 4 +/* Flags */ +#define MC_CMD_SET_LINK_IN_V3_FLAGS_OFST 4 +#define MC_CMD_SET_LINK_IN_V3_FLAGS_LEN 4 +#define MC_CMD_SET_LINK_IN_V3_LOWPOWER_OFST 4 +#define MC_CMD_SET_LINK_IN_V3_LOWPOWER_LBN 0 +#define MC_CMD_SET_LINK_IN_V3_LOWPOWER_WIDTH 1 +#define MC_CMD_SET_LINK_IN_V3_POWEROFF_OFST 4 +#define MC_CMD_SET_LINK_IN_V3_POWEROFF_LBN 1 +#define MC_CMD_SET_LINK_IN_V3_POWEROFF_WIDTH 1 +#define MC_CMD_SET_LINK_IN_V3_TXDIS_OFST 4 +#define MC_CMD_SET_LINK_IN_V3_TXDIS_LBN 2 +#define MC_CMD_SET_LINK_IN_V3_TXDIS_WIDTH 1 +#define MC_CMD_SET_LINK_IN_V3_LINKDOWN_OFST 4 +#define MC_CMD_SET_LINK_IN_V3_LINKDOWN_LBN 3 +#define MC_CMD_SET_LINK_IN_V3_LINKDOWN_WIDTH 1 +/* Loopback mode. 
*/ +#define MC_CMD_SET_LINK_IN_V3_LOOPBACK_MODE_OFST 8 +#define MC_CMD_SET_LINK_IN_V3_LOOPBACK_MODE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */ +/* A loopback speed of "0" is supported, and means (choose any available + * speed). + */ +#define MC_CMD_SET_LINK_IN_V3_LOOPBACK_SPEED_OFST 12 +#define MC_CMD_SET_LINK_IN_V3_LOOPBACK_SPEED_LEN 4 +#define MC_CMD_SET_LINK_IN_V3_MODULE_SEQ_OFST 16 +#define MC_CMD_SET_LINK_IN_V3_MODULE_SEQ_LEN 1 +#define MC_CMD_SET_LINK_IN_V3_MODULE_SEQ_NUMBER_OFST 16 +#define MC_CMD_SET_LINK_IN_V3_MODULE_SEQ_NUMBER_LBN 0 +#define MC_CMD_SET_LINK_IN_V3_MODULE_SEQ_NUMBER_WIDTH 7 +#define MC_CMD_SET_LINK_IN_V3_MODULE_SEQ_IGNORE_OFST 16 +#define MC_CMD_SET_LINK_IN_V3_MODULE_SEQ_IGNORE_LBN 7 +#define MC_CMD_SET_LINK_IN_V3_MODULE_SEQ_IGNORE_WIDTH 1 +/* Padding */ +#define MC_CMD_SET_LINK_IN_V3_RESERVED_OFST 17 +#define MC_CMD_SET_LINK_IN_V3_RESERVED_LEN 3 +/* Target port to set link state for. Uses MAE_LINK_ENDPOINT_SELECTOR which + * identifies a real or virtual network port by MAE port and link end. See the + * structure definition for more details + */ +#define MC_CMD_SET_LINK_IN_V3_TARGET_OFST 20 +#define MC_CMD_SET_LINK_IN_V3_TARGET_LEN 8 +#define MC_CMD_SET_LINK_IN_V3_TARGET_LO_OFST 20 +#define MC_CMD_SET_LINK_IN_V3_TARGET_LO_LEN 4 +#define MC_CMD_SET_LINK_IN_V3_TARGET_LO_LBN 160 +#define MC_CMD_SET_LINK_IN_V3_TARGET_LO_WIDTH 32 +#define MC_CMD_SET_LINK_IN_V3_TARGET_HI_OFST 24 +#define MC_CMD_SET_LINK_IN_V3_TARGET_HI_LEN 4 +#define MC_CMD_SET_LINK_IN_V3_TARGET_HI_LBN 192 +#define MC_CMD_SET_LINK_IN_V3_TARGET_HI_WIDTH 32 +#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_OFST 20 +#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_LEN 4 +#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FLAT_OFST 20 +#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FLAT_LEN 4 +#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_TYPE_OFST 23 +#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_TYPE_LEN 1 +#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_MPORT_ID_OFST 20 +#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_MPORT_ID_LEN 3 +#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_PPORT_ID_LBN 160 +#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_PPORT_ID_WIDTH 4 +#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_LBN 180 +#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_WIDTH 4 +#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_LBN 176 +#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_WIDTH 4 +#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FUNC_PF_ID_OFST 22 +#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FUNC_PF_ID_LEN 1 +#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FUNC_VF_ID_OFST 20 +#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FUNC_VF_ID_LEN 2 +#define MC_CMD_SET_LINK_IN_V3_TARGET_LINK_END_OFST 24 +#define MC_CMD_SET_LINK_IN_V3_TARGET_LINK_END_LEN 4 +#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_OFST 20 +#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_LEN 8 +#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_LO_OFST 20 +#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_LO_LEN 4 +#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_LO_LBN 160 +#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_LO_WIDTH 32 +#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_HI_OFST 24 +#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_HI_LEN 4 +#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_HI_LBN 192 +#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_HI_WIDTH 32 + +/* MC_CMD_SET_LINK_OUT msgresponse */ 
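+/* Editor's illustrative sketch, not part of the firmware interface: a
+ * driver would typically issue MC_CMD_SET_LINK through the MCDI helper
+ * macros assumed here from mcdi.h (MCDI_DECLARE_BUF, MCDI_SET_DWORD,
+ * MCDI_POPULATE_DWORD_1, efx_mcdi_rpc), e.g. to force the link down with
+ * loopback disabled (mode 0):
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_LINK_IN_LEN);
+ *
+ *	MCDI_SET_DWORD(inbuf, SET_LINK_IN_CAP, advertised_cap);
+ *	MCDI_POPULATE_DWORD_1(inbuf, SET_LINK_IN_FLAGS,
+ *			      SET_LINK_IN_LINKDOWN, 1);
+ *	MCDI_SET_DWORD(inbuf, SET_LINK_IN_LOOPBACK_MODE, 0);
+ *	MCDI_SET_DWORD(inbuf, SET_LINK_IN_LOOPBACK_SPEED, 0);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_SET_LINK, inbuf, sizeof(inbuf),
+ *			  NULL, 0, NULL);
+ */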
+#define MC_CMD_SET_LINK_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_SET_ID_LED + * Set identification LED state. Locks required: None. Return code: 0, EINVAL + */ +#define MC_CMD_SET_ID_LED 0x2b +#undef MC_CMD_0x2b_PRIVILEGE_CTG + +#define MC_CMD_0x2b_PRIVILEGE_CTG SRIOV_CTG_LINK + +/* MC_CMD_SET_ID_LED_IN msgrequest */ +#define MC_CMD_SET_ID_LED_IN_LEN 4 +/* Set LED state. */ +#define MC_CMD_SET_ID_LED_IN_STATE_OFST 0 +#define MC_CMD_SET_ID_LED_IN_STATE_LEN 4 +#define MC_CMD_LED_OFF 0x0 /* enum */ +#define MC_CMD_LED_ON 0x1 /* enum */ +#define MC_CMD_LED_DEFAULT 0x2 /* enum */ + +/* MC_CMD_SET_ID_LED_OUT msgresponse */ +#define MC_CMD_SET_ID_LED_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_SET_MAC + * Set MAC configuration. Locks required: None. Return code: 0, EINVAL + */ +#define MC_CMD_SET_MAC 0x2c +#undef MC_CMD_0x2c_PRIVILEGE_CTG + +#define MC_CMD_0x2c_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_SET_MAC_IN msgrequest */ +#define MC_CMD_SET_MAC_IN_LEN 28 +/* The MTU is the MTU programmed directly into the XMAC/GMAC (inclusive of + * EtherII, VLAN, bug16011 padding). + */ +#define MC_CMD_SET_MAC_IN_MTU_OFST 0 +#define MC_CMD_SET_MAC_IN_MTU_LEN 4 +#define MC_CMD_SET_MAC_IN_DRAIN_OFST 4 +#define MC_CMD_SET_MAC_IN_DRAIN_LEN 4 +#define MC_CMD_SET_MAC_IN_ADDR_OFST 8 +#define MC_CMD_SET_MAC_IN_ADDR_LEN 8 +#define MC_CMD_SET_MAC_IN_ADDR_LO_OFST 8 +#define MC_CMD_SET_MAC_IN_ADDR_LO_LEN 4 +#define MC_CMD_SET_MAC_IN_ADDR_LO_LBN 64 +#define MC_CMD_SET_MAC_IN_ADDR_LO_WIDTH 32 +#define MC_CMD_SET_MAC_IN_ADDR_HI_OFST 12 +#define MC_CMD_SET_MAC_IN_ADDR_HI_LEN 4 +#define MC_CMD_SET_MAC_IN_ADDR_HI_LBN 96 +#define MC_CMD_SET_MAC_IN_ADDR_HI_WIDTH 32 +#define MC_CMD_SET_MAC_IN_REJECT_OFST 16 +#define MC_CMD_SET_MAC_IN_REJECT_LEN 4 +#define MC_CMD_SET_MAC_IN_REJECT_UNCST_OFST 16 +#define MC_CMD_SET_MAC_IN_REJECT_UNCST_LBN 0 +#define MC_CMD_SET_MAC_IN_REJECT_UNCST_WIDTH 1 +#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_OFST 16 +#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_LBN 1 +#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_WIDTH 1 +#define MC_CMD_SET_MAC_IN_FCNTL_OFST 20 +#define MC_CMD_SET_MAC_IN_FCNTL_LEN 4 +/* enum: Flow control is off. */ +#define MC_CMD_FCNTL_OFF 0x0 +/* enum: Respond to flow control. */ +#define MC_CMD_FCNTL_RESPOND 0x1 +/* enum: Respond to and Issue flow control. */ +#define MC_CMD_FCNTL_BIDIR 0x2 +/* enum: Auto neg flow control. */ +#define MC_CMD_FCNTL_AUTO 0x3 +/* enum: Priority flow control (eftest builds only). */ +#define MC_CMD_FCNTL_QBB 0x4 +/* enum: Issue flow control. */ +#define MC_CMD_FCNTL_GENERATE 0x5 +#define MC_CMD_SET_MAC_IN_FLAGS_OFST 24 +#define MC_CMD_SET_MAC_IN_FLAGS_LEN 4 +#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_OFST 24 +#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_LBN 0 +#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_WIDTH 1 + +/* MC_CMD_SET_MAC_EXT_IN msgrequest */ +#define MC_CMD_SET_MAC_EXT_IN_LEN 32 +/* The MTU is the MTU programmed directly into the XMAC/GMAC (inclusive of + * EtherII, VLAN, bug16011 padding). 
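+ * Editor's note (illustrative, not normative): with MC_CMD_SET_MAC_EXT a
+ * parameter is only applied when its CFG_* bit is set in the CONTROL field
+ * defined below. A hedged sketch of changing just the MTU, using the MCDI
+ * helper macros assumed from mcdi.h:
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_MAC_EXT_IN_LEN);
+ *
+ *	MCDI_SET_DWORD(inbuf, SET_MAC_EXT_IN_MTU, new_mtu);
+ *	MCDI_POPULATE_DWORD_1(inbuf, SET_MAC_EXT_IN_CONTROL,
+ *			      SET_MAC_EXT_IN_CFG_MTU, 1);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_SET_MAC, inbuf, sizeof(inbuf),
+ *			  NULL, 0, NULL);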
+ */ +#define MC_CMD_SET_MAC_EXT_IN_MTU_OFST 0 +#define MC_CMD_SET_MAC_EXT_IN_MTU_LEN 4 +#define MC_CMD_SET_MAC_EXT_IN_DRAIN_OFST 4 +#define MC_CMD_SET_MAC_EXT_IN_DRAIN_LEN 4 +#define MC_CMD_SET_MAC_EXT_IN_ADDR_OFST 8 +#define MC_CMD_SET_MAC_EXT_IN_ADDR_LEN 8 +#define MC_CMD_SET_MAC_EXT_IN_ADDR_LO_OFST 8 +#define MC_CMD_SET_MAC_EXT_IN_ADDR_LO_LEN 4 +#define MC_CMD_SET_MAC_EXT_IN_ADDR_LO_LBN 64 +#define MC_CMD_SET_MAC_EXT_IN_ADDR_LO_WIDTH 32 +#define MC_CMD_SET_MAC_EXT_IN_ADDR_HI_OFST 12 +#define MC_CMD_SET_MAC_EXT_IN_ADDR_HI_LEN 4 +#define MC_CMD_SET_MAC_EXT_IN_ADDR_HI_LBN 96 +#define MC_CMD_SET_MAC_EXT_IN_ADDR_HI_WIDTH 32 +#define MC_CMD_SET_MAC_EXT_IN_REJECT_OFST 16 +#define MC_CMD_SET_MAC_EXT_IN_REJECT_LEN 4 +#define MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_OFST 16 +#define MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_LBN 0 +#define MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_WIDTH 1 +#define MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_OFST 16 +#define MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_LBN 1 +#define MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_WIDTH 1 +#define MC_CMD_SET_MAC_EXT_IN_FCNTL_OFST 20 +#define MC_CMD_SET_MAC_EXT_IN_FCNTL_LEN 4 +/* enum: Flow control is off. */ +/* MC_CMD_FCNTL_OFF 0x0 */ +/* enum: Respond to flow control. */ +/* MC_CMD_FCNTL_RESPOND 0x1 */ +/* enum: Respond to and Issue flow control. */ +/* MC_CMD_FCNTL_BIDIR 0x2 */ +/* enum: Auto neg flow control. */ +/* MC_CMD_FCNTL_AUTO 0x3 */ +/* enum: Priority flow control (eftest builds only). */ +/* MC_CMD_FCNTL_QBB 0x4 */ +/* enum: Issue flow control. */ +/* MC_CMD_FCNTL_GENERATE 0x5 */ +#define MC_CMD_SET_MAC_EXT_IN_FLAGS_OFST 24 +#define MC_CMD_SET_MAC_EXT_IN_FLAGS_LEN 4 +#define MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_OFST 24 +#define MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_LBN 0 +#define MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_WIDTH 1 +/* Select which parameters to configure. A parameter will only be modified if + * the corresponding control flag is set. If SET_MAC_ENHANCED is not set in + * capabilities then this field is ignored (and all flags are assumed to be + * set). + */ +#define MC_CMD_SET_MAC_EXT_IN_CONTROL_OFST 28 +#define MC_CMD_SET_MAC_EXT_IN_CONTROL_LEN 4 +#define MC_CMD_SET_MAC_EXT_IN_CFG_MTU_OFST 28 +#define MC_CMD_SET_MAC_EXT_IN_CFG_MTU_LBN 0 +#define MC_CMD_SET_MAC_EXT_IN_CFG_MTU_WIDTH 1 +#define MC_CMD_SET_MAC_EXT_IN_CFG_DRAIN_OFST 28 +#define MC_CMD_SET_MAC_EXT_IN_CFG_DRAIN_LBN 1 +#define MC_CMD_SET_MAC_EXT_IN_CFG_DRAIN_WIDTH 1 +#define MC_CMD_SET_MAC_EXT_IN_CFG_REJECT_OFST 28 +#define MC_CMD_SET_MAC_EXT_IN_CFG_REJECT_LBN 2 +#define MC_CMD_SET_MAC_EXT_IN_CFG_REJECT_WIDTH 1 +#define MC_CMD_SET_MAC_EXT_IN_CFG_FCNTL_OFST 28 +#define MC_CMD_SET_MAC_EXT_IN_CFG_FCNTL_LBN 3 +#define MC_CMD_SET_MAC_EXT_IN_CFG_FCNTL_WIDTH 1 +#define MC_CMD_SET_MAC_EXT_IN_CFG_FCS_OFST 28 +#define MC_CMD_SET_MAC_EXT_IN_CFG_FCS_LBN 4 +#define MC_CMD_SET_MAC_EXT_IN_CFG_FCS_WIDTH 1 + +/* MC_CMD_SET_MAC_V3_IN msgrequest */ +#define MC_CMD_SET_MAC_V3_IN_LEN 40 +/* The MTU is the MTU programmed directly into the XMAC/GMAC (inclusive of + * EtherII, VLAN, bug16011 padding). 
+ */ +#define MC_CMD_SET_MAC_V3_IN_MTU_OFST 0 +#define MC_CMD_SET_MAC_V3_IN_MTU_LEN 4 +#define MC_CMD_SET_MAC_V3_IN_DRAIN_OFST 4 +#define MC_CMD_SET_MAC_V3_IN_DRAIN_LEN 4 +#define MC_CMD_SET_MAC_V3_IN_ADDR_OFST 8 +#define MC_CMD_SET_MAC_V3_IN_ADDR_LEN 8 +#define MC_CMD_SET_MAC_V3_IN_ADDR_LO_OFST 8 +#define MC_CMD_SET_MAC_V3_IN_ADDR_LO_LEN 4 +#define MC_CMD_SET_MAC_V3_IN_ADDR_LO_LBN 64 +#define MC_CMD_SET_MAC_V3_IN_ADDR_LO_WIDTH 32 +#define MC_CMD_SET_MAC_V3_IN_ADDR_HI_OFST 12 +#define MC_CMD_SET_MAC_V3_IN_ADDR_HI_LEN 4 +#define MC_CMD_SET_MAC_V3_IN_ADDR_HI_LBN 96 +#define MC_CMD_SET_MAC_V3_IN_ADDR_HI_WIDTH 32 +#define MC_CMD_SET_MAC_V3_IN_REJECT_OFST 16 +#define MC_CMD_SET_MAC_V3_IN_REJECT_LEN 4 +#define MC_CMD_SET_MAC_V3_IN_REJECT_UNCST_OFST 16 +#define MC_CMD_SET_MAC_V3_IN_REJECT_UNCST_LBN 0 +#define MC_CMD_SET_MAC_V3_IN_REJECT_UNCST_WIDTH 1 +#define MC_CMD_SET_MAC_V3_IN_REJECT_BRDCST_OFST 16 +#define MC_CMD_SET_MAC_V3_IN_REJECT_BRDCST_LBN 1 +#define MC_CMD_SET_MAC_V3_IN_REJECT_BRDCST_WIDTH 1 +#define MC_CMD_SET_MAC_V3_IN_FCNTL_OFST 20 +#define MC_CMD_SET_MAC_V3_IN_FCNTL_LEN 4 +/* enum: Flow control is off. */ +/* MC_CMD_FCNTL_OFF 0x0 */ +/* enum: Respond to flow control. */ +/* MC_CMD_FCNTL_RESPOND 0x1 */ +/* enum: Respond to and Issue flow control. */ +/* MC_CMD_FCNTL_BIDIR 0x2 */ +/* enum: Auto neg flow control. */ +/* MC_CMD_FCNTL_AUTO 0x3 */ +/* enum: Priority flow control (eftest builds only). */ +/* MC_CMD_FCNTL_QBB 0x4 */ +/* enum: Issue flow control. */ +/* MC_CMD_FCNTL_GENERATE 0x5 */ +#define MC_CMD_SET_MAC_V3_IN_FLAGS_OFST 24 +#define MC_CMD_SET_MAC_V3_IN_FLAGS_LEN 4 +#define MC_CMD_SET_MAC_V3_IN_FLAG_INCLUDE_FCS_OFST 24 +#define MC_CMD_SET_MAC_V3_IN_FLAG_INCLUDE_FCS_LBN 0 +#define MC_CMD_SET_MAC_V3_IN_FLAG_INCLUDE_FCS_WIDTH 1 +/* Select which parameters to configure. A parameter will only be modified if + * the corresponding control flag is set. If SET_MAC_ENHANCED is not set in + * capabilities then this field is ignored (and all flags are assumed to be + * set). + */ +#define MC_CMD_SET_MAC_V3_IN_CONTROL_OFST 28 +#define MC_CMD_SET_MAC_V3_IN_CONTROL_LEN 4 +#define MC_CMD_SET_MAC_V3_IN_CFG_MTU_OFST 28 +#define MC_CMD_SET_MAC_V3_IN_CFG_MTU_LBN 0 +#define MC_CMD_SET_MAC_V3_IN_CFG_MTU_WIDTH 1 +#define MC_CMD_SET_MAC_V3_IN_CFG_DRAIN_OFST 28 +#define MC_CMD_SET_MAC_V3_IN_CFG_DRAIN_LBN 1 +#define MC_CMD_SET_MAC_V3_IN_CFG_DRAIN_WIDTH 1 +#define MC_CMD_SET_MAC_V3_IN_CFG_REJECT_OFST 28 +#define MC_CMD_SET_MAC_V3_IN_CFG_REJECT_LBN 2 +#define MC_CMD_SET_MAC_V3_IN_CFG_REJECT_WIDTH 1 +#define MC_CMD_SET_MAC_V3_IN_CFG_FCNTL_OFST 28 +#define MC_CMD_SET_MAC_V3_IN_CFG_FCNTL_LBN 3 +#define MC_CMD_SET_MAC_V3_IN_CFG_FCNTL_WIDTH 1 +#define MC_CMD_SET_MAC_V3_IN_CFG_FCS_OFST 28 +#define MC_CMD_SET_MAC_V3_IN_CFG_FCS_LBN 4 +#define MC_CMD_SET_MAC_V3_IN_CFG_FCS_WIDTH 1 +/* Target port to set mac state for. Uses MAE_LINK_ENDPOINT_SELECTOR which + * identifies a real or virtual network port by MAE port and link end. 
See the + * structure definition for more details + */ +#define MC_CMD_SET_MAC_V3_IN_TARGET_OFST 32 +#define MC_CMD_SET_MAC_V3_IN_TARGET_LEN 8 +#define MC_CMD_SET_MAC_V3_IN_TARGET_LO_OFST 32 +#define MC_CMD_SET_MAC_V3_IN_TARGET_LO_LEN 4 +#define MC_CMD_SET_MAC_V3_IN_TARGET_LO_LBN 256 +#define MC_CMD_SET_MAC_V3_IN_TARGET_LO_WIDTH 32 +#define MC_CMD_SET_MAC_V3_IN_TARGET_HI_OFST 36 +#define MC_CMD_SET_MAC_V3_IN_TARGET_HI_LEN 4 +#define MC_CMD_SET_MAC_V3_IN_TARGET_HI_LBN 288 +#define MC_CMD_SET_MAC_V3_IN_TARGET_HI_WIDTH 32 +#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_OFST 32 +#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_LEN 4 +#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_FLAT_OFST 32 +#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_FLAT_LEN 4 +#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_TYPE_OFST 35 +#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_TYPE_LEN 1 +#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_MPORT_ID_OFST 32 +#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_MPORT_ID_LEN 3 +#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_PPORT_ID_LBN 256 +#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_PPORT_ID_WIDTH 4 +#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_LBN 276 +#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_WIDTH 4 +#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_LBN 272 +#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_WIDTH 4 +#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_FUNC_PF_ID_OFST 34 +#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_FUNC_PF_ID_LEN 1 +#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_FUNC_VF_ID_OFST 32 +#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_FUNC_VF_ID_LEN 2 +#define MC_CMD_SET_MAC_V3_IN_TARGET_LINK_END_OFST 36 +#define MC_CMD_SET_MAC_V3_IN_TARGET_LINK_END_LEN 4 +#define MC_CMD_SET_MAC_V3_IN_TARGET_FLAT_OFST 32 +#define MC_CMD_SET_MAC_V3_IN_TARGET_FLAT_LEN 8 +#define MC_CMD_SET_MAC_V3_IN_TARGET_FLAT_LO_OFST 32 +#define MC_CMD_SET_MAC_V3_IN_TARGET_FLAT_LO_LEN 4 +#define MC_CMD_SET_MAC_V3_IN_TARGET_FLAT_LO_LBN 256 +#define MC_CMD_SET_MAC_V3_IN_TARGET_FLAT_LO_WIDTH 32 +#define MC_CMD_SET_MAC_V3_IN_TARGET_FLAT_HI_OFST 36 +#define MC_CMD_SET_MAC_V3_IN_TARGET_FLAT_HI_LEN 4 +#define MC_CMD_SET_MAC_V3_IN_TARGET_FLAT_HI_LBN 288 +#define MC_CMD_SET_MAC_V3_IN_TARGET_FLAT_HI_WIDTH 32 + +/* MC_CMD_SET_MAC_OUT msgresponse */ +#define MC_CMD_SET_MAC_OUT_LEN 0 + +/* MC_CMD_SET_MAC_V2_OUT msgresponse */ +#define MC_CMD_SET_MAC_V2_OUT_LEN 4 +/* MTU as configured after processing the request. See comment at + * MC_CMD_SET_MAC_IN/MTU. To query MTU without doing any changes, set CONTROL + * to 0. + */ +#define MC_CMD_SET_MAC_V2_OUT_MTU_OFST 0 +#define MC_CMD_SET_MAC_V2_OUT_MTU_LEN 4 + + +/***********************************/ +/* MC_CMD_PHY_STATS + * Get generic PHY statistics. This call returns the statistics for a generic + * PHY in a sparse array (indexed by the enumerate). Each value is represented + * by a 32bit number. If the DMA_ADDR is 0, then no DMA is performed, and the + * statistics may be read from the message response. If DMA_ADDR != 0, then the + * statistics are dmad to that (page-aligned location). Locks required: None. + * Returns: 0, ETIME + */ +#define MC_CMD_PHY_STATS 0x2d +#undef MC_CMD_0x2d_PRIVILEGE_CTG + +#define MC_CMD_0x2d_PRIVILEGE_CTG SRIOV_CTG_LINK + +/* MC_CMD_PHY_STATS_IN msgrequest */ +#define MC_CMD_PHY_STATS_IN_LEN 8 +/* ??? 
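+ * Editor's note, inferred from the command description above: this is the
+ * host address that the statistics are DMAed to; 0 selects the
+ * MC_CMD_PHY_STATS_OUT_NO_DMA response form instead. A hedged sketch
+ * (MCDI helper macros assumed from mcdi.h):
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_PHY_STATS_IN_LEN);
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_PHY_STATS_OUT_NO_DMA_LEN);
+ *	size_t outlen;
+ *
+ *	MCDI_SET_QWORD(inbuf, PHY_STATS_IN_DMA_ADDR, 0);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_PHY_STATS, inbuf, sizeof(inbuf),
+ *			  outbuf, sizeof(outbuf), &outlen);
+ *	link_up = MCDI_ARRAY_DWORD(outbuf, PHY_STATS_OUT_NO_DMA_STATISTICS,
+ *				   MC_CMD_PMA_PMD_LINK_UP);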
*/ +#define MC_CMD_PHY_STATS_IN_DMA_ADDR_OFST 0 +#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LEN 8 +#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LO_OFST 0 +#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LO_LEN 4 +#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LO_LBN 0 +#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LO_WIDTH 32 +#define MC_CMD_PHY_STATS_IN_DMA_ADDR_HI_OFST 4 +#define MC_CMD_PHY_STATS_IN_DMA_ADDR_HI_LEN 4 +#define MC_CMD_PHY_STATS_IN_DMA_ADDR_HI_LBN 32 +#define MC_CMD_PHY_STATS_IN_DMA_ADDR_HI_WIDTH 32 + +/* MC_CMD_PHY_STATS_OUT_DMA msgresponse */ +#define MC_CMD_PHY_STATS_OUT_DMA_LEN 0 + +/* MC_CMD_PHY_STATS_OUT_NO_DMA msgresponse */ +#define MC_CMD_PHY_STATS_OUT_NO_DMA_LEN (((MC_CMD_PHY_NSTATS*32))>>3) +#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_OFST 0 +#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_LEN 4 +#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_PHY_NSTATS +/* enum: OUI. */ +#define MC_CMD_OUI 0x0 +/* enum: PMA-PMD Link Up. */ +#define MC_CMD_PMA_PMD_LINK_UP 0x1 +/* enum: PMA-PMD RX Fault. */ +#define MC_CMD_PMA_PMD_RX_FAULT 0x2 +/* enum: PMA-PMD TX Fault. */ +#define MC_CMD_PMA_PMD_TX_FAULT 0x3 +/* enum: PMA-PMD Signal */ +#define MC_CMD_PMA_PMD_SIGNAL 0x4 +/* enum: PMA-PMD SNR A. */ +#define MC_CMD_PMA_PMD_SNR_A 0x5 +/* enum: PMA-PMD SNR B. */ +#define MC_CMD_PMA_PMD_SNR_B 0x6 +/* enum: PMA-PMD SNR C. */ +#define MC_CMD_PMA_PMD_SNR_C 0x7 +/* enum: PMA-PMD SNR D. */ +#define MC_CMD_PMA_PMD_SNR_D 0x8 +/* enum: PCS Link Up. */ +#define MC_CMD_PCS_LINK_UP 0x9 +/* enum: PCS RX Fault. */ +#define MC_CMD_PCS_RX_FAULT 0xa +/* enum: PCS TX Fault. */ +#define MC_CMD_PCS_TX_FAULT 0xb +/* enum: PCS BER. */ +#define MC_CMD_PCS_BER 0xc +/* enum: PCS Block Errors. */ +#define MC_CMD_PCS_BLOCK_ERRORS 0xd +/* enum: PhyXS Link Up. */ +#define MC_CMD_PHYXS_LINK_UP 0xe +/* enum: PhyXS RX Fault. */ +#define MC_CMD_PHYXS_RX_FAULT 0xf +/* enum: PhyXS TX Fault. */ +#define MC_CMD_PHYXS_TX_FAULT 0x10 +/* enum: PhyXS Align. */ +#define MC_CMD_PHYXS_ALIGN 0x11 +/* enum: PhyXS Sync. */ +#define MC_CMD_PHYXS_SYNC 0x12 +/* enum: AN link-up. */ +#define MC_CMD_AN_LINK_UP 0x13 +/* enum: AN Complete. */ +#define MC_CMD_AN_COMPLETE 0x14 +/* enum: AN 10GBaseT Status. */ +#define MC_CMD_AN_10GBT_STATUS 0x15 +/* enum: Clause 22 Link-Up. */ +#define MC_CMD_CL22_LINK_UP 0x16 +/* enum: (Last entry) */ +#define MC_CMD_PHY_NSTATS 0x17 + + +/***********************************/ +/* MC_CMD_MAC_STATS + * Get generic MAC statistics. This call returns unified statistics maintained + * by the MC as it switches between the GMAC and XMAC. The MC will write out + * all supported stats. The driver should zero initialise the buffer to + * guarantee consistent results. If the DMA_ADDR is 0, then no DMA is + * performed, and the statistics may be read from the message response. If + * DMA_ADDR != 0, then the statistics are dmad to that (page-aligned location). + * Locks required: None. The PERIODIC_CLEAR option is not used and now has no + * effect. Returns: 0, ETIME + */ +#define MC_CMD_MAC_STATS 0x2e +#undef MC_CMD_0x2e_PRIVILEGE_CTG + +#define MC_CMD_0x2e_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_MAC_STATS_IN msgrequest */ +#define MC_CMD_MAC_STATS_IN_LEN 20 +/* ??? 
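+ * Editor's note, inferred from the command description above: host address
+ * that the statistics are DMAed to; 0 selects the NO_DMA response form. A
+ * hedged sketch of enabling periodic statistics DMA (MCDI helper macros
+ * assumed from mcdi.h; EVB_PORT_ID_ASSIGNED as defined elsewhere in this
+ * header):
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN);
+ *
+ *	MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, stats_dma_addr);
+ *	MCDI_POPULATE_DWORD_4(inbuf, MAC_STATS_IN_CMD,
+ *			      MAC_STATS_IN_DMA, 1,
+ *			      MAC_STATS_IN_PERIODIC_CHANGE, 1,
+ *			      MAC_STATS_IN_PERIODIC_ENABLE, 1,
+ *			      MAC_STATS_IN_PERIOD_MS, 1000);
+ *	MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, stats_dma_len);
+ *	MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
+ *			  NULL, 0, NULL);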
*/ +#define MC_CMD_MAC_STATS_IN_DMA_ADDR_OFST 0 +#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LEN 8 +#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LO_OFST 0 +#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LO_LEN 4 +#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LO_LBN 0 +#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LO_WIDTH 32 +#define MC_CMD_MAC_STATS_IN_DMA_ADDR_HI_OFST 4 +#define MC_CMD_MAC_STATS_IN_DMA_ADDR_HI_LEN 4 +#define MC_CMD_MAC_STATS_IN_DMA_ADDR_HI_LBN 32 +#define MC_CMD_MAC_STATS_IN_DMA_ADDR_HI_WIDTH 32 +#define MC_CMD_MAC_STATS_IN_CMD_OFST 8 +#define MC_CMD_MAC_STATS_IN_CMD_LEN 4 +#define MC_CMD_MAC_STATS_IN_DMA_OFST 8 +#define MC_CMD_MAC_STATS_IN_DMA_LBN 0 +#define MC_CMD_MAC_STATS_IN_DMA_WIDTH 1 +#define MC_CMD_MAC_STATS_IN_CLEAR_OFST 8 +#define MC_CMD_MAC_STATS_IN_CLEAR_LBN 1 +#define MC_CMD_MAC_STATS_IN_CLEAR_WIDTH 1 +#define MC_CMD_MAC_STATS_IN_PERIODIC_CHANGE_OFST 8 +#define MC_CMD_MAC_STATS_IN_PERIODIC_CHANGE_LBN 2 +#define MC_CMD_MAC_STATS_IN_PERIODIC_CHANGE_WIDTH 1 +#define MC_CMD_MAC_STATS_IN_PERIODIC_ENABLE_OFST 8 +#define MC_CMD_MAC_STATS_IN_PERIODIC_ENABLE_LBN 3 +#define MC_CMD_MAC_STATS_IN_PERIODIC_ENABLE_WIDTH 1 +#define MC_CMD_MAC_STATS_IN_PERIODIC_CLEAR_OFST 8 +#define MC_CMD_MAC_STATS_IN_PERIODIC_CLEAR_LBN 4 +#define MC_CMD_MAC_STATS_IN_PERIODIC_CLEAR_WIDTH 1 +#define MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT_OFST 8 +#define MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT_LBN 5 +#define MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT_WIDTH 1 +#define MC_CMD_MAC_STATS_IN_PERIOD_MS_OFST 8 +#define MC_CMD_MAC_STATS_IN_PERIOD_MS_LBN 16 +#define MC_CMD_MAC_STATS_IN_PERIOD_MS_WIDTH 16 +/* DMA length. Should be set to MAC_STATS_NUM_STATS * sizeof(uint64_t), as + * returned by MC_CMD_GET_CAPABILITIES_V4_OUT. For legacy firmware not + * supporting MC_CMD_GET_CAPABILITIES_V4_OUT, DMA_LEN should be set to + * MC_CMD_MAC_NSTATS * sizeof(uint64_t) + */ +#define MC_CMD_MAC_STATS_IN_DMA_LEN_OFST 12 +#define MC_CMD_MAC_STATS_IN_DMA_LEN_LEN 4 +/* port id so vadapter stats can be provided */ +#define MC_CMD_MAC_STATS_IN_PORT_ID_OFST 16 +#define MC_CMD_MAC_STATS_IN_PORT_ID_LEN 4 + +/* MC_CMD_MAC_STATS_V2_IN msgrequest */ +#define MC_CMD_MAC_STATS_V2_IN_LEN 28 +/* ??? 
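+ * Editor's note: same meaning as MAC_STATS_IN/DMA_ADDR above. For sizing
+ * the buffer per the DMA_LEN rule below, a hedged sketch reading the stats
+ * count from a prior MC_CMD_GET_CAPABILITIES response (field name assumed):
+ *
+ *	num_stats = MCDI_WORD(caps_outbuf,
+ *			      GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS);
+ *	stats_dma_len = num_stats * sizeof(__le64);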
*/ +#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_OFST 0 +#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_LEN 8 +#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_LO_OFST 0 +#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_LO_LEN 4 +#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_LO_LBN 0 +#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_LO_WIDTH 32 +#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_HI_OFST 4 +#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_HI_LEN 4 +#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_HI_LBN 32 +#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_HI_WIDTH 32 +#define MC_CMD_MAC_STATS_V2_IN_CMD_OFST 8 +#define MC_CMD_MAC_STATS_V2_IN_CMD_LEN 4 +#define MC_CMD_MAC_STATS_V2_IN_DMA_OFST 8 +#define MC_CMD_MAC_STATS_V2_IN_DMA_LBN 0 +#define MC_CMD_MAC_STATS_V2_IN_DMA_WIDTH 1 +#define MC_CMD_MAC_STATS_V2_IN_CLEAR_OFST 8 +#define MC_CMD_MAC_STATS_V2_IN_CLEAR_LBN 1 +#define MC_CMD_MAC_STATS_V2_IN_CLEAR_WIDTH 1 +#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_CHANGE_OFST 8 +#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_CHANGE_LBN 2 +#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_CHANGE_WIDTH 1 +#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_ENABLE_OFST 8 +#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_ENABLE_LBN 3 +#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_ENABLE_WIDTH 1 +#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_CLEAR_OFST 8 +#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_CLEAR_LBN 4 +#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_CLEAR_WIDTH 1 +#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_NOEVENT_OFST 8 +#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_NOEVENT_LBN 5 +#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_NOEVENT_WIDTH 1 +#define MC_CMD_MAC_STATS_V2_IN_PERIOD_MS_OFST 8 +#define MC_CMD_MAC_STATS_V2_IN_PERIOD_MS_LBN 16 +#define MC_CMD_MAC_STATS_V2_IN_PERIOD_MS_WIDTH 16 +/* DMA length. Should be set to MAC_STATS_NUM_STATS * sizeof(uint64_t), as + * returned by MC_CMD_GET_CAPABILITIES_V4_OUT. For legacy firmware not + * supporting MC_CMD_GET_CAPABILITIES_V4_OUT, DMA_LEN should be set to + * MC_CMD_MAC_NSTATS * sizeof(uint64_t) + */ +#define MC_CMD_MAC_STATS_V2_IN_DMA_LEN_OFST 12 +#define MC_CMD_MAC_STATS_V2_IN_DMA_LEN_LEN 4 +/* port id so vadapter stats can be provided */ +#define MC_CMD_MAC_STATS_V2_IN_PORT_ID_OFST 16 +#define MC_CMD_MAC_STATS_V2_IN_PORT_ID_LEN 4 +/* Target port to request statistics for. Uses MAE_LINK_ENDPOINT_SELECTOR which + * identifies a real or virtual network port by MAE port and link end. 
See the + * structure definition for more details + */ +#define MC_CMD_MAC_STATS_V2_IN_TARGET_OFST 20 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_LEN 8 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_LO_OFST 20 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_LO_LEN 4 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_LO_LBN 160 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_LO_WIDTH 32 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_HI_OFST 24 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_HI_LEN 4 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_HI_LBN 192 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_HI_WIDTH 32 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_OFST 20 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_LEN 4 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FLAT_OFST 20 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FLAT_LEN 4 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_TYPE_OFST 23 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_TYPE_LEN 1 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_MPORT_ID_OFST 20 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_MPORT_ID_LEN 3 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_PPORT_ID_LBN 160 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_PPORT_ID_WIDTH 4 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_LBN 180 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_WIDTH 4 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_LBN 176 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_WIDTH 4 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FUNC_PF_ID_OFST 22 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FUNC_PF_ID_LEN 1 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FUNC_VF_ID_OFST 20 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FUNC_VF_ID_LEN 2 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_LINK_END_OFST 24 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_LINK_END_LEN 4 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_OFST 20 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_LEN 8 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_LO_OFST 20 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_LO_LEN 4 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_LO_LBN 160 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_LO_WIDTH 32 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_HI_OFST 24 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_HI_LEN 4 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_HI_LBN 192 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_HI_WIDTH 32 + +/* MC_CMD_MAC_STATS_OUT_DMA msgresponse */ +#define MC_CMD_MAC_STATS_OUT_DMA_LEN 0 + +/* MC_CMD_MAC_STATS_OUT_NO_DMA msgresponse */ +#define MC_CMD_MAC_STATS_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS*64))>>3) +#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_OFST 0 +#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_LEN 8 +#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_LO_OFST 0 +#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_LO_LEN 4 +#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_LO_LBN 0 +#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_LO_WIDTH 32 +#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_HI_OFST 4 +#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_HI_LEN 4 +#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_HI_LBN 32 +#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_HI_WIDTH 32 +#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS +#define MC_CMD_MAC_GENERATION_START 0x0 /* enum */ +#define MC_CMD_MAC_DMABUF_START 0x1 /* enum */ +#define MC_CMD_MAC_TX_PKTS 0x1 /* enum */ +#define MC_CMD_MAC_TX_PAUSE_PKTS 0x2 /* enum */ +#define MC_CMD_MAC_TX_CONTROL_PKTS 0x3 /* enum */ +#define 
MC_CMD_MAC_TX_UNICAST_PKTS 0x4 /* enum */ +#define MC_CMD_MAC_TX_MULTICAST_PKTS 0x5 /* enum */ +#define MC_CMD_MAC_TX_BROADCAST_PKTS 0x6 /* enum */ +#define MC_CMD_MAC_TX_BYTES 0x7 /* enum */ +#define MC_CMD_MAC_TX_BAD_BYTES 0x8 /* enum */ +#define MC_CMD_MAC_TX_LT64_PKTS 0x9 /* enum */ +#define MC_CMD_MAC_TX_64_PKTS 0xa /* enum */ +#define MC_CMD_MAC_TX_65_TO_127_PKTS 0xb /* enum */ +#define MC_CMD_MAC_TX_128_TO_255_PKTS 0xc /* enum */ +#define MC_CMD_MAC_TX_256_TO_511_PKTS 0xd /* enum */ +#define MC_CMD_MAC_TX_512_TO_1023_PKTS 0xe /* enum */ +#define MC_CMD_MAC_TX_1024_TO_15XX_PKTS 0xf /* enum */ +#define MC_CMD_MAC_TX_15XX_TO_JUMBO_PKTS 0x10 /* enum */ +#define MC_CMD_MAC_TX_GTJUMBO_PKTS 0x11 /* enum */ +#define MC_CMD_MAC_TX_BAD_FCS_PKTS 0x12 /* enum */ +#define MC_CMD_MAC_TX_SINGLE_COLLISION_PKTS 0x13 /* enum */ +#define MC_CMD_MAC_TX_MULTIPLE_COLLISION_PKTS 0x14 /* enum */ +#define MC_CMD_MAC_TX_EXCESSIVE_COLLISION_PKTS 0x15 /* enum */ +#define MC_CMD_MAC_TX_LATE_COLLISION_PKTS 0x16 /* enum */ +#define MC_CMD_MAC_TX_DEFERRED_PKTS 0x17 /* enum */ +#define MC_CMD_MAC_TX_EXCESSIVE_DEFERRED_PKTS 0x18 /* enum */ +#define MC_CMD_MAC_TX_NON_TCPUDP_PKTS 0x19 /* enum */ +#define MC_CMD_MAC_TX_MAC_SRC_ERR_PKTS 0x1a /* enum */ +#define MC_CMD_MAC_TX_IP_SRC_ERR_PKTS 0x1b /* enum */ +#define MC_CMD_MAC_RX_PKTS 0x1c /* enum */ +#define MC_CMD_MAC_RX_PAUSE_PKTS 0x1d /* enum */ +#define MC_CMD_MAC_RX_GOOD_PKTS 0x1e /* enum */ +#define MC_CMD_MAC_RX_CONTROL_PKTS 0x1f /* enum */ +#define MC_CMD_MAC_RX_UNICAST_PKTS 0x20 /* enum */ +#define MC_CMD_MAC_RX_MULTICAST_PKTS 0x21 /* enum */ +#define MC_CMD_MAC_RX_BROADCAST_PKTS 0x22 /* enum */ +#define MC_CMD_MAC_RX_BYTES 0x23 /* enum */ +#define MC_CMD_MAC_RX_BAD_BYTES 0x24 /* enum */ +#define MC_CMD_MAC_RX_64_PKTS 0x25 /* enum */ +#define MC_CMD_MAC_RX_65_TO_127_PKTS 0x26 /* enum */ +#define MC_CMD_MAC_RX_128_TO_255_PKTS 0x27 /* enum */ +#define MC_CMD_MAC_RX_256_TO_511_PKTS 0x28 /* enum */ +#define MC_CMD_MAC_RX_512_TO_1023_PKTS 0x29 /* enum */ +#define MC_CMD_MAC_RX_1024_TO_15XX_PKTS 0x2a /* enum */ +#define MC_CMD_MAC_RX_15XX_TO_JUMBO_PKTS 0x2b /* enum */ +#define MC_CMD_MAC_RX_GTJUMBO_PKTS 0x2c /* enum */ +#define MC_CMD_MAC_RX_UNDERSIZE_PKTS 0x2d /* enum */ +#define MC_CMD_MAC_RX_BAD_FCS_PKTS 0x2e /* enum */ +#define MC_CMD_MAC_RX_OVERFLOW_PKTS 0x2f /* enum */ +#define MC_CMD_MAC_RX_FALSE_CARRIER_PKTS 0x30 /* enum */ +#define MC_CMD_MAC_RX_SYMBOL_ERROR_PKTS 0x31 /* enum */ +#define MC_CMD_MAC_RX_ALIGN_ERROR_PKTS 0x32 /* enum */ +#define MC_CMD_MAC_RX_LENGTH_ERROR_PKTS 0x33 /* enum */ +#define MC_CMD_MAC_RX_INTERNAL_ERROR_PKTS 0x34 /* enum */ +#define MC_CMD_MAC_RX_JABBER_PKTS 0x35 /* enum */ +#define MC_CMD_MAC_RX_NODESC_DROPS 0x36 /* enum */ +#define MC_CMD_MAC_RX_LANES01_CHAR_ERR 0x37 /* enum */ +#define MC_CMD_MAC_RX_LANES23_CHAR_ERR 0x38 /* enum */ +#define MC_CMD_MAC_RX_LANES01_DISP_ERR 0x39 /* enum */ +#define MC_CMD_MAC_RX_LANES23_DISP_ERR 0x3a /* enum */ +#define MC_CMD_MAC_RX_MATCH_FAULT 0x3b /* enum */ +/* enum: PM trunc_bb_overflow counter. Valid for EF10 with PM_AND_RXDP_COUNTERS + * capability only. + */ +#define MC_CMD_MAC_PM_TRUNC_BB_OVERFLOW 0x3c +/* enum: PM discard_bb_overflow counter. Valid for EF10 with + * PM_AND_RXDP_COUNTERS capability only. + */ +#define MC_CMD_MAC_PM_DISCARD_BB_OVERFLOW 0x3d +/* enum: PM trunc_vfifo_full counter. Valid for EF10 with PM_AND_RXDP_COUNTERS + * capability only. + */ +#define MC_CMD_MAC_PM_TRUNC_VFIFO_FULL 0x3e +/* enum: PM discard_vfifo_full counter. 
Valid for EF10 with + * PM_AND_RXDP_COUNTERS capability only. + */ +#define MC_CMD_MAC_PM_DISCARD_VFIFO_FULL 0x3f +/* enum: PM trunc_qbb counter. Valid for EF10 with PM_AND_RXDP_COUNTERS + * capability only. + */ +#define MC_CMD_MAC_PM_TRUNC_QBB 0x40 +/* enum: PM discard_qbb counter. Valid for EF10 with PM_AND_RXDP_COUNTERS + * capability only. + */ +#define MC_CMD_MAC_PM_DISCARD_QBB 0x41 +/* enum: PM discard_mapping counter. Valid for EF10 with PM_AND_RXDP_COUNTERS + * capability only. + */ +#define MC_CMD_MAC_PM_DISCARD_MAPPING 0x42 +/* enum: RXDP counter: Number of packets dropped due to the queue being + * disabled. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only. + */ +#define MC_CMD_MAC_RXDP_Q_DISABLED_PKTS 0x43 +/* enum: RXDP counter: Number of packets dropped by the DICPU. Valid for EF10 + * with PM_AND_RXDP_COUNTERS capability only. + */ +#define MC_CMD_MAC_RXDP_DI_DROPPED_PKTS 0x45 +/* enum: RXDP counter: Number of non-host packets. Valid for EF10 with + * PM_AND_RXDP_COUNTERS capability only. + */ +#define MC_CMD_MAC_RXDP_STREAMING_PKTS 0x46 +/* enum: RXDP counter: Number of times an hlb descriptor fetch was performed. + * Valid for EF10 with PM_AND_RXDP_COUNTERS capability only. + */ +#define MC_CMD_MAC_RXDP_HLB_FETCH_CONDITIONS 0x47 +/* enum: RXDP counter: Number of times the DPCPU waited for an existing + * descriptor fetch. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only. + */ +#define MC_CMD_MAC_RXDP_HLB_WAIT_CONDITIONS 0x48 +#define MC_CMD_MAC_VADAPTER_RX_DMABUF_START 0x4c /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_UNICAST_PACKETS 0x4c /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_UNICAST_BYTES 0x4d /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_MULTICAST_PACKETS 0x4e /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_MULTICAST_BYTES 0x4f /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_BROADCAST_PACKETS 0x50 /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_BROADCAST_BYTES 0x51 /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_BAD_PACKETS 0x52 /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_BAD_BYTES 0x53 /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_OVERFLOW 0x54 /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_DMABUF_START 0x57 /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_UNICAST_PACKETS 0x57 /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_UNICAST_BYTES 0x58 /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_MULTICAST_PACKETS 0x59 /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_MULTICAST_BYTES 0x5a /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_BROADCAST_PACKETS 0x5b /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_BROADCAST_BYTES 0x5c /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_BAD_PACKETS 0x5d /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_BAD_BYTES 0x5e /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_OVERFLOW 0x5f /* enum */ +/* enum: Start of GMAC stats buffer space, for Siena only. */ +#define MC_CMD_GMAC_DMABUF_START 0x40 +/* enum: End of GMAC stats buffer space, for Siena only. */ +#define MC_CMD_GMAC_DMABUF_END 0x5f +/* enum: GENERATION_END value, used together with GENERATION_START to verify + * consistency of DMAd data. For legacy firmware / drivers without extended + * stats (more precisely, when DMA_LEN == MC_CMD_MAC_NSTATS * + * sizeof(uint64_t)), this entry holds the GENERATION_END value. Otherwise, + * this value is invalid/ reserved and GENERATION_END is written as the last + * 64-bit word of the DMA buffer (at DMA_LEN - sizeof(uint64_t)). Note that + * this is consistent with the legacy behaviour, in the sense that entry 96 is + * the last 64-bit word in the buffer when DMA_LEN == MC_CMD_MAC_NSTATS * + * sizeof(uint64_t). 
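+ * As an editor's illustrative sketch (not normative; dma_stats assumed to
+ * point at the DMA buffer as an array of __le64): consumers typically read
+ * GENERATION_END first, copy the statistics, then read GENERATION_START,
+ * retrying while the two differ:
+ *
+ *	do {
+ *		gen_end = dma_stats[gen_end_index];
+ *		rmb();
+ *		memcpy(stats, dma_stats, dma_len);
+ *		rmb();
+ *		gen_start = dma_stats[MC_CMD_MAC_GENERATION_START];
+ *	} while (gen_end != gen_start);
+ *
+ * where gen_end_index is MC_CMD_MAC_GENERATION_END in the legacy layout
+ * and (dma_len / sizeof(uint64_t)) - 1 otherwise.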
See SF-109306-TC, Section 9.2 for details. + */ +#define MC_CMD_MAC_GENERATION_END 0x60 +#define MC_CMD_MAC_NSTATS 0x61 /* enum */ + +/* MC_CMD_MAC_STATS_V2_OUT_DMA msgresponse */ +#define MC_CMD_MAC_STATS_V2_OUT_DMA_LEN 0 + +/* MC_CMD_MAC_STATS_V2_OUT_NO_DMA msgresponse */ +#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS_V2*64))>>3) +#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_OFST 0 +#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_LEN 8 +#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_LO_OFST 0 +#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_LO_LEN 4 +#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_LO_LBN 0 +#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_LO_WIDTH 32 +#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_HI_OFST 4 +#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_HI_LEN 4 +#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_HI_LBN 32 +#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_HI_WIDTH 32 +#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V2 +/* enum: Start of FEC stats buffer space, Medford2 and up */ +#define MC_CMD_MAC_FEC_DMABUF_START 0x61 +/* enum: Number of uncorrected FEC codewords on link (RS-FEC only for Medford2) + */ +#define MC_CMD_MAC_FEC_UNCORRECTED_ERRORS 0x61 +/* enum: Number of corrected FEC codewords on link (RS-FEC only for Medford2) + */ +#define MC_CMD_MAC_FEC_CORRECTED_ERRORS 0x62 +/* enum: Number of corrected 10-bit symbol errors, lane 0 (RS-FEC only) */ +#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE0 0x63 +/* enum: Number of corrected 10-bit symbol errors, lane 1 (RS-FEC only) */ +#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE1 0x64 +/* enum: Number of corrected 10-bit symbol errors, lane 2 (RS-FEC only) */ +#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE2 0x65 +/* enum: Number of corrected 10-bit symbol errors, lane 3 (RS-FEC only) */ +#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE3 0x66 +/* enum: This includes the space at offset 103 which is the final + * GENERATION_END in a MAC_STATS_V2 response and otherwise unused. 
+ */ +#define MC_CMD_MAC_NSTATS_V2 0x68 +/* Other enum values, see field(s): */ +/* MC_CMD_MAC_STATS_OUT_NO_DMA/STATISTICS */ + +/* MC_CMD_MAC_STATS_V3_OUT_DMA msgresponse */ +#define MC_CMD_MAC_STATS_V3_OUT_DMA_LEN 0 + +/* MC_CMD_MAC_STATS_V3_OUT_NO_DMA msgresponse */ +#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS_V3*64))>>3) +#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_OFST 0 +#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_LEN 8 +#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_LO_OFST 0 +#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_LO_LEN 4 +#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_LO_LBN 0 +#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_LO_WIDTH 32 +#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_HI_OFST 4 +#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_HI_LEN 4 +#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_HI_LBN 32 +#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_HI_WIDTH 32 +#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V3 +/* enum: Start of CTPIO stats buffer space, Medford2 and up */ +#define MC_CMD_MAC_CTPIO_DMABUF_START 0x68 +/* enum: Number of CTPIO fallbacks because a DMA packet was in progress on the + * target VI + */ +#define MC_CMD_MAC_CTPIO_VI_BUSY_FALLBACK 0x68 +/* enum: Number of times a CTPIO send wrote beyond frame end (informational + * only) + */ +#define MC_CMD_MAC_CTPIO_LONG_WRITE_SUCCESS 0x69 +/* enum: Number of CTPIO failures because the TX doorbell was written before + * the end of the frame data + */ +#define MC_CMD_MAC_CTPIO_MISSING_DBELL_FAIL 0x6a +/* enum: Number of CTPIO failures because the internal FIFO overflowed */ +#define MC_CMD_MAC_CTPIO_OVERFLOW_FAIL 0x6b +/* enum: Number of CTPIO failures because the host did not deliver data fast + * enough to avoid MAC underflow + */ +#define MC_CMD_MAC_CTPIO_UNDERFLOW_FAIL 0x6c +/* enum: Number of CTPIO failures because the host did not deliver all the + * frame data within the timeout + */ +#define MC_CMD_MAC_CTPIO_TIMEOUT_FAIL 0x6d +/* enum: Number of CTPIO failures because the frame data arrived out of order + * or with gaps + */ +#define MC_CMD_MAC_CTPIO_NONCONTIG_WR_FAIL 0x6e +/* enum: Number of CTPIO failures because the host started a new frame before + * completing the previous one + */ +#define MC_CMD_MAC_CTPIO_FRM_CLOBBER_FAIL 0x6f +/* enum: Number of CTPIO failures because a write was not a multiple of 32 bits + * or not 32-bit aligned + */ +#define MC_CMD_MAC_CTPIO_INVALID_WR_FAIL 0x70 +/* enum: Number of CTPIO fallbacks because another VI on the same port was + * sending a CTPIO frame + */ +#define MC_CMD_MAC_CTPIO_VI_CLOBBER_FALLBACK 0x71 +/* enum: Number of CTPIO fallbacks because target VI did not have CTPIO enabled + */ +#define MC_CMD_MAC_CTPIO_UNQUALIFIED_FALLBACK 0x72 +/* enum: Number of CTPIO fallbacks because length in header was less than 29 + * bytes + */ +#define MC_CMD_MAC_CTPIO_RUNT_FALLBACK 0x73 +/* enum: Total number of successful CTPIO sends on this port */ +#define MC_CMD_MAC_CTPIO_SUCCESS 0x74 +/* enum: Total number of CTPIO fallbacks on this port */ +#define MC_CMD_MAC_CTPIO_FALLBACK 0x75 +/* enum: Total number of CTPIO poisoned frames on this port, whether erased or + * not + */ +#define MC_CMD_MAC_CTPIO_POISON 0x76 +/* enum: Total number of CTPIO erased frames on this port */ +#define MC_CMD_MAC_CTPIO_ERASE 0x77 +/* enum: This includes the space at offset 120 which is the final + * GENERATION_END in a MAC_STATS_V3 response and otherwise unused. 
+ */ +#define MC_CMD_MAC_NSTATS_V3 0x79 +/* Other enum values, see field(s): */ +/* MC_CMD_MAC_STATS_V2_OUT_NO_DMA/STATISTICS */ + +/* MC_CMD_MAC_STATS_V4_OUT_DMA msgresponse */ +#define MC_CMD_MAC_STATS_V4_OUT_DMA_LEN 0 + +/* MC_CMD_MAC_STATS_V4_OUT_NO_DMA msgresponse */ +#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS_V4*64))>>3) +#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_OFST 0 +#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_LEN 8 +#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_LO_OFST 0 +#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_LO_LEN 4 +#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_LO_LBN 0 +#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_LO_WIDTH 32 +#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_HI_OFST 4 +#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_HI_LEN 4 +#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_HI_LBN 32 +#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_HI_WIDTH 32 +#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V4 +/* enum: Start of V4 stats buffer space */ +#define MC_CMD_MAC_V4_DMABUF_START 0x79 +/* enum: RXDP counter: Number of packets truncated because scattering was + * disabled. + */ +#define MC_CMD_MAC_RXDP_SCATTER_DISABLED_TRUNC 0x79 +/* enum: RXDP counter: Number of times the RXDP head of line blocked waiting + * for descriptors. Will be zero unless RXDP_HLB_IDLE capability is set. + */ +#define MC_CMD_MAC_RXDP_HLB_IDLE 0x7a +/* enum: RXDP counter: Number of times the RXDP timed out while head of line + * blocking. Will be zero unless RXDP_HLB_IDLE capability is set. + */ +#define MC_CMD_MAC_RXDP_HLB_TIMEOUT 0x7b +/* enum: This includes the space at offset 124 which is the final + * GENERATION_END in a MAC_STATS_V4 response and otherwise unused. 
+ */ +#define MC_CMD_MAC_NSTATS_V4 0x7d +/* Other enum values, see field(s): */ +/* MC_CMD_MAC_STATS_V3_OUT_NO_DMA/STATISTICS */ + + +/***********************************/ +/* MC_CMD_SRIOV + * to be documented + */ +#define MC_CMD_SRIOV 0x30 + +/* MC_CMD_SRIOV_IN msgrequest */ +#define MC_CMD_SRIOV_IN_LEN 12 +#define MC_CMD_SRIOV_IN_ENABLE_OFST 0 +#define MC_CMD_SRIOV_IN_ENABLE_LEN 4 +#define MC_CMD_SRIOV_IN_VI_BASE_OFST 4 +#define MC_CMD_SRIOV_IN_VI_BASE_LEN 4 +#define MC_CMD_SRIOV_IN_VF_COUNT_OFST 8 +#define MC_CMD_SRIOV_IN_VF_COUNT_LEN 4 + +/* MC_CMD_SRIOV_OUT msgresponse */ +#define MC_CMD_SRIOV_OUT_LEN 8 +#define MC_CMD_SRIOV_OUT_VI_SCALE_OFST 0 +#define MC_CMD_SRIOV_OUT_VI_SCALE_LEN 4 +#define MC_CMD_SRIOV_OUT_VF_TOTAL_OFST 4 +#define MC_CMD_SRIOV_OUT_VF_TOTAL_LEN 4 + +/* MC_CMD_MEMCPY_RECORD_TYPEDEF structuredef */ +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LEN 32 +/* this is only used for the first record */ +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_OFST 0 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LEN 4 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LBN 0 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_WIDTH 32 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_OFST 4 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_LEN 4 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_LBN 32 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_WIDTH 32 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_OFST 8 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LEN 8 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_OFST 8 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_LEN 4 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_LBN 64 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_WIDTH 32 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_OFST 12 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_LEN 4 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_LBN 96 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_WIDTH 32 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LBN 64 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_WIDTH 64 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_OFST 16 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_LEN 4 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE 0x100 /* enum */ +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_LBN 128 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_WIDTH 32 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_OFST 20 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LEN 8 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_OFST 20 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_LEN 4 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_LBN 160 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_WIDTH 32 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI_OFST 24 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI_LEN 4 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI_LBN 192 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI_WIDTH 32 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LBN 160 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_WIDTH 64 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_OFST 28 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_LEN 4 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_LBN 224 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_WIDTH 32 + + +/***********************************/ +/* MC_CMD_MEMCPY + * DMA write data into (Rid,Addr), either by dma reading (Rid,Addr), or by data + * embedded directly in the command. + * + * A common pattern is for a client to use generation counts to signal a dma + * update of a datastructure. 
To facilitate this, this MCDI operation can + * contain multiple requests which are executed in strict order. Requests take + * the form of duplicating the entire MCDI request continuously (including the + * request's record, which is ignored in all but the first structure). + * + * The source data can either come from a DMA from the host, or it can be + * embedded within the request directly, thereby eliminating a DMA read. To + * indicate this, the client sets FROM_RID=%RID_INLINE, ADDR_HI=0, and + * ADDR_LO=offset, and inserts the data at %offset from the start of the + * payload. It is the caller's responsibility to ensure that the embedded data + * doesn't overlap the records. + * + * Returns: 0, EINVAL (invalid RID) + */ +#define MC_CMD_MEMCPY 0x31 + +/* MC_CMD_MEMCPY_IN msgrequest */ +#define MC_CMD_MEMCPY_IN_LENMIN 32 +#define MC_CMD_MEMCPY_IN_LENMAX 224 +#define MC_CMD_MEMCPY_IN_LENMAX_MCDI2 992 +#define MC_CMD_MEMCPY_IN_LEN(num) (0+32*(num)) +#define MC_CMD_MEMCPY_IN_RECORD_NUM(len) (((len)-0)/32) +/* see MC_CMD_MEMCPY_RECORD_TYPEDEF */ +#define MC_CMD_MEMCPY_IN_RECORD_OFST 0 +#define MC_CMD_MEMCPY_IN_RECORD_LEN 32 +#define MC_CMD_MEMCPY_IN_RECORD_MINNUM 1 +#define MC_CMD_MEMCPY_IN_RECORD_MAXNUM 7 +#define MC_CMD_MEMCPY_IN_RECORD_MAXNUM_MCDI2 31 + +/* MC_CMD_MEMCPY_OUT msgresponse */ +#define MC_CMD_MEMCPY_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_WOL_FILTER_SET + * Set a WoL filter. + */ +#define MC_CMD_WOL_FILTER_SET 0x32 +#undef MC_CMD_0x32_PRIVILEGE_CTG + +#define MC_CMD_0x32_PRIVILEGE_CTG SRIOV_CTG_LINK + +/* MC_CMD_WOL_FILTER_SET_IN msgrequest */ +#define MC_CMD_WOL_FILTER_SET_IN_LEN 192 +#define MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 +#define MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 +#define MC_CMD_FILTER_MODE_SIMPLE 0x0 /* enum */ +#define MC_CMD_FILTER_MODE_STRUCTURED 0xffffffff /* enum */ +/* A type value of 1 is unused.
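+ * Editor's illustrative sketch (MCDI helper macros and ether_addr_copy()
+ * assumed from mcdi.h / etherdevice.h): installing a magic-packet filter
+ * and retrieving the returned filter ID:
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_SET_IN_MAGIC_LEN);
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_SET_OUT_LEN);
+ *	size_t outlen;
+ *
+ *	MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_FILTER_MODE,
+ *		       MC_CMD_FILTER_MODE_SIMPLE);
+ *	MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_WOL_TYPE,
+ *		       MC_CMD_WOL_TYPE_MAGIC);
+ *	ether_addr_copy(MCDI_PTR(inbuf, WOL_FILTER_SET_IN_MAGIC_MAC), mac);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_SET, inbuf, sizeof(inbuf),
+ *			  outbuf, sizeof(outbuf), &outlen);
+ *	filter_id = MCDI_DWORD(outbuf, WOL_FILTER_SET_OUT_FILTER_ID);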
*/ +#define MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 +#define MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 +/* enum: Magic */ +#define MC_CMD_WOL_TYPE_MAGIC 0x0 +/* enum: MS Windows Magic */ +#define MC_CMD_WOL_TYPE_WIN_MAGIC 0x2 +/* enum: IPv4 Syn */ +#define MC_CMD_WOL_TYPE_IPV4_SYN 0x3 +/* enum: IPv6 Syn */ +#define MC_CMD_WOL_TYPE_IPV6_SYN 0x4 +/* enum: Bitmap */ +#define MC_CMD_WOL_TYPE_BITMAP 0x5 +/* enum: Link */ +#define MC_CMD_WOL_TYPE_LINK 0x6 +/* enum: (Above this for future use) */ +#define MC_CMD_WOL_TYPE_MAX 0x7 +#define MC_CMD_WOL_FILTER_SET_IN_DATA_OFST 8 +#define MC_CMD_WOL_FILTER_SET_IN_DATA_LEN 4 +#define MC_CMD_WOL_FILTER_SET_IN_DATA_NUM 46 + +/* MC_CMD_WOL_FILTER_SET_IN_MAGIC msgrequest */ +#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_LEN 16 +/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */ +/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */ +/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */ +/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */ +#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_OFST 8 +#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LEN 8 +#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LO_OFST 8 +#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LO_LEN 4 +#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LO_LBN 64 +#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LO_WIDTH 32 +#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_HI_OFST 12 +#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_HI_LEN 4 +#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_HI_LBN 96 +#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_HI_WIDTH 32 + +/* MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN msgrequest */ +#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_LEN 20 +/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */ +/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */ +/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */ +/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */ +#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_IP_OFST 8 +#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_IP_LEN 4 +#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_IP_OFST 12 +#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_IP_LEN 4 +#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_OFST 16 +#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_LEN 2 +#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_PORT_OFST 18 +#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_PORT_LEN 2 + +/* MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN msgrequest */ +#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_LEN 44 +/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */ +/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */ +/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */ +/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */ +#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_OFST 8 +#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_LEN 16 +#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_IP_OFST 24 +#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_IP_LEN 16 +#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_PORT_OFST 40 +#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_PORT_LEN 2 +#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_PORT_OFST 42 +#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_PORT_LEN 2 + +/* MC_CMD_WOL_FILTER_SET_IN_BITMAP msgrequest */ +#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN 187 +/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */ +/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */ +/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */ +/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */ +#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_OFST 8 +#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_LEN 48 +#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_BITMAP_OFST 56 +#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_BITMAP_LEN 128 +#define 
MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN_OFST 184 +#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN_LEN 1 +#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER3_OFST 185 +#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER3_LEN 1 +#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER4_OFST 186 +#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER4_LEN 1 + +/* MC_CMD_WOL_FILTER_SET_IN_LINK msgrequest */ +#define MC_CMD_WOL_FILTER_SET_IN_LINK_LEN 12 +/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */ +/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */ +/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */ +/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */ +#define MC_CMD_WOL_FILTER_SET_IN_LINK_MASK_OFST 8 +#define MC_CMD_WOL_FILTER_SET_IN_LINK_MASK_LEN 4 +#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_OFST 8 +#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_LBN 0 +#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_WIDTH 1 +#define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_OFST 8 +#define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_LBN 1 +#define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_WIDTH 1 + +/* MC_CMD_WOL_FILTER_SET_OUT msgresponse */ +#define MC_CMD_WOL_FILTER_SET_OUT_LEN 4 +#define MC_CMD_WOL_FILTER_SET_OUT_FILTER_ID_OFST 0 +#define MC_CMD_WOL_FILTER_SET_OUT_FILTER_ID_LEN 4 + + +/***********************************/ +/* MC_CMD_WOL_FILTER_REMOVE + * Remove a WoL filter. Locks required: None. Returns: 0, EINVAL, ENOSYS + */ +#define MC_CMD_WOL_FILTER_REMOVE 0x33 +#undef MC_CMD_0x33_PRIVILEGE_CTG + +#define MC_CMD_0x33_PRIVILEGE_CTG SRIOV_CTG_LINK + +/* MC_CMD_WOL_FILTER_REMOVE_IN msgrequest */ +#define MC_CMD_WOL_FILTER_REMOVE_IN_LEN 4 +#define MC_CMD_WOL_FILTER_REMOVE_IN_FILTER_ID_OFST 0 +#define MC_CMD_WOL_FILTER_REMOVE_IN_FILTER_ID_LEN 4 + +/* MC_CMD_WOL_FILTER_REMOVE_OUT msgresponse */ +#define MC_CMD_WOL_FILTER_REMOVE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_WOL_FILTER_RESET + * Reset (i.e. remove all) WoL filters. Locks required: None. Returns: 0, + * ENOSYS + */ +#define MC_CMD_WOL_FILTER_RESET 0x34 +#undef MC_CMD_0x34_PRIVILEGE_CTG + +#define MC_CMD_0x34_PRIVILEGE_CTG SRIOV_CTG_LINK + +/* MC_CMD_WOL_FILTER_RESET_IN msgrequest */ +#define MC_CMD_WOL_FILTER_RESET_IN_LEN 4 +#define MC_CMD_WOL_FILTER_RESET_IN_MASK_OFST 0 +#define MC_CMD_WOL_FILTER_RESET_IN_MASK_LEN 4 +#define MC_CMD_WOL_FILTER_RESET_IN_WAKE_FILTERS 0x1 /* enum */ +#define MC_CMD_WOL_FILTER_RESET_IN_LIGHTSOUT_OFFLOADS 0x2 /* enum */ + +/* MC_CMD_WOL_FILTER_RESET_OUT msgresponse */ +#define MC_CMD_WOL_FILTER_RESET_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_SET_MCAST_HASH + * Set the MCAST hash value without otherwise reconfiguring the MAC + */ +#define MC_CMD_SET_MCAST_HASH 0x35 + +/* MC_CMD_SET_MCAST_HASH_IN msgrequest */ +#define MC_CMD_SET_MCAST_HASH_IN_LEN 32 +#define MC_CMD_SET_MCAST_HASH_IN_HASH0_OFST 0 +#define MC_CMD_SET_MCAST_HASH_IN_HASH0_LEN 16 +#define MC_CMD_SET_MCAST_HASH_IN_HASH1_OFST 16 +#define MC_CMD_SET_MCAST_HASH_IN_HASH1_LEN 16 + +/* MC_CMD_SET_MCAST_HASH_OUT msgresponse */ +#define MC_CMD_SET_MCAST_HASH_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_NVRAM_TYPES + * Return bitfield indicating available types of virtual NVRAM partitions. + * Locks required: none. Returns: 0 + */ +#define MC_CMD_NVRAM_TYPES 0x36 +#undef MC_CMD_0x36_PRIVILEGE_CTG + +#define MC_CMD_0x36_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_NVRAM_TYPES_IN msgrequest */ +#define MC_CMD_NVRAM_TYPES_IN_LEN 0 + +/* MC_CMD_NVRAM_TYPES_OUT msgresponse */ +#define MC_CMD_NVRAM_TYPES_OUT_LEN 4 +/* Bit mask of supported types. 
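+ * Editor's note, inferred from the command description above: bit N of
+ * TYPES is set when partition type N, as enumerated below, is available,
+ * e.g.
+ *
+ *	types = MCDI_DWORD(outbuf, NVRAM_TYPES_OUT_TYPES);
+ *	if (types & (1 << MC_CMD_NVRAM_TYPE_MC_FW))
+ *		mc_fw_partition_present = true;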
*/ +#define MC_CMD_NVRAM_TYPES_OUT_TYPES_OFST 0 +#define MC_CMD_NVRAM_TYPES_OUT_TYPES_LEN 4 +/* enum: Disabled callisto. */ +#define MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO 0x0 +/* enum: MC firmware. */ +#define MC_CMD_NVRAM_TYPE_MC_FW 0x1 +/* enum: MC backup firmware. */ +#define MC_CMD_NVRAM_TYPE_MC_FW_BACKUP 0x2 +/* enum: Static configuration Port0. */ +#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0 0x3 +/* enum: Static configuration Port1. */ +#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1 0x4 +/* enum: Dynamic configuration Port0. */ +#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0 0x5 +/* enum: Dynamic configuration Port1. */ +#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1 0x6 +/* enum: Expansion Rom. */ +#define MC_CMD_NVRAM_TYPE_EXP_ROM 0x7 +/* enum: Expansion Rom Configuration Port0. */ +#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0 0x8 +/* enum: Expansion Rom Configuration Port1. */ +#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1 0x9 +/* enum: Phy Configuration Port0. */ +#define MC_CMD_NVRAM_TYPE_PHY_PORT0 0xa +/* enum: Phy Configuration Port1. */ +#define MC_CMD_NVRAM_TYPE_PHY_PORT1 0xb +/* enum: Log. */ +#define MC_CMD_NVRAM_TYPE_LOG 0xc +/* enum: FPGA image. */ +#define MC_CMD_NVRAM_TYPE_FPGA 0xd +/* enum: FPGA backup image */ +#define MC_CMD_NVRAM_TYPE_FPGA_BACKUP 0xe +/* enum: FC firmware. */ +#define MC_CMD_NVRAM_TYPE_FC_FW 0xf +/* enum: FC backup firmware. */ +#define MC_CMD_NVRAM_TYPE_FC_FW_BACKUP 0x10 +/* enum: CPLD image. */ +#define MC_CMD_NVRAM_TYPE_CPLD 0x11 +/* enum: Licensing information. */ +#define MC_CMD_NVRAM_TYPE_LICENSE 0x12 +/* enum: FC Log. */ +#define MC_CMD_NVRAM_TYPE_FC_LOG 0x13 +/* enum: Additional flash on FPGA. */ +#define MC_CMD_NVRAM_TYPE_FC_EXTRA 0x14 + + +/***********************************/ +/* MC_CMD_NVRAM_INFO + * Read info about a virtual NVRAM partition. Locks required: none. Returns: 0, + * EINVAL (bad type). 
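+ * Editor's illustrative sketch (MCDI helper macros assumed from mcdi.h),
+ * querying a partition's size and erase granularity:
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_INFO_IN_LEN);
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_INFO_OUT_LEN);
+ *	size_t outlen;
+ *
+ *	MCDI_SET_DWORD(inbuf, NVRAM_INFO_IN_TYPE, type);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_INFO, inbuf, sizeof(inbuf),
+ *			  outbuf, sizeof(outbuf), &outlen);
+ *	size = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE);
+ *	erase_size = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE);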
+ */ +#define MC_CMD_NVRAM_INFO 0x37 +#undef MC_CMD_0x37_PRIVILEGE_CTG + +#define MC_CMD_0x37_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_NVRAM_INFO_IN msgrequest */ +#define MC_CMD_NVRAM_INFO_IN_LEN 4 +#define MC_CMD_NVRAM_INFO_IN_TYPE_OFST 0 +#define MC_CMD_NVRAM_INFO_IN_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ + +/* MC_CMD_NVRAM_INFO_OUT msgresponse */ +#define MC_CMD_NVRAM_INFO_OUT_LEN 24 +#define MC_CMD_NVRAM_INFO_OUT_TYPE_OFST 0 +#define MC_CMD_NVRAM_INFO_OUT_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ +#define MC_CMD_NVRAM_INFO_OUT_SIZE_OFST 4 +#define MC_CMD_NVRAM_INFO_OUT_SIZE_LEN 4 +#define MC_CMD_NVRAM_INFO_OUT_ERASESIZE_OFST 8 +#define MC_CMD_NVRAM_INFO_OUT_ERASESIZE_LEN 4 +#define MC_CMD_NVRAM_INFO_OUT_FLAGS_OFST 12 +#define MC_CMD_NVRAM_INFO_OUT_FLAGS_LEN 4 +#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_OFST 12 +#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN 0 +#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_WIDTH 1 +#define MC_CMD_NVRAM_INFO_OUT_TLV_OFST 12 +#define MC_CMD_NVRAM_INFO_OUT_TLV_LBN 1 +#define MC_CMD_NVRAM_INFO_OUT_TLV_WIDTH 1 +#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_IF_TSA_BOUND_OFST 12 +#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_IF_TSA_BOUND_LBN 2 +#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_IF_TSA_BOUND_WIDTH 1 +#define MC_CMD_NVRAM_INFO_OUT_CRC_OFST 12 +#define MC_CMD_NVRAM_INFO_OUT_CRC_LBN 3 +#define MC_CMD_NVRAM_INFO_OUT_CRC_WIDTH 1 +#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_OFST 12 +#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_LBN 5 +#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_WIDTH 1 +#define MC_CMD_NVRAM_INFO_OUT_CMAC_OFST 12 +#define MC_CMD_NVRAM_INFO_OUT_CMAC_LBN 6 +#define MC_CMD_NVRAM_INFO_OUT_CMAC_WIDTH 1 +#define MC_CMD_NVRAM_INFO_OUT_A_B_OFST 12 +#define MC_CMD_NVRAM_INFO_OUT_A_B_LBN 7 +#define MC_CMD_NVRAM_INFO_OUT_A_B_WIDTH 1 +#define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_OFST 16 +#define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_LEN 4 +#define MC_CMD_NVRAM_INFO_OUT_PHYSADDR_OFST 20 +#define MC_CMD_NVRAM_INFO_OUT_PHYSADDR_LEN 4 + +/* MC_CMD_NVRAM_INFO_V2_OUT msgresponse */ +#define MC_CMD_NVRAM_INFO_V2_OUT_LEN 28 +#define MC_CMD_NVRAM_INFO_V2_OUT_TYPE_OFST 0 +#define MC_CMD_NVRAM_INFO_V2_OUT_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ +#define MC_CMD_NVRAM_INFO_V2_OUT_SIZE_OFST 4 +#define MC_CMD_NVRAM_INFO_V2_OUT_SIZE_LEN 4 +#define MC_CMD_NVRAM_INFO_V2_OUT_ERASESIZE_OFST 8 +#define MC_CMD_NVRAM_INFO_V2_OUT_ERASESIZE_LEN 4 +#define MC_CMD_NVRAM_INFO_V2_OUT_FLAGS_OFST 12 +#define MC_CMD_NVRAM_INFO_V2_OUT_FLAGS_LEN 4 +#define MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_OFST 12 +#define MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_LBN 0 +#define MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_WIDTH 1 +#define MC_CMD_NVRAM_INFO_V2_OUT_TLV_OFST 12 +#define MC_CMD_NVRAM_INFO_V2_OUT_TLV_LBN 1 +#define MC_CMD_NVRAM_INFO_V2_OUT_TLV_WIDTH 1 +#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_IF_TSA_BOUND_OFST 12 +#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_IF_TSA_BOUND_LBN 2 +#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_IF_TSA_BOUND_WIDTH 1 +#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_OFST 12 +#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_LBN 5 +#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_WIDTH 1 +#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_OFST 12 +#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_LBN 7 +#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_WIDTH 1 +#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSDEV_OFST 16 +#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSDEV_LEN 4 +#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSADDR_OFST 20 +#define 
+
+
+/***********************************/
+/* MC_CMD_NVRAM_UPDATE_START
+ * Start a group of update operations on a virtual NVRAM partition. Locks
+ * required: PHY_LOCK if type==*PHY*. Returns: 0, EINVAL (bad type), EACCES (if
+ * PHY_LOCK required and not held). In an adapter bound to a TSA controller,
+ * MC_CMD_NVRAM_UPDATE_START can only be used on a subset of partition types
+ * i.e. static config, dynamic config and expansion ROM config. Attempting to
+ * perform this operation on a restricted partition will return the error
+ * EPERM.
+ */
+#define MC_CMD_NVRAM_UPDATE_START 0x38
+#undef MC_CMD_0x38_PRIVILEGE_CTG
+
+#define MC_CMD_0x38_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_UPDATE_START_IN msgrequest: Legacy NVRAM_UPDATE_START request.
+ * Use NVRAM_UPDATE_START_V2_IN in new code
+ */
+#define MC_CMD_NVRAM_UPDATE_START_IN_LEN 4
+#define MC_CMD_NVRAM_UPDATE_START_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_UPDATE_START_IN_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+
+/* MC_CMD_NVRAM_UPDATE_START_V2_IN msgrequest: Extended NVRAM_UPDATE_START
+ * request with additional flags indicating version of command in use. See
+ * MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT for details of extended functionality. Use
+ * paired up with NVRAM_UPDATE_FINISH_V2_IN.
+ */
+#define MC_CMD_NVRAM_UPDATE_START_V2_IN_LEN 8
+#define MC_CMD_NVRAM_UPDATE_START_V2_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_UPDATE_START_V2_IN_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAGS_OFST 4
+#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAGS_LEN 4
+#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT_OFST 4
+#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT_LBN 0
+#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT_WIDTH 1
+
+/* MC_CMD_NVRAM_UPDATE_START_OUT msgresponse */
+#define MC_CMD_NVRAM_UPDATE_START_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_NVRAM_READ
+ * Read data from a virtual NVRAM partition. Locks required: PHY_LOCK if
+ * type==*PHY*. Returns: 0, EINVAL (bad type/offset/length), EACCES (if
+ * PHY_LOCK required and not held)
+ */
+#define MC_CMD_NVRAM_READ 0x39
+#undef MC_CMD_0x39_PRIVILEGE_CTG
+
+#define MC_CMD_0x39_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_READ_IN msgrequest */
+#define MC_CMD_NVRAM_READ_IN_LEN 12
+#define MC_CMD_NVRAM_READ_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_READ_IN_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_READ_IN_OFFSET_OFST 4
+#define MC_CMD_NVRAM_READ_IN_OFFSET_LEN 4
+/* amount to read in bytes */
+#define MC_CMD_NVRAM_READ_IN_LENGTH_OFST 8
+#define MC_CMD_NVRAM_READ_IN_LENGTH_LEN 4
+
+/* MC_CMD_NVRAM_READ_IN_V2 msgrequest */
+#define MC_CMD_NVRAM_READ_IN_V2_LEN 16
+#define MC_CMD_NVRAM_READ_IN_V2_TYPE_OFST 0
+#define MC_CMD_NVRAM_READ_IN_V2_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_READ_IN_V2_OFFSET_OFST 4
+#define MC_CMD_NVRAM_READ_IN_V2_OFFSET_LEN 4
+/* amount to read in bytes */
+#define MC_CMD_NVRAM_READ_IN_V2_LENGTH_OFST 8
+#define MC_CMD_NVRAM_READ_IN_V2_LENGTH_LEN 4
+/* Optional control info. If a partition is stored with an A/B versioning
+ * scheme (i.e. in more than one physical partition in NVRAM) the host can set
+ * this to control which underlying physical partition is used to read data
+ * from. This allows it to perform a read-modify-write-verify with the write
+ * lock continuously held by calling NVRAM_UPDATE_START, reading the old
+ * contents using MODE=TARGET_CURRENT, overwriting the old partition and then
+ * verifying by reading with MODE=TARGET_BACKUP.
+ */
+#define MC_CMD_NVRAM_READ_IN_V2_MODE_OFST 12
+#define MC_CMD_NVRAM_READ_IN_V2_MODE_LEN 4
+/* enum: Same as omitting MODE: caller sees data in current partition unless it
+ * holds the write lock in which case it sees data in the partition it is
+ * updating.
+ */
+#define MC_CMD_NVRAM_READ_IN_V2_DEFAULT 0x0
+/* enum: Read from the current partition of an A/B pair, even if holding the
+ * write lock.
+ */
+#define MC_CMD_NVRAM_READ_IN_V2_TARGET_CURRENT 0x1
+/* enum: Read from the non-current (i.e. to be updated) partition of an A/B
+ * pair
+ */
+#define MC_CMD_NVRAM_READ_IN_V2_TARGET_BACKUP 0x2
+
+/* MC_CMD_NVRAM_READ_OUT msgresponse */
+#define MC_CMD_NVRAM_READ_OUT_LENMIN 1
+#define MC_CMD_NVRAM_READ_OUT_LENMAX 252
+#define MC_CMD_NVRAM_READ_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_NVRAM_READ_OUT_LEN(num) (0+1*(num))
+#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_NUM(len) (((len)-0)/1)
+#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_OFST 0
+#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_LEN 1
+#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MINNUM 1
+#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MAXNUM 252
+#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MAXNUM_MCDI2 1020
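Reading a partition is then a matter of issuing bounded chunks, since the READ_OUT buffer is capped at READ_BUFFER_MAXNUM bytes (1020 over MCDI2). A sketch under the same assumptions as the NVRAM_INFO example (MCDI helpers from mcdi.h; the function name and the caller-enforced chunking are illustrative):

static int example_nvram_read_chunk(struct efx_nic *efx, unsigned int type,
				    u32 offset, u8 *buffer, size_t length)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_READ_IN_V2_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_READ_OUT_LENMAX);
	size_t outlen;
	int rc;

	/* The caller keeps length <= MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MAXNUM
	 * (or the _MCDI2 variant) and loops for larger reads.
	 */
	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_V2_TYPE, type);
	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_V2_OFFSET, offset);
	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_V2_LENGTH, length);
	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_V2_MODE,
		       MC_CMD_NVRAM_READ_IN_V2_DEFAULT);
	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < length)
		return -EIO;

	memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length);
	return 0;
}

+
+
+/***********************************/
+/* MC_CMD_NVRAM_WRITE
+ * Write data to a virtual NVRAM partition. Locks required: PHY_LOCK if
+ * type==*PHY*.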
Returns: 0, EINVAL (bad type/offset/length), EACCES (if + * PHY_LOCK required and not held) + */ +#define MC_CMD_NVRAM_WRITE 0x3a +#undef MC_CMD_0x3a_PRIVILEGE_CTG + +#define MC_CMD_0x3a_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_NVRAM_WRITE_IN msgrequest */ +#define MC_CMD_NVRAM_WRITE_IN_LENMIN 13 +#define MC_CMD_NVRAM_WRITE_IN_LENMAX 252 +#define MC_CMD_NVRAM_WRITE_IN_LENMAX_MCDI2 1020 +#define MC_CMD_NVRAM_WRITE_IN_LEN(num) (12+1*(num)) +#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_NUM(len) (((len)-12)/1) +#define MC_CMD_NVRAM_WRITE_IN_TYPE_OFST 0 +#define MC_CMD_NVRAM_WRITE_IN_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ +#define MC_CMD_NVRAM_WRITE_IN_OFFSET_OFST 4 +#define MC_CMD_NVRAM_WRITE_IN_OFFSET_LEN 4 +#define MC_CMD_NVRAM_WRITE_IN_LENGTH_OFST 8 +#define MC_CMD_NVRAM_WRITE_IN_LENGTH_LEN 4 +#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_OFST 12 +#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_LEN 1 +#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MINNUM 1 +#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MAXNUM 240 +#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MAXNUM_MCDI2 1008 + +/* MC_CMD_NVRAM_WRITE_OUT msgresponse */ +#define MC_CMD_NVRAM_WRITE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_NVRAM_ERASE + * Erase sector(s) from a virtual NVRAM partition. Locks required: PHY_LOCK if + * type==*PHY*. Returns: 0, EINVAL (bad type/offset/length), EACCES (if + * PHY_LOCK required and not held) + */ +#define MC_CMD_NVRAM_ERASE 0x3b +#undef MC_CMD_0x3b_PRIVILEGE_CTG + +#define MC_CMD_0x3b_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_NVRAM_ERASE_IN msgrequest */ +#define MC_CMD_NVRAM_ERASE_IN_LEN 12 +#define MC_CMD_NVRAM_ERASE_IN_TYPE_OFST 0 +#define MC_CMD_NVRAM_ERASE_IN_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ +#define MC_CMD_NVRAM_ERASE_IN_OFFSET_OFST 4 +#define MC_CMD_NVRAM_ERASE_IN_OFFSET_LEN 4 +#define MC_CMD_NVRAM_ERASE_IN_LENGTH_OFST 8 +#define MC_CMD_NVRAM_ERASE_IN_LENGTH_LEN 4 + +/* MC_CMD_NVRAM_ERASE_OUT msgresponse */ +#define MC_CMD_NVRAM_ERASE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_NVRAM_UPDATE_FINISH + * Finish a group of update operations on a virtual NVRAM partition. Locks + * required: PHY_LOCK if type==*PHY*. Returns: 0, EINVAL (bad type/offset/ + * length), EACCES (if PHY_LOCK required and not held). In an adapter bound to + * a TSA controller, MC_CMD_NVRAM_UPDATE_FINISH can only be used on a subset of + * partition types i.e. static config, dynamic config and expansion ROM config. + * Attempting to perform this operation on a restricted partition will return + * the error EPERM. + */ +#define MC_CMD_NVRAM_UPDATE_FINISH 0x3c +#undef MC_CMD_0x3c_PRIVILEGE_CTG + +#define MC_CMD_0x3c_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_NVRAM_UPDATE_FINISH_IN msgrequest: Legacy NVRAM_UPDATE_FINISH + * request. Use NVRAM_UPDATE_FINISH_V2_IN in new code + */ +#define MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN 8 +#define MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_OFST 0 +#define MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ +#define MC_CMD_NVRAM_UPDATE_FINISH_IN_REBOOT_OFST 4 +#define MC_CMD_NVRAM_UPDATE_FINISH_IN_REBOOT_LEN 4 + +/* MC_CMD_NVRAM_UPDATE_FINISH_V2_IN msgrequest: Extended NVRAM_UPDATE_FINISH + * request with additional flags indicating version of NVRAM_UPDATE commands in + * use. See MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT for details of extended + * functionality. 
Use paired up with NVRAM_UPDATE_START_V2_IN.
+ */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_LEN 12
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_REBOOT_OFST 4
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_REBOOT_LEN 4
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAGS_OFST 8
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAGS_LEN 4
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT_OFST 8
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT_LBN 0
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_RUN_IN_BACKGROUND_OFST 8
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_RUN_IN_BACKGROUND_LBN 1
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_RUN_IN_BACKGROUND_WIDTH 1
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_POLL_VERIFY_RESULT_OFST 8
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_POLL_VERIFY_RESULT_LBN 2
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_POLL_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_ABORT_OFST 8
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_ABORT_LBN 3
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_ABORT_WIDTH 1
+
+/* MC_CMD_NVRAM_UPDATE_FINISH_OUT msgresponse: Legacy NVRAM_UPDATE_FINISH
+ * response. Use NVRAM_UPDATE_FINISH_V2_OUT in new code
+ */
+#define MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN 0
+
+/* MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT msgresponse:
+ *
+ * Extended NVRAM_UPDATE_FINISH response that communicates the result of secure
+ * firmware validation where applicable back to the host.
+ *
+ * Medford only: For signed firmware images, such as those for Medford, the MC
+ * firmware verifies the signature before marking the firmware image as valid.
+ * This process takes a few seconds to complete, so it is likely to take more
+ * than the MCDI timeout. Hence signature verification is initiated when
+ * MC_CMD_NVRAM_UPDATE_FINISH_V2_IN is received by the firmware, and the MCDI
+ * command is run in a background MCDI processing thread. This response payload
+ * includes the results of the signature verification. Note that the
+ * per-partition nvram lock in firmware is only released after the verification
+ * has completed.
+ */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN 4
+/* Result of nvram update completion processing. Result codes that indicate an
+ * internal build failure, and are therefore not expected to be seen by
+ * customers in the field, are marked with the prefix 'Internal-error'.
+ */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE_OFST 0
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE_LEN 4
+/* enum: Invalid return code; only non-zero values are defined. Defined as
+ * unknown for backwards compatibility with NVRAM_UPDATE_FINISH_OUT.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_UNKNOWN 0x0
+/* enum: Verify succeeded without any errors. */
+#define MC_CMD_NVRAM_VERIFY_RC_SUCCESS 0x1
+/* enum: CMS format verification failed due to an internal error. */
+#define MC_CMD_NVRAM_VERIFY_RC_CMS_CHECK_FAILED 0x2
+/* enum: Invalid CMS format in image metadata. */
+#define MC_CMD_NVRAM_VERIFY_RC_INVALID_CMS_FORMAT 0x3
+/* enum: Message digest verification failed due to an internal error. */
+#define MC_CMD_NVRAM_VERIFY_RC_MESSAGE_DIGEST_CHECK_FAILED 0x4
+/* enum: Error in message digest calculated over the reflash-header, payload
+ * and reflash-trailer.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_BAD_MESSAGE_DIGEST 0x5
+/* enum: Signature verification failed due to an internal error. */
+#define MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHECK_FAILED 0x6
+/* enum: There are no valid signatures in the image. */
+#define MC_CMD_NVRAM_VERIFY_RC_NO_VALID_SIGNATURES 0x7
+/* enum: Trusted approvers verification failed due to an internal error. */
+#define MC_CMD_NVRAM_VERIFY_RC_TRUSTED_APPROVERS_CHECK_FAILED 0x8
+/* enum: The Trusted approver's list is empty. */
+#define MC_CMD_NVRAM_VERIFY_RC_NO_TRUSTED_APPROVERS 0x9
+/* enum: Signature chain verification failed due to an internal error. */
+#define MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHAIN_CHECK_FAILED 0xa
+/* enum: The signers of the signatures in the image are not listed in the
+ * Trusted approver's list.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_NO_SIGNATURE_MATCH 0xb
+/* enum: The image contains a test-signed certificate, but the adapter accepts
+ * only production signed images.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_REJECT_TEST_SIGNED 0xc
+/* enum: The image has a lower security level than the current firmware. */
+#define MC_CMD_NVRAM_VERIFY_RC_SECURITY_LEVEL_DOWNGRADE 0xd
+/* enum: Internal-error. The signed image is missing the 'contents' section,
+ * where the 'contents' section holds the actual image payload to be applied.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_CONTENT_NOT_FOUND 0xe
+/* enum: Internal-error. The bundle header is invalid. */
+#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_CONTENT_HEADER_INVALID 0xf
+/* enum: Internal-error. The bundle does not have a valid reflash image layout.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_REFLASH_IMAGE_INVALID 0x10
+/* enum: Internal-error. The bundle has an inconsistent layout of components or
+ * incorrect checksum.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_IMAGE_LAYOUT_INVALID 0x11
+/* enum: Internal-error. The bundle manifest is inconsistent with components in
+ * the bundle.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_INVALID 0x12
+/* enum: Internal-error. The number of components in the bundle does not match
+ * the number of components advertised by the bundle manifest.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_NUM_COMPONENTS_MISMATCH 0x13
+/* enum: Internal-error. The bundle contains too many components for the MC
+ * firmware to process.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_TOO_MANY_COMPONENTS 0x14
+/* enum: Internal-error. The bundle manifest has an invalid/inconsistent
+ * component.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_COMPONENT_INVALID 0x15
+/* enum: Internal-error. The hash of a component does not match the hash stored
+ * in the bundle manifest.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_COMPONENT_HASH_MISMATCH 0x16
+/* enum: Internal-error. Component hash calculation failed. */
+#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_COMPONENT_HASH_FAILED 0x17
+/* enum: Internal-error. The component does not have a valid reflash image
+ * layout.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_COMPONENT_REFLASH_IMAGE_INVALID 0x18
+/* enum: The bundle processing code failed to copy a component to its target
+ * partition.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_COMPONENT_COPY_FAILED 0x19
+/* enum: The update operation is in-progress. */
+#define MC_CMD_NVRAM_VERIFY_RC_PENDING 0x1a
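The REPORT_VERIFY_RESULT flag is what turns the V2 response on: set it in NVRAM_UPDATE_FINISH_V2_IN and decode RESULT_CODE from the V2 response, falling back gracefully when older firmware returns the legacy empty response. A hedged sketch (MCDI helpers from mcdi.h assumed; the errno choices are illustrative):

static int example_nvram_update_finish(struct efx_nic *efx, unsigned int type)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_V2_IN_TYPE, type);
	MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_V2_IN_REBOOT, 0);
	MCDI_POPULATE_DWORD_1(inbuf, NVRAM_UPDATE_FINISH_V2_IN_FLAGS,
			      NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT,
			      1);
	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	/* Legacy firmware answers with the empty V1 response: nothing to
	 * decode, assume success as before.
	 */
	if (outlen < MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN)
		return 0;

	switch (MCDI_DWORD(outbuf, NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE)) {
	case MC_CMD_NVRAM_VERIFY_RC_SUCCESS:
		return 0;
	case MC_CMD_NVRAM_VERIFY_RC_PENDING:
		return -EAGAIN;	/* caller polls again, with FLAG_POLL_VERIFY_RESULT */
	default:
		return -EIO;
	}
}

+
+
+/***********************************/
+/* MC_CMD_REBOOT
+ * Reboot the MC.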
+ *
+ * The AFTER_ASSERTION flag is intended to be used when the driver notices an
+ * assertion failure (at which point it is expected to perform a complete tear
+ * down and reinitialise), to allow both ports to reset the MC once in an
+ * atomic fashion.
+ *
+ * Production MC firmwares are generally compiled with REBOOT_ON_ASSERT=1,
+ * which means that they will automatically reboot out of the assertion
+ * handler, so this is in practice an optional operation. It is still
+ * recommended that drivers execute this to support custom firmwares with
+ * REBOOT_ON_ASSERT=0.
+ *
+ * Locks required: NONE. Returns: Nothing. You get back a response with ERR=1,
+ * DATALEN=0.
+ */
+#define MC_CMD_REBOOT 0x3d
+#undef MC_CMD_0x3d_PRIVILEGE_CTG
+
+#define MC_CMD_0x3d_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_REBOOT_IN msgrequest */
+#define MC_CMD_REBOOT_IN_LEN 4
+#define MC_CMD_REBOOT_IN_FLAGS_OFST 0
+#define MC_CMD_REBOOT_IN_FLAGS_LEN 4
+#define MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION 0x1 /* enum */
+
+/* MC_CMD_REBOOT_OUT msgresponse */
+#define MC_CMD_REBOOT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SCHEDINFO
+ * Request scheduler info. Locks required: NONE. Returns: An array of
+ * (timeslice,maximum overrun), one for each thread, in ascending order of
+ * thread address.
+ */
+#define MC_CMD_SCHEDINFO 0x3e
+#undef MC_CMD_0x3e_PRIVILEGE_CTG
+
+#define MC_CMD_0x3e_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SCHEDINFO_IN msgrequest */
+#define MC_CMD_SCHEDINFO_IN_LEN 0
+
+/* MC_CMD_SCHEDINFO_OUT msgresponse */
+#define MC_CMD_SCHEDINFO_OUT_LENMIN 4
+#define MC_CMD_SCHEDINFO_OUT_LENMAX 252
+#define MC_CMD_SCHEDINFO_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_SCHEDINFO_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_SCHEDINFO_OUT_DATA_NUM(len) (((len)-0)/4)
+#define MC_CMD_SCHEDINFO_OUT_DATA_OFST 0
+#define MC_CMD_SCHEDINFO_OUT_DATA_LEN 4
+#define MC_CMD_SCHEDINFO_OUT_DATA_MINNUM 1
+#define MC_CMD_SCHEDINFO_OUT_DATA_MAXNUM 63
+#define MC_CMD_SCHEDINFO_OUT_DATA_MAXNUM_MCDI2 255
+
+
+/***********************************/
+/* MC_CMD_REBOOT_MODE
+ * Set the mode for the next MC reboot. Locks required: NONE. Sets the reboot
+ * mode to the specified value. Returns the old mode.
+ */
+#define MC_CMD_REBOOT_MODE 0x3f
+#undef MC_CMD_0x3f_PRIVILEGE_CTG
+
+#define MC_CMD_0x3f_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_REBOOT_MODE_IN msgrequest */
+#define MC_CMD_REBOOT_MODE_IN_LEN 4
+#define MC_CMD_REBOOT_MODE_IN_VALUE_OFST 0
+#define MC_CMD_REBOOT_MODE_IN_VALUE_LEN 4
+/* enum: Normal. */
+#define MC_CMD_REBOOT_MODE_NORMAL 0x0
+/* enum: Power-on Reset. */
+#define MC_CMD_REBOOT_MODE_POR 0x2
+/* enum: Snapper. */
+#define MC_CMD_REBOOT_MODE_SNAPPER 0x3
+/* enum: snapper fake POR */
+#define MC_CMD_REBOOT_MODE_SNAPPER_POR 0x4
+#define MC_CMD_REBOOT_MODE_IN_FAKE_OFST 0
+#define MC_CMD_REBOOT_MODE_IN_FAKE_LBN 7
+#define MC_CMD_REBOOT_MODE_IN_FAKE_WIDTH 1
+
+/* MC_CMD_REBOOT_MODE_OUT msgresponse */
+#define MC_CMD_REBOOT_MODE_OUT_LEN 4
+#define MC_CMD_REBOOT_MODE_OUT_VALUE_OFST 0
+#define MC_CMD_REBOOT_MODE_OUT_VALUE_LEN 4
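Because a rebooting MC cannot complete the MCDI transaction, the "ERR=1, DATALEN=0" response documented for MC_CMD_REBOOT above surfaces to the caller as an I/O error rather than success, and a driver issuing the command is expected to tolerate that. A sketch (assuming the MCDI core in this patch maps that error response to -EIO; the function name is illustrative):

static int example_mc_reboot_after_assertion(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);
	int rc;

	MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS,
		       MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION);
	rc = efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	/* The MC reboots before it can answer, so the documented
	 * ERR=1/DATALEN=0 response is the expected outcome, not a failure.
	 */
	if (rc == -EIO)
		rc = 0;
	return rc;
}

+
+
+/***********************************/
+/* MC_CMD_SENSOR_INFO
+ * Returns information about every available sensor.
+ *
+ * Each sensor has a single (16bit) value, and a corresponding state. The
+ * mapping between value and state is nominally determined by the MC, but may
+ * be implemented using up to 2 ranges per sensor.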
+ *
+ * This call returns a mask (32bit) of the sensors that are supported by this
+ * platform, then an array of sensor information structures, in order of sensor
+ * type (but without gaps for unimplemented sensors). Each structure defines
+ * the ranges for the corresponding sensor. An unused range is indicated by
+ * equal limit values. If one range is used, a value outside that range results
+ * in STATE_FATAL. If two ranges are used, a value outside the second range
+ * results in STATE_FATAL while a value outside the first and inside the second
+ * range results in STATE_WARNING.
+ *
+ * Sensor masks and sensor information arrays are organised into pages. For
+ * backward compatibility, older host software can only use sensors in page 0.
+ * Bit 31 in the sensor mask was previously unused, and is now reserved for use
+ * as the next page flag.
+ *
+ * If the request does not contain a PAGE value then firmware will only return
+ * page 0 of sensor information, with bit 31 in the sensor mask cleared.
+ *
+ * If the request contains a PAGE value then firmware responds with the sensor
+ * mask and sensor information array for that page of sensors. In this case bit
+ * 31 in the mask is set if another page exists.
+ *
+ * Locks required: None. Returns: 0
+ */
+#define MC_CMD_SENSOR_INFO 0x41
+#undef MC_CMD_0x41_PRIVILEGE_CTG
+
+#define MC_CMD_0x41_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SENSOR_INFO_IN msgrequest */
+#define MC_CMD_SENSOR_INFO_IN_LEN 0
+
+/* MC_CMD_SENSOR_INFO_EXT_IN msgrequest */
+#define MC_CMD_SENSOR_INFO_EXT_IN_LEN 4
+/* Which page of sensors to report.
+ *
+ * Page 0 contains sensors 0 to 30 (sensor 31 is the next page bit).
+ *
+ * Page 1 contains sensors 32 to 62 (sensor 63 is the next page bit). etc.
+ */
+#define MC_CMD_SENSOR_INFO_EXT_IN_PAGE_OFST 0
+#define MC_CMD_SENSOR_INFO_EXT_IN_PAGE_LEN 4
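The paging contract above lends itself to a simple walk: request page 0, count the sensors advertised in the mask (ignoring bit 31), and continue while the next-page bit is set. A sketch using the EXT_IN request and the NEXT_PAGE flag defined with the EXT_OUT response further down (MCDI helpers from mcdi.h and the kernel's hweight32() assumed; the function name is illustrative):

static int example_count_sensors(struct efx_nic *efx, unsigned int *count)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_SENSOR_INFO_EXT_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_SENSOR_INFO_OUT_LENMAX);
	unsigned int page = 0;
	size_t outlen;
	u32 mask;
	int rc;

	*count = 0;
	do {
		MCDI_SET_DWORD(inbuf, SENSOR_INFO_EXT_IN_PAGE, page++);
		rc = efx_mcdi_rpc(efx, MC_CMD_SENSOR_INFO, inbuf, sizeof(inbuf),
				  outbuf, sizeof(outbuf), &outlen);
		if (rc)
			return rc;
		mask = MCDI_DWORD(outbuf, SENSOR_INFO_OUT_MASK);
		/* Bit 31 is the next-page flag, not a sensor */
		*count += hweight32(mask & 0x7fffffff);
	} while (mask & (1U << MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_LBN));

	return 0;
}

+
+/* MC_CMD_SENSOR_INFO_EXT_IN_V2 msgrequest */
+#define MC_CMD_SENSOR_INFO_EXT_IN_V2_LEN 8
+/* Which page of sensors to report.
+ *
+ * Page 0 contains sensors 0 to 30 (sensor 31 is the next page bit).
+ *
+ * Page 1 contains sensors 32 to 62 (sensor 63 is the next page bit). etc.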
+ */ +#define MC_CMD_SENSOR_INFO_EXT_IN_V2_PAGE_OFST 0 +#define MC_CMD_SENSOR_INFO_EXT_IN_V2_PAGE_LEN 4 +/* Flags controlling information retrieved */ +#define MC_CMD_SENSOR_INFO_EXT_IN_V2_FLAGS_OFST 4 +#define MC_CMD_SENSOR_INFO_EXT_IN_V2_FLAGS_LEN 4 +#define MC_CMD_SENSOR_INFO_EXT_IN_V2_ENGINEERING_OFST 4 +#define MC_CMD_SENSOR_INFO_EXT_IN_V2_ENGINEERING_LBN 0 +#define MC_CMD_SENSOR_INFO_EXT_IN_V2_ENGINEERING_WIDTH 1 + +/* MC_CMD_SENSOR_INFO_OUT msgresponse */ +#define MC_CMD_SENSOR_INFO_OUT_LENMIN 4 +#define MC_CMD_SENSOR_INFO_OUT_LENMAX 252 +#define MC_CMD_SENSOR_INFO_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_SENSOR_INFO_OUT_LEN(num) (4+8*(num)) +#define MC_CMD_SENSOR_INFO_OUT_MC_CMD_SENSOR_ENTRY_NUM(len) (((len)-4)/8) +#define MC_CMD_SENSOR_INFO_OUT_MASK_OFST 0 +#define MC_CMD_SENSOR_INFO_OUT_MASK_LEN 4 +/* enum: Controller temperature: degC */ +#define MC_CMD_SENSOR_CONTROLLER_TEMP 0x0 +/* enum: Phy common temperature: degC */ +#define MC_CMD_SENSOR_PHY_COMMON_TEMP 0x1 +/* enum: Controller cooling: bool */ +#define MC_CMD_SENSOR_CONTROLLER_COOLING 0x2 +/* enum: Phy 0 temperature: degC */ +#define MC_CMD_SENSOR_PHY0_TEMP 0x3 +/* enum: Phy 0 cooling: bool */ +#define MC_CMD_SENSOR_PHY0_COOLING 0x4 +/* enum: Phy 1 temperature: degC */ +#define MC_CMD_SENSOR_PHY1_TEMP 0x5 +/* enum: Phy 1 cooling: bool */ +#define MC_CMD_SENSOR_PHY1_COOLING 0x6 +/* enum: 1.0v power: mV */ +#define MC_CMD_SENSOR_IN_1V0 0x7 +/* enum: 1.2v power: mV */ +#define MC_CMD_SENSOR_IN_1V2 0x8 +/* enum: 1.8v power: mV */ +#define MC_CMD_SENSOR_IN_1V8 0x9 +/* enum: 2.5v power: mV */ +#define MC_CMD_SENSOR_IN_2V5 0xa +/* enum: 3.3v power: mV */ +#define MC_CMD_SENSOR_IN_3V3 0xb +/* enum: 12v power: mV */ +#define MC_CMD_SENSOR_IN_12V0 0xc +/* enum: 1.2v analogue power: mV */ +#define MC_CMD_SENSOR_IN_1V2A 0xd +/* enum: reference voltage: mV */ +#define MC_CMD_SENSOR_IN_VREF 0xe +/* enum: AOE FPGA power: mV */ +#define MC_CMD_SENSOR_OUT_VAOE 0xf +/* enum: AOE FPGA temperature: degC */ +#define MC_CMD_SENSOR_AOE_TEMP 0x10 +/* enum: AOE FPGA PSU temperature: degC */ +#define MC_CMD_SENSOR_PSU_AOE_TEMP 0x11 +/* enum: AOE PSU temperature: degC */ +#define MC_CMD_SENSOR_PSU_TEMP 0x12 +/* enum: Fan 0 speed: RPM */ +#define MC_CMD_SENSOR_FAN_0 0x13 +/* enum: Fan 1 speed: RPM */ +#define MC_CMD_SENSOR_FAN_1 0x14 +/* enum: Fan 2 speed: RPM */ +#define MC_CMD_SENSOR_FAN_2 0x15 +/* enum: Fan 3 speed: RPM */ +#define MC_CMD_SENSOR_FAN_3 0x16 +/* enum: Fan 4 speed: RPM */ +#define MC_CMD_SENSOR_FAN_4 0x17 +/* enum: AOE FPGA input power: mV */ +#define MC_CMD_SENSOR_IN_VAOE 0x18 +/* enum: AOE FPGA current: mA */ +#define MC_CMD_SENSOR_OUT_IAOE 0x19 +/* enum: AOE FPGA input current: mA */ +#define MC_CMD_SENSOR_IN_IAOE 0x1a +/* enum: NIC power consumption: W */ +#define MC_CMD_SENSOR_NIC_POWER 0x1b +/* enum: 0.9v power voltage: mV */ +#define MC_CMD_SENSOR_IN_0V9 0x1c +/* enum: 0.9v power current: mA */ +#define MC_CMD_SENSOR_IN_I0V9 0x1d +/* enum: 1.2v power current: mA */ +#define MC_CMD_SENSOR_IN_I1V2 0x1e +/* enum: Not a sensor: reserved for the next page flag */ +#define MC_CMD_SENSOR_PAGE0_NEXT 0x1f +/* enum: 0.9v power voltage (at ADC): mV */ +#define MC_CMD_SENSOR_IN_0V9_ADC 0x20 +/* enum: Controller temperature 2: degC */ +#define MC_CMD_SENSOR_CONTROLLER_2_TEMP 0x21 +/* enum: Voltage regulator internal temperature: degC */ +#define MC_CMD_SENSOR_VREG_INTERNAL_TEMP 0x22 +/* enum: 0.9V voltage regulator temperature: degC */ +#define MC_CMD_SENSOR_VREG_0V9_TEMP 0x23 +/* enum: 1.2V voltage regulator temperature: degC */ +#define 
MC_CMD_SENSOR_VREG_1V2_TEMP 0x24
+/* enum: controller internal temperature sensor voltage (internal ADC): mV */
+#define MC_CMD_SENSOR_CONTROLLER_VPTAT 0x25
+/* enum: controller internal temperature (internal ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP 0x26
+/* enum: controller internal temperature sensor voltage (external ADC): mV */
+#define MC_CMD_SENSOR_CONTROLLER_VPTAT_EXTADC 0x27
+/* enum: controller internal temperature (external ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP_EXTADC 0x28
+/* enum: ambient temperature: degC */
+#define MC_CMD_SENSOR_AMBIENT_TEMP 0x29
+/* enum: air flow: bool */
+#define MC_CMD_SENSOR_AIRFLOW 0x2a
+/* enum: voltage between VDD08D and VSS08D at CSR: mV */
+#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR 0x2b
+/* enum: voltage between VDD08D and VSS08D at CSR (external ADC): mV */
+#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR_EXTADC 0x2c
+/* enum: Hotpoint temperature: degC */
+#define MC_CMD_SENSOR_HOTPOINT_TEMP 0x2d
+/* enum: Port 0 PHY power switch over-current: bool */
+#define MC_CMD_SENSOR_PHY_POWER_PORT0 0x2e
+/* enum: Port 1 PHY power switch over-current: bool */
+#define MC_CMD_SENSOR_PHY_POWER_PORT1 0x2f
+/* enum: Mop-up microcontroller reference voltage: mV */
+#define MC_CMD_SENSOR_MUM_VCC 0x30
+/* enum: 0.9v power phase A voltage: mV */
+#define MC_CMD_SENSOR_IN_0V9_A 0x31
+/* enum: 0.9v power phase A current: mA */
+#define MC_CMD_SENSOR_IN_I0V9_A 0x32
+/* enum: 0.9V voltage regulator phase A temperature: degC */
+#define MC_CMD_SENSOR_VREG_0V9_A_TEMP 0x33
+/* enum: 0.9v power phase B voltage: mV */
+#define MC_CMD_SENSOR_IN_0V9_B 0x34
+/* enum: 0.9v power phase B current: mA */
+#define MC_CMD_SENSOR_IN_I0V9_B 0x35
+/* enum: 0.9V voltage regulator phase B temperature: degC */
+#define MC_CMD_SENSOR_VREG_0V9_B_TEMP 0x36
+/* enum: CCOM AVREG 1v2 supply (internal ADC): mV */
+#define MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY 0x37
+/* enum: CCOM AVREG 1v2 supply (external ADC): mV */
+#define MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY_EXTADC 0x38
+/* enum: CCOM AVREG 1v8 supply (internal ADC): mV */
+#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY 0x39
+/* enum: CCOM AVREG 1v8 supply (external ADC): mV */
+#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY_EXTADC 0x3a
+/* enum: CCOM RTS temperature: degC */
+#define MC_CMD_SENSOR_CONTROLLER_RTS 0x3b
+/* enum: Not a sensor: reserved for the next page flag */
+#define MC_CMD_SENSOR_PAGE1_NEXT 0x3f
+/* enum: controller internal temperature sensor voltage on master core
+ * (internal ADC): mV
+ */
+#define MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT 0x40
+/* enum: controller internal temperature on master core (internal ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP 0x41
+/* enum: controller internal temperature sensor voltage on master core
+ * (external ADC): mV
+ */
+#define MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT_EXTADC 0x42
+/* enum: controller internal temperature on master core (external ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP_EXTADC 0x43
+/* enum: controller internal temperature on slave core sensor voltage (internal
+ * ADC): mV
+ */
+#define MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT 0x44
+/* enum: controller internal temperature on slave core (internal ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP 0x45
+/* enum: controller internal temperature on slave core sensor voltage (external
+ * ADC): mV
+ */
+#define MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT_EXTADC 0x46
+/* enum: controller internal temperature on slave core (external ADC): degC
*/ +#define MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP_EXTADC 0x47 +/* enum: Voltage supplied to the SODIMMs from their power supply: mV */ +#define MC_CMD_SENSOR_SODIMM_VOUT 0x49 +/* enum: Temperature of SODIMM 0 (if installed): degC */ +#define MC_CMD_SENSOR_SODIMM_0_TEMP 0x4a +/* enum: Temperature of SODIMM 1 (if installed): degC */ +#define MC_CMD_SENSOR_SODIMM_1_TEMP 0x4b +/* enum: Voltage supplied to the QSFP #0 from their power supply: mV */ +#define MC_CMD_SENSOR_PHY0_VCC 0x4c +/* enum: Voltage supplied to the QSFP #1 from their power supply: mV */ +#define MC_CMD_SENSOR_PHY1_VCC 0x4d +/* enum: Controller die temperature (TDIODE): degC */ +#define MC_CMD_SENSOR_CONTROLLER_TDIODE_TEMP 0x4e +/* enum: Board temperature (front): degC */ +#define MC_CMD_SENSOR_BOARD_FRONT_TEMP 0x4f +/* enum: Board temperature (back): degC */ +#define MC_CMD_SENSOR_BOARD_BACK_TEMP 0x50 +/* enum: 1.8v power current: mA */ +#define MC_CMD_SENSOR_IN_I1V8 0x51 +/* enum: 2.5v power current: mA */ +#define MC_CMD_SENSOR_IN_I2V5 0x52 +/* enum: 3.3v power current: mA */ +#define MC_CMD_SENSOR_IN_I3V3 0x53 +/* enum: 12v power current: mA */ +#define MC_CMD_SENSOR_IN_I12V0 0x54 +/* enum: 1.3v power: mV */ +#define MC_CMD_SENSOR_IN_1V3 0x55 +/* enum: 1.3v power current: mA */ +#define MC_CMD_SENSOR_IN_I1V3 0x56 +/* enum: Engineering sensor 1 */ +#define MC_CMD_SENSOR_ENGINEERING_1 0x57 +/* enum: Engineering sensor 2 */ +#define MC_CMD_SENSOR_ENGINEERING_2 0x58 +/* enum: Engineering sensor 3 */ +#define MC_CMD_SENSOR_ENGINEERING_3 0x59 +/* enum: Engineering sensor 4 */ +#define MC_CMD_SENSOR_ENGINEERING_4 0x5a +/* enum: Engineering sensor 5 */ +#define MC_CMD_SENSOR_ENGINEERING_5 0x5b +/* enum: Engineering sensor 6 */ +#define MC_CMD_SENSOR_ENGINEERING_6 0x5c +/* enum: Engineering sensor 7 */ +#define MC_CMD_SENSOR_ENGINEERING_7 0x5d +/* enum: Engineering sensor 8 */ +#define MC_CMD_SENSOR_ENGINEERING_8 0x5e +/* enum: Not a sensor: reserved for the next page flag */ +#define MC_CMD_SENSOR_PAGE2_NEXT 0x5f +/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */ +#define MC_CMD_SENSOR_ENTRY_OFST 4 +#define MC_CMD_SENSOR_ENTRY_LEN 8 +#define MC_CMD_SENSOR_ENTRY_LO_OFST 4 +#define MC_CMD_SENSOR_ENTRY_LO_LEN 4 +#define MC_CMD_SENSOR_ENTRY_LO_LBN 32 +#define MC_CMD_SENSOR_ENTRY_LO_WIDTH 32 +#define MC_CMD_SENSOR_ENTRY_HI_OFST 8 +#define MC_CMD_SENSOR_ENTRY_HI_LEN 4 +#define MC_CMD_SENSOR_ENTRY_HI_LBN 64 +#define MC_CMD_SENSOR_ENTRY_HI_WIDTH 32 +#define MC_CMD_SENSOR_ENTRY_MINNUM 0 +#define MC_CMD_SENSOR_ENTRY_MAXNUM 31 +#define MC_CMD_SENSOR_ENTRY_MAXNUM_MCDI2 127 + +/* MC_CMD_SENSOR_INFO_EXT_OUT msgresponse */ +#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMIN 4 +#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMAX 252 +#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_SENSOR_INFO_EXT_OUT_LEN(num) (4+8*(num)) +#define MC_CMD_SENSOR_INFO_EXT_OUT_MC_CMD_SENSOR_ENTRY_NUM(len) (((len)-4)/8) +#define MC_CMD_SENSOR_INFO_EXT_OUT_MASK_OFST 0 +#define MC_CMD_SENSOR_INFO_EXT_OUT_MASK_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_SENSOR_INFO_OUT */ +#define MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_OFST 0 +#define MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_LBN 31 +#define MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_WIDTH 1 +/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */ +/* MC_CMD_SENSOR_ENTRY_OFST 4 */ +/* MC_CMD_SENSOR_ENTRY_LEN 8 */ +/* MC_CMD_SENSOR_ENTRY_LO_OFST 4 */ +/* MC_CMD_SENSOR_ENTRY_LO_LEN 4 */ +/* MC_CMD_SENSOR_ENTRY_LO_LBN 32 */ +/* MC_CMD_SENSOR_ENTRY_LO_WIDTH 32 */ +/* MC_CMD_SENSOR_ENTRY_HI_OFST 8 */ +/* MC_CMD_SENSOR_ENTRY_HI_LEN 4 */ +/* 
MC_CMD_SENSOR_ENTRY_HI_LBN 64 */
+/* MC_CMD_SENSOR_ENTRY_HI_WIDTH 32 */
+/* MC_CMD_SENSOR_ENTRY_MINNUM 0 */
+/* MC_CMD_SENSOR_ENTRY_MAXNUM 31 */
+/* MC_CMD_SENSOR_ENTRY_MAXNUM_MCDI2 127 */
+
+/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF structuredef */
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_LEN 8
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_OFST 0
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_LEN 2
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_LBN 0
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_WIDTH 16
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_OFST 2
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_LEN 2
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_LBN 16
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_WIDTH 16
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_OFST 4
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_LEN 2
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_LBN 32
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_WIDTH 16
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_OFST 6
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_LEN 2
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_LBN 48
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_WIDTH 16
+
+
+/***********************************/
+/* MC_CMD_READ_SENSORS
+ * Returns the current reading from each sensor. DMAs an array of sensor
+ * readings, in order of sensor type (but without gaps for unimplemented
+ * sensors), into host memory. Each array element is a
+ * MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF dword.
+ *
+ * If the request does not contain the LENGTH field then only sensors 0 to 30
+ * are reported, to avoid DMA buffer overflow in older host software. If the
+ * sensor readings require more space than the LENGTH allows, then EINVAL is
+ * returned.
+ *
+ * The MC will send a SENSOREVT event every time any sensor changes state. The
+ * driver is responsible for ensuring that it doesn't miss any events. The
+ * board will function normally if all sensors are in STATE_OK or
+ * STATE_WARNING. Otherwise the board should not be expected to function.
+ */
+#define MC_CMD_READ_SENSORS 0x42
+#undef MC_CMD_0x42_PRIVILEGE_CTG
+
+#define MC_CMD_0x42_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_READ_SENSORS_IN msgrequest */
+#define MC_CMD_READ_SENSORS_IN_LEN 8
+/* DMA address of host buffer for sensor readings (must be 4Kbyte aligned).
+ *
+ * If the address is 0xffffffffffffffff send the readings in the response (used
+ * by cmdclient).
+ */
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_OFST 0
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LEN 8
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LO_LEN 4
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LO_LBN 0
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LO_WIDTH 32
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_HI_OFST 4
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_HI_LEN 4
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_HI_LBN 32
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_HI_WIDTH 32
+
+/* MC_CMD_READ_SENSORS_EXT_IN msgrequest */
+#define MC_CMD_READ_SENSORS_EXT_IN_LEN 12
+/* DMA address of host buffer for sensor readings (must be 4Kbyte aligned).
+ *
+ * If the address is 0xffffffffffffffff send the readings in the response (used
+ * by cmdclient).
+ */
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_OFST 0
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LEN 8
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LO_LEN 4
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LO_LBN 0
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LO_WIDTH 32
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_HI_OFST 4
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_HI_LEN 4
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_HI_LBN 32
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_HI_WIDTH 32
+/* Size in bytes of host buffer. */
+#define MC_CMD_READ_SENSORS_EXT_IN_LENGTH_OFST 8
+#define MC_CMD_READ_SENSORS_EXT_IN_LENGTH_LEN 4
+
+/* MC_CMD_READ_SENSORS_EXT_IN_V2 msgrequest */
+#define MC_CMD_READ_SENSORS_EXT_IN_V2_LEN 16
+/* DMA address of host buffer for sensor readings (must be 4Kbyte aligned).
+ *
+ * If the address is 0xffffffffffffffff send the readings in the response (used
+ * by cmdclient).
+ */
+#define MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_OFST 0
+#define MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_LEN 8
+#define MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_LO_OFST 0
+#define MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_LO_LEN 4
+#define MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_LO_LBN 0
+#define MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_LO_WIDTH 32
+#define MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_HI_OFST 4
+#define MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_HI_LEN 4
+#define MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_HI_LBN 32
+#define MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_HI_WIDTH 32
+/* Size in bytes of host buffer. */
+#define MC_CMD_READ_SENSORS_EXT_IN_V2_LENGTH_OFST 8
+#define MC_CMD_READ_SENSORS_EXT_IN_V2_LENGTH_LEN 4
+/* Flags controlling information retrieved */
+#define MC_CMD_READ_SENSORS_EXT_IN_V2_FLAGS_OFST 12
+#define MC_CMD_READ_SENSORS_EXT_IN_V2_FLAGS_LEN 4
+#define MC_CMD_READ_SENSORS_EXT_IN_V2_ENGINEERING_OFST 12
+#define MC_CMD_READ_SENSORS_EXT_IN_V2_ENGINEERING_LBN 0
+#define MC_CMD_READ_SENSORS_EXT_IN_V2_ENGINEERING_WIDTH 1
+
+/* MC_CMD_READ_SENSORS_OUT msgresponse */
+#define MC_CMD_READ_SENSORS_OUT_LEN 0
+
+/* MC_CMD_READ_SENSORS_EXT_OUT msgresponse */
+#define MC_CMD_READ_SENSORS_EXT_OUT_LEN 0
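A sketch of priming a readings DMA with the EXT_IN request (struct efx_buffer and the driver's DMA-buffer allocator are assumed from net_driver.h elsewhere in this patch; their use here is illustrative). Each dword landing in the buffer is then decoded with the MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF fields defined just below:

static int example_prime_sensor_dma(struct efx_nic *efx,
				    struct efx_buffer *dma_buf)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_READ_SENSORS_EXT_IN_LEN);

	/* dma_buf is assumed to come from efx_nic_alloc_buffer(), which
	 * satisfies the 4Kbyte alignment required above.
	 */
	MCDI_SET_QWORD(inbuf, READ_SENSORS_EXT_IN_DMA_ADDR, dma_buf->dma_addr);
	MCDI_SET_DWORD(inbuf, READ_SENSORS_EXT_IN_LENGTH, dma_buf->len);

	return efx_mcdi_rpc(efx, MC_CMD_READ_SENSORS, inbuf, sizeof(inbuf),
			    NULL, 0, NULL);
}

+
+/* MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF structuredef */
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_LEN 4
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_OFST 0
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_LEN 2
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_LBN 0
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_WIDTH 16
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_OFST 2
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LEN 1
+/* enum: Ok. */
+#define MC_CMD_SENSOR_STATE_OK 0x0
+/* enum: Breached warning threshold. */
+#define MC_CMD_SENSOR_STATE_WARNING 0x1
+/* enum: Breached fatal threshold. */
+#define MC_CMD_SENSOR_STATE_FATAL 0x2
+/* enum: Fault with sensor. */
+#define MC_CMD_SENSOR_STATE_BROKEN 0x3
+/* enum: Sensor is working but does not currently have a reading. */
+#define MC_CMD_SENSOR_STATE_NO_READING 0x4
+/* enum: Sensor initialisation failed.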
*/ +#define MC_CMD_SENSOR_STATE_INIT_FAILED 0x5 +#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LBN 16 +#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_WIDTH 8 +#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_OFST 3 +#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_LEN 1 +/* Enum values, see field(s): */ +/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */ +#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_LBN 24 +#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_WIDTH 8 + + +/***********************************/ +/* MC_CMD_GET_PHY_STATE + * Report current state of PHY. A 'zombie' PHY is a PHY that has failed to boot + * (e.g. due to missing or corrupted firmware). Locks required: None. Return + * code: 0 + */ +#define MC_CMD_GET_PHY_STATE 0x43 +#undef MC_CMD_0x43_PRIVILEGE_CTG + +#define MC_CMD_0x43_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_PHY_STATE_IN msgrequest */ +#define MC_CMD_GET_PHY_STATE_IN_LEN 0 + +/* MC_CMD_GET_PHY_STATE_IN_V2 msgrequest */ +#define MC_CMD_GET_PHY_STATE_IN_V2_LEN 8 +/* Target port to request PHY state for. Uses MAE_LINK_ENDPOINT_SELECTOR which + * identifies a real or virtual network port by MAE port and link end. See the + * structure definition for more details. + */ +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_OFST 0 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_LEN 8 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_LO_OFST 0 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_LO_LEN 4 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_LO_LBN 0 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_LO_WIDTH 32 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_HI_OFST 4 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_HI_LEN 4 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_HI_LBN 32 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_HI_WIDTH 32 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_OFST 0 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_LEN 4 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FLAT_OFST 0 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FLAT_LEN 4 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_TYPE_OFST 3 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_TYPE_LEN 1 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_OFST 0 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_LEN 3 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_LBN 0 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_WIDTH 4 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_LBN 20 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_WIDTH 4 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_LBN 16 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_WIDTH 4 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_OFST 2 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_LEN 1 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_OFST 0 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_LEN 2 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_LINK_END_OFST 4 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_LINK_END_LEN 4 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_OFST 0 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_LEN 8 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_LO_OFST 0 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_LO_LEN 4 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_LO_LBN 0 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_LO_WIDTH 32 +#define 
MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_HI_OFST 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_HI_LEN 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_HI_LBN 32
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_HI_WIDTH 32
+
+/* MC_CMD_GET_PHY_STATE_OUT msgresponse */
+#define MC_CMD_GET_PHY_STATE_OUT_LEN 4
+#define MC_CMD_GET_PHY_STATE_OUT_STATE_OFST 0
+#define MC_CMD_GET_PHY_STATE_OUT_STATE_LEN 4
+/* enum: Ok. */
+#define MC_CMD_PHY_STATE_OK 0x1
+/* enum: Faulty. */
+#define MC_CMD_PHY_STATE_ZOMBIE 0x2
+
+
+/***********************************/
+/* MC_CMD_SETUP_8021QBB
+ * 802.1Qbb control. 8 Tx queues that map to priorities 0 - 7. Use all 1s to
+ * disable 802.1Qbb for a given priority.
+ */
+#define MC_CMD_SETUP_8021QBB 0x44
+
+/* MC_CMD_SETUP_8021QBB_IN msgrequest */
+#define MC_CMD_SETUP_8021QBB_IN_LEN 32
+#define MC_CMD_SETUP_8021QBB_IN_TXQS_OFST 0
+#define MC_CMD_SETUP_8021QBB_IN_TXQS_LEN 32
+
+/* MC_CMD_SETUP_8021QBB_OUT msgresponse */
+#define MC_CMD_SETUP_8021QBB_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_WOL_FILTER_GET
+ * Retrieve ID of any WoL filters. Locks required: None. Returns: 0, ENOSYS
+ */
+#define MC_CMD_WOL_FILTER_GET 0x45
+#undef MC_CMD_0x45_PRIVILEGE_CTG
+
+#define MC_CMD_0x45_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_WOL_FILTER_GET_IN msgrequest */
+#define MC_CMD_WOL_FILTER_GET_IN_LEN 0
+
+/* MC_CMD_WOL_FILTER_GET_OUT msgresponse */
+#define MC_CMD_WOL_FILTER_GET_OUT_LEN 4
+#define MC_CMD_WOL_FILTER_GET_OUT_FILTER_ID_OFST 0
+#define MC_CMD_WOL_FILTER_GET_OUT_FILTER_ID_LEN 4
+
+
+/***********************************/
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD
+ * Add a protocol offload to NIC for lights-out state. Locks required: None.
+ * Returns: 0, ENOSYS
+ */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD 0x46
+#undef MC_CMD_0x46_PRIVILEGE_CTG
+
+#define MC_CMD_0x46_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN msgrequest */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMIN 8
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMAX 252
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMAX_MCDI2 1020
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LEN(num) (4+4*(num))
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_NUM(len) (((len)-4)/4)
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4
+#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_ARP 0x1 /* enum */
+#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_NS 0x2 /* enum */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_OFST 4
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_LEN 4
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MINNUM 1
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MAXNUM 62
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MAXNUM_MCDI2 254
+
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP msgrequest */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_LEN 14
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_OFST 4
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_LEN 6
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP_OFST 10
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP_LEN 4
+
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS msgrequest */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_LEN 42
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_OFST 4
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_LEN 6
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_SNIPV6_OFST 10
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_SNIPV6_LEN 16
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_IPV6_OFST 26
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_IPV6_LEN 16
+
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT msgresponse */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_LEN 4
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_OFST 0
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_LEN 4
+
+
+/***********************************/
+/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD
+ * Remove a protocol offload from NIC for lights-out state. Locks required:
+ * None. Returns: 0, ENOSYS
+ */
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD 0x47
+#undef MC_CMD_0x47_PRIVILEGE_CTG
+
+#define MC_CMD_0x47_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN msgrequest */
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_LEN 8
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_OFST 4
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_LEN 4
+
+/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT msgresponse */
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_MAC_RESET_RESTORE
+ * Restore MAC after block reset. Locks required: None. Returns: 0.
+ */
+#define MC_CMD_MAC_RESET_RESTORE 0x48
+
+/* MC_CMD_MAC_RESET_RESTORE_IN msgrequest */
+#define MC_CMD_MAC_RESET_RESTORE_IN_LEN 0
+
+/* MC_CMD_MAC_RESET_RESTORE_OUT msgresponse */
+#define MC_CMD_MAC_RESET_RESTORE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_TESTASSERT
+ * Deliberately trigger an assert-detonation in the firmware for testing
+ * purposes (i.e. to allow testing that the driver copes gracefully). Locks
+ * required: None Returns: 0
+ */
+#define MC_CMD_TESTASSERT 0x49
+#undef MC_CMD_0x49_PRIVILEGE_CTG
+
+#define MC_CMD_0x49_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_TESTASSERT_IN msgrequest */
+#define MC_CMD_TESTASSERT_IN_LEN 0
+
+/* MC_CMD_TESTASSERT_OUT msgresponse */
+#define MC_CMD_TESTASSERT_OUT_LEN 0
+
+/* MC_CMD_TESTASSERT_V2_IN msgrequest */
+#define MC_CMD_TESTASSERT_V2_IN_LEN 4
+/* How to provoke the assertion */
+#define MC_CMD_TESTASSERT_V2_IN_TYPE_OFST 0
+#define MC_CMD_TESTASSERT_V2_IN_TYPE_LEN 4
+/* enum: Assert using the FAIL_ASSERTION_WITH_USEFUL_VALUES macro. Unless
+ * you're testing firmware, this is what you want.
+ */
+#define MC_CMD_TESTASSERT_V2_IN_FAIL_ASSERTION_WITH_USEFUL_VALUES 0x0
+/* enum: Assert using assert(0); */
+#define MC_CMD_TESTASSERT_V2_IN_ASSERT_FALSE 0x1
+/* enum: Deliberately trigger a watchdog */
+#define MC_CMD_TESTASSERT_V2_IN_WATCHDOG 0x2
+/* enum: Deliberately trigger a trap by loading from an invalid address */
+#define MC_CMD_TESTASSERT_V2_IN_LOAD_TRAP 0x3
+/* enum: Deliberately trigger a trap by storing to an invalid address */
+#define MC_CMD_TESTASSERT_V2_IN_STORE_TRAP 0x4
+/* enum: Jump to an invalid address */
+#define MC_CMD_TESTASSERT_V2_IN_JUMP_TRAP 0x5
+
+/* MC_CMD_TESTASSERT_V2_OUT msgresponse */
+#define MC_CMD_TESTASSERT_V2_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_WORKAROUND
+ * Enable/Disable a given workaround. The mcfw will return EINVAL if it doesn't
+ * understand the given workaround number - which should not be treated as a
+ * hard error by client code. This op does not imply any semantics about each
+ * workaround, that's between the driver and the mcfw on a per-workaround
+ * basis. Locks required: None. Returns: 0, EINVAL.
+ */
+#define MC_CMD_WORKAROUND 0x4a
+#undef MC_CMD_0x4a_PRIVILEGE_CTG
+
+#define MC_CMD_0x4a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_WORKAROUND_IN msgrequest */
+#define MC_CMD_WORKAROUND_IN_LEN 8
+/* The enums here must correspond with those in MC_CMD_GET_WORKAROUND. */
+#define MC_CMD_WORKAROUND_IN_TYPE_OFST 0
+#define MC_CMD_WORKAROUND_IN_TYPE_LEN 4
+/* enum: Bug 17230 work around. */
+#define MC_CMD_WORKAROUND_BUG17230 0x1
+/* enum: Bug 35388 work around (unsafe EVQ writes). */
+#define MC_CMD_WORKAROUND_BUG35388 0x2
+/* enum: Bug35017 workaround (A64 tables must be identity map) */
+#define MC_CMD_WORKAROUND_BUG35017 0x3
+/* enum: Bug 41750 present (MC_CMD_TRIGGER_INTERRUPT won't work) */
+#define MC_CMD_WORKAROUND_BUG41750 0x4
+/* enum: Bug 42008 present (Interrupts can overtake associated events). Caution
+ * - before adding code that queries this workaround, remember that there's
+ * released Monza firmware that doesn't understand MC_CMD_WORKAROUND_BUG42008,
+ * and will hence (incorrectly) report that the bug doesn't exist.
+ */
+#define MC_CMD_WORKAROUND_BUG42008 0x5
+/* enum: Bug 26807 features present in firmware (multicast filter chaining)
+ * This feature cannot be turned on/off while there are any filters already
+ * present. The behaviour in such case depends on the acting client's privilege
+ * level. If the client has the admin privilege, then all functions that have
+ * filters installed will be FLRed and the FLR_DONE flag will be set. Otherwise
+ * the command will fail with MC_CMD_ERR_FILTERS_PRESENT.
+ */
+#define MC_CMD_WORKAROUND_BUG26807 0x6
+/* enum: Bug 61265 work around (broken EVQ TMR writes). */
+#define MC_CMD_WORKAROUND_BUG61265 0x7
+/* 0 = disable the workaround indicated by TYPE; any non-zero value = enable
+ * the workaround
+ */
+#define MC_CMD_WORKAROUND_IN_ENABLED_OFST 4
+#define MC_CMD_WORKAROUND_IN_ENABLED_LEN 4
+
+/* MC_CMD_WORKAROUND_OUT msgresponse */
+#define MC_CMD_WORKAROUND_OUT_LEN 0
+
+/* MC_CMD_WORKAROUND_EXT_OUT msgresponse: This response format will be used
+ * when (TYPE == MC_CMD_WORKAROUND_BUG26807)
+ */
+#define MC_CMD_WORKAROUND_EXT_OUT_LEN 4
+#define MC_CMD_WORKAROUND_EXT_OUT_FLAGS_OFST 0
+#define MC_CMD_WORKAROUND_EXT_OUT_FLAGS_LEN 4
+#define MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_OFST 0
+#define MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN 0
+#define MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_WIDTH 1
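The EINVAL caveat above matters in practice: a driver probing for a workaround must treat "firmware does not know this number" as "not supported", not as a failure. A sketch (MCDI helpers from mcdi.h; translation of the MCDI error to -EINVAL by efx_mcdi_rpc() is assumed; the function name is illustrative):

static int example_probe_workaround(struct efx_nic *efx, unsigned int type,
				    bool *supported)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_WORKAROUND_IN_LEN);
	int rc;

	MCDI_SET_DWORD(inbuf, WORKAROUND_IN_TYPE, type);
	MCDI_SET_DWORD(inbuf, WORKAROUND_IN_ENABLED, 1);
	rc = efx_mcdi_rpc(efx, MC_CMD_WORKAROUND, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	/* Per the comment above, EINVAL only means this firmware has never
	 * heard of the workaround number; report "unsupported", not an error.
	 */
	if (rc == -EINVAL) {
		*supported = false;
		return 0;
	}
	*supported = (rc == 0);
	return rc;
}

+
+
+/***********************************/
+/* MC_CMD_GET_PHY_MEDIA_INFO
+ * Read media-specific data from PHY (e.g. SFP/SFP+ module ID information for
+ * SFP+ PHYs). The "media type" can be found via GET_PHY_CFG
+ * (GET_PHY_CFG_OUT_MEDIA_TYPE); the valid "page number" input values, and the
+ * output data, are interpreted on a per-type basis. For SFP+, PAGE=0 or 1
+ * returns a 128-byte block read from module I2C address 0xA0 offset 0 or 0x80.
+ * For QSFP, PAGE=-1 is the lower (unbanked) page. PAGE=2 is the EEPROM and
+ * PAGE=3 is the module limits. For DSFP, module addressing requires a
+ * "BANK:PAGE". Not every bank has the same number of pages. See the Common
+ * Management Interface Specification (CMIS) for further details. A BANK:PAGE
+ * of "0xffff:0xffff" retrieves the lower (unbanked) page. Locks required -
+ * None. Return code - 0.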
+ */ +#define MC_CMD_GET_PHY_MEDIA_INFO 0x4b +#undef MC_CMD_0x4b_PRIVILEGE_CTG + +#define MC_CMD_0x4b_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_GET_PHY_MEDIA_INFO_IN msgrequest */ +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN 4 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_OFST 0 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_LEN 4 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_DSFP_PAGE_OFST 0 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_DSFP_PAGE_LBN 0 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_DSFP_PAGE_WIDTH 16 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_DSFP_BANK_OFST 0 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_DSFP_BANK_LBN 16 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_DSFP_BANK_WIDTH 16 + +/* MC_CMD_GET_PHY_MEDIA_INFO_IN_V2 msgrequest */ +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_LEN 12 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_PAGE_OFST 0 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_PAGE_LEN 4 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_DSFP_PAGE_OFST 0 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_DSFP_PAGE_LBN 0 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_DSFP_PAGE_WIDTH 16 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_DSFP_BANK_OFST 0 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_DSFP_BANK_LBN 16 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_DSFP_BANK_WIDTH 16 +/* Target port to request PHY state for. Uses MAE_LINK_ENDPOINT_SELECTOR which + * identifies a real or virtual network port by MAE port and link end. See the + * structure definition for more details + */ +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_OFST 4 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_LEN 8 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_LO_OFST 4 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_LO_LEN 4 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_LO_LBN 32 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_LO_WIDTH 32 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_HI_OFST 8 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_HI_LEN 4 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_HI_LBN 64 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_HI_WIDTH 32 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_OFST 4 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_LEN 4 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FLAT_OFST 4 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FLAT_LEN 4 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_TYPE_OFST 7 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_TYPE_LEN 1 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_OFST 4 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_LEN 3 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_LBN 32 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_WIDTH 4 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_LBN 52 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_WIDTH 4 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_LBN 48 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_WIDTH 4 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_OFST 6 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_LEN 1 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_OFST 4 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_LEN 2 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_LINK_END_OFST 8 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_LINK_END_LEN 4 +#define 
MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_OFST 4 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_LEN 8 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_LO_OFST 4 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_LO_LEN 4 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_LO_LBN 32 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_LO_WIDTH 32 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_HI_OFST 8 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_HI_LEN 4 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_HI_LBN 64 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_HI_WIDTH 32 + +/* MC_CMD_GET_PHY_MEDIA_INFO_OUT msgresponse */ +#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMIN 5 +#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX 252 +#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(num) (4+1*(num)) +#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_NUM(len) (((len)-4)/1) +/* in bytes */ +#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_OFST 0 +#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_LEN 4 +#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST 4 +#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_LEN 1 +#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MINNUM 1 +#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MAXNUM 248 +#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MAXNUM_MCDI2 1016 + + +/***********************************/ +/* MC_CMD_NVRAM_TEST + * Test a particular NVRAM partition for valid contents (where "valid" depends + * on the type of partition). + */ +#define MC_CMD_NVRAM_TEST 0x4c +#undef MC_CMD_0x4c_PRIVILEGE_CTG + +#define MC_CMD_0x4c_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND + +/* MC_CMD_NVRAM_TEST_IN msgrequest */ +#define MC_CMD_NVRAM_TEST_IN_LEN 4 +#define MC_CMD_NVRAM_TEST_IN_TYPE_OFST 0 +#define MC_CMD_NVRAM_TEST_IN_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ + +/* MC_CMD_NVRAM_TEST_OUT msgresponse */ +#define MC_CMD_NVRAM_TEST_OUT_LEN 4 +#define MC_CMD_NVRAM_TEST_OUT_RESULT_OFST 0 +#define MC_CMD_NVRAM_TEST_OUT_RESULT_LEN 4 +/* enum: Passed. */ +#define MC_CMD_NVRAM_TEST_PASS 0x0 +/* enum: Failed. */ +#define MC_CMD_NVRAM_TEST_FAIL 0x1 +/* enum: Not supported. */ +#define MC_CMD_NVRAM_TEST_NOTSUPP 0x2 + + +/***********************************/ +/* MC_CMD_MRSFP_TWEAK + * Read status and/or set parameters for the 'mrsfp' driver in mr_rusty builds. + * I2C I/O expander bits are always read; if equaliser parameters are supplied, + * they are configured first. Locks required: None. Return code: 0, EINVAL. + */ +#define MC_CMD_MRSFP_TWEAK 0x4d + +/* MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG msgrequest */ +#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_LEN 16 +/* 0-6 low->high de-emph. 
*/
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_OFST 0
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_LEN 4
+/* 0-8 low->high ref.V */
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_OFST 4
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_LEN 4
+/* 0-8 low->high boost */
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_OFST 8
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_LEN 4
+/* 0-8 low->high ref.V */
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_OFST 12
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_LEN 4
+
+/* MC_CMD_MRSFP_TWEAK_IN_READ_ONLY msgrequest */
+#define MC_CMD_MRSFP_TWEAK_IN_READ_ONLY_LEN 0
+
+/* MC_CMD_MRSFP_TWEAK_OUT msgresponse */
+#define MC_CMD_MRSFP_TWEAK_OUT_LEN 12
+/* input bits */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_OFST 0
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_LEN 4
+/* output bits */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_OFST 4
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_LEN 4
+/* direction */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OFST 8
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_LEN 4
+/* enum: Out. */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OUT 0x0
+/* enum: In. */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_IN 0x1
+
+
+/***********************************/
+/* MC_CMD_SENSOR_SET_LIMS
+ * Adjusts the sensor limits. This is a warranty-voiding operation. Returns:
+ * ENOENT if the sensor specified does not exist, EINVAL if the limits are out
+ * of range.
+ */
+#define MC_CMD_SENSOR_SET_LIMS 0x4e
+#undef MC_CMD_0x4e_PRIVILEGE_CTG
+
+#define MC_CMD_0x4e_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_SENSOR_SET_LIMS_IN msgrequest */
+#define MC_CMD_SENSOR_SET_LIMS_IN_LEN 20
+#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_OFST 0
+#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
+/* interpretation is sensor-specific. */
+#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_OFST 4
+#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_LEN 4
+/* interpretation is sensor-specific. */
+#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_OFST 8
+#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_LEN 4
+/* interpretation is sensor-specific. */
+#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_OFST 12
+#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_LEN 4
+/* interpretation is sensor-specific. */
+#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_OFST 16
+#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_LEN 4
+
+/* MC_CMD_SENSOR_SET_LIMS_OUT msgresponse */
+#define MC_CMD_SENSOR_SET_LIMS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_RESOURCE_LIMITS
+ */
+#define MC_CMD_GET_RESOURCE_LIMITS 0x4f
+
+/* MC_CMD_GET_RESOURCE_LIMITS_IN msgrequest */
+#define MC_CMD_GET_RESOURCE_LIMITS_IN_LEN 0
+
+/* MC_CMD_GET_RESOURCE_LIMITS_OUT msgresponse */
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN 16
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_BUFTBL_OFST 0
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_BUFTBL_LEN 4
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_EVQ_OFST 4
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_EVQ_LEN 4
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_OFST 8
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_LEN 4
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_OFST 12
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_LEN 4
+
+
+/***********************************/
+/* MC_CMD_NVRAM_PARTITIONS
+ * Reads the list of available virtual NVRAM partition types. Locks required:
+ * none. Returns: 0, EINVAL (bad type).
+ */
+#define MC_CMD_NVRAM_PARTITIONS 0x51
+#undef MC_CMD_0x51_PRIVILEGE_CTG
+
+#define MC_CMD_0x51_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_PARTITIONS_IN msgrequest */
+#define MC_CMD_NVRAM_PARTITIONS_IN_LEN 0
+
+/* MC_CMD_NVRAM_PARTITIONS_OUT msgresponse */
+#define MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN 4
+#define MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX 252
+#define MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_NVRAM_PARTITIONS_OUT_LEN(num) (4+4*(num))
+#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_NUM(len) (((len)-4)/4)
+/* total number of partitions */
+#define MC_CMD_NVRAM_PARTITIONS_OUT_NUM_PARTITIONS_OFST 0
+#define MC_CMD_NVRAM_PARTITIONS_OUT_NUM_PARTITIONS_LEN 4
+/* type ID code for each of NUM_PARTITIONS partitions */
+#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_OFST 4
+#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_LEN 4
+#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MINNUM 0
+#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MAXNUM 62
+#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MAXNUM_MCDI2 254
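As a minimal sketch of how a caller might walk this variable-length response
under the layout the macros above describe (the helper here is hypothetical
and not part of the patch; the in-tree driver would use the MCDI_* accessors
from mcdi.h rather than open-coded reads):

#include <stddef.h>
#include <stdint.h>

/* Hypothetical little-endian dword read at a byte offset of an MCDI
 * response buffer.
 */
static uint32_t mcdi_dword_at(const uint8_t *buf, size_t ofst)
{
	return (uint32_t)buf[ofst] |
	       ((uint32_t)buf[ofst + 1] << 8) |
	       ((uint32_t)buf[ofst + 2] << 16) |
	       ((uint32_t)buf[ofst + 3] << 24);
}

static void walk_partitions(const uint8_t *outbuf, size_t outlen)
{
	size_t i, n;

	if (outlen < MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN)
		return;
	/* The _NUM(len) macro maps the actual response length onto the
	 * number of TYPE_ID array elements present.
	 */
	n = MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_NUM(outlen);
	for (i = 0; i < n; i++) {
		uint32_t type_id = mcdi_dword_at(outbuf,
			MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_OFST +
			i * MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_LEN);

		/* type_id is one of the NVRAM_PARTITION_TYPE_* enum
		 * values defined further down this header.
		 */
		(void)type_id;
	}
}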
+
+
+/***********************************/
+/* MC_CMD_NVRAM_METADATA
+ * Reads soft metadata for a virtual NVRAM partition type. Locks required:
+ * none. Returns: 0, EINVAL (bad type).
+ */
+#define MC_CMD_NVRAM_METADATA 0x52
+#undef MC_CMD_0x52_PRIVILEGE_CTG
+
+#define MC_CMD_0x52_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_METADATA_IN msgrequest */
+#define MC_CMD_NVRAM_METADATA_IN_LEN 4
+/* Partition type ID code */
+#define MC_CMD_NVRAM_METADATA_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_METADATA_IN_TYPE_LEN 4
+
+/* MC_CMD_NVRAM_METADATA_OUT msgresponse */
+#define MC_CMD_NVRAM_METADATA_OUT_LENMIN 20
+#define MC_CMD_NVRAM_METADATA_OUT_LENMAX 252
+#define MC_CMD_NVRAM_METADATA_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_NVRAM_METADATA_OUT_LEN(num) (20+1*(num))
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_NUM(len) (((len)-20)/1)
+/* Partition type ID code */
+#define MC_CMD_NVRAM_METADATA_OUT_TYPE_OFST 0
+#define MC_CMD_NVRAM_METADATA_OUT_TYPE_LEN 4
+#define MC_CMD_NVRAM_METADATA_OUT_FLAGS_OFST 4
+#define MC_CMD_NVRAM_METADATA_OUT_FLAGS_LEN 4
+#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_OFST 4
+#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN 0
+#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_WIDTH 1
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_OFST 4
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_LBN 1
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_WIDTH 1
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_OFST 4
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_LBN 2
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_WIDTH 1
+/* Subtype ID code for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_OFST 8
+#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_LEN 4
+/* 1st component of W.X.Y.Z version number for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_W_OFST 12
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_W_LEN 2
+/* 2nd component of W.X.Y.Z version number for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_X_OFST 14
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_X_LEN 2
+/* 3rd component of W.X.Y.Z version number for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Y_OFST 16
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Y_LEN 2
+/* 4th component of W.X.Y.Z version number for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Z_OFST 18
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Z_LEN 2
+/* Zero-terminated string describing the content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_OFST 20
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_LEN 1
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_MINNUM 0
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_MAXNUM 232
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_MAXNUM_MCDI2 1000
+
+
+/***********************************/
+/* MC_CMD_GET_MAC_ADDRESSES
+ * Returns the base MAC, count and stride for the requesting function
+ */
+#define MC_CMD_GET_MAC_ADDRESSES 0x55
+#undef MC_CMD_0x55_PRIVILEGE_CTG
+
+#define MC_CMD_0x55_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_MAC_ADDRESSES_IN msgrequest */
+#define MC_CMD_GET_MAC_ADDRESSES_IN_LEN 0
+
+/* MC_CMD_GET_MAC_ADDRESSES_OUT msgresponse */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_LEN 16
+/* Base MAC address */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE_OFST 0
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE_LEN 6
+/* Padding */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_RESERVED_OFST 6
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_RESERVED_LEN 2
+/* Number of allocated MAC addresses */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_COUNT_OFST 8
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_COUNT_LEN 4
+/* Spacing of allocated MAC addresses */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_OFST 12
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_LEN 4
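The OUT fields describe a block allocation rather than a list: MAC_COUNT
addresses starting at MAC_ADDR_BASE, spaced MAC_STRIDE apart. A natural
reading, assumed here but not spelled out by the header, is that the stride
is a numeric increment on the 48-bit address value, so address i is
BASE + i * STRIDE; a hypothetical helper:

#include <stdint.h>

/* Compute the i-th allocated address from the BASE/STRIDE pair returned
 * by MC_CMD_GET_MAC_ADDRESSES, treating the big-endian 6-byte MAC as a
 * 48-bit integer. index should be below the reported MAC_COUNT.
 */
static void nth_mac_address(const uint8_t base[6], uint32_t stride,
			    uint32_t index, uint8_t out[6])
{
	uint64_t mac = 0;
	int i;

	for (i = 0; i < 6; i++)
		mac = (mac << 8) | base[i];
	mac += (uint64_t)index * stride;
	for (i = 5; i >= 0; i--) {
		out[i] = (uint8_t)(mac & 0xff);
		mac >>= 8;
	}
}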
+
+
+/***********************************/
+/* MC_CMD_CLP
+ * Perform a CLP related operation, see SF-110495-PS for details of CLP
+ * processing. This command has been extended to accommodate the requirements of
+ * different manufacturers which are to be found in SF-119187-TC, SF-119186-TC,
+ * SF-120509-TC and SF-117282-PS.
+ */
+#define MC_CMD_CLP 0x56
+#undef MC_CMD_0x56_PRIVILEGE_CTG
+
+#define MC_CMD_0x56_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_CLP_IN msgrequest */
+#define MC_CMD_CLP_IN_LEN 4
+/* Sub operation */
+#define MC_CMD_CLP_IN_OP_OFST 0
+#define MC_CMD_CLP_IN_OP_LEN 4
+/* enum: Return to factory default settings */
+#define MC_CMD_CLP_OP_DEFAULT 0x1
+/* enum: Set MAC address */
+#define MC_CMD_CLP_OP_SET_MAC 0x2
+/* enum: Get MAC address */
+#define MC_CMD_CLP_OP_GET_MAC 0x3
+/* enum: Set UEFI/GPXE boot mode */
+#define MC_CMD_CLP_OP_SET_BOOT 0x4
+/* enum: Get UEFI/GPXE boot mode */
+#define MC_CMD_CLP_OP_GET_BOOT 0x5
+
+/* MC_CMD_CLP_OUT msgresponse */
+#define MC_CMD_CLP_OUT_LEN 0
+
+/* MC_CMD_CLP_IN_DEFAULT msgrequest */
+#define MC_CMD_CLP_IN_DEFAULT_LEN 4
+/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* MC_CMD_CLP_IN_OP_LEN 4 */
+
+/* MC_CMD_CLP_OUT_DEFAULT msgresponse */
+#define MC_CMD_CLP_OUT_DEFAULT_LEN 0
+
+/* MC_CMD_CLP_IN_SET_MAC msgrequest */
+#define MC_CMD_CLP_IN_SET_MAC_LEN 12
+/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* MC_CMD_CLP_IN_OP_LEN 4 */
+/* The MAC address assigned to port. A zero MAC address of 00:00:00:00:00:00
+ * restores the permanent (factory-programmed) MAC address associated with the
+ * port. A non-zero MAC address persists until a PCIe reset or a power cycle.
+ */
+#define MC_CMD_CLP_IN_SET_MAC_ADDR_OFST 4
+#define MC_CMD_CLP_IN_SET_MAC_ADDR_LEN 6
+/* Padding */
+#define MC_CMD_CLP_IN_SET_MAC_RESERVED_OFST 10
+#define MC_CMD_CLP_IN_SET_MAC_RESERVED_LEN 2
+
+/* MC_CMD_CLP_OUT_SET_MAC msgresponse */
+#define MC_CMD_CLP_OUT_SET_MAC_LEN 0
+
+/* MC_CMD_CLP_IN_SET_MAC_V2 msgrequest */
+#define MC_CMD_CLP_IN_SET_MAC_V2_LEN 16
+/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* MC_CMD_CLP_IN_OP_LEN 4 */
+/* The MAC address assigned to port. A zero MAC address of 00:00:00:00:00:00
+ * restores the permanent (factory-programmed) MAC address associated with the
+ * port. A non-zero MAC address persists until a PCIe reset or a power cycle.
+ */
+#define MC_CMD_CLP_IN_SET_MAC_V2_ADDR_OFST 4
+#define MC_CMD_CLP_IN_SET_MAC_V2_ADDR_LEN 6
+/* Padding */
+#define MC_CMD_CLP_IN_SET_MAC_V2_RESERVED_OFST 10
+#define MC_CMD_CLP_IN_SET_MAC_V2_RESERVED_LEN 2
+#define MC_CMD_CLP_IN_SET_MAC_V2_FLAGS_OFST 12
+#define MC_CMD_CLP_IN_SET_MAC_V2_FLAGS_LEN 4
+#define MC_CMD_CLP_IN_SET_MAC_V2_VIRTUAL_OFST 12
+#define MC_CMD_CLP_IN_SET_MAC_V2_VIRTUAL_LBN 0
+#define MC_CMD_CLP_IN_SET_MAC_V2_VIRTUAL_WIDTH 1
+
+/* MC_CMD_CLP_IN_GET_MAC msgrequest */
+#define MC_CMD_CLP_IN_GET_MAC_LEN 4
+/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* MC_CMD_CLP_IN_OP_LEN 4 */
+
+/* MC_CMD_CLP_IN_GET_MAC_V2 msgrequest */
+#define MC_CMD_CLP_IN_GET_MAC_V2_LEN 8
+/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* MC_CMD_CLP_IN_OP_LEN 4 */
+#define MC_CMD_CLP_IN_GET_MAC_V2_FLAGS_OFST 4
+#define MC_CMD_CLP_IN_GET_MAC_V2_FLAGS_LEN 4
+#define MC_CMD_CLP_IN_GET_MAC_V2_PERMANENT_OFST 4
+#define MC_CMD_CLP_IN_GET_MAC_V2_PERMANENT_LBN 0
+#define MC_CMD_CLP_IN_GET_MAC_V2_PERMANENT_WIDTH 1
+
+/* MC_CMD_CLP_OUT_GET_MAC msgresponse */
+#define MC_CMD_CLP_OUT_GET_MAC_LEN 8
+/* MAC address assigned to port */
+#define MC_CMD_CLP_OUT_GET_MAC_ADDR_OFST 0
+#define MC_CMD_CLP_OUT_GET_MAC_ADDR_LEN 6
+/* Padding */
+#define MC_CMD_CLP_OUT_GET_MAC_RESERVED_OFST 6
+#define MC_CMD_CLP_OUT_GET_MAC_RESERVED_LEN 2
+
+/* MC_CMD_CLP_IN_SET_BOOT msgrequest */
+#define MC_CMD_CLP_IN_SET_BOOT_LEN 5
+/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* MC_CMD_CLP_IN_OP_LEN 4 */
+/* Boot flag */
+#define MC_CMD_CLP_IN_SET_BOOT_FLAG_OFST 4
+#define MC_CMD_CLP_IN_SET_BOOT_FLAG_LEN 1
+
+/* MC_CMD_CLP_OUT_SET_BOOT msgresponse */
+#define MC_CMD_CLP_OUT_SET_BOOT_LEN 0
+
+/* MC_CMD_CLP_IN_GET_BOOT msgrequest */
+#define MC_CMD_CLP_IN_GET_BOOT_LEN 4
+/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* MC_CMD_CLP_IN_OP_LEN 4 */
+
+/* MC_CMD_CLP_OUT_GET_BOOT msgresponse */
+#define MC_CMD_CLP_OUT_GET_BOOT_LEN 4
+/* Boot flag */
+#define MC_CMD_CLP_OUT_GET_BOOT_FLAG_OFST 0
+#define MC_CMD_CLP_OUT_GET_BOOT_FLAG_LEN 1
+/* Padding */
+#define MC_CMD_CLP_OUT_GET_BOOT_RESERVED_OFST 1
+#define MC_CMD_CLP_OUT_GET_BOOT_RESERVED_LEN 3
+
+
+/***********************************/
+/* MC_CMD_MUM
+ * Perform a MUM operation
+ */
+#define MC_CMD_MUM 0x57
+#undef MC_CMD_0x57_PRIVILEGE_CTG
+
+#define MC_CMD_0x57_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_MUM_IN msgrequest */
+#define MC_CMD_MUM_IN_LEN 4
+#define MC_CMD_MUM_IN_OP_HDR_OFST 0
+#define MC_CMD_MUM_IN_OP_HDR_LEN 4
+#define MC_CMD_MUM_IN_OP_OFST 0
+#define MC_CMD_MUM_IN_OP_LBN 0
+#define MC_CMD_MUM_IN_OP_WIDTH 8
+/* enum: NULL MCDI command to MUM */
+#define MC_CMD_MUM_OP_NULL 0x1
+/* enum: Get MUM version */
+#define MC_CMD_MUM_OP_GET_VERSION 0x2
+/* enum: Issue raw I2C command to MUM */
+#define MC_CMD_MUM_OP_RAW_CMD 0x3
+/* enum: Read from registers on devices connected to MUM. */
+#define MC_CMD_MUM_OP_READ 0x4
+/* enum: Write to registers on devices connected to MUM. */
+#define MC_CMD_MUM_OP_WRITE 0x5
+/* enum: Control UART logging.
*/ +#define MC_CMD_MUM_OP_LOG 0x6 +/* enum: Operations on MUM GPIO lines */ +#define MC_CMD_MUM_OP_GPIO 0x7 +/* enum: Get sensor readings from MUM */ +#define MC_CMD_MUM_OP_READ_SENSORS 0x8 +/* enum: Initiate clock programming on the MUM */ +#define MC_CMD_MUM_OP_PROGRAM_CLOCKS 0x9 +/* enum: Initiate FPGA load from flash on the MUM */ +#define MC_CMD_MUM_OP_FPGA_LOAD 0xa +/* enum: Request sensor reading from MUM ADC resulting from earlier request via + * MUM ATB + */ +#define MC_CMD_MUM_OP_READ_ATB_SENSOR 0xb +/* enum: Send commands relating to the QSFP ports via the MUM for PHY + * operations + */ +#define MC_CMD_MUM_OP_QSFP 0xc +/* enum: Request discrete and SODIMM DDR info (type, size, speed grade, voltage + * level) from MUM + */ +#define MC_CMD_MUM_OP_READ_DDR_INFO 0xd + +/* MC_CMD_MUM_IN_NULL msgrequest */ +#define MC_CMD_MUM_IN_NULL_LEN 4 +/* MUM cmd header */ +#define MC_CMD_MUM_IN_CMD_OFST 0 +#define MC_CMD_MUM_IN_CMD_LEN 4 + +/* MC_CMD_MUM_IN_GET_VERSION msgrequest */ +#define MC_CMD_MUM_IN_GET_VERSION_LEN 4 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ + +/* MC_CMD_MUM_IN_READ msgrequest */ +#define MC_CMD_MUM_IN_READ_LEN 16 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +/* ID of (device connected to MUM) to read from registers of */ +#define MC_CMD_MUM_IN_READ_DEVICE_OFST 4 +#define MC_CMD_MUM_IN_READ_DEVICE_LEN 4 +/* enum: Hittite HMC1035 clock generator on Sorrento board */ +#define MC_CMD_MUM_DEV_HITTITE 0x1 +/* enum: Hittite HMC1035 clock generator for NIC-side on Sorrento board */ +#define MC_CMD_MUM_DEV_HITTITE_NIC 0x2 +/* 32-bit address to read from */ +#define MC_CMD_MUM_IN_READ_ADDR_OFST 8 +#define MC_CMD_MUM_IN_READ_ADDR_LEN 4 +/* Number of words to read. 
*/ +#define MC_CMD_MUM_IN_READ_NUMWORDS_OFST 12 +#define MC_CMD_MUM_IN_READ_NUMWORDS_LEN 4 + +/* MC_CMD_MUM_IN_WRITE msgrequest */ +#define MC_CMD_MUM_IN_WRITE_LENMIN 16 +#define MC_CMD_MUM_IN_WRITE_LENMAX 252 +#define MC_CMD_MUM_IN_WRITE_LENMAX_MCDI2 1020 +#define MC_CMD_MUM_IN_WRITE_LEN(num) (12+4*(num)) +#define MC_CMD_MUM_IN_WRITE_BUFFER_NUM(len) (((len)-12)/4) +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +/* ID of (device connected to MUM) to write to registers of */ +#define MC_CMD_MUM_IN_WRITE_DEVICE_OFST 4 +#define MC_CMD_MUM_IN_WRITE_DEVICE_LEN 4 +/* enum: Hittite HMC1035 clock generator on Sorrento board */ +/* MC_CMD_MUM_DEV_HITTITE 0x1 */ +/* 32-bit address to write to */ +#define MC_CMD_MUM_IN_WRITE_ADDR_OFST 8 +#define MC_CMD_MUM_IN_WRITE_ADDR_LEN 4 +/* Words to write */ +#define MC_CMD_MUM_IN_WRITE_BUFFER_OFST 12 +#define MC_CMD_MUM_IN_WRITE_BUFFER_LEN 4 +#define MC_CMD_MUM_IN_WRITE_BUFFER_MINNUM 1 +#define MC_CMD_MUM_IN_WRITE_BUFFER_MAXNUM 60 +#define MC_CMD_MUM_IN_WRITE_BUFFER_MAXNUM_MCDI2 252 + +/* MC_CMD_MUM_IN_RAW_CMD msgrequest */ +#define MC_CMD_MUM_IN_RAW_CMD_LENMIN 17 +#define MC_CMD_MUM_IN_RAW_CMD_LENMAX 252 +#define MC_CMD_MUM_IN_RAW_CMD_LENMAX_MCDI2 1020 +#define MC_CMD_MUM_IN_RAW_CMD_LEN(num) (16+1*(num)) +#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_NUM(len) (((len)-16)/1) +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +/* MUM I2C cmd code */ +#define MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_OFST 4 +#define MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_LEN 4 +/* Number of bytes to write */ +#define MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_OFST 8 +#define MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_LEN 4 +/* Number of bytes to read */ +#define MC_CMD_MUM_IN_RAW_CMD_NUM_READ_OFST 12 +#define MC_CMD_MUM_IN_RAW_CMD_NUM_READ_LEN 4 +/* Bytes to write */ +#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_OFST 16 +#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_LEN 1 +#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MINNUM 1 +#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MAXNUM 236 +#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MAXNUM_MCDI2 1004 + +/* MC_CMD_MUM_IN_LOG msgrequest */ +#define MC_CMD_MUM_IN_LOG_LEN 8 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_LOG_OP_OFST 4 +#define MC_CMD_MUM_IN_LOG_OP_LEN 4 +#define MC_CMD_MUM_IN_LOG_OP_UART 0x1 /* enum */ + +/* MC_CMD_MUM_IN_LOG_OP_UART msgrequest */ +#define MC_CMD_MUM_IN_LOG_OP_UART_LEN 12 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +/* MC_CMD_MUM_IN_LOG_OP_OFST 4 */ +/* MC_CMD_MUM_IN_LOG_OP_LEN 4 */ +/* Enable/disable debug output to UART */ +#define MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_OFST 8 +#define MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_LEN 4 + +/* MC_CMD_MUM_IN_GPIO msgrequest */ +#define MC_CMD_MUM_IN_GPIO_LEN 8 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_GPIO_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_HDR_LEN 4 +#define MC_CMD_MUM_IN_GPIO_OPCODE_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OPCODE_LBN 0 +#define MC_CMD_MUM_IN_GPIO_OPCODE_WIDTH 8 +#define MC_CMD_MUM_IN_GPIO_IN_READ 0x0 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OUT_WRITE 0x1 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OUT_READ 0x2 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE 0x3 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ 0x4 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OP 0x5 /* enum */ + +/* MC_CMD_MUM_IN_GPIO_IN_READ msgrequest */ +#define MC_CMD_MUM_IN_GPIO_IN_READ_LEN 8 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* 
MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_GPIO_IN_READ_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_IN_READ_HDR_LEN 4 + +/* MC_CMD_MUM_IN_GPIO_OUT_WRITE msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_LEN 16 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_LEN 4 +/* The first 32-bit word to be written to the GPIO OUT register. */ +#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_OFST 8 +#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_LEN 4 +/* The second 32-bit word to be written to the GPIO OUT register. */ +#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_OFST 12 +#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_LEN 4 + +/* MC_CMD_MUM_IN_GPIO_OUT_READ msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OUT_READ_LEN 8 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_LEN 4 + +/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_LEN 16 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_LEN 4 +/* The first 32-bit word to be written to the GPIO OUT ENABLE register. */ +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_OFST 8 +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_LEN 4 +/* The second 32-bit word to be written to the GPIO OUT ENABLE register. */ +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_OFST 12 +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_LEN 4 + +/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_LEN 8 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_LEN 4 + +/* MC_CMD_MUM_IN_GPIO_OP msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OP_LEN 8 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_GPIO_OP_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OP_HDR_LEN 4 +#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_LBN 8 +#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_WIDTH 8 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ 0x0 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE 0x1 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG 0x2 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE 0x3 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_LBN 16 +#define MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_WIDTH 8 + +/* MC_CMD_MUM_IN_GPIO_OP_OUT_READ msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_LEN 8 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_LEN 4 + +/* MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_LEN 8 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_LEN 4 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_LBN 24 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_WIDTH 8 + +/* MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_LEN 8 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ 
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_LEN 4 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_LBN 24 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_WIDTH 8 + +/* MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_LEN 8 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_LEN 4 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_LBN 24 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_WIDTH 8 + +/* MC_CMD_MUM_IN_READ_SENSORS msgrequest */ +#define MC_CMD_MUM_IN_READ_SENSORS_LEN 8 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_READ_SENSORS_PARAMS_OFST 4 +#define MC_CMD_MUM_IN_READ_SENSORS_PARAMS_LEN 4 +#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_OFST 4 +#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_LBN 0 +#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_WIDTH 8 +#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_OFST 4 +#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_LBN 8 +#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_WIDTH 8 + +/* MC_CMD_MUM_IN_PROGRAM_CLOCKS msgrequest */ +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_LEN 12 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +/* Bit-mask of clocks to be programmed */ +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_OFST 4 +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_LEN 4 +#define MC_CMD_MUM_CLOCK_ID_FPGA 0x0 /* enum */ +#define MC_CMD_MUM_CLOCK_ID_DDR 0x1 /* enum */ +#define MC_CMD_MUM_CLOCK_ID_NIC 0x2 /* enum */ +/* Control flags for clock programming */ +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_OFST 8 +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_LEN 4 +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_OFST 8 +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_LBN 0 +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_WIDTH 1 +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_OFST 8 +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_LBN 1 +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_WIDTH 1 +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_OFST 8 +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_LBN 2 +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_WIDTH 1 + +/* MC_CMD_MUM_IN_FPGA_LOAD msgrequest */ +#define MC_CMD_MUM_IN_FPGA_LOAD_LEN 8 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +/* Enable/Disable FPGA config from flash */ +#define MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_OFST 4 +#define MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_LEN 4 + +/* MC_CMD_MUM_IN_READ_ATB_SENSOR msgrequest */ +#define MC_CMD_MUM_IN_READ_ATB_SENSOR_LEN 4 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ + +/* MC_CMD_MUM_IN_QSFP msgrequest */ +#define MC_CMD_MUM_IN_QSFP_LEN 12 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_QSFP_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_HDR_LEN 4 +#define MC_CMD_MUM_IN_QSFP_OPCODE_OFST 4 +#define MC_CMD_MUM_IN_QSFP_OPCODE_LBN 0 +#define MC_CMD_MUM_IN_QSFP_OPCODE_WIDTH 4 +#define MC_CMD_MUM_IN_QSFP_INIT 0x0 /* enum */ +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE 0x1 /* enum */ +#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP 0x2 /* enum */ +#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO 0x3 /* enum */ 
+#define MC_CMD_MUM_IN_QSFP_FILL_STATS 0x4 /* enum */ +#define MC_CMD_MUM_IN_QSFP_POLL_BIST 0x5 /* enum */ +#define MC_CMD_MUM_IN_QSFP_IDX_OFST 8 +#define MC_CMD_MUM_IN_QSFP_IDX_LEN 4 + +/* MC_CMD_MUM_IN_QSFP_INIT msgrequest */ +#define MC_CMD_MUM_IN_QSFP_INIT_LEN 16 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_QSFP_INIT_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_INIT_HDR_LEN 4 +#define MC_CMD_MUM_IN_QSFP_INIT_IDX_OFST 8 +#define MC_CMD_MUM_IN_QSFP_INIT_IDX_LEN 4 +#define MC_CMD_MUM_IN_QSFP_INIT_CAGE_OFST 12 +#define MC_CMD_MUM_IN_QSFP_INIT_CAGE_LEN 4 + +/* MC_CMD_MUM_IN_QSFP_RECONFIGURE msgrequest */ +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_LEN 24 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_LEN 4 +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_OFST 8 +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_LEN 4 +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_OFST 12 +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_LEN 4 +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_OFST 16 +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_LEN 4 +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_OFST 20 +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_LEN 4 + +/* MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP msgrequest */ +#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_LEN 12 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_LEN 4 +#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_OFST 8 +#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_LEN 4 + +/* MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO msgrequest */ +#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_LEN 16 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_LEN 4 +#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_OFST 8 +#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_LEN 4 +#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_OFST 12 +#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_LEN 4 + +/* MC_CMD_MUM_IN_QSFP_FILL_STATS msgrequest */ +#define MC_CMD_MUM_IN_QSFP_FILL_STATS_LEN 12 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_LEN 4 +#define MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_OFST 8 +#define MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_LEN 4 + +/* MC_CMD_MUM_IN_QSFP_POLL_BIST msgrequest */ +#define MC_CMD_MUM_IN_QSFP_POLL_BIST_LEN 12 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_LEN 4 +#define MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_OFST 8 +#define MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_LEN 4 + +/* MC_CMD_MUM_IN_READ_DDR_INFO msgrequest */ +#define MC_CMD_MUM_IN_READ_DDR_INFO_LEN 4 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ + +/* MC_CMD_MUM_OUT msgresponse */ +#define MC_CMD_MUM_OUT_LEN 0 + +/* MC_CMD_MUM_OUT_NULL msgresponse */ +#define MC_CMD_MUM_OUT_NULL_LEN 0 + +/* MC_CMD_MUM_OUT_GET_VERSION msgresponse */ +#define MC_CMD_MUM_OUT_GET_VERSION_LEN 12 +#define MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_OFST 0 +#define MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_LEN 4 +#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_OFST 4 +#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LEN 8 +#define 
MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_OFST 4 +#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_LEN 4 +#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_LBN 32 +#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_WIDTH 32 +#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_HI_OFST 8 +#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_HI_LEN 4 +#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_HI_LBN 64 +#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_HI_WIDTH 32 + +/* MC_CMD_MUM_OUT_RAW_CMD msgresponse */ +#define MC_CMD_MUM_OUT_RAW_CMD_LENMIN 1 +#define MC_CMD_MUM_OUT_RAW_CMD_LENMAX 252 +#define MC_CMD_MUM_OUT_RAW_CMD_LENMAX_MCDI2 1020 +#define MC_CMD_MUM_OUT_RAW_CMD_LEN(num) (0+1*(num)) +#define MC_CMD_MUM_OUT_RAW_CMD_DATA_NUM(len) (((len)-0)/1) +/* returned data */ +#define MC_CMD_MUM_OUT_RAW_CMD_DATA_OFST 0 +#define MC_CMD_MUM_OUT_RAW_CMD_DATA_LEN 1 +#define MC_CMD_MUM_OUT_RAW_CMD_DATA_MINNUM 1 +#define MC_CMD_MUM_OUT_RAW_CMD_DATA_MAXNUM 252 +#define MC_CMD_MUM_OUT_RAW_CMD_DATA_MAXNUM_MCDI2 1020 + +/* MC_CMD_MUM_OUT_READ msgresponse */ +#define MC_CMD_MUM_OUT_READ_LENMIN 4 +#define MC_CMD_MUM_OUT_READ_LENMAX 252 +#define MC_CMD_MUM_OUT_READ_LENMAX_MCDI2 1020 +#define MC_CMD_MUM_OUT_READ_LEN(num) (0+4*(num)) +#define MC_CMD_MUM_OUT_READ_BUFFER_NUM(len) (((len)-0)/4) +#define MC_CMD_MUM_OUT_READ_BUFFER_OFST 0 +#define MC_CMD_MUM_OUT_READ_BUFFER_LEN 4 +#define MC_CMD_MUM_OUT_READ_BUFFER_MINNUM 1 +#define MC_CMD_MUM_OUT_READ_BUFFER_MAXNUM 63 +#define MC_CMD_MUM_OUT_READ_BUFFER_MAXNUM_MCDI2 255 + +/* MC_CMD_MUM_OUT_WRITE msgresponse */ +#define MC_CMD_MUM_OUT_WRITE_LEN 0 + +/* MC_CMD_MUM_OUT_LOG msgresponse */ +#define MC_CMD_MUM_OUT_LOG_LEN 0 + +/* MC_CMD_MUM_OUT_LOG_OP_UART msgresponse */ +#define MC_CMD_MUM_OUT_LOG_OP_UART_LEN 0 + +/* MC_CMD_MUM_OUT_GPIO_IN_READ msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_IN_READ_LEN 8 +/* The first 32-bit word read from the GPIO IN register. */ +#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_OFST 0 +#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_LEN 4 +/* The second 32-bit word read from the GPIO IN register. */ +#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_OFST 4 +#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_LEN 4 + +/* MC_CMD_MUM_OUT_GPIO_OUT_WRITE msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_OUT_WRITE_LEN 0 + +/* MC_CMD_MUM_OUT_GPIO_OUT_READ msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_OUT_READ_LEN 8 +/* The first 32-bit word read from the GPIO OUT register. */ +#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_OFST 0 +#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_LEN 4 +/* The second 32-bit word read from the GPIO OUT register. 
*/ +#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_OFST 4 +#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_LEN 4 + +/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE_LEN 0 + +/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_LEN 8 +#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_OFST 0 +#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_LEN 4 +#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_OFST 4 +#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_LEN 4 + +/* MC_CMD_MUM_OUT_GPIO_OP_OUT_READ msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_LEN 4 +#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_OFST 0 +#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_LEN 4 + +/* MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE_LEN 0 + +/* MC_CMD_MUM_OUT_GPIO_OP_OUT_CONFIG msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_OP_OUT_CONFIG_LEN 0 + +/* MC_CMD_MUM_OUT_GPIO_OP_OUT_ENABLE msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_OP_OUT_ENABLE_LEN 0 + +/* MC_CMD_MUM_OUT_READ_SENSORS msgresponse */ +#define MC_CMD_MUM_OUT_READ_SENSORS_LENMIN 4 +#define MC_CMD_MUM_OUT_READ_SENSORS_LENMAX 252 +#define MC_CMD_MUM_OUT_READ_SENSORS_LENMAX_MCDI2 1020 +#define MC_CMD_MUM_OUT_READ_SENSORS_LEN(num) (0+4*(num)) +#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_NUM(len) (((len)-0)/4) +#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_OFST 0 +#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_LEN 4 +#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_MINNUM 1 +#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_MAXNUM 63 +#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_MAXNUM_MCDI2 255 +#define MC_CMD_MUM_OUT_READ_SENSORS_READING_OFST 0 +#define MC_CMD_MUM_OUT_READ_SENSORS_READING_LBN 0 +#define MC_CMD_MUM_OUT_READ_SENSORS_READING_WIDTH 16 +#define MC_CMD_MUM_OUT_READ_SENSORS_STATE_OFST 0 +#define MC_CMD_MUM_OUT_READ_SENSORS_STATE_LBN 16 +#define MC_CMD_MUM_OUT_READ_SENSORS_STATE_WIDTH 8 +#define MC_CMD_MUM_OUT_READ_SENSORS_TYPE_OFST 0 +#define MC_CMD_MUM_OUT_READ_SENSORS_TYPE_LBN 24 +#define MC_CMD_MUM_OUT_READ_SENSORS_TYPE_WIDTH 8 + +/* MC_CMD_MUM_OUT_PROGRAM_CLOCKS msgresponse */ +#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_LEN 4 +#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_OFST 0 +#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_LEN 4 + +/* MC_CMD_MUM_OUT_FPGA_LOAD msgresponse */ +#define MC_CMD_MUM_OUT_FPGA_LOAD_LEN 0 + +/* MC_CMD_MUM_OUT_READ_ATB_SENSOR msgresponse */ +#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_LEN 4 +#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_OFST 0 +#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_LEN 4 + +/* MC_CMD_MUM_OUT_QSFP_INIT msgresponse */ +#define MC_CMD_MUM_OUT_QSFP_INIT_LEN 0 + +/* MC_CMD_MUM_OUT_QSFP_RECONFIGURE msgresponse */ +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_LEN 8 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_OFST 0 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_LEN 4 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_OFST 4 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_LEN 4 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_OFST 4 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_LBN 0 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_WIDTH 1 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_OFST 4 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_LBN 1 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_WIDTH 1 + +/* MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP msgresponse */ +#define 
MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_LEN 4 +#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_OFST 0 +#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_LEN 4 + +/* MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO msgresponse */ +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMIN 5 +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMAX 252 +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMAX_MCDI2 1020 +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LEN(num) (4+1*(num)) +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_NUM(len) (((len)-4)/1) +/* in bytes */ +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_OFST 0 +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_LEN 4 +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_OFST 4 +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_LEN 1 +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MINNUM 1 +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MAXNUM 248 +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MAXNUM_MCDI2 1016 + +/* MC_CMD_MUM_OUT_QSFP_FILL_STATS msgresponse */ +#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_LEN 8 +#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_OFST 0 +#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_LEN 4 +#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_OFST 4 +#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_LEN 4 + +/* MC_CMD_MUM_OUT_QSFP_POLL_BIST msgresponse */ +#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_LEN 4 +#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_OFST 0 +#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_LEN 4 + +/* MC_CMD_MUM_OUT_READ_DDR_INFO msgresponse */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMIN 24 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMAX 248 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMAX_MCDI2 1016 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_LEN(num) (8+8*(num)) +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_NUM(len) (((len)-8)/8) +/* Discrete (soldered) DDR resistor strap info */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_OFST 0 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_LEN 4 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_OFST 0 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_LBN 0 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_WIDTH 16 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_OFST 0 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_LBN 16 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_WIDTH 16 +/* Number of SODIMM info records */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_RECORDS_OFST 4 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_RECORDS_LEN 4 +/* Array of SODIMM info records */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_OFST 8 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LEN 8 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LO_OFST 8 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LO_LEN 4 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LO_LBN 64 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LO_WIDTH 32 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_HI_OFST 12 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_HI_LEN 4 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_HI_LBN 96 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_HI_WIDTH 32 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MINNUM 2 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MAXNUM 30 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MAXNUM_MCDI2 126 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_OFST 8 +#define 
MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_LBN 0
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_WIDTH 8
+/* enum: SODIMM bank 1 (Top SODIMM for Sorrento) */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK1 0x0
+/* enum: SODIMM bank 2 (Bottom SODIMM for Sorrento) */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK2 0x1
+/* enum: Total number of SODIMM banks */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_BANKS 0x2
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_OFST 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_LBN 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_WIDTH 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_OFST 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_LBN 16
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_WIDTH 4
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_OFST 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_LBN 20
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_WIDTH 4
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_NOT_POWERED 0x0 /* enum */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V25 0x1 /* enum */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V35 0x2 /* enum */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V5 0x3 /* enum */
+/* enum: Values 5-15 are reserved for future usage */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V8 0x4
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_OFST 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_LBN 24
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_WIDTH 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_OFST 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_LBN 32
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_WIDTH 16
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_OFST 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_LBN 48
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_WIDTH 4
+/* enum: No module present */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_ABSENT 0x0
+/* enum: Module present, supported and powered on */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_POWERED 0x1
+/* enum: Module present but bad type */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_TYPE 0x2
+/* enum: Module present but incompatible voltage */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_VOLTAGE 0x3
+/* enum: Module present but unknown SPD */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_SPD 0x4
+/* enum: Module present but slot cannot support it */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_SLOT 0x5
+/* enum: Modules may or may not be present, but cannot establish contact by I2C
+ */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_NOT_REACHABLE 0x6
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_OFST 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_LBN 52
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_WIDTH 12
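Each SODIMM_INFO_RECORD is a 64-bit value (the LO/HI dwords above), and the
per-field _LBN/_WIDTH pairs appear to give each field's bit position within
that 64-bit record rather than within a single dword (SPEED and STATE sit
above bit 31). Assuming that reading, a generic extraction sketch:

#include <stdint.h>

/* Pull a WIDTH-bit field starting at bit LBN out of one 64-bit SODIMM
 * info record (assumes width < 64).
 */
static uint32_t ddr_info_field(uint64_t record, unsigned int lbn,
			       unsigned int width)
{
	return (uint32_t)((record >> lbn) & ((1ULL << width) - 1));
}

/* e.g. the module state, one of MC_CMD_MUM_OUT_READ_DDR_INFO_ABSENT,
 * ..._PRESENT_POWERED, ..., ..._NOT_REACHABLE:
 */
static uint32_t ddr_info_state(uint64_t record)
{
	return ddr_info_field(record,
			      MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_LBN,
			      MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_WIDTH);
}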
+
+/* MC_CMD_DYNAMIC_SENSORS_LIMITS structuredef: Set of sensor limits. This
+ * should match the equivalent structure in the sensor_query SPHINX service.
+ */
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LEN 24
+/* A value below this will trigger a warning event. */
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_WARNING_OFST 0
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_WARNING_LEN 4
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_WARNING_LBN 0
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_WARNING_WIDTH 32
+/* A value below this will trigger a critical event. */
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_CRITICAL_OFST 4
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_CRITICAL_LEN 4
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_CRITICAL_LBN 32
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_CRITICAL_WIDTH 32
+/* A value below this will shut down the card. */
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_FATAL_OFST 8
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_FATAL_LEN 4
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_FATAL_LBN 64
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_FATAL_WIDTH 32
+/* A value above this will trigger a warning event. */
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_WARNING_OFST 12
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_WARNING_LEN 4
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_WARNING_LBN 96
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_WARNING_WIDTH 32
+/* A value above this will trigger a critical event. */
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_CRITICAL_OFST 16
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_CRITICAL_LEN 4
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_CRITICAL_LBN 128
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_CRITICAL_WIDTH 32
+/* A value above this will shut down the card. */
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_FATAL_OFST 20
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_FATAL_LEN 4
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_FATAL_LBN 160
+#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_FATAL_WIDTH 32
+
+/* MC_CMD_DYNAMIC_SENSORS_DESCRIPTION structuredef: Description of a sensor.
+ * This should match the equivalent structure in the sensor_query SPHINX
+ * service.
+ */
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_LEN 64
+/* The handle used to identify the sensor in calls to
+ * MC_CMD_DYNAMIC_SENSORS_GET_VALUES
+ */
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_HANDLE_OFST 0
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_HANDLE_LEN 4
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_HANDLE_LBN 0
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_HANDLE_WIDTH 32
+/* A human-readable name for the sensor (zero terminated string, max 32 bytes)
+ */
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_NAME_OFST 4
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_NAME_LEN 32
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_NAME_LBN 32
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_NAME_WIDTH 256
+/* The type of the sensor device, and by implication the unit that the
+ * values will be reported in
+ */
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_TYPE_OFST 36
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_TYPE_LEN 4
+/* enum: A voltage sensor. Unit is mV */
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_VOLTAGE 0x0
+/* enum: A current sensor. Unit is mA */
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_CURRENT 0x1
+/* enum: A power sensor. Unit is mW */
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_POWER 0x2
+/* enum: A temperature sensor. Unit is Celsius */
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_TEMPERATURE 0x3
+/* enum: A cooling fan sensor. Unit is RPM */
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_FAN 0x4
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_TYPE_LBN 288
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_TYPE_WIDTH 32
+/* A single MC_CMD_DYNAMIC_SENSORS_LIMITS structure */
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_LIMITS_OFST 40
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_LIMITS_LEN 24
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_LIMITS_LBN 320
+#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_LIMITS_WIDTH 192
+
+/* MC_CMD_DYNAMIC_SENSORS_READING structuredef: State and value of a sensor.
+ * This should match the equivalent structure in the sensor_query SPHINX
+ * service.
+ */
+#define MC_CMD_DYNAMIC_SENSORS_READING_LEN 12
+/* The handle used to identify the sensor */
+#define MC_CMD_DYNAMIC_SENSORS_READING_HANDLE_OFST 0
+#define MC_CMD_DYNAMIC_SENSORS_READING_HANDLE_LEN 4
+#define MC_CMD_DYNAMIC_SENSORS_READING_HANDLE_LBN 0
+#define MC_CMD_DYNAMIC_SENSORS_READING_HANDLE_WIDTH 32
+/* The current value of the sensor */
+#define MC_CMD_DYNAMIC_SENSORS_READING_VALUE_OFST 4
+#define MC_CMD_DYNAMIC_SENSORS_READING_VALUE_LEN 4
+#define MC_CMD_DYNAMIC_SENSORS_READING_VALUE_LBN 32
+#define MC_CMD_DYNAMIC_SENSORS_READING_VALUE_WIDTH 32
+/* The sensor's condition, e.g. good, broken or removed */
+#define MC_CMD_DYNAMIC_SENSORS_READING_STATE_OFST 8
+#define MC_CMD_DYNAMIC_SENSORS_READING_STATE_LEN 4
+/* enum: Sensor working normally within limits */
+#define MC_CMD_DYNAMIC_SENSORS_READING_OK 0x0
+/* enum: Warning threshold breached */
+#define MC_CMD_DYNAMIC_SENSORS_READING_WARNING 0x1
+/* enum: Critical threshold breached */
+#define MC_CMD_DYNAMIC_SENSORS_READING_CRITICAL 0x2
+/* enum: Fatal threshold breached */
+#define MC_CMD_DYNAMIC_SENSORS_READING_FATAL 0x3
+/* enum: Sensor not working */
+#define MC_CMD_DYNAMIC_SENSORS_READING_BROKEN 0x4
+/* enum: Sensor working but no reading available */
+#define MC_CMD_DYNAMIC_SENSORS_READING_NO_READING 0x5
+/* enum: Sensor initialization failed */
+#define MC_CMD_DYNAMIC_SENSORS_READING_INIT_FAILED 0x6
+#define MC_CMD_DYNAMIC_SENSORS_READING_STATE_LBN 64
+#define MC_CMD_DYNAMIC_SENSORS_READING_STATE_WIDTH 32
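Each entry later returned by MC_CMD_DYNAMIC_SENSORS_GET_READINGS is one
12-byte structure in exactly this layout. A minimal unpacking sketch under
that layout (open-coded little-endian reads with invented names; real driver
code would use its MCDI accessors instead):

#include <stddef.h>
#include <stdint.h>

struct dyn_sensor_reading {
	uint32_t handle;	/* HANDLE: as returned by ..._LIST */
	uint32_t value;		/* VALUE: unit depends on the sensor type */
	uint32_t state;		/* STATE: ..._READING_OK .. _INIT_FAILED */
};

static uint32_t le32_at(const uint8_t *p, size_t ofst)
{
	return (uint32_t)p[ofst] | ((uint32_t)p[ofst + 1] << 8) |
	       ((uint32_t)p[ofst + 2] << 16) | ((uint32_t)p[ofst + 3] << 24);
}

/* Unpack one 12-byte MC_CMD_DYNAMIC_SENSORS_READING entry. */
static void unpack_reading(const uint8_t *entry,
			   struct dyn_sensor_reading *r)
{
	r->handle = le32_at(entry, MC_CMD_DYNAMIC_SENSORS_READING_HANDLE_OFST);
	r->value = le32_at(entry, MC_CMD_DYNAMIC_SENSORS_READING_VALUE_OFST);
	r->state = le32_at(entry, MC_CMD_DYNAMIC_SENSORS_READING_STATE_OFST);
}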
+
+
+/***********************************/
+/* MC_CMD_DYNAMIC_SENSORS_LIST
+ * Return a complete list of handles for sensors currently managed by the MC,
+ * and a generation count for this version of the sensor table. On systems
+ * advertising the DYNAMIC_SENSORS capability bit, this replaces the
+ * MC_CMD_READ_SENSORS command. On multi-MC systems this may include sensors
+ * added by the NMC. Sensor handles are persistent for the lifetime of the
+ * sensor and are used to identify sensors in
+ * MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS and
+ * MC_CMD_DYNAMIC_SENSORS_GET_VALUES. The generation count is maintained by the
+ * MC, is persistent across reboots and will be incremented each time the
+ * sensor table is modified. When the table is modified, a
+ * CODE_DYNAMIC_SENSORS_CHANGE event will be generated containing the new
+ * generation count. The driver should compare this against the current
+ * generation count, and if it is different, call MC_CMD_DYNAMIC_SENSORS_LIST
+ * again to update its copy of the sensor table. The sensor count is provided
+ * to allow a future path to supporting more than
+ * MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_MAXNUM_MCDI2 sensors, i.e.
+ * the maximum number that will fit in a single response. As this is a fairly
+ * large number (253) it is not anticipated that this will be needed in the
+ * near future, so can currently be ignored. On Riverhead this command is
+ * implemented as a wrapper for `list` in the sensor_query SPHINX service.
+ */
+#define MC_CMD_DYNAMIC_SENSORS_LIST 0x66
+#undef MC_CMD_0x66_PRIVILEGE_CTG
+
+#define MC_CMD_0x66_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_DYNAMIC_SENSORS_LIST_IN msgrequest */
+#define MC_CMD_DYNAMIC_SENSORS_LIST_IN_LEN 0
+
+/* MC_CMD_DYNAMIC_SENSORS_LIST_OUT msgresponse */
+#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_LENMIN 8
+#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_LENMAX 252
+#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_LEN(num) (8+4*(num))
+#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_HANDLES_NUM(len) (((len)-8)/4)
+/* Generation count, which will be updated each time a sensor is added to or
+ * removed from the MC sensor table.
+ */
+#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_GENERATION_OFST 0
+#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_GENERATION_LEN 4
+/* Number of sensors managed by the MC. Note that in principle, this can be
+ * larger than the size of the HANDLES array.
+ */
+#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_COUNT_OFST 4
+#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_COUNT_LEN 4
+/* Array of sensor handles */
+#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_HANDLES_OFST 8
+#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_HANDLES_LEN 4
+#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_HANDLES_MINNUM 0
+#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_HANDLES_MAXNUM 61
+#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_HANDLES_MAXNUM_MCDI2 253
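The generation count makes the handle list a versioned snapshot: the driver
caches the GENERATION from its last LIST call and re-lists when a
CODE_DYNAMIC_SENSORS_CHANGE event carries a different value. A sketch of
that flow, with hypothetical names throughout:

#include <stdint.h>

struct sensor_cache {
	uint32_t generation;	/* GENERATION from our last LIST call */
	/* ... cached handles and descriptions ... */
};

/* Hypothetical: issue MC_CMD_DYNAMIC_SENSORS_LIST, refresh the cached
 * handles, and return the GENERATION field from the response.
 */
uint32_t sensor_cache_relist(struct sensor_cache *cache);

/* Handle a CODE_DYNAMIC_SENSORS_CHANGE event carrying the new count. */
static void on_sensors_change(struct sensor_cache *cache, uint32_t event_gen)
{
	if (event_gen == cache->generation)
		return;		/* our copy of the table is still current */
	cache->generation = sensor_cache_relist(cache);
}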
+
+
+/***********************************/
+/* MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS
+ * Get descriptions for a set of sensors, specified as an array of sensor
+ * handles as returned by MC_CMD_DYNAMIC_SENSORS_LIST. Any handles which do not
+ * correspond to a sensor currently managed by the MC will be dropped from
+ * the response. This may happen when a sensor table update is in progress, and
+ * effectively means the set of usable sensors is the intersection between the
+ * sets of sensors known to the driver and the MC. On Riverhead this command is
+ * implemented as a wrapper for `get_descriptions` in the sensor_query SPHINX
+ * service.
+ */
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS 0x67
+#undef MC_CMD_0x67_PRIVILEGE_CTG
+
+#define MC_CMD_0x67_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN msgrequest */
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_LENMIN 0
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_LENMAX 252
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_LENMAX_MCDI2 1020
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_LEN(num) (0+4*(num))
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_HANDLES_NUM(len) (((len)-0)/4)
+/* Array of sensor handles */
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_HANDLES_OFST 0
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_HANDLES_LEN 4
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_HANDLES_MINNUM 0
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_HANDLES_MAXNUM 63
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_HANDLES_MAXNUM_MCDI2 255
+
+/* MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT msgresponse */
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_LENMIN 0
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_LENMAX 192
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_LENMAX_MCDI2 960
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_LEN(num) (0+64*(num))
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_SENSORS_NUM(len) (((len)-0)/64)
+/* Array of MC_CMD_DYNAMIC_SENSORS_DESCRIPTION structures */
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_SENSORS_OFST 0
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_SENSORS_LEN 64
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_SENSORS_MINNUM 0
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_SENSORS_MAXNUM 3
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_SENSORS_MAXNUM_MCDI2 15
+
+
+/***********************************/
+/* MC_CMD_DYNAMIC_SENSORS_GET_READINGS
+ * Read the state and value for a set of sensors, specified as an array of
+ * sensor handles as returned by MC_CMD_DYNAMIC_SENSORS_LIST. In the case of a
+ * broken sensor, the state of the response's MC_CMD_DYNAMIC_SENSORS_VALUE
+ * entry will be set to BROKEN, and any value provided should be treated as
+ * erroneous. Any handles which do not correspond to a sensor currently managed
+ * by the MC will be dropped from the response. This may happen when a
+ * sensor table update is in progress, and effectively means the set of usable
+ * sensors is the intersection between the sets of sensors known to the driver
+ * and the MC. On Riverhead this command is implemented as a wrapper for
+ * `get_readings` in the sensor_query SPHINX service.
+ */ +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS 0x68 +#undef MC_CMD_0x68_PRIVILEGE_CTG + +#define MC_CMD_0x68_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN msgrequest */ +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_LENMIN 0 +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_LENMAX 252 +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_LENMAX_MCDI2 1020 +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_LEN(num) (0+4*(num)) +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_NUM(len) (((len)-0)/4) +/* Array of sensor handles */ +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_OFST 0 +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_LEN 4 +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_MINNUM 0 +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_MAXNUM 63 +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_MAXNUM_MCDI2 255 + +/* MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT msgresponse */ +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_LENMIN 0 +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_LENMAX 252 +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_LEN(num) (0+12*(num)) +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_NUM(len) (((len)-0)/12) +/* Array of MC_CMD_DYNAMIC_SENSORS_READING structures */ +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_OFST 0 +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_LEN 12 +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_MINNUM 0 +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_MAXNUM 21 +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_MAXNUM_MCDI2 85 + + +/***********************************/ +/* MC_CMD_EVENT_CTRL + * Configure which categories of unsolicited events the driver expects to + * receive (Riverhead). + */ +#define MC_CMD_EVENT_CTRL 0x69 +#undef MC_CMD_0x69_PRIVILEGE_CTG + +#define MC_CMD_0x69_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_EVENT_CTRL_IN msgrequest */ +#define MC_CMD_EVENT_CTRL_IN_LENMIN 0 +#define MC_CMD_EVENT_CTRL_IN_LENMAX 252 +#define MC_CMD_EVENT_CTRL_IN_LENMAX_MCDI2 1020 +#define MC_CMD_EVENT_CTRL_IN_LEN(num) (0+4*(num)) +#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_NUM(len) (((len)-0)/4) +/* Array of event categories for which the driver wishes to receive events. */ +#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_OFST 0 +#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_LEN 4 +#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_MINNUM 0 +#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_MAXNUM 63 +#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_MAXNUM_MCDI2 255 +/* enum: Driver wishes to receive LINKCHANGE events. */ +#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_LINKCHANGE 0x0 +/* enum: Driver wishes to receive SENSOR_CHANGE and SENSOR_STATE_CHANGE events. + */ +#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_SENSOREVT 0x1 +/* enum: Driver wishes to receive receive errors. */ +#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_RX_ERR 0x2 +/* enum: Driver wishes to receive transmit errors. */ +#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_TX_ERR 0x3 +/* enum: Driver wishes to receive firmware alerts. */ +#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_FWALERT 0x4 +/* enum: Driver wishes to receive reboot events. */ +#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_MC_REBOOT 0x5 + +/* MC_CMD_EVENT_CTRL_OUT msgrequest */ +#define MC_CMD_EVENT_CTRL_OUT_LEN 0 + +/* EVB_PORT_ID structuredef */ +#define EVB_PORT_ID_LEN 4 +#define EVB_PORT_ID_PORT_ID_OFST 0 +#define EVB_PORT_ID_PORT_ID_LEN 4 +/* enum: An invalid port handle. 
+#define EVB_PORT_ID_NULL 0x0
+/* enum: The port assigned to this function. */
+#define EVB_PORT_ID_ASSIGNED 0x1000000
+/* enum: External network port 0 */
+#define EVB_PORT_ID_MAC0 0x2000000
+/* enum: External network port 1 */
+#define EVB_PORT_ID_MAC1 0x2000001
+/* enum: External network port 2 */
+#define EVB_PORT_ID_MAC2 0x2000002
+/* enum: External network port 3 */
+#define EVB_PORT_ID_MAC3 0x2000003
+#define EVB_PORT_ID_PORT_ID_LBN 0
+#define EVB_PORT_ID_PORT_ID_WIDTH 32
+
+/* EVB_VLAN_TAG structuredef */
+#define EVB_VLAN_TAG_LEN 2
+/* The VLAN tag value */
+#define EVB_VLAN_TAG_VLAN_ID_LBN 0
+#define EVB_VLAN_TAG_VLAN_ID_WIDTH 12
+#define EVB_VLAN_TAG_MODE_LBN 12
+#define EVB_VLAN_TAG_MODE_WIDTH 4
+/* enum: Insert the VLAN. */
+#define EVB_VLAN_TAG_INSERT 0x0
+/* enum: Replace the VLAN if already present. */
+#define EVB_VLAN_TAG_REPLACE 0x1
+
+/* BUFTBL_ENTRY structuredef */
+#define BUFTBL_ENTRY_LEN 12
+/* the owner ID */
+#define BUFTBL_ENTRY_OID_OFST 0
+#define BUFTBL_ENTRY_OID_LEN 2
+#define BUFTBL_ENTRY_OID_LBN 0
+#define BUFTBL_ENTRY_OID_WIDTH 16
+/* the page parameter as one of ESE_DZ_SMC_PAGE_SIZE_ */
+#define BUFTBL_ENTRY_PGSZ_OFST 2
+#define BUFTBL_ENTRY_PGSZ_LEN 2
+#define BUFTBL_ENTRY_PGSZ_LBN 16
+#define BUFTBL_ENTRY_PGSZ_WIDTH 16
+/* the raw 64-bit address field from the SMC, not adjusted for page size */
+#define BUFTBL_ENTRY_RAWADDR_OFST 4
+#define BUFTBL_ENTRY_RAWADDR_LEN 8
+#define BUFTBL_ENTRY_RAWADDR_LO_OFST 4
+#define BUFTBL_ENTRY_RAWADDR_LO_LEN 4
+#define BUFTBL_ENTRY_RAWADDR_LO_LBN 32
+#define BUFTBL_ENTRY_RAWADDR_LO_WIDTH 32
+#define BUFTBL_ENTRY_RAWADDR_HI_OFST 8
+#define BUFTBL_ENTRY_RAWADDR_HI_LEN 4
+#define BUFTBL_ENTRY_RAWADDR_HI_LBN 64
+#define BUFTBL_ENTRY_RAWADDR_HI_WIDTH 32
+#define BUFTBL_ENTRY_RAWADDR_LBN 32
+#define BUFTBL_ENTRY_RAWADDR_WIDTH 64
+
+/* NVRAM_PARTITION_TYPE structuredef */
+#define NVRAM_PARTITION_TYPE_LEN 2
+#define NVRAM_PARTITION_TYPE_ID_OFST 0
+#define NVRAM_PARTITION_TYPE_ID_LEN 2
+/* enum: Primary MC firmware partition */
+#define NVRAM_PARTITION_TYPE_MC_FIRMWARE 0x100
+/* enum: NMC firmware partition (this is intentionally an alias of MC_FIRMWARE)
+ */
+#define NVRAM_PARTITION_TYPE_NMC_FIRMWARE 0x100
+/* enum: Secondary MC firmware partition */
+#define NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP 0x200
+/* enum: Expansion ROM partition */
+#define NVRAM_PARTITION_TYPE_EXPANSION_ROM 0x300
+/* enum: Static configuration TLV partition */
+#define NVRAM_PARTITION_TYPE_STATIC_CONFIG 0x400
+/* enum: Factory configuration TLV partition (this is intentionally an alias of
+ * STATIC_CONFIG)
+ */
+#define NVRAM_PARTITION_TYPE_FACTORY_CONFIG 0x400
+/* enum: Dynamic configuration TLV partition */
+#define NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG 0x500
+/* enum: User configuration TLV partition (this is intentionally an alias of
+ * DYNAMIC_CONFIG)
+ */
+#define NVRAM_PARTITION_TYPE_USER_CONFIG 0x500
+/* enum: Expansion ROM configuration data for port 0 */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0 0x600
+/* enum: Synonym for EXPROM_CONFIG_PORT0 as used in pmap files */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG 0x600
+/* enum: Expansion ROM configuration data for port 1 */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1 0x601
+/* enum: Expansion ROM configuration data for port 2 */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2 0x602
+/* enum: Expansion ROM configuration data for port 3 */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3 0x603
+/* enum: Non-volatile log output partition */
+#define NVRAM_PARTITION_TYPE_LOG
0x700 +/* enum: Non-volatile log output partition for NMC firmware (this is + * intentionally an alias of LOG) + */ +#define NVRAM_PARTITION_TYPE_NMC_LOG 0x700 +/* enum: Non-volatile log output of second core on dual-core device */ +#define NVRAM_PARTITION_TYPE_LOG_SLAVE 0x701 +/* enum: Device state dump output partition */ +#define NVRAM_PARTITION_TYPE_DUMP 0x800 +/* enum: Crash log partition for NMC firmware */ +#define NVRAM_PARTITION_TYPE_NMC_CRASH_LOG 0x801 +/* enum: Application license key storage partition */ +#define NVRAM_PARTITION_TYPE_LICENSE 0x900 +/* enum: Start of range used for PHY partitions (low 8 bits are the PHY ID) */ +#define NVRAM_PARTITION_TYPE_PHY_MIN 0xa00 +/* enum: End of range used for PHY partitions (low 8 bits are the PHY ID) */ +#define NVRAM_PARTITION_TYPE_PHY_MAX 0xaff +/* enum: Primary FPGA partition */ +#define NVRAM_PARTITION_TYPE_FPGA 0xb00 +/* enum: Secondary FPGA partition */ +#define NVRAM_PARTITION_TYPE_FPGA_BACKUP 0xb01 +/* enum: FC firmware partition */ +#define NVRAM_PARTITION_TYPE_FC_FIRMWARE 0xb02 +/* enum: FC License partition */ +#define NVRAM_PARTITION_TYPE_FC_LICENSE 0xb03 +/* enum: Non-volatile log output partition for FC */ +#define NVRAM_PARTITION_TYPE_FC_LOG 0xb04 +/* enum: FPGA Stage 1 bitstream */ +#define NVRAM_PARTITION_TYPE_FPGA_STAGE1 0xb05 +/* enum: FPGA Stage 2 bitstream */ +#define NVRAM_PARTITION_TYPE_FPGA_STAGE2 0xb06 +/* enum: FPGA User XCLBIN / Programmable Region 0 bitstream */ +#define NVRAM_PARTITION_TYPE_FPGA_REGION0 0xb07 +/* enum: FPGA User XCLBIN (this is intentionally an alias of FPGA_REGION0) */ +#define NVRAM_PARTITION_TYPE_FPGA_XCLBIN_USER 0xb07 +/* enum: FPGA jump instruction (a.k.a. boot) partition to select Stage1 + * bitstream + */ +#define NVRAM_PARTITION_TYPE_FPGA_JUMP 0xb08 +/* enum: FPGA Validate XCLBIN */ +#define NVRAM_PARTITION_TYPE_FPGA_XCLBIN_VALIDATE 0xb09 +/* enum: FPGA XOCL Configuration information */ +#define NVRAM_PARTITION_TYPE_FPGA_XOCL_CONFIG 0xb0a +/* enum: MUM firmware partition */ +#define NVRAM_PARTITION_TYPE_MUM_FIRMWARE 0xc00 +/* enum: SUC firmware partition (this is intentionally an alias of + * MUM_FIRMWARE) + */ +#define NVRAM_PARTITION_TYPE_SUC_FIRMWARE 0xc00 +/* enum: MUM Non-volatile log output partition. */ +#define NVRAM_PARTITION_TYPE_MUM_LOG 0xc01 +/* enum: SUC Non-volatile log output partition (this is intentionally an alias + * of MUM_LOG). + */ +#define NVRAM_PARTITION_TYPE_SUC_LOG 0xc01 +/* enum: MUM Application table partition. */ +#define NVRAM_PARTITION_TYPE_MUM_APPTABLE 0xc02 +/* enum: MUM boot rom partition. */ +#define NVRAM_PARTITION_TYPE_MUM_BOOT_ROM 0xc03 +/* enum: MUM production signatures & calibration rom partition. */ +#define NVRAM_PARTITION_TYPE_MUM_PROD_ROM 0xc04 +/* enum: MUM user signatures & calibration rom partition. */ +#define NVRAM_PARTITION_TYPE_MUM_USER_ROM 0xc05 +/* enum: MUM fuses and lockbits partition. */ +#define NVRAM_PARTITION_TYPE_MUM_FUSELOCK 0xc06 +/* enum: UEFI expansion ROM if separate from PXE */ +#define NVRAM_PARTITION_TYPE_EXPANSION_UEFI 0xd00 +/* enum: Used by the expansion ROM for logging */ +#define NVRAM_PARTITION_TYPE_PXE_LOG 0x1000 +/* enum: Non-volatile log output partition for Expansion ROM (this is + * intentionally an alias of PXE_LOG). + */ +#define NVRAM_PARTITION_TYPE_EXPROM_LOG 0x1000 +/* enum: Used for XIP code of shmbooted images */ +#define NVRAM_PARTITION_TYPE_XIP_SCRATCH 0x1100 +/* enum: Spare partition 2 */ +#define NVRAM_PARTITION_TYPE_SPARE_2 0x1200 +/* enum: Manufacturing partition. 
Used during manufacture to pass information + * between XJTAG and Manftest. + */ +#define NVRAM_PARTITION_TYPE_MANUFACTURING 0x1300 +/* enum: Deployment configuration TLV partition (this is intentionally an alias + * of MANUFACTURING) + */ +#define NVRAM_PARTITION_TYPE_DEPLOYMENT_CONFIG 0x1300 +/* enum: Spare partition 4 */ +#define NVRAM_PARTITION_TYPE_SPARE_4 0x1400 +/* enum: Spare partition 5 */ +#define NVRAM_PARTITION_TYPE_SPARE_5 0x1500 +/* enum: Partition for reporting MC status. See mc_flash_layout.h + * medford_mc_status_hdr_t for layout on Medford. + */ +#define NVRAM_PARTITION_TYPE_STATUS 0x1600 +/* enum: Spare partition 13 */ +#define NVRAM_PARTITION_TYPE_SPARE_13 0x1700 +/* enum: Spare partition 14 */ +#define NVRAM_PARTITION_TYPE_SPARE_14 0x1800 +/* enum: Spare partition 15 */ +#define NVRAM_PARTITION_TYPE_SPARE_15 0x1900 +/* enum: Spare partition 16 */ +#define NVRAM_PARTITION_TYPE_SPARE_16 0x1a00 +/* enum: Factory defaults for dynamic configuration */ +#define NVRAM_PARTITION_TYPE_DYNCONFIG_DEFAULTS 0x1b00 +/* enum: Factory defaults for expansion ROM configuration */ +#define NVRAM_PARTITION_TYPE_ROMCONFIG_DEFAULTS 0x1c00 +/* enum: Field Replaceable Unit inventory information for use on IPMI + * platforms. See SF-119124-PS. The STATIC_CONFIG partition may contain a + * subset of the information stored in this partition. + */ +#define NVRAM_PARTITION_TYPE_FRU_INFORMATION 0x1d00 +/* enum: Bundle image partition */ +#define NVRAM_PARTITION_TYPE_BUNDLE 0x1e00 +/* enum: Bundle metadata partition that holds additional information related to + * a bundle update in TLV format + */ +#define NVRAM_PARTITION_TYPE_BUNDLE_METADATA 0x1e01 +/* enum: Bundle update non-volatile log output partition */ +#define NVRAM_PARTITION_TYPE_BUNDLE_LOG 0x1e02 +/* enum: Partition for Solarflare gPXE bootrom installed via Bundle update. */ +#define NVRAM_PARTITION_TYPE_EXPANSION_ROM_INTERNAL 0x1e03 +/* enum: Partition to store ASN.1 format Bundle Signature for checking. */ +#define NVRAM_PARTITION_TYPE_BUNDLE_SIGNATURE 0x1e04 +/* enum: Test partition on SmartNIC system microcontroller (SUC) */ +#define NVRAM_PARTITION_TYPE_SUC_TEST 0x1f00 +/* enum: System microcontroller access to primary FPGA flash. */ +#define NVRAM_PARTITION_TYPE_SUC_FPGA_PRIMARY 0x1f01 +/* enum: System microcontroller access to secondary FPGA flash (if present) */ +#define NVRAM_PARTITION_TYPE_SUC_FPGA_SECONDARY 0x1f02 +/* enum: System microcontroller access to primary System-on-Chip flash */ +#define NVRAM_PARTITION_TYPE_SUC_SOC_PRIMARY 0x1f03 +/* enum: System microcontroller access to secondary System-on-Chip flash (if + * present) + */ +#define NVRAM_PARTITION_TYPE_SUC_SOC_SECONDARY 0x1f04 +/* enum: System microcontroller critical failure logs. Contains structured + * details of sensors leading up to a critical failure (where the board is shut + * down). + */ +#define NVRAM_PARTITION_TYPE_SUC_FAILURE_LOG 0x1f05 +/* enum: System-on-Chip configuration information (see XN-200467-PS). */ +#define NVRAM_PARTITION_TYPE_SUC_SOC_CONFIG 0x1f07 +/* enum: System-on-Chip update information. 
*/ +#define NVRAM_PARTITION_TYPE_SOC_UPDATE 0x2003 +/* enum: Start of reserved value range (firmware may use for any purpose) */ +#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN 0xff00 +/* enum: End of reserved value range (firmware may use for any purpose) */ +#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MAX 0xfffd +/* enum: Recovery partition map (provided if real map is missing or corrupt) */ +#define NVRAM_PARTITION_TYPE_RECOVERY_MAP 0xfffe +/* enum: Recovery Flash Partition Table, see SF-122606-TC. (this is + * intentionally an alias of RECOVERY_MAP) + */ +#define NVRAM_PARTITION_TYPE_RECOVERY_FPT 0xfffe +/* enum: Partition map (real map as stored in flash) */ +#define NVRAM_PARTITION_TYPE_PARTITION_MAP 0xffff +/* enum: Flash Partition Table, see SF-122606-TC. (this is intentionally an + * alias of PARTITION_MAP) + */ +#define NVRAM_PARTITION_TYPE_FPT 0xffff +#define NVRAM_PARTITION_TYPE_ID_LBN 0 +#define NVRAM_PARTITION_TYPE_ID_WIDTH 16 + +/* LICENSED_APP_ID structuredef */ +#define LICENSED_APP_ID_LEN 4 +#define LICENSED_APP_ID_ID_OFST 0 +#define LICENSED_APP_ID_ID_LEN 4 +/* enum: OpenOnload */ +#define LICENSED_APP_ID_ONLOAD 0x1 +/* enum: PTP timestamping */ +#define LICENSED_APP_ID_PTP 0x2 +/* enum: SolarCapture Pro */ +#define LICENSED_APP_ID_SOLARCAPTURE_PRO 0x4 +/* enum: SolarSecure filter engine */ +#define LICENSED_APP_ID_SOLARSECURE 0x8 +/* enum: Performance monitor */ +#define LICENSED_APP_ID_PERF_MONITOR 0x10 +/* enum: SolarCapture Live */ +#define LICENSED_APP_ID_SOLARCAPTURE_LIVE 0x20 +/* enum: Capture SolarSystem */ +#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM 0x40 +/* enum: Network Access Control */ +#define LICENSED_APP_ID_NETWORK_ACCESS_CONTROL 0x80 +/* enum: TCP Direct */ +#define LICENSED_APP_ID_TCP_DIRECT 0x100 +/* enum: Low Latency */ +#define LICENSED_APP_ID_LOW_LATENCY 0x200 +/* enum: SolarCapture Tap */ +#define LICENSED_APP_ID_SOLARCAPTURE_TAP 0x400 +/* enum: Capture SolarSystem 40G */ +#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM_40G 0x800 +/* enum: Capture SolarSystem 1G */ +#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM_1G 0x1000 +/* enum: ScaleOut Onload */ +#define LICENSED_APP_ID_SCALEOUT_ONLOAD 0x2000 +/* enum: SCS Network Analytics Dashboard */ +#define LICENSED_APP_ID_DSHBRD 0x4000 +/* enum: SolarCapture Trading Analytics */ +#define LICENSED_APP_ID_SCATRD 0x8000 +#define LICENSED_APP_ID_ID_LBN 0 +#define LICENSED_APP_ID_ID_WIDTH 32 + +/* LICENSED_FEATURES structuredef */ +#define LICENSED_FEATURES_LEN 8 +/* Bitmask of licensed firmware features */ +#define LICENSED_FEATURES_MASK_OFST 0 +#define LICENSED_FEATURES_MASK_LEN 8 +#define LICENSED_FEATURES_MASK_LO_OFST 0 +#define LICENSED_FEATURES_MASK_LO_LEN 4 +#define LICENSED_FEATURES_MASK_LO_LBN 0 +#define LICENSED_FEATURES_MASK_LO_WIDTH 32 +#define LICENSED_FEATURES_MASK_HI_OFST 4 +#define LICENSED_FEATURES_MASK_HI_LEN 4 +#define LICENSED_FEATURES_MASK_HI_LBN 32 +#define LICENSED_FEATURES_MASK_HI_WIDTH 32 +#define LICENSED_FEATURES_RX_CUT_THROUGH_OFST 0 +#define LICENSED_FEATURES_RX_CUT_THROUGH_LBN 0 +#define LICENSED_FEATURES_RX_CUT_THROUGH_WIDTH 1 +#define LICENSED_FEATURES_PIO_OFST 0 +#define LICENSED_FEATURES_PIO_LBN 1 +#define LICENSED_FEATURES_PIO_WIDTH 1 +#define LICENSED_FEATURES_EVQ_TIMER_OFST 0 +#define LICENSED_FEATURES_EVQ_TIMER_LBN 2 +#define LICENSED_FEATURES_EVQ_TIMER_WIDTH 1 +#define LICENSED_FEATURES_CLOCK_OFST 0 +#define LICENSED_FEATURES_CLOCK_LBN 3 +#define LICENSED_FEATURES_CLOCK_WIDTH 1 +#define LICENSED_FEATURES_RX_TIMESTAMPS_OFST 0 +#define LICENSED_FEATURES_RX_TIMESTAMPS_LBN 4 
+#define LICENSED_FEATURES_RX_TIMESTAMPS_WIDTH 1 +#define LICENSED_FEATURES_TX_TIMESTAMPS_OFST 0 +#define LICENSED_FEATURES_TX_TIMESTAMPS_LBN 5 +#define LICENSED_FEATURES_TX_TIMESTAMPS_WIDTH 1 +#define LICENSED_FEATURES_RX_SNIFF_OFST 0 +#define LICENSED_FEATURES_RX_SNIFF_LBN 6 +#define LICENSED_FEATURES_RX_SNIFF_WIDTH 1 +#define LICENSED_FEATURES_TX_SNIFF_OFST 0 +#define LICENSED_FEATURES_TX_SNIFF_LBN 7 +#define LICENSED_FEATURES_TX_SNIFF_WIDTH 1 +#define LICENSED_FEATURES_PROXY_FILTER_OPS_OFST 0 +#define LICENSED_FEATURES_PROXY_FILTER_OPS_LBN 8 +#define LICENSED_FEATURES_PROXY_FILTER_OPS_WIDTH 1 +#define LICENSED_FEATURES_EVENT_CUT_THROUGH_OFST 0 +#define LICENSED_FEATURES_EVENT_CUT_THROUGH_LBN 9 +#define LICENSED_FEATURES_EVENT_CUT_THROUGH_WIDTH 1 +#define LICENSED_FEATURES_MASK_LBN 0 +#define LICENSED_FEATURES_MASK_WIDTH 64 + +/* LICENSED_V3_APPS structuredef */ +#define LICENSED_V3_APPS_LEN 8 +/* Bitmask of licensed applications */ +#define LICENSED_V3_APPS_MASK_OFST 0 +#define LICENSED_V3_APPS_MASK_LEN 8 +#define LICENSED_V3_APPS_MASK_LO_OFST 0 +#define LICENSED_V3_APPS_MASK_LO_LEN 4 +#define LICENSED_V3_APPS_MASK_LO_LBN 0 +#define LICENSED_V3_APPS_MASK_LO_WIDTH 32 +#define LICENSED_V3_APPS_MASK_HI_OFST 4 +#define LICENSED_V3_APPS_MASK_HI_LEN 4 +#define LICENSED_V3_APPS_MASK_HI_LBN 32 +#define LICENSED_V3_APPS_MASK_HI_WIDTH 32 +#define LICENSED_V3_APPS_ONLOAD_OFST 0 +#define LICENSED_V3_APPS_ONLOAD_LBN 0 +#define LICENSED_V3_APPS_ONLOAD_WIDTH 1 +#define LICENSED_V3_APPS_PTP_OFST 0 +#define LICENSED_V3_APPS_PTP_LBN 1 +#define LICENSED_V3_APPS_PTP_WIDTH 1 +#define LICENSED_V3_APPS_SOLARCAPTURE_PRO_OFST 0 +#define LICENSED_V3_APPS_SOLARCAPTURE_PRO_LBN 2 +#define LICENSED_V3_APPS_SOLARCAPTURE_PRO_WIDTH 1 +#define LICENSED_V3_APPS_SOLARSECURE_OFST 0 +#define LICENSED_V3_APPS_SOLARSECURE_LBN 3 +#define LICENSED_V3_APPS_SOLARSECURE_WIDTH 1 +#define LICENSED_V3_APPS_PERF_MONITOR_OFST 0 +#define LICENSED_V3_APPS_PERF_MONITOR_LBN 4 +#define LICENSED_V3_APPS_PERF_MONITOR_WIDTH 1 +#define LICENSED_V3_APPS_SOLARCAPTURE_LIVE_OFST 0 +#define LICENSED_V3_APPS_SOLARCAPTURE_LIVE_LBN 5 +#define LICENSED_V3_APPS_SOLARCAPTURE_LIVE_WIDTH 1 +#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_OFST 0 +#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_LBN 6 +#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_WIDTH 1 +#define LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_OFST 0 +#define LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_LBN 7 +#define LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_WIDTH 1 +#define LICENSED_V3_APPS_TCP_DIRECT_OFST 0 +#define LICENSED_V3_APPS_TCP_DIRECT_LBN 8 +#define LICENSED_V3_APPS_TCP_DIRECT_WIDTH 1 +#define LICENSED_V3_APPS_LOW_LATENCY_OFST 0 +#define LICENSED_V3_APPS_LOW_LATENCY_LBN 9 +#define LICENSED_V3_APPS_LOW_LATENCY_WIDTH 1 +#define LICENSED_V3_APPS_SOLARCAPTURE_TAP_OFST 0 +#define LICENSED_V3_APPS_SOLARCAPTURE_TAP_LBN 10 +#define LICENSED_V3_APPS_SOLARCAPTURE_TAP_WIDTH 1 +#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_OFST 0 +#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_LBN 11 +#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_WIDTH 1 +#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_OFST 0 +#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_LBN 12 +#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_WIDTH 1 +#define LICENSED_V3_APPS_SCALEOUT_ONLOAD_OFST 0 +#define LICENSED_V3_APPS_SCALEOUT_ONLOAD_LBN 13 +#define LICENSED_V3_APPS_SCALEOUT_ONLOAD_WIDTH 1 +#define LICENSED_V3_APPS_DSHBRD_OFST 0 +#define LICENSED_V3_APPS_DSHBRD_LBN 14 +#define LICENSED_V3_APPS_DSHBRD_WIDTH 1 +#define LICENSED_V3_APPS_SCATRD_OFST 
0 +#define LICENSED_V3_APPS_SCATRD_LBN 15 +#define LICENSED_V3_APPS_SCATRD_WIDTH 1 +#define LICENSED_V3_APPS_MASK_LBN 0 +#define LICENSED_V3_APPS_MASK_WIDTH 64 + +/* LICENSED_V3_FEATURES structuredef */ +#define LICENSED_V3_FEATURES_LEN 8 +/* Bitmask of licensed firmware features */ +#define LICENSED_V3_FEATURES_MASK_OFST 0 +#define LICENSED_V3_FEATURES_MASK_LEN 8 +#define LICENSED_V3_FEATURES_MASK_LO_OFST 0 +#define LICENSED_V3_FEATURES_MASK_LO_LEN 4 +#define LICENSED_V3_FEATURES_MASK_LO_LBN 0 +#define LICENSED_V3_FEATURES_MASK_LO_WIDTH 32 +#define LICENSED_V3_FEATURES_MASK_HI_OFST 4 +#define LICENSED_V3_FEATURES_MASK_HI_LEN 4 +#define LICENSED_V3_FEATURES_MASK_HI_LBN 32 +#define LICENSED_V3_FEATURES_MASK_HI_WIDTH 32 +#define LICENSED_V3_FEATURES_RX_CUT_THROUGH_OFST 0 +#define LICENSED_V3_FEATURES_RX_CUT_THROUGH_LBN 0 +#define LICENSED_V3_FEATURES_RX_CUT_THROUGH_WIDTH 1 +#define LICENSED_V3_FEATURES_PIO_OFST 0 +#define LICENSED_V3_FEATURES_PIO_LBN 1 +#define LICENSED_V3_FEATURES_PIO_WIDTH 1 +#define LICENSED_V3_FEATURES_EVQ_TIMER_OFST 0 +#define LICENSED_V3_FEATURES_EVQ_TIMER_LBN 2 +#define LICENSED_V3_FEATURES_EVQ_TIMER_WIDTH 1 +#define LICENSED_V3_FEATURES_CLOCK_OFST 0 +#define LICENSED_V3_FEATURES_CLOCK_LBN 3 +#define LICENSED_V3_FEATURES_CLOCK_WIDTH 1 +#define LICENSED_V3_FEATURES_RX_TIMESTAMPS_OFST 0 +#define LICENSED_V3_FEATURES_RX_TIMESTAMPS_LBN 4 +#define LICENSED_V3_FEATURES_RX_TIMESTAMPS_WIDTH 1 +#define LICENSED_V3_FEATURES_TX_TIMESTAMPS_OFST 0 +#define LICENSED_V3_FEATURES_TX_TIMESTAMPS_LBN 5 +#define LICENSED_V3_FEATURES_TX_TIMESTAMPS_WIDTH 1 +#define LICENSED_V3_FEATURES_RX_SNIFF_OFST 0 +#define LICENSED_V3_FEATURES_RX_SNIFF_LBN 6 +#define LICENSED_V3_FEATURES_RX_SNIFF_WIDTH 1 +#define LICENSED_V3_FEATURES_TX_SNIFF_OFST 0 +#define LICENSED_V3_FEATURES_TX_SNIFF_LBN 7 +#define LICENSED_V3_FEATURES_TX_SNIFF_WIDTH 1 +#define LICENSED_V3_FEATURES_PROXY_FILTER_OPS_OFST 0 +#define LICENSED_V3_FEATURES_PROXY_FILTER_OPS_LBN 8 +#define LICENSED_V3_FEATURES_PROXY_FILTER_OPS_WIDTH 1 +#define LICENSED_V3_FEATURES_EVENT_CUT_THROUGH_OFST 0 +#define LICENSED_V3_FEATURES_EVENT_CUT_THROUGH_LBN 9 +#define LICENSED_V3_FEATURES_EVENT_CUT_THROUGH_WIDTH 1 +#define LICENSED_V3_FEATURES_MASK_LBN 0 +#define LICENSED_V3_FEATURES_MASK_WIDTH 64 + +/* TX_TIMESTAMP_EVENT structuredef */ +#define TX_TIMESTAMP_EVENT_LEN 6 +/* lower 16 bits of timestamp data */ +#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_OFST 0 +#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_LEN 2 +#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_LBN 0 +#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_WIDTH 16 +/* Type of TX event, ordinary TX completion, low or high part of TX timestamp + */ +#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_OFST 3 +#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_LEN 1 +/* enum: This is a TX completion event, not a timestamp */ +#define TX_TIMESTAMP_EVENT_TX_EV_COMPLETION 0x0 +/* enum: This is a TX completion event for a CTPIO transmit. The event format + * is the same as for TX_EV_COMPLETION. + */ +#define TX_TIMESTAMP_EVENT_TX_EV_CTPIO_COMPLETION 0x11 +/* enum: This is the low part of a TX timestamp for a CTPIO transmission. The + * event format is the same as for TX_EV_TSTAMP_LO + */ +#define TX_TIMESTAMP_EVENT_TX_EV_CTPIO_TS_LO 0x12 +/* enum: This is the high part of a TX timestamp for a CTPIO transmission. 
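+ * (To reconstruct the full 32-bit timestamp, a driver combines the halves
+ * carried by the _LO and _HI events, e.g. tstamp = (hi << 16) | lo; this is
+ * a sketch inferred from the TSTAMP_DATA_LO/HI fields of this structure.)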
+ * The event format is the same as for TX_EV_TSTAMP_HI
+ */
+#define TX_TIMESTAMP_EVENT_TX_EV_CTPIO_TS_HI 0x13
+/* enum: This is the low part of a TX timestamp event */
+#define TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO 0x51
+/* enum: This is the high part of a TX timestamp event */
+#define TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_HI 0x52
+#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_LBN 24
+#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_WIDTH 8
+/* upper 16 bits of timestamp data */
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_OFST 4
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_LEN 2
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_LBN 32
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_WIDTH 16
+
+/* RSS_MODE structuredef */
+#define RSS_MODE_LEN 1
+/* The RSS mode for a particular packet type is a value from 0 to 15 which can
+ * be considered as 4 bits selecting which fields are included in the hash. (A
+ * value 0 effectively disables RSS spreading for the packet type.) The YAML
+ * generation tools require this structure to be a whole number of bytes wide,
+ * but only 4 bits are relevant.
+ */
+#define RSS_MODE_HASH_SELECTOR_OFST 0
+#define RSS_MODE_HASH_SELECTOR_LEN 1
+#define RSS_MODE_HASH_SRC_ADDR_OFST 0
+#define RSS_MODE_HASH_SRC_ADDR_LBN 0
+#define RSS_MODE_HASH_SRC_ADDR_WIDTH 1
+#define RSS_MODE_HASH_DST_ADDR_OFST 0
+#define RSS_MODE_HASH_DST_ADDR_LBN 1
+#define RSS_MODE_HASH_DST_ADDR_WIDTH 1
+#define RSS_MODE_HASH_SRC_PORT_OFST 0
+#define RSS_MODE_HASH_SRC_PORT_LBN 2
+#define RSS_MODE_HASH_SRC_PORT_WIDTH 1
+#define RSS_MODE_HASH_DST_PORT_OFST 0
+#define RSS_MODE_HASH_DST_PORT_LBN 3
+#define RSS_MODE_HASH_DST_PORT_WIDTH 1
+#define RSS_MODE_HASH_SELECTOR_LBN 0
+#define RSS_MODE_HASH_SELECTOR_WIDTH 8
+
+/* CTPIO_STATS_MAP structuredef */
+#define CTPIO_STATS_MAP_LEN 4
+/* The (function relative) VI number */
+#define CTPIO_STATS_MAP_VI_OFST 0
+#define CTPIO_STATS_MAP_VI_LEN 2
+#define CTPIO_STATS_MAP_VI_LBN 0
+#define CTPIO_STATS_MAP_VI_WIDTH 16
+/* The target bucket for the VI */
+#define CTPIO_STATS_MAP_BUCKET_OFST 2
+#define CTPIO_STATS_MAP_BUCKET_LEN 2
+#define CTPIO_STATS_MAP_BUCKET_LBN 16
+#define CTPIO_STATS_MAP_BUCKET_WIDTH 16
+
+/* MESSAGE_TYPE structuredef: When present this defines the meaning of a
+ * message, and is used to protect against chosen message attacks in signed
+ * messages, regardless of their origin. The message type also defines the
+ * signature cryptographic algorithm, encoding, and message fields included in
+ * the signature. The values are used in different commands but must be unique
+ * across all commands, e.g. MC_CMD_TSA_BIND_IN_SECURE_UNBIND uses a different
+ * message type from MC_CMD_SECURE_NIC_INFO_IN_STATUS.
+ */
+#define MESSAGE_TYPE_LEN 4
+#define MESSAGE_TYPE_MESSAGE_TYPE_OFST 0
+#define MESSAGE_TYPE_MESSAGE_TYPE_LEN 4
+#define MESSAGE_TYPE_UNUSED 0x0 /* enum */
+/* enum: Message type value for the response to a
+ * MC_CMD_TSA_BIND_IN_SECURE_UNBIND message. TSA_SECURE_UNBIND messages are
+ * ECDSA SECP384R1 signed using SHA384 message digest algorithm over fields
+ * MESSAGE_TYPE, TSANID, TSAID, and UNBINDTOKEN, and encoded as suggested by
+ * RFC6979 (section 2.4).
+ */
+#define MESSAGE_TYPE_TSA_SECURE_UNBIND 0x1
+/* enum: Message type value for the response to a
+ * MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION message. TSA_SECURE_DECOMMISSION
+ * messages are ECDSA SECP384R1 signed using SHA384 message digest algorithm
+ * over fields MESSAGE_TYPE, TSAID, USER, and REASON, and encoded as suggested
+ * by RFC6979 (section 2.4).
+ */ +#define MESSAGE_TYPE_TSA_SECURE_DECOMMISSION 0x2 +/* enum: Message type value for the response to a + * MC_CMD_SECURE_NIC_INFO_IN_STATUS message. This enum value is not sequential + * to other message types for backwards compatibility as the message type for + * MC_CMD_SECURE_NIC_INFO_IN_STATUS was defined before the existence of this + * global enum. + */ +#define MESSAGE_TYPE_SECURE_NIC_INFO_STATUS 0xdb4 +#define MESSAGE_TYPE_MESSAGE_TYPE_LBN 0 +#define MESSAGE_TYPE_MESSAGE_TYPE_WIDTH 32 + + +/***********************************/ +/* MC_CMD_READ_REGS + * Get a dump of the MCPU registers + */ +#define MC_CMD_READ_REGS 0x50 +#undef MC_CMD_0x50_PRIVILEGE_CTG + +#define MC_CMD_0x50_PRIVILEGE_CTG SRIOV_CTG_INSECURE + +/* MC_CMD_READ_REGS_IN msgrequest */ +#define MC_CMD_READ_REGS_IN_LEN 0 + +/* MC_CMD_READ_REGS_OUT msgresponse */ +#define MC_CMD_READ_REGS_OUT_LEN 308 +/* Whether the corresponding register entry contains a valid value */ +#define MC_CMD_READ_REGS_OUT_MASK_OFST 0 +#define MC_CMD_READ_REGS_OUT_MASK_LEN 16 +/* Same order as MIPS GDB (r0-r31, sr, lo, hi, bad, cause, 32 x float, fsr, + * fir, fp) + */ +#define MC_CMD_READ_REGS_OUT_REGS_OFST 16 +#define MC_CMD_READ_REGS_OUT_REGS_LEN 4 +#define MC_CMD_READ_REGS_OUT_REGS_NUM 73 + + +/***********************************/ +/* MC_CMD_INIT_EVQ + * Set up an event queue according to the supplied parameters. The IN arguments + * end with an address for each 4k of host memory required to back the EVQ. + */ +#define MC_CMD_INIT_EVQ 0x80 +#undef MC_CMD_0x80_PRIVILEGE_CTG + +#define MC_CMD_0x80_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_INIT_EVQ_IN msgrequest */ +#define MC_CMD_INIT_EVQ_IN_LENMIN 44 +#define MC_CMD_INIT_EVQ_IN_LENMAX 548 +#define MC_CMD_INIT_EVQ_IN_LENMAX_MCDI2 548 +#define MC_CMD_INIT_EVQ_IN_LEN(num) (36+8*(num)) +#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_NUM(len) (((len)-36)/8) +/* Size, in entries */ +#define MC_CMD_INIT_EVQ_IN_SIZE_OFST 0 +#define MC_CMD_INIT_EVQ_IN_SIZE_LEN 4 +/* Desired instance. Must be set to a specific instance, which is a function + * local queue index. The calling client must be the currently-assigned user of + * this VI (see MC_CMD_SET_VI_USER). + */ +#define MC_CMD_INIT_EVQ_IN_INSTANCE_OFST 4 +#define MC_CMD_INIT_EVQ_IN_INSTANCE_LEN 4 +/* The initial timer value. The load value is ignored if the timer mode is DIS. 
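+ *
+ * As an illustration (a sketch only; the exact moderation semantics are
+ * device specific): a driver asking for an interrupting queue moderated by
+ * the hold-off timer might set FLAG_INTERRUPTING and FLAG_USE_TIMER in
+ * FLAGS and then program the timer fields as:
+ *
+ *   MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_MODE,
+ *                  MC_CMD_INIT_EVQ_IN_TMR_INT_HLDOFF);
+ *   MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_LOAD, ticks);
+ *   MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_RELOAD, ticks);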
+ */
+#define MC_CMD_INIT_EVQ_IN_TMR_LOAD_OFST 8
+#define MC_CMD_INIT_EVQ_IN_TMR_LOAD_LEN 4
+/* The reload value is ignored in one-shot modes */
+#define MC_CMD_INIT_EVQ_IN_TMR_RELOAD_OFST 12
+#define MC_CMD_INIT_EVQ_IN_TMR_RELOAD_LEN 4
+/* tbd */
+#define MC_CMD_INIT_EVQ_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_EVQ_IN_FLAGS_LEN 4
+#define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_OFST 16
+#define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_LBN 0
+#define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_OFST 16
+#define MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_LBN 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_INT_ARMD_OFST 16
+#define MC_CMD_INIT_EVQ_IN_FLAG_INT_ARMD_LBN 2
+#define MC_CMD_INIT_EVQ_IN_FLAG_INT_ARMD_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_CUT_THRU_OFST 16
+#define MC_CMD_INIT_EVQ_IN_FLAG_CUT_THRU_LBN 3
+#define MC_CMD_INIT_EVQ_IN_FLAG_CUT_THRU_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_RX_MERGE_OFST 16
+#define MC_CMD_INIT_EVQ_IN_FLAG_RX_MERGE_LBN 4
+#define MC_CMD_INIT_EVQ_IN_FLAG_RX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_OFST 16
+#define MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_LBN 5
+#define MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_OFST 16
+#define MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_LBN 6
+#define MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_TMR_MODE_OFST 20
+#define MC_CMD_INIT_EVQ_IN_TMR_MODE_LEN 4
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS 0x0
+/* enum: Immediate */
+#define MC_CMD_INIT_EVQ_IN_TMR_IMMED_START 0x1
+/* enum: Triggered */
+#define MC_CMD_INIT_EVQ_IN_TMR_TRIG_START 0x2
+/* enum: Hold-off */
+#define MC_CMD_INIT_EVQ_IN_TMR_INT_HLDOFF 0x3
+/* Target EVQ for wakeups if in wakeup mode. */
+#define MC_CMD_INIT_EVQ_IN_TARGET_EVQ_OFST 24
+#define MC_CMD_INIT_EVQ_IN_TARGET_EVQ_LEN 4
+/* Target interrupt if in interrupting mode (note union with target EVQ). Use
+ * MC_CMD_RESOURCE_INSTANCE_ANY unless a specific one is required for test
+ * purposes.
+ */
+#define MC_CMD_INIT_EVQ_IN_IRQ_NUM_OFST 24
+#define MC_CMD_INIT_EVQ_IN_IRQ_NUM_LEN 4
+/* Event Counter Mode. */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_OFST 28
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_LEN 4
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS 0x0
+/* enum: Count received packets */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_RX 0x1
+/* enum: Count transmitted packets */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_TX 0x2
+/* enum: Count received and transmitted packets */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_RXTX 0x3
+/* Event queue packet count threshold.
*/ +#define MC_CMD_INIT_EVQ_IN_COUNT_THRSHLD_OFST 32 +#define MC_CMD_INIT_EVQ_IN_COUNT_THRSHLD_LEN 4 +/* 64-bit address of 4k of 4k-aligned host memory buffer */ +#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_OFST 36 +#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LEN 8 +#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LO_OFST 36 +#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LO_LEN 4 +#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LO_LBN 288 +#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LO_WIDTH 32 +#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_HI_OFST 40 +#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_HI_LEN 4 +#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_HI_LBN 320 +#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_HI_WIDTH 32 +#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_MINNUM 1 +#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_MAXNUM 64 +#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_MAXNUM_MCDI2 64 + +/* MC_CMD_INIT_EVQ_OUT msgresponse */ +#define MC_CMD_INIT_EVQ_OUT_LEN 4 +/* Only valid if INTRFLAG was true */ +#define MC_CMD_INIT_EVQ_OUT_IRQ_OFST 0 +#define MC_CMD_INIT_EVQ_OUT_IRQ_LEN 4 + +/* MC_CMD_INIT_EVQ_V2_IN msgrequest */ +#define MC_CMD_INIT_EVQ_V2_IN_LENMIN 44 +#define MC_CMD_INIT_EVQ_V2_IN_LENMAX 548 +#define MC_CMD_INIT_EVQ_V2_IN_LENMAX_MCDI2 548 +#define MC_CMD_INIT_EVQ_V2_IN_LEN(num) (36+8*(num)) +#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_NUM(len) (((len)-36)/8) +/* Size, in entries */ +#define MC_CMD_INIT_EVQ_V2_IN_SIZE_OFST 0 +#define MC_CMD_INIT_EVQ_V2_IN_SIZE_LEN 4 +/* Desired instance. Must be set to a specific instance, which is a function + * local queue index. The calling client must be the currently-assigned user of + * this VI (see MC_CMD_SET_VI_USER). + */ +#define MC_CMD_INIT_EVQ_V2_IN_INSTANCE_OFST 4 +#define MC_CMD_INIT_EVQ_V2_IN_INSTANCE_LEN 4 +/* The initial timer value. The load value is ignored if the timer mode is DIS. + */ +#define MC_CMD_INIT_EVQ_V2_IN_TMR_LOAD_OFST 8 +#define MC_CMD_INIT_EVQ_V2_IN_TMR_LOAD_LEN 4 +/* The reload value is ignored in one-shot modes */ +#define MC_CMD_INIT_EVQ_V2_IN_TMR_RELOAD_OFST 12 +#define MC_CMD_INIT_EVQ_V2_IN_TMR_RELOAD_LEN 4 +/* tbd */ +#define MC_CMD_INIT_EVQ_V2_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_EVQ_V2_IN_FLAGS_LEN 4 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_OFST 16 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_LBN 0 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RPTR_DOS_OFST 16 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RPTR_DOS_LBN 1 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RPTR_DOS_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INT_ARMD_OFST 16 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INT_ARMD_LBN 2 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INT_ARMD_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_CUT_THRU_OFST 16 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_CUT_THRU_LBN 3 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_CUT_THRU_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RX_MERGE_OFST 16 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RX_MERGE_LBN 4 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RX_MERGE_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TX_MERGE_OFST 16 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TX_MERGE_LBN 5 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TX_MERGE_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_USE_TIMER_OFST 16 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_USE_TIMER_LBN 6 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_USE_TIMER_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_OFST 16 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LBN 7 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_WIDTH 4 +/* enum: All initialisation flags specified by host. */ +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_MANUAL 0x0 +/* enum: MEDFORD only. 
+ * Certain initialisation flags specified by host may be
+ * over-ridden by firmware based on licenses and firmware variant in order to
+ * provide the lowest latency achievable. See
+ * MC_CMD_INIT_EVQ_V2/MC_CMD_INIT_EVQ_V2_OUT/FLAGS for list of affected flags.
+ */
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LOW_LATENCY 0x1
+/* enum: MEDFORD only. Certain initialisation flags specified by host may be
+ * over-ridden by firmware based on licenses and firmware variant in order to
+ * provide the best throughput achievable. See
+ * MC_CMD_INIT_EVQ_V2/MC_CMD_INIT_EVQ_V2_OUT/FLAGS for list of affected flags.
+ */
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_THROUGHPUT 0x2
+/* enum: MEDFORD only. Certain initialisation flags may be over-ridden by
+ * firmware based on licenses and firmware variant. See
+ * MC_CMD_INIT_EVQ_V2/MC_CMD_INIT_EVQ_V2_OUT/FLAGS for list of affected flags.
+ */
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO 0x3
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_EXT_WIDTH_OFST 16
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_EXT_WIDTH_LBN 11
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_EXT_WIDTH_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_OFST 20
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_LEN 4
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_DIS 0x0
+/* enum: Immediate */
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_IMMED_START 0x1
+/* enum: Triggered */
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_TRIG_START 0x2
+/* enum: Hold-off */
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_INT_HLDOFF 0x3
+/* Target EVQ for wakeups if in wakeup mode. */
+#define MC_CMD_INIT_EVQ_V2_IN_TARGET_EVQ_OFST 24
+#define MC_CMD_INIT_EVQ_V2_IN_TARGET_EVQ_LEN 4
+/* Target interrupt if in interrupting mode (note union with target EVQ). Use
+ * MC_CMD_RESOURCE_INSTANCE_ANY unless a specific one is required for test
+ * purposes.
+ */
+#define MC_CMD_INIT_EVQ_V2_IN_IRQ_NUM_OFST 24
+#define MC_CMD_INIT_EVQ_V2_IN_IRQ_NUM_LEN 4
+/* Event Counter Mode. */
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_OFST 28
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_LEN 4
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_DIS 0x0
+/* enum: Count received packets */
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_RX 0x1
+/* enum: Count transmitted packets */
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_TX 0x2
+/* enum: Count received and transmitted packets */
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_RXTX 0x3
+/* Event queue packet count threshold.
*/ +#define MC_CMD_INIT_EVQ_V2_IN_COUNT_THRSHLD_OFST 32 +#define MC_CMD_INIT_EVQ_V2_IN_COUNT_THRSHLD_LEN 4 +/* 64-bit address of 4k of 4k-aligned host memory buffer */ +#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_OFST 36 +#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_LEN 8 +#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_LO_OFST 36 +#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_LO_LEN 4 +#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_LO_LBN 288 +#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_LO_WIDTH 32 +#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_HI_OFST 40 +#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_HI_LEN 4 +#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_HI_LBN 320 +#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_HI_WIDTH 32 +#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_MINNUM 1 +#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_MAXNUM 64 +#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_MAXNUM_MCDI2 64 + +/* MC_CMD_INIT_EVQ_V2_OUT msgresponse */ +#define MC_CMD_INIT_EVQ_V2_OUT_LEN 8 +/* Only valid if INTRFLAG was true */ +#define MC_CMD_INIT_EVQ_V2_OUT_IRQ_OFST 0 +#define MC_CMD_INIT_EVQ_V2_OUT_IRQ_LEN 4 +/* Actual configuration applied on the card */ +#define MC_CMD_INIT_EVQ_V2_OUT_FLAGS_OFST 4 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAGS_LEN 4 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_OFST 4 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_LBN 0 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RX_MERGE_OFST 4 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RX_MERGE_LBN 1 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RX_MERGE_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_TX_MERGE_OFST 4 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_TX_MERGE_LBN 2 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_TX_MERGE_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RXQ_FORCE_EV_MERGING_OFST 4 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RXQ_FORCE_EV_MERGING_LBN 3 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RXQ_FORCE_EV_MERGING_WIDTH 1 + +/* MC_CMD_INIT_EVQ_V3_IN msgrequest: Extended request to specify per-queue + * event merge timeouts. + */ +#define MC_CMD_INIT_EVQ_V3_IN_LEN 556 +/* Size, in entries */ +#define MC_CMD_INIT_EVQ_V3_IN_SIZE_OFST 0 +#define MC_CMD_INIT_EVQ_V3_IN_SIZE_LEN 4 +/* Desired instance. Must be set to a specific instance, which is a function + * local queue index. The calling client must be the currently-assigned user of + * this VI (see MC_CMD_SET_VI_USER). + */ +#define MC_CMD_INIT_EVQ_V3_IN_INSTANCE_OFST 4 +#define MC_CMD_INIT_EVQ_V3_IN_INSTANCE_LEN 4 +/* The initial timer value. The load value is ignored if the timer mode is DIS. 
+ */
+#define MC_CMD_INIT_EVQ_V3_IN_TMR_LOAD_OFST 8
+#define MC_CMD_INIT_EVQ_V3_IN_TMR_LOAD_LEN 4
+/* The reload value is ignored in one-shot modes */
+#define MC_CMD_INIT_EVQ_V3_IN_TMR_RELOAD_OFST 12
+#define MC_CMD_INIT_EVQ_V3_IN_TMR_RELOAD_LEN 4
+/* tbd */
+#define MC_CMD_INIT_EVQ_V3_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_EVQ_V3_IN_FLAGS_LEN 4
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_INTERRUPTING_OFST 16
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_INTERRUPTING_LBN 0
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_INTERRUPTING_WIDTH 1
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_RPTR_DOS_OFST 16
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_RPTR_DOS_LBN 1
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_RPTR_DOS_WIDTH 1
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_INT_ARMD_OFST 16
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_INT_ARMD_LBN 2
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_INT_ARMD_WIDTH 1
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_CUT_THRU_OFST 16
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_CUT_THRU_LBN 3
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_CUT_THRU_WIDTH 1
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_RX_MERGE_OFST 16
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_RX_MERGE_LBN 4
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_RX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_TX_MERGE_OFST 16
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_TX_MERGE_LBN 5
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_TX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_USE_TIMER_OFST 16
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_USE_TIMER_LBN 6
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_USE_TIMER_WIDTH 1
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_TYPE_OFST 16
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_TYPE_LBN 7
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_TYPE_WIDTH 4
+/* enum: All initialisation flags specified by host. */
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_TYPE_MANUAL 0x0
+/* enum: MEDFORD only. Certain initialisation flags specified by host may be
+ * over-ridden by firmware based on licenses and firmware variant in order to
+ * provide the lowest latency achievable. See
+ * MC_CMD_INIT_EVQ_V2/MC_CMD_INIT_EVQ_V2_OUT/FLAGS for list of affected flags.
+ */
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_TYPE_LOW_LATENCY 0x1
+/* enum: MEDFORD only. Certain initialisation flags specified by host may be
+ * over-ridden by firmware based on licenses and firmware variant in order to
+ * provide the best throughput achievable. See
+ * MC_CMD_INIT_EVQ_V2/MC_CMD_INIT_EVQ_V2_OUT/FLAGS for list of affected flags.
+ */
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_TYPE_THROUGHPUT 0x2
+/* enum: MEDFORD only. Certain initialisation flags may be over-ridden by
+ * firmware based on licenses and firmware variant. See
+ * MC_CMD_INIT_EVQ_V2/MC_CMD_INIT_EVQ_V2_OUT/FLAGS for list of affected flags.
+ */
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_TYPE_AUTO 0x3
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_EXT_WIDTH_OFST 16
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_EXT_WIDTH_LBN 11
+#define MC_CMD_INIT_EVQ_V3_IN_FLAG_EXT_WIDTH_WIDTH 1
+#define MC_CMD_INIT_EVQ_V3_IN_TMR_MODE_OFST 20
+#define MC_CMD_INIT_EVQ_V3_IN_TMR_MODE_LEN 4
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_V3_IN_TMR_MODE_DIS 0x0
+/* enum: Immediate */
+#define MC_CMD_INIT_EVQ_V3_IN_TMR_IMMED_START 0x1
+/* enum: Triggered */
+#define MC_CMD_INIT_EVQ_V3_IN_TMR_TRIG_START 0x2
+/* enum: Hold-off */
+#define MC_CMD_INIT_EVQ_V3_IN_TMR_INT_HLDOFF 0x3
+/* Target EVQ for wakeups if in wakeup mode. */
+#define MC_CMD_INIT_EVQ_V3_IN_TARGET_EVQ_OFST 24
+#define MC_CMD_INIT_EVQ_V3_IN_TARGET_EVQ_LEN 4
+/* Target interrupt if in interrupting mode (note union with target EVQ). Use
+ * MC_CMD_RESOURCE_INSTANCE_ANY unless a specific one is required for test
+ * purposes.
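+ *
+ * For example (a sketch only):
+ *
+ *   MCDI_SET_DWORD(inbuf, INIT_EVQ_V3_IN_IRQ_NUM,
+ *                  MC_CMD_RESOURCE_INSTANCE_ANY);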
+ */
+#define MC_CMD_INIT_EVQ_V3_IN_IRQ_NUM_OFST 24
+#define MC_CMD_INIT_EVQ_V3_IN_IRQ_NUM_LEN 4
+/* Event Counter Mode. */
+#define MC_CMD_INIT_EVQ_V3_IN_COUNT_MODE_OFST 28
+#define MC_CMD_INIT_EVQ_V3_IN_COUNT_MODE_LEN 4
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_V3_IN_COUNT_MODE_DIS 0x0
+/* enum: Count received packets */
+#define MC_CMD_INIT_EVQ_V3_IN_COUNT_MODE_RX 0x1
+/* enum: Count transmitted packets */
+#define MC_CMD_INIT_EVQ_V3_IN_COUNT_MODE_TX 0x2
+/* enum: Count received and transmitted packets */
+#define MC_CMD_INIT_EVQ_V3_IN_COUNT_MODE_RXTX 0x3
+/* Event queue packet count threshold. */
+#define MC_CMD_INIT_EVQ_V3_IN_COUNT_THRSHLD_OFST 32
+#define MC_CMD_INIT_EVQ_V3_IN_COUNT_THRSHLD_LEN 4
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_EVQ_V3_IN_DMA_ADDR_OFST 36
+#define MC_CMD_INIT_EVQ_V3_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_EVQ_V3_IN_DMA_ADDR_LO_OFST 36
+#define MC_CMD_INIT_EVQ_V3_IN_DMA_ADDR_LO_LEN 4
+#define MC_CMD_INIT_EVQ_V3_IN_DMA_ADDR_LO_LBN 288
+#define MC_CMD_INIT_EVQ_V3_IN_DMA_ADDR_LO_WIDTH 32
+#define MC_CMD_INIT_EVQ_V3_IN_DMA_ADDR_HI_OFST 40
+#define MC_CMD_INIT_EVQ_V3_IN_DMA_ADDR_HI_LEN 4
+#define MC_CMD_INIT_EVQ_V3_IN_DMA_ADDR_HI_LBN 320
+#define MC_CMD_INIT_EVQ_V3_IN_DMA_ADDR_HI_WIDTH 32
+#define MC_CMD_INIT_EVQ_V3_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_EVQ_V3_IN_DMA_ADDR_MAXNUM 64
+#define MC_CMD_INIT_EVQ_V3_IN_DMA_ADDR_MAXNUM_MCDI2 64
+/* Receive event merge timeout to configure, in nanoseconds. The valid range
+ * and granularity are device specific. Specify 0 to use the firmware's default
+ * value. This field is ignored and per-queue merging is disabled if
+ * MC_CMD_INIT_EVQ/MC_CMD_INIT_EVQ_IN/FLAG_RX_MERGE is not set.
+ */
+#define MC_CMD_INIT_EVQ_V3_IN_RX_MERGE_TIMEOUT_NS_OFST 548
+#define MC_CMD_INIT_EVQ_V3_IN_RX_MERGE_TIMEOUT_NS_LEN 4
+/* Transmit event merge timeout to configure, in nanoseconds. The valid range
+ * and granularity are device specific. Specify 0 to use the firmware's default
+ * value. This field is ignored and per-queue merging is disabled if
+ * MC_CMD_INIT_EVQ/MC_CMD_INIT_EVQ_IN/FLAG_TX_MERGE is not set.
+ */
+#define MC_CMD_INIT_EVQ_V3_IN_TX_MERGE_TIMEOUT_NS_OFST 552
+#define MC_CMD_INIT_EVQ_V3_IN_TX_MERGE_TIMEOUT_NS_LEN 4
+
+/* MC_CMD_INIT_EVQ_V3_OUT msgresponse */
+#define MC_CMD_INIT_EVQ_V3_OUT_LEN 8
+/* Only valid if INTRFLAG was true */
+#define MC_CMD_INIT_EVQ_V3_OUT_IRQ_OFST 0
+#define MC_CMD_INIT_EVQ_V3_OUT_IRQ_LEN 4
+/* Actual configuration applied on the card */
+#define MC_CMD_INIT_EVQ_V3_OUT_FLAGS_OFST 4
+#define MC_CMD_INIT_EVQ_V3_OUT_FLAGS_LEN 4
+#define MC_CMD_INIT_EVQ_V3_OUT_FLAG_CUT_THRU_OFST 4
+#define MC_CMD_INIT_EVQ_V3_OUT_FLAG_CUT_THRU_LBN 0
+#define MC_CMD_INIT_EVQ_V3_OUT_FLAG_CUT_THRU_WIDTH 1
+#define MC_CMD_INIT_EVQ_V3_OUT_FLAG_RX_MERGE_OFST 4
+#define MC_CMD_INIT_EVQ_V3_OUT_FLAG_RX_MERGE_LBN 1
+#define MC_CMD_INIT_EVQ_V3_OUT_FLAG_RX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_V3_OUT_FLAG_TX_MERGE_OFST 4
+#define MC_CMD_INIT_EVQ_V3_OUT_FLAG_TX_MERGE_LBN 2
+#define MC_CMD_INIT_EVQ_V3_OUT_FLAG_TX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_V3_OUT_FLAG_RXQ_FORCE_EV_MERGING_OFST 4
+#define MC_CMD_INIT_EVQ_V3_OUT_FLAG_RXQ_FORCE_EV_MERGING_LBN 3
+#define MC_CMD_INIT_EVQ_V3_OUT_FLAG_RXQ_FORCE_EV_MERGING_WIDTH 1
+
+/* QUEUE_CRC_MODE structuredef */
+#define QUEUE_CRC_MODE_LEN 1
+#define QUEUE_CRC_MODE_MODE_LBN 0
+#define QUEUE_CRC_MODE_MODE_WIDTH 4
+/* enum: No CRC. */
+#define QUEUE_CRC_MODE_NONE 0x0
+/* enum: CRC Fibre Channel over Ethernet (FCoE). */
+#define QUEUE_CRC_MODE_FCOE 0x1
+/* enum: CRC (digest) iSCSI header only. */
+#define QUEUE_CRC_MODE_ISCSI_HDR 0x2
+/* enum: CRC (digest) iSCSI header and payload. */
+#define QUEUE_CRC_MODE_ISCSI 0x3
+/* enum: CRC Fibre Channel over IP over Ethernet. */
+#define QUEUE_CRC_MODE_FCOIPOE 0x4
+/* enum: CRC MPA. */
+#define QUEUE_CRC_MODE_MPA 0x5
+#define QUEUE_CRC_MODE_SPARE_LBN 4
+#define QUEUE_CRC_MODE_SPARE_WIDTH 4
+
+
+/***********************************/
+/* MC_CMD_INIT_RXQ
+ * Set up a receive queue according to the supplied parameters. The IN
+ * arguments end with an address for each 4k of host memory required to back
+ * the RXQ.
+ */
+#define MC_CMD_INIT_RXQ 0x81
+#undef MC_CMD_0x81_PRIVILEGE_CTG
+
+#define MC_CMD_0x81_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_INIT_RXQ_IN msgrequest: Legacy RXQ_INIT request. Use extended version
+ * in new code.
+ */
+#define MC_CMD_INIT_RXQ_IN_LENMIN 36
+#define MC_CMD_INIT_RXQ_IN_LENMAX 252
+#define MC_CMD_INIT_RXQ_IN_LENMAX_MCDI2 1020
+#define MC_CMD_INIT_RXQ_IN_LEN(num) (28+8*(num))
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_NUM(len) (((len)-28)/8)
+/* Size, in entries */
+#define MC_CMD_INIT_RXQ_IN_SIZE_OFST 0
+#define MC_CMD_INIT_RXQ_IN_SIZE_LEN 4
+/* The EVQ to send events to. This is an index originally specified to INIT_EVQ
+ */
+#define MC_CMD_INIT_RXQ_IN_TARGET_EVQ_OFST 4
+#define MC_CMD_INIT_RXQ_IN_TARGET_EVQ_LEN 4
+/* The value to put in the event data. Check hardware spec. for valid range. */
+#define MC_CMD_INIT_RXQ_IN_LABEL_OFST 8
+#define MC_CMD_INIT_RXQ_IN_LABEL_LEN 4
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index. The calling client must be the currently-assigned user of
+ * this VI (see MC_CMD_SET_VI_USER).
+ */
+#define MC_CMD_INIT_RXQ_IN_INSTANCE_OFST 12
+#define MC_CMD_INIT_RXQ_IN_INSTANCE_LEN 4
+/* There will be more flags here. */
+#define MC_CMD_INIT_RXQ_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_RXQ_IN_FLAGS_LEN 4
+#define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_OFST 16
+#define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_LBN 0
+#define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_OFST 16
+#define MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_LBN 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_TIMESTAMP_OFST 16
+#define MC_CMD_INIT_RXQ_IN_FLAG_TIMESTAMP_LBN 2
+#define MC_CMD_INIT_RXQ_IN_FLAG_TIMESTAMP_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_CRC_MODE_OFST 16
+#define MC_CMD_INIT_RXQ_IN_CRC_MODE_LBN 3
+#define MC_CMD_INIT_RXQ_IN_CRC_MODE_WIDTH 4
+#define MC_CMD_INIT_RXQ_IN_FLAG_CHAIN_OFST 16
+#define MC_CMD_INIT_RXQ_IN_FLAG_CHAIN_LBN 7
+#define MC_CMD_INIT_RXQ_IN_FLAG_CHAIN_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_OFST 16
+#define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_LBN 8
+#define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_OFST 16
+#define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_LBN 9
+#define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_UNUSED_OFST 16
+#define MC_CMD_INIT_RXQ_IN_UNUSED_LBN 10
+#define MC_CMD_INIT_RXQ_IN_UNUSED_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define MC_CMD_INIT_RXQ_IN_OWNER_ID_OFST 20
+#define MC_CMD_INIT_RXQ_IN_OWNER_ID_LEN 4
+/* The port ID associated with the v-adaptor which should contain this DMAQ.
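+ *
+ * A minimal sketch of building the legacy request (assumptions: the
+ * function's own v-port via EVB_PORT_ID_ASSIGNED, and a 512-entry ring of
+ * 8-byte descriptors, which fits in a single 4k buffer, hence LEN(1)):
+ *
+ *   MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_RXQ_IN_LEN(1));
+ *
+ *   MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_SIZE, 512);
+ *   MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_TARGET_EVQ, evq_index);
+ *   MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_LABEL, label);
+ *   MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_INSTANCE, rxq_index);
+ *   MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
+ *   MCDI_SET_QWORD(inbuf, INIT_RXQ_IN_DMA_ADDR, dma_addr);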
*/ +#define MC_CMD_INIT_RXQ_IN_PORT_ID_OFST 24 +#define MC_CMD_INIT_RXQ_IN_PORT_ID_LEN 4 +/* 64-bit address of 4k of 4k-aligned host memory buffer */ +#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_OFST 28 +#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LEN 8 +#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LO_OFST 28 +#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LO_LEN 4 +#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LO_LBN 224 +#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LO_WIDTH 32 +#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_HI_OFST 32 +#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_HI_LEN 4 +#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_HI_LBN 256 +#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_HI_WIDTH 32 +#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MINNUM 1 +#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MAXNUM 28 +#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MAXNUM_MCDI2 124 + +/* MC_CMD_INIT_RXQ_EXT_IN msgrequest: Extended RXQ_INIT with additional mode + * flags + */ +#define MC_CMD_INIT_RXQ_EXT_IN_LEN 544 +/* Size, in entries */ +#define MC_CMD_INIT_RXQ_EXT_IN_SIZE_OFST 0 +#define MC_CMD_INIT_RXQ_EXT_IN_SIZE_LEN 4 +/* The EVQ to send events to. This is an index originally specified to + * INIT_EVQ. If DMA_MODE == PACKED_STREAM this must be equal to INSTANCE. + */ +#define MC_CMD_INIT_RXQ_EXT_IN_TARGET_EVQ_OFST 4 +#define MC_CMD_INIT_RXQ_EXT_IN_TARGET_EVQ_LEN 4 +/* The value to put in the event data. Check hardware spec. for valid range. + * This field is ignored if DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER or DMA_MODE + * == PACKED_STREAM. + */ +#define MC_CMD_INIT_RXQ_EXT_IN_LABEL_OFST 8 +#define MC_CMD_INIT_RXQ_EXT_IN_LABEL_LEN 4 +/* Desired instance. Must be set to a specific instance, which is a function + * local queue index. The calling client must be the currently-assigned user of + * this VI (see MC_CMD_SET_VI_USER). + */ +#define MC_CMD_INIT_RXQ_EXT_IN_INSTANCE_OFST 12 +#define MC_CMD_INIT_RXQ_EXT_IN_INSTANCE_LEN 4 +/* There will be more flags here. 
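+ *
+ * For instance (a sketch based on the bit definitions below), a packed
+ * stream queue with 1MB buffers might populate this flags word as:
+ *
+ *   MCDI_POPULATE_DWORD_2(inbuf, INIT_RXQ_EXT_IN_FLAGS,
+ *                         INIT_RXQ_EXT_IN_DMA_MODE,
+ *                         MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM,
+ *                         INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE,
+ *                         MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_1M);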
*/ +#define MC_CMD_INIT_RXQ_EXT_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAGS_LEN 4 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_OFST 16 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_OFST 16 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_LBN 1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_TIMESTAMP_OFST 16 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_TIMESTAMP_LBN 2 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_TIMESTAMP_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_CRC_MODE_OFST 16 +#define MC_CMD_INIT_RXQ_EXT_IN_CRC_MODE_LBN 3 +#define MC_CMD_INIT_RXQ_EXT_IN_CRC_MODE_WIDTH 4 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_CHAIN_OFST 16 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_CHAIN_LBN 7 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_CHAIN_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_PREFIX_OFST 16 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_PREFIX_LBN 8 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_PREFIX_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER_OFST 16 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER_LBN 9 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_OFST 16 +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_LBN 10 +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_WIDTH 4 +/* enum: One packet per descriptor (for normal networking) */ +#define MC_CMD_INIT_RXQ_EXT_IN_SINGLE_PACKET 0x0 +/* enum: Pack multiple packets into large descriptors (for SolarCapture) */ +#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM 0x1 +/* enum: Pack multiple packets into large descriptors using the format designed + * to maximise packet rate. This mode uses 1 "bucket" per descriptor with + * multiple fixed-size packet buffers within each bucket. For a full + * description see SF-119419-TC. This mode is only supported by "dpdk" datapath + * firmware. + */ +#define MC_CMD_INIT_RXQ_EXT_IN_EQUAL_STRIDE_SUPER_BUFFER 0x2 +/* enum: Deprecated name for EQUAL_STRIDE_SUPER_BUFFER. */ +#define MC_CMD_INIT_RXQ_EXT_IN_EQUAL_STRIDE_PACKED_STREAM 0x2 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_OFST 16 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_LBN 14 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_OFST 16 +#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_LBN 15 +#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_WIDTH 3 +#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_1M 0x0 /* enum */ +#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_512K 0x1 /* enum */ +#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_256K 0x2 /* enum */ +#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_128K 0x3 /* enum */ +#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_64K 0x4 /* enum */ +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_OFST 16 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_LBN 18 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING_OFST 16 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING_LBN 19 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_NO_CONT_EV_OFST 16 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_NO_CONT_EV_LBN 20 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_NO_CONT_EV_WIDTH 1 +/* Owner ID to use if in buffer mode (zero if physical) */ +#define MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_OFST 20 +#define MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_LEN 4 +/* The port ID associated with the v-adaptor which should contain this DMAQ. 
*/ +#define MC_CMD_INIT_RXQ_EXT_IN_PORT_ID_OFST 24 +#define MC_CMD_INIT_RXQ_EXT_IN_PORT_ID_LEN 4 +/* 64-bit address of 4k of 4k-aligned host memory buffer */ +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_OFST 28 +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LEN 8 +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LO_OFST 28 +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LO_LEN 4 +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LO_LBN 224 +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LO_WIDTH 32 +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_HI_OFST 32 +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_HI_LEN 4 +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_HI_LBN 256 +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_HI_WIDTH 32 +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_MINNUM 0 +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_MAXNUM 64 +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_MAXNUM_MCDI2 64 +/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */ +#define MC_CMD_INIT_RXQ_EXT_IN_SNAPSHOT_LENGTH_OFST 540 +#define MC_CMD_INIT_RXQ_EXT_IN_SNAPSHOT_LENGTH_LEN 4 + +/* MC_CMD_INIT_RXQ_V3_IN msgrequest */ +#define MC_CMD_INIT_RXQ_V3_IN_LEN 560 +/* Size, in entries */ +#define MC_CMD_INIT_RXQ_V3_IN_SIZE_OFST 0 +#define MC_CMD_INIT_RXQ_V3_IN_SIZE_LEN 4 +/* The EVQ to send events to. This is an index originally specified to + * INIT_EVQ. If DMA_MODE == PACKED_STREAM this must be equal to INSTANCE. + */ +#define MC_CMD_INIT_RXQ_V3_IN_TARGET_EVQ_OFST 4 +#define MC_CMD_INIT_RXQ_V3_IN_TARGET_EVQ_LEN 4 +/* The value to put in the event data. Check hardware spec. for valid range. + * This field is ignored if DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER or DMA_MODE + * == PACKED_STREAM. + */ +#define MC_CMD_INIT_RXQ_V3_IN_LABEL_OFST 8 +#define MC_CMD_INIT_RXQ_V3_IN_LABEL_LEN 4 +/* Desired instance. Must be set to a specific instance, which is a function + * local queue index. The calling client must be the currently-assigned user of + * this VI (see MC_CMD_SET_VI_USER). + */ +#define MC_CMD_INIT_RXQ_V3_IN_INSTANCE_OFST 12 +#define MC_CMD_INIT_RXQ_V3_IN_INSTANCE_LEN 4 +/* There will be more flags here. 
*/ +#define MC_CMD_INIT_RXQ_V3_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_RXQ_V3_IN_FLAGS_LEN 4 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_BUFF_MODE_OFST 16 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_BUFF_MODE_LBN 0 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_BUFF_MODE_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_HDR_SPLIT_OFST 16 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_HDR_SPLIT_LBN 1 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_HDR_SPLIT_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_TIMESTAMP_OFST 16 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_TIMESTAMP_LBN 2 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_TIMESTAMP_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_CRC_MODE_OFST 16 +#define MC_CMD_INIT_RXQ_V3_IN_CRC_MODE_LBN 3 +#define MC_CMD_INIT_RXQ_V3_IN_CRC_MODE_WIDTH 4 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_CHAIN_OFST 16 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_CHAIN_LBN 7 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_CHAIN_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_PREFIX_OFST 16 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_PREFIX_LBN 8 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_PREFIX_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_DISABLE_SCATTER_OFST 16 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_DISABLE_SCATTER_LBN 9 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_DMA_MODE_OFST 16 +#define MC_CMD_INIT_RXQ_V3_IN_DMA_MODE_LBN 10 +#define MC_CMD_INIT_RXQ_V3_IN_DMA_MODE_WIDTH 4 +/* enum: One packet per descriptor (for normal networking) */ +#define MC_CMD_INIT_RXQ_V3_IN_SINGLE_PACKET 0x0 +/* enum: Pack multiple packets into large descriptors (for SolarCapture) */ +#define MC_CMD_INIT_RXQ_V3_IN_PACKED_STREAM 0x1 +/* enum: Pack multiple packets into large descriptors using the format designed + * to maximise packet rate. This mode uses 1 "bucket" per descriptor with + * multiple fixed-size packet buffers within each bucket. For a full + * description see SF-119419-TC. This mode is only supported by "dpdk" datapath + * firmware. + */ +#define MC_CMD_INIT_RXQ_V3_IN_EQUAL_STRIDE_SUPER_BUFFER 0x2 +/* enum: Deprecated name for EQUAL_STRIDE_SUPER_BUFFER. */ +#define MC_CMD_INIT_RXQ_V3_IN_EQUAL_STRIDE_PACKED_STREAM 0x2 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_SNAPSHOT_MODE_OFST 16 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_SNAPSHOT_MODE_LBN 14 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_SNAPSHOT_MODE_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_PACKED_STREAM_BUFF_SIZE_OFST 16 +#define MC_CMD_INIT_RXQ_V3_IN_PACKED_STREAM_BUFF_SIZE_LBN 15 +#define MC_CMD_INIT_RXQ_V3_IN_PACKED_STREAM_BUFF_SIZE_WIDTH 3 +#define MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_1M 0x0 /* enum */ +#define MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_512K 0x1 /* enum */ +#define MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_256K 0x2 /* enum */ +#define MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_128K 0x3 /* enum */ +#define MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_64K 0x4 /* enum */ +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_WANT_OUTER_CLASSES_OFST 16 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_WANT_OUTER_CLASSES_LBN 18 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_FORCE_EV_MERGING_OFST 16 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_FORCE_EV_MERGING_LBN 19 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_FORCE_EV_MERGING_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_NO_CONT_EV_OFST 16 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_NO_CONT_EV_LBN 20 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_NO_CONT_EV_WIDTH 1 +/* Owner ID to use if in buffer mode (zero if physical) */ +#define MC_CMD_INIT_RXQ_V3_IN_OWNER_ID_OFST 20 +#define MC_CMD_INIT_RXQ_V3_IN_OWNER_ID_LEN 4 +/* The port ID associated with the v-adaptor which should contain this DMAQ. 
*/ +#define MC_CMD_INIT_RXQ_V3_IN_PORT_ID_OFST 24 +#define MC_CMD_INIT_RXQ_V3_IN_PORT_ID_LEN 4 +/* 64-bit address of 4k of 4k-aligned host memory buffer */ +#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_OFST 28 +#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_LEN 8 +#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_LO_OFST 28 +#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_LO_LEN 4 +#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_LO_LBN 224 +#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_LO_WIDTH 32 +#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_HI_OFST 32 +#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_HI_LEN 4 +#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_HI_LBN 256 +#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_HI_WIDTH 32 +#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_MINNUM 0 +#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_MAXNUM 64 +#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_MAXNUM_MCDI2 64 +/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */ +#define MC_CMD_INIT_RXQ_V3_IN_SNAPSHOT_LENGTH_OFST 540 +#define MC_CMD_INIT_RXQ_V3_IN_SNAPSHOT_LENGTH_LEN 4 +/* The number of packet buffers that will be contained within each + * EQUAL_STRIDE_SUPER_BUFFER format bucket supplied by the driver. This field + * is ignored unless DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER. + */ +#define MC_CMD_INIT_RXQ_V3_IN_ES_PACKET_BUFFERS_PER_BUCKET_OFST 544 +#define MC_CMD_INIT_RXQ_V3_IN_ES_PACKET_BUFFERS_PER_BUCKET_LEN 4 +/* The length in bytes of the area in each packet buffer that can be written to + * by the adapter. This is used to store the packet prefix and the packet + * payload. This length does not include any end padding added by the driver. + * This field is ignored unless DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER. + */ +#define MC_CMD_INIT_RXQ_V3_IN_ES_MAX_DMA_LEN_OFST 548 +#define MC_CMD_INIT_RXQ_V3_IN_ES_MAX_DMA_LEN_LEN 4 +/* The length in bytes of a single packet buffer within a + * EQUAL_STRIDE_SUPER_BUFFER format bucket. This field is ignored unless + * DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER. + */ +#define MC_CMD_INIT_RXQ_V3_IN_ES_PACKET_STRIDE_OFST 552 +#define MC_CMD_INIT_RXQ_V3_IN_ES_PACKET_STRIDE_LEN 4 +/* The maximum time in nanoseconds that the datapath will be backpressured if + * there are no RX descriptors available. If the timeout is reached and there + * are still no descriptors then the packet will be dropped. A timeout of 0 + * means the datapath will never be blocked. This field is ignored unless + * DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER. + */ +#define MC_CMD_INIT_RXQ_V3_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_OFST 556 +#define MC_CMD_INIT_RXQ_V3_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_LEN 4 + +/* MC_CMD_INIT_RXQ_V4_IN msgrequest: INIT_RXQ request with new field required + * for systems with a QDMA (currently, Riverhead) + */ +#define MC_CMD_INIT_RXQ_V4_IN_LEN 564 +/* Size, in entries */ +#define MC_CMD_INIT_RXQ_V4_IN_SIZE_OFST 0 +#define MC_CMD_INIT_RXQ_V4_IN_SIZE_LEN 4 +/* The EVQ to send events to. This is an index originally specified to + * INIT_EVQ. If DMA_MODE == PACKED_STREAM this must be equal to INSTANCE. + */ +#define MC_CMD_INIT_RXQ_V4_IN_TARGET_EVQ_OFST 4 +#define MC_CMD_INIT_RXQ_V4_IN_TARGET_EVQ_LEN 4 +/* The value to put in the event data. Check hardware spec. for valid range. + * This field is ignored if DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER or DMA_MODE + * == PACKED_STREAM. + */ +#define MC_CMD_INIT_RXQ_V4_IN_LABEL_OFST 8 +#define MC_CMD_INIT_RXQ_V4_IN_LABEL_LEN 4 +/* Desired instance. Must be set to a specific instance, which is a function + * local queue index. The calling client must be the currently-assigned user of + * this VI (see MC_CMD_SET_VI_USER). 
+ */ +#define MC_CMD_INIT_RXQ_V4_IN_INSTANCE_OFST 12 +#define MC_CMD_INIT_RXQ_V4_IN_INSTANCE_LEN 4 +/* There will be more flags here. */ +#define MC_CMD_INIT_RXQ_V4_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_RXQ_V4_IN_FLAGS_LEN 4 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_BUFF_MODE_OFST 16 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_BUFF_MODE_LBN 0 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_BUFF_MODE_WIDTH 1 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_HDR_SPLIT_OFST 16 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_HDR_SPLIT_LBN 1 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_HDR_SPLIT_WIDTH 1 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_TIMESTAMP_OFST 16 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_TIMESTAMP_LBN 2 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_TIMESTAMP_WIDTH 1 +#define MC_CMD_INIT_RXQ_V4_IN_CRC_MODE_OFST 16 +#define MC_CMD_INIT_RXQ_V4_IN_CRC_MODE_LBN 3 +#define MC_CMD_INIT_RXQ_V4_IN_CRC_MODE_WIDTH 4 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_CHAIN_OFST 16 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_CHAIN_LBN 7 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_CHAIN_WIDTH 1 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_PREFIX_OFST 16 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_PREFIX_LBN 8 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_PREFIX_WIDTH 1 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_DISABLE_SCATTER_OFST 16 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_DISABLE_SCATTER_LBN 9 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_INIT_RXQ_V4_IN_DMA_MODE_OFST 16 +#define MC_CMD_INIT_RXQ_V4_IN_DMA_MODE_LBN 10 +#define MC_CMD_INIT_RXQ_V4_IN_DMA_MODE_WIDTH 4 +/* enum: One packet per descriptor (for normal networking) */ +#define MC_CMD_INIT_RXQ_V4_IN_SINGLE_PACKET 0x0 +/* enum: Pack multiple packets into large descriptors (for SolarCapture) */ +#define MC_CMD_INIT_RXQ_V4_IN_PACKED_STREAM 0x1 +/* enum: Pack multiple packets into large descriptors using the format designed + * to maximise packet rate. This mode uses 1 "bucket" per descriptor with + * multiple fixed-size packet buffers within each bucket. For a full + * description see SF-119419-TC. This mode is only supported by "dpdk" datapath + * firmware. + */ +#define MC_CMD_INIT_RXQ_V4_IN_EQUAL_STRIDE_SUPER_BUFFER 0x2 +/* enum: Deprecated name for EQUAL_STRIDE_SUPER_BUFFER. 
*/ +#define MC_CMD_INIT_RXQ_V4_IN_EQUAL_STRIDE_PACKED_STREAM 0x2 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_SNAPSHOT_MODE_OFST 16 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_SNAPSHOT_MODE_LBN 14 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_SNAPSHOT_MODE_WIDTH 1 +#define MC_CMD_INIT_RXQ_V4_IN_PACKED_STREAM_BUFF_SIZE_OFST 16 +#define MC_CMD_INIT_RXQ_V4_IN_PACKED_STREAM_BUFF_SIZE_LBN 15 +#define MC_CMD_INIT_RXQ_V4_IN_PACKED_STREAM_BUFF_SIZE_WIDTH 3 +#define MC_CMD_INIT_RXQ_V4_IN_PS_BUFF_1M 0x0 /* enum */ +#define MC_CMD_INIT_RXQ_V4_IN_PS_BUFF_512K 0x1 /* enum */ +#define MC_CMD_INIT_RXQ_V4_IN_PS_BUFF_256K 0x2 /* enum */ +#define MC_CMD_INIT_RXQ_V4_IN_PS_BUFF_128K 0x3 /* enum */ +#define MC_CMD_INIT_RXQ_V4_IN_PS_BUFF_64K 0x4 /* enum */ +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_WANT_OUTER_CLASSES_OFST 16 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_WANT_OUTER_CLASSES_LBN 18 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_FORCE_EV_MERGING_OFST 16 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_FORCE_EV_MERGING_LBN 19 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_FORCE_EV_MERGING_WIDTH 1 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_NO_CONT_EV_OFST 16 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_NO_CONT_EV_LBN 20 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_NO_CONT_EV_WIDTH 1 +/* Owner ID to use if in buffer mode (zero if physical) */ +#define MC_CMD_INIT_RXQ_V4_IN_OWNER_ID_OFST 20 +#define MC_CMD_INIT_RXQ_V4_IN_OWNER_ID_LEN 4 +/* The port ID associated with the v-adaptor which should contain this DMAQ. */ +#define MC_CMD_INIT_RXQ_V4_IN_PORT_ID_OFST 24 +#define MC_CMD_INIT_RXQ_V4_IN_PORT_ID_LEN 4 +/* 64-bit address of 4k of 4k-aligned host memory buffer */ +#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_OFST 28 +#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_LEN 8 +#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_LO_OFST 28 +#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_LO_LEN 4 +#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_LO_LBN 224 +#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_LO_WIDTH 32 +#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_HI_OFST 32 +#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_HI_LEN 4 +#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_HI_LBN 256 +#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_HI_WIDTH 32 +#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_MINNUM 0 +#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_MAXNUM 64 +#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_MAXNUM_MCDI2 64 +/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */ +#define MC_CMD_INIT_RXQ_V4_IN_SNAPSHOT_LENGTH_OFST 540 +#define MC_CMD_INIT_RXQ_V4_IN_SNAPSHOT_LENGTH_LEN 4 +/* The number of packet buffers that will be contained within each + * EQUAL_STRIDE_SUPER_BUFFER format bucket supplied by the driver. This field + * is ignored unless DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER. + */ +#define MC_CMD_INIT_RXQ_V4_IN_ES_PACKET_BUFFERS_PER_BUCKET_OFST 544 +#define MC_CMD_INIT_RXQ_V4_IN_ES_PACKET_BUFFERS_PER_BUCKET_LEN 4 +/* The length in bytes of the area in each packet buffer that can be written to + * by the adapter. This is used to store the packet prefix and the packet + * payload. This length does not include any end padding added by the driver. + * This field is ignored unless DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER. + */ +#define MC_CMD_INIT_RXQ_V4_IN_ES_MAX_DMA_LEN_OFST 548 +#define MC_CMD_INIT_RXQ_V4_IN_ES_MAX_DMA_LEN_LEN 4 +/* The length in bytes of a single packet buffer within a + * EQUAL_STRIDE_SUPER_BUFFER format bucket. This field is ignored unless + * DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER. 
+ */ +#define MC_CMD_INIT_RXQ_V4_IN_ES_PACKET_STRIDE_OFST 552 +#define MC_CMD_INIT_RXQ_V4_IN_ES_PACKET_STRIDE_LEN 4 +/* The maximum time in nanoseconds that the datapath will be backpressured if + * there are no RX descriptors available. If the timeout is reached and there + * are still no descriptors then the packet will be dropped. A timeout of 0 + * means the datapath will never be blocked. This field is ignored unless + * DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER. + */ +#define MC_CMD_INIT_RXQ_V4_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_OFST 556 +#define MC_CMD_INIT_RXQ_V4_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_LEN 4 +/* V4 message data */ +#define MC_CMD_INIT_RXQ_V4_IN_V4_DATA_OFST 560 +#define MC_CMD_INIT_RXQ_V4_IN_V4_DATA_LEN 4 +/* Size in bytes of buffers attached to descriptors posted to this queue. Set + * to zero if using this message on non-QDMA based platforms. Currently in + * Riverhead there is a global limit of eight different buffer sizes across all + * active queues. A 2KB and 4KB buffer is guaranteed to be available, but a + * request for a different buffer size will fail if there are already eight + * other buffer sizes in use. In future Riverhead this limit will go away and + * any size will be accepted. + */ +#define MC_CMD_INIT_RXQ_V4_IN_BUFFER_SIZE_BYTES_OFST 560 +#define MC_CMD_INIT_RXQ_V4_IN_BUFFER_SIZE_BYTES_LEN 4 + +/* MC_CMD_INIT_RXQ_V5_IN msgrequest: INIT_RXQ request with ability to request a + * different RX packet prefix + */ +#define MC_CMD_INIT_RXQ_V5_IN_LEN 568 +/* Size, in entries */ +#define MC_CMD_INIT_RXQ_V5_IN_SIZE_OFST 0 +#define MC_CMD_INIT_RXQ_V5_IN_SIZE_LEN 4 +/* The EVQ to send events to. This is an index originally specified to + * INIT_EVQ. If DMA_MODE == PACKED_STREAM this must be equal to INSTANCE. + */ +#define MC_CMD_INIT_RXQ_V5_IN_TARGET_EVQ_OFST 4 +#define MC_CMD_INIT_RXQ_V5_IN_TARGET_EVQ_LEN 4 +/* The value to put in the event data. Check hardware spec. for valid range. + * This field is ignored if DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER or DMA_MODE + * == PACKED_STREAM. + */ +#define MC_CMD_INIT_RXQ_V5_IN_LABEL_OFST 8 +#define MC_CMD_INIT_RXQ_V5_IN_LABEL_LEN 4 +/* Desired instance. Must be set to a specific instance, which is a function + * local queue index. The calling client must be the currently-assigned user of + * this VI (see MC_CMD_SET_VI_USER). + */ +#define MC_CMD_INIT_RXQ_V5_IN_INSTANCE_OFST 12 +#define MC_CMD_INIT_RXQ_V5_IN_INSTANCE_LEN 4 +/* There will be more flags here. 
*/ +#define MC_CMD_INIT_RXQ_V5_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_RXQ_V5_IN_FLAGS_LEN 4 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_BUFF_MODE_OFST 16 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_BUFF_MODE_LBN 0 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_BUFF_MODE_WIDTH 1 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_HDR_SPLIT_OFST 16 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_HDR_SPLIT_LBN 1 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_HDR_SPLIT_WIDTH 1 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_TIMESTAMP_OFST 16 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_TIMESTAMP_LBN 2 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_TIMESTAMP_WIDTH 1 +#define MC_CMD_INIT_RXQ_V5_IN_CRC_MODE_OFST 16 +#define MC_CMD_INIT_RXQ_V5_IN_CRC_MODE_LBN 3 +#define MC_CMD_INIT_RXQ_V5_IN_CRC_MODE_WIDTH 4 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_CHAIN_OFST 16 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_CHAIN_LBN 7 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_CHAIN_WIDTH 1 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_PREFIX_OFST 16 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_PREFIX_LBN 8 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_PREFIX_WIDTH 1 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_DISABLE_SCATTER_OFST 16 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_DISABLE_SCATTER_LBN 9 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_INIT_RXQ_V5_IN_DMA_MODE_OFST 16 +#define MC_CMD_INIT_RXQ_V5_IN_DMA_MODE_LBN 10 +#define MC_CMD_INIT_RXQ_V5_IN_DMA_MODE_WIDTH 4 +/* enum: One packet per descriptor (for normal networking) */ +#define MC_CMD_INIT_RXQ_V5_IN_SINGLE_PACKET 0x0 +/* enum: Pack multiple packets into large descriptors (for SolarCapture) */ +#define MC_CMD_INIT_RXQ_V5_IN_PACKED_STREAM 0x1 +/* enum: Pack multiple packets into large descriptors using the format designed + * to maximise packet rate. This mode uses 1 "bucket" per descriptor with + * multiple fixed-size packet buffers within each bucket. For a full + * description see SF-119419-TC. This mode is only supported by "dpdk" datapath + * firmware. + */ +#define MC_CMD_INIT_RXQ_V5_IN_EQUAL_STRIDE_SUPER_BUFFER 0x2 +/* enum: Deprecated name for EQUAL_STRIDE_SUPER_BUFFER. */ +#define MC_CMD_INIT_RXQ_V5_IN_EQUAL_STRIDE_PACKED_STREAM 0x2 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_SNAPSHOT_MODE_OFST 16 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_SNAPSHOT_MODE_LBN 14 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_SNAPSHOT_MODE_WIDTH 1 +#define MC_CMD_INIT_RXQ_V5_IN_PACKED_STREAM_BUFF_SIZE_OFST 16 +#define MC_CMD_INIT_RXQ_V5_IN_PACKED_STREAM_BUFF_SIZE_LBN 15 +#define MC_CMD_INIT_RXQ_V5_IN_PACKED_STREAM_BUFF_SIZE_WIDTH 3 +#define MC_CMD_INIT_RXQ_V5_IN_PS_BUFF_1M 0x0 /* enum */ +#define MC_CMD_INIT_RXQ_V5_IN_PS_BUFF_512K 0x1 /* enum */ +#define MC_CMD_INIT_RXQ_V5_IN_PS_BUFF_256K 0x2 /* enum */ +#define MC_CMD_INIT_RXQ_V5_IN_PS_BUFF_128K 0x3 /* enum */ +#define MC_CMD_INIT_RXQ_V5_IN_PS_BUFF_64K 0x4 /* enum */ +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_WANT_OUTER_CLASSES_OFST 16 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_WANT_OUTER_CLASSES_LBN 18 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_FORCE_EV_MERGING_OFST 16 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_FORCE_EV_MERGING_LBN 19 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_FORCE_EV_MERGING_WIDTH 1 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_NO_CONT_EV_OFST 16 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_NO_CONT_EV_LBN 20 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_NO_CONT_EV_WIDTH 1 +/* Owner ID to use if in buffer mode (zero if physical) */ +#define MC_CMD_INIT_RXQ_V5_IN_OWNER_ID_OFST 20 +#define MC_CMD_INIT_RXQ_V5_IN_OWNER_ID_LEN 4 +/* The port ID associated with the v-adaptor which should contain this DMAQ. 
*/
+#define MC_CMD_INIT_RXQ_V5_IN_PORT_ID_OFST 24
+#define MC_CMD_INIT_RXQ_V5_IN_PORT_ID_LEN 4
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_OFST 28
+#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_LO_LEN 4
+#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_LO_LBN 224
+#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_LO_WIDTH 32
+#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_HI_LEN 4
+#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_HI_LBN 256
+#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_HI_WIDTH 32
+#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_MINNUM 0
+#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_MAXNUM 64
+#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_MAXNUM_MCDI2 64
+/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */
+#define MC_CMD_INIT_RXQ_V5_IN_SNAPSHOT_LENGTH_OFST 540
+#define MC_CMD_INIT_RXQ_V5_IN_SNAPSHOT_LENGTH_LEN 4
+/* The number of packet buffers that will be contained within each
+ * EQUAL_STRIDE_SUPER_BUFFER format bucket supplied by the driver. This field
+ * is ignored unless DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define MC_CMD_INIT_RXQ_V5_IN_ES_PACKET_BUFFERS_PER_BUCKET_OFST 544
+#define MC_CMD_INIT_RXQ_V5_IN_ES_PACKET_BUFFERS_PER_BUCKET_LEN 4
+/* The length in bytes of the area in each packet buffer that can be written to
+ * by the adapter. This is used to store the packet prefix and the packet
+ * payload. This length does not include any end padding added by the driver.
+ * This field is ignored unless DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define MC_CMD_INIT_RXQ_V5_IN_ES_MAX_DMA_LEN_OFST 548
+#define MC_CMD_INIT_RXQ_V5_IN_ES_MAX_DMA_LEN_LEN 4
+/* The length in bytes of a single packet buffer within a
+ * EQUAL_STRIDE_SUPER_BUFFER format bucket. This field is ignored unless
+ * DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define MC_CMD_INIT_RXQ_V5_IN_ES_PACKET_STRIDE_OFST 552
+#define MC_CMD_INIT_RXQ_V5_IN_ES_PACKET_STRIDE_LEN 4
+/* The maximum time in nanoseconds that the datapath will be backpressured if
+ * there are no RX descriptors available. If the timeout is reached and there
+ * are still no descriptors then the packet will be dropped. A timeout of 0
+ * means the datapath will never be blocked. This field is ignored unless
+ * DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define MC_CMD_INIT_RXQ_V5_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_OFST 556
+#define MC_CMD_INIT_RXQ_V5_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_LEN 4
+/* V4 message data */
+#define MC_CMD_INIT_RXQ_V5_IN_V4_DATA_OFST 560
+#define MC_CMD_INIT_RXQ_V5_IN_V4_DATA_LEN 4
+/* Size in bytes of buffers attached to descriptors posted to this queue. Set
+ * to zero if using this message on non-QDMA based platforms. Currently in
+ * Riverhead there is a global limit of eight different buffer sizes across all
+ * active queues. A 2KB and 4KB buffer is guaranteed to be available, but a
+ * request for a different buffer size will fail if there are already eight
+ * other buffer sizes in use. In future Riverhead this limit will go away and
+ * any size will be accepted.
+ */
+#define MC_CMD_INIT_RXQ_V5_IN_BUFFER_SIZE_BYTES_OFST 560
+#define MC_CMD_INIT_RXQ_V5_IN_BUFFER_SIZE_BYTES_LEN 4
+/* Prefix id for the RX prefix format to use on packets delivered to this
+ * queue. Zero is always a valid prefix id and means the default prefix format
+ * documented for the platform.
Other prefix ids can be obtained by calling + * MC_CMD_GET_RX_PREFIX_ID with a requested set of prefix fields. + */ +#define MC_CMD_INIT_RXQ_V5_IN_RX_PREFIX_ID_OFST 564 +#define MC_CMD_INIT_RXQ_V5_IN_RX_PREFIX_ID_LEN 4 + +/* MC_CMD_INIT_RXQ_OUT msgresponse */ +#define MC_CMD_INIT_RXQ_OUT_LEN 0 + +/* MC_CMD_INIT_RXQ_EXT_OUT msgresponse */ +#define MC_CMD_INIT_RXQ_EXT_OUT_LEN 0 + +/* MC_CMD_INIT_RXQ_V3_OUT msgresponse */ +#define MC_CMD_INIT_RXQ_V3_OUT_LEN 0 + +/* MC_CMD_INIT_RXQ_V4_OUT msgresponse */ +#define MC_CMD_INIT_RXQ_V4_OUT_LEN 0 + +/* MC_CMD_INIT_RXQ_V5_OUT msgresponse */ +#define MC_CMD_INIT_RXQ_V5_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_INIT_TXQ + */ +#define MC_CMD_INIT_TXQ 0x82 +#undef MC_CMD_0x82_PRIVILEGE_CTG + +#define MC_CMD_0x82_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_INIT_TXQ_IN msgrequest: Legacy INIT_TXQ request. Use extended version + * in new code. + */ +#define MC_CMD_INIT_TXQ_IN_LENMIN 36 +#define MC_CMD_INIT_TXQ_IN_LENMAX 252 +#define MC_CMD_INIT_TXQ_IN_LENMAX_MCDI2 1020 +#define MC_CMD_INIT_TXQ_IN_LEN(num) (28+8*(num)) +#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_NUM(len) (((len)-28)/8) +/* Size, in entries */ +#define MC_CMD_INIT_TXQ_IN_SIZE_OFST 0 +#define MC_CMD_INIT_TXQ_IN_SIZE_LEN 4 +/* The EVQ to send events to. This is an index originally specified to + * INIT_EVQ. + */ +#define MC_CMD_INIT_TXQ_IN_TARGET_EVQ_OFST 4 +#define MC_CMD_INIT_TXQ_IN_TARGET_EVQ_LEN 4 +/* The value to put in the event data. Check hardware spec. for valid range. */ +#define MC_CMD_INIT_TXQ_IN_LABEL_OFST 8 +#define MC_CMD_INIT_TXQ_IN_LABEL_LEN 4 +/* Desired instance. Must be set to a specific instance, which is a function + * local queue index. The calling client must be the currently-assigned user of + * this VI (see MC_CMD_SET_VI_USER). + */ +#define MC_CMD_INIT_TXQ_IN_INSTANCE_OFST 12 +#define MC_CMD_INIT_TXQ_IN_INSTANCE_LEN 4 +/* There will be more flags here. 
*/ +#define MC_CMD_INIT_TXQ_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_TXQ_IN_FLAGS_LEN 4 +#define MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_OFST 16 +#define MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_LBN 0 +#define MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_WIDTH 1 +#define MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_OFST 16 +#define MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_LBN 1 +#define MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_WIDTH 1 +#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_CSUM_DIS_OFST 16 +#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_CSUM_DIS_LBN 2 +#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_CSUM_DIS_WIDTH 1 +#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_UDP_ONLY_OFST 16 +#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_UDP_ONLY_LBN 3 +#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_UDP_ONLY_WIDTH 1 +#define MC_CMD_INIT_TXQ_IN_CRC_MODE_OFST 16 +#define MC_CMD_INIT_TXQ_IN_CRC_MODE_LBN 4 +#define MC_CMD_INIT_TXQ_IN_CRC_MODE_WIDTH 4 +#define MC_CMD_INIT_TXQ_IN_FLAG_TIMESTAMP_OFST 16 +#define MC_CMD_INIT_TXQ_IN_FLAG_TIMESTAMP_LBN 8 +#define MC_CMD_INIT_TXQ_IN_FLAG_TIMESTAMP_WIDTH 1 +#define MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_OFST 16 +#define MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_LBN 9 +#define MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_WIDTH 1 +#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_IP_CSUM_EN_OFST 16 +#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_IP_CSUM_EN_LBN 10 +#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_IP_CSUM_EN_WIDTH 1 +#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_OFST 16 +#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_LBN 11 +#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1 +/* Owner ID to use if in buffer mode (zero if physical) */ +#define MC_CMD_INIT_TXQ_IN_OWNER_ID_OFST 20 +#define MC_CMD_INIT_TXQ_IN_OWNER_ID_LEN 4 +/* The port ID associated with the v-adaptor which should contain this DMAQ. */ +#define MC_CMD_INIT_TXQ_IN_PORT_ID_OFST 24 +#define MC_CMD_INIT_TXQ_IN_PORT_ID_LEN 4 +/* 64-bit address of 4k of 4k-aligned host memory buffer */ +#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_OFST 28 +#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LEN 8 +#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LO_OFST 28 +#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LO_LEN 4 +#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LO_LBN 224 +#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LO_WIDTH 32 +#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_HI_OFST 32 +#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_HI_LEN 4 +#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_HI_LBN 256 +#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_HI_WIDTH 32 +#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MINNUM 1 +#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MAXNUM 28 +#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MAXNUM_MCDI2 124 + +/* MC_CMD_INIT_TXQ_EXT_IN msgrequest: Extended INIT_TXQ with additional mode + * flags + */ +#define MC_CMD_INIT_TXQ_EXT_IN_LEN 544 +/* Size, in entries */ +#define MC_CMD_INIT_TXQ_EXT_IN_SIZE_OFST 0 +#define MC_CMD_INIT_TXQ_EXT_IN_SIZE_LEN 4 +/* The EVQ to send events to. This is an index originally specified to + * INIT_EVQ. + */ +#define MC_CMD_INIT_TXQ_EXT_IN_TARGET_EVQ_OFST 4 +#define MC_CMD_INIT_TXQ_EXT_IN_TARGET_EVQ_LEN 4 +/* The value to put in the event data. Check hardware spec. for valid range. */ +#define MC_CMD_INIT_TXQ_EXT_IN_LABEL_OFST 8 +#define MC_CMD_INIT_TXQ_EXT_IN_LABEL_LEN 4 +/* Desired instance. Must be set to a specific instance, which is a function + * local queue index. The calling client must be the currently-assigned user of + * this VI (see MC_CMD_SET_VI_USER). + */ +#define MC_CMD_INIT_TXQ_EXT_IN_INSTANCE_OFST 12 +#define MC_CMD_INIT_TXQ_EXT_IN_INSTANCE_LEN 4 +/* There will be more flags here. 
*/ +#define MC_CMD_INIT_TXQ_EXT_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAGS_LEN 4 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_OFST 16 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_OFST 16 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_LBN 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_CSUM_DIS_OFST 16 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_CSUM_DIS_LBN 2 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_CSUM_DIS_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_UDP_ONLY_OFST 16 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_UDP_ONLY_LBN 3 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_UDP_ONLY_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_CRC_MODE_OFST 16 +#define MC_CMD_INIT_TXQ_EXT_IN_CRC_MODE_LBN 4 +#define MC_CMD_INIT_TXQ_EXT_IN_CRC_MODE_WIDTH 4 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TIMESTAMP_OFST 16 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TIMESTAMP_LBN 8 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TIMESTAMP_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_PACER_BYPASS_OFST 16 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_PACER_BYPASS_LBN 9 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_PACER_BYPASS_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_OFST 16 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_LBN 10 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_OFST 16 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_LBN 11 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TSOV2_EN_OFST 16 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TSOV2_EN_LBN 12 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TSOV2_EN_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_OFST 16 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_LBN 13 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_UTHRESH_OFST 16 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_UTHRESH_LBN 14 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_UTHRESH_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_M2M_D2C_OFST 16 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_M2M_D2C_LBN 15 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_M2M_D2C_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_DESC_PROXY_OFST 16 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_DESC_PROXY_LBN 16 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_DESC_PROXY_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_ABS_TARGET_EVQ_OFST 16 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_ABS_TARGET_EVQ_LBN 17 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_ABS_TARGET_EVQ_WIDTH 1 +/* Owner ID to use if in buffer mode (zero if physical) */ +#define MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_OFST 20 +#define MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_LEN 4 +/* The port ID associated with the v-adaptor which should contain this DMAQ. 
*/
+#define MC_CMD_INIT_TXQ_EXT_IN_PORT_ID_OFST 24
+#define MC_CMD_INIT_TXQ_EXT_IN_PORT_ID_LEN 4
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_OFST 28
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LO_LEN 4
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LO_LBN 224
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LO_WIDTH 32
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_HI_LEN 4
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_HI_LBN 256
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_HI_WIDTH 32
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MINNUM 0
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MAXNUM 64
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MAXNUM_MCDI2 64
+/* Flags related to Qbb flow control mode. */
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_FLAGS_OFST 540
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_FLAGS_LEN 4
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_OFST 540
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_LBN 0
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_OFST 540
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_LBN 1
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_WIDTH 3
+
+/* MC_CMD_INIT_TXQ_OUT msgresponse */
+#define MC_CMD_INIT_TXQ_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_FINI_EVQ
+ * Teardown an EVQ.
+ *
+ * All DMAQs or EVQs that point to the EVQ to tear down must be torn down first
+ * or the operation will fail with EBUSY.
+ */
+#define MC_CMD_FINI_EVQ 0x83
+#undef MC_CMD_0x83_PRIVILEGE_CTG
+
+#define MC_CMD_0x83_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_FINI_EVQ_IN msgrequest */
+#define MC_CMD_FINI_EVQ_IN_LEN 4
+/* Instance of EVQ to destroy. Should be the same instance as that previously
+ * passed to INIT_EVQ.
+ */
+#define MC_CMD_FINI_EVQ_IN_INSTANCE_OFST 0
+#define MC_CMD_FINI_EVQ_IN_INSTANCE_LEN 4
+
+/* MC_CMD_FINI_EVQ_OUT msgresponse */
+#define MC_CMD_FINI_EVQ_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_FINI_RXQ
+ * Teardown an RXQ.
+ */
+#define MC_CMD_FINI_RXQ 0x84
+#undef MC_CMD_0x84_PRIVILEGE_CTG
+
+#define MC_CMD_0x84_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_FINI_RXQ_IN msgrequest */
+#define MC_CMD_FINI_RXQ_IN_LEN 4
+/* Instance of RXQ to destroy */
+#define MC_CMD_FINI_RXQ_IN_INSTANCE_OFST 0
+#define MC_CMD_FINI_RXQ_IN_INSTANCE_LEN 4
+
+/* MC_CMD_FINI_RXQ_OUT msgresponse */
+#define MC_CMD_FINI_RXQ_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_FINI_TXQ
+ * Teardown a TXQ.
+ */
+#define MC_CMD_FINI_TXQ 0x85
+#undef MC_CMD_0x85_PRIVILEGE_CTG
+
+#define MC_CMD_0x85_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_FINI_TXQ_IN msgrequest */
+#define MC_CMD_FINI_TXQ_IN_LEN 4
+/* Instance of TXQ to destroy */
+#define MC_CMD_FINI_TXQ_IN_INSTANCE_OFST 0
+#define MC_CMD_FINI_TXQ_IN_INSTANCE_LEN 4
+
+/* MC_CMD_FINI_TXQ_OUT msgresponse */
+#define MC_CMD_FINI_TXQ_OUT_LEN 0
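/*
 * Illustrative sketch (editor's example, not part of the MCDI definitions):
 * the _OFST/_LEN pairs in this header are byte offsets and lengths within a
 * little-endian MCDI payload, so a simple request such as MC_CMD_FINI_RXQ_IN
 * can be encoded with plain byte stores. The helper names (mcdi_put_dword,
 * encode_fini_rxq) are hypothetical; the driver proper goes through its own
 * MCDI buffer helpers rather than open-coding this.
 */
#include <stdint.h>
#include <string.h>

/* Store a 32-bit value little-endian at a byte offset in an MCDI buffer. */
static void mcdi_put_dword(uint8_t *buf, unsigned int ofst, uint32_t val)
{
        buf[ofst + 0] = val & 0xff;
        buf[ofst + 1] = (val >> 8) & 0xff;
        buf[ofst + 2] = (val >> 16) & 0xff;
        buf[ofst + 3] = (val >> 24) & 0xff;
}

/* Encode MC_CMD_FINI_RXQ_IN: a single INSTANCE dword at offset 0. */
static size_t encode_fini_rxq(uint8_t *buf, uint32_t instance)
{
        memset(buf, 0, MC_CMD_FINI_RXQ_IN_LEN);
        mcdi_put_dword(buf, MC_CMD_FINI_RXQ_IN_INSTANCE_OFST, instance);
        return MC_CMD_FINI_RXQ_IN_LEN; /* payload length for the transport */
}
/* The same pattern applies to every msgrequest/msgresponse in this header. */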
+
+
+/***********************************/
+/* MC_CMD_DRIVER_EVENT
+ * Generate an event on an EVQ belonging to the function issuing the command.
+ */
+#define MC_CMD_DRIVER_EVENT 0x86
+#undef MC_CMD_0x86_PRIVILEGE_CTG
+
+#define MC_CMD_0x86_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_DRIVER_EVENT_IN msgrequest */
+#define MC_CMD_DRIVER_EVENT_IN_LEN 12
+/* Handle of target EVQ */
+#define MC_CMD_DRIVER_EVENT_IN_EVQ_OFST 0
+#define MC_CMD_DRIVER_EVENT_IN_EVQ_LEN 4
+/* Bits 0 - 63 of event */
+#define MC_CMD_DRIVER_EVENT_IN_DATA_OFST 4
+#define MC_CMD_DRIVER_EVENT_IN_DATA_LEN 8
+#define MC_CMD_DRIVER_EVENT_IN_DATA_LO_OFST 4
+#define MC_CMD_DRIVER_EVENT_IN_DATA_LO_LEN 4
+#define MC_CMD_DRIVER_EVENT_IN_DATA_LO_LBN 32
+#define MC_CMD_DRIVER_EVENT_IN_DATA_LO_WIDTH 32
+#define MC_CMD_DRIVER_EVENT_IN_DATA_HI_OFST 8
+#define MC_CMD_DRIVER_EVENT_IN_DATA_HI_LEN 4
+#define MC_CMD_DRIVER_EVENT_IN_DATA_HI_LBN 64
+#define MC_CMD_DRIVER_EVENT_IN_DATA_HI_WIDTH 32
+
+/* MC_CMD_DRIVER_EVENT_OUT msgresponse */
+#define MC_CMD_DRIVER_EVENT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_PROXY_CMD
+ * Execute an arbitrary MCDI command on behalf of a different function, subject
+ * to security restrictions. The command to be proxied follows immediately
+ * afterward in the host buffer (or on the UART). This command supersedes
+ * MC_CMD_SET_FUNC, which remains available for Siena but is now deprecated.
+ */
+#define MC_CMD_PROXY_CMD 0x5b
+#undef MC_CMD_0x5b_PRIVILEGE_CTG
+
+#define MC_CMD_0x5b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PROXY_CMD_IN msgrequest */
+#define MC_CMD_PROXY_CMD_IN_LEN 4
+/* The handle of the target function. */
+#define MC_CMD_PROXY_CMD_IN_TARGET_OFST 0
+#define MC_CMD_PROXY_CMD_IN_TARGET_LEN 4
+#define MC_CMD_PROXY_CMD_IN_TARGET_PF_OFST 0
+#define MC_CMD_PROXY_CMD_IN_TARGET_PF_LBN 0
+#define MC_CMD_PROXY_CMD_IN_TARGET_PF_WIDTH 16
+#define MC_CMD_PROXY_CMD_IN_TARGET_VF_OFST 0
+#define MC_CMD_PROXY_CMD_IN_TARGET_VF_LBN 16
+#define MC_CMD_PROXY_CMD_IN_TARGET_VF_WIDTH 16
+#define MC_CMD_PROXY_CMD_IN_VF_NULL 0xffff /* enum */
+
+/* MC_CMD_PROXY_CMD_OUT msgresponse */
+#define MC_CMD_PROXY_CMD_OUT_LEN 0
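/*
 * Illustrative sketch (editor's example): TARGET above packs the PF and VF
 * numbers as two 16-bit subfields of one dword, with _LBN giving the low bit
 * number within the dword at _OFST and _WIDTH the field width in bits. The
 * helper names (mcdi_insert_field, proxy_cmd_target) are hypothetical.
 */
#include <stdint.h>

/* Merge a value of the given width into a host-order dword at bit lbn. */
static uint32_t mcdi_insert_field(uint32_t dword, unsigned int lbn,
                                  unsigned int width, uint32_t value)
{
        uint32_t mask = (width < 32) ? ((1u << width) - 1u) : 0xffffffffu;

        return (dword & ~(mask << lbn)) | ((value & mask) << lbn);
}

/* Build the TARGET dword of MC_CMD_PROXY_CMD_IN; pass
 * MC_CMD_PROXY_CMD_IN_VF_NULL as vf when the target is a PF.
 */
static uint32_t proxy_cmd_target(unsigned int pf, unsigned int vf)
{
        uint32_t target = 0;

        target = mcdi_insert_field(target, MC_CMD_PROXY_CMD_IN_TARGET_PF_LBN,
                                   MC_CMD_PROXY_CMD_IN_TARGET_PF_WIDTH, pf);
        target = mcdi_insert_field(target, MC_CMD_PROXY_CMD_IN_TARGET_VF_LBN,
                                   MC_CMD_PROXY_CMD_IN_TARGET_VF_WIDTH, vf);
        return target;
}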
+
+/* MC_PROXY_STATUS_BUFFER structuredef: Host memory status buffer used to
+ * manage proxied requests
+ */
+#define MC_PROXY_STATUS_BUFFER_LEN 16
+/* Handle allocated by the firmware for this proxy transaction */
+#define MC_PROXY_STATUS_BUFFER_HANDLE_OFST 0
+#define MC_PROXY_STATUS_BUFFER_HANDLE_LEN 4
+/* enum: An invalid handle. */
+#define MC_PROXY_STATUS_BUFFER_HANDLE_INVALID 0x0
+#define MC_PROXY_STATUS_BUFFER_HANDLE_LBN 0
+#define MC_PROXY_STATUS_BUFFER_HANDLE_WIDTH 32
+/* The requesting physical function number */
+#define MC_PROXY_STATUS_BUFFER_PF_OFST 4
+#define MC_PROXY_STATUS_BUFFER_PF_LEN 2
+#define MC_PROXY_STATUS_BUFFER_PF_LBN 32
+#define MC_PROXY_STATUS_BUFFER_PF_WIDTH 16
+/* The requesting virtual function number. Set to VF_NULL if the target is a
+ * PF.
+ */
+#define MC_PROXY_STATUS_BUFFER_VF_OFST 6
+#define MC_PROXY_STATUS_BUFFER_VF_LEN 2
+#define MC_PROXY_STATUS_BUFFER_VF_LBN 48
+#define MC_PROXY_STATUS_BUFFER_VF_WIDTH 16
+/* The target function RID. */
+#define MC_PROXY_STATUS_BUFFER_RID_OFST 8
+#define MC_PROXY_STATUS_BUFFER_RID_LEN 2
+#define MC_PROXY_STATUS_BUFFER_RID_LBN 64
+#define MC_PROXY_STATUS_BUFFER_RID_WIDTH 16
+/* The status of the proxy as described in MC_CMD_PROXY_COMPLETE.
+ */
+#define MC_PROXY_STATUS_BUFFER_STATUS_OFST 10
+#define MC_PROXY_STATUS_BUFFER_STATUS_LEN 2
+#define MC_PROXY_STATUS_BUFFER_STATUS_LBN 80
+#define MC_PROXY_STATUS_BUFFER_STATUS_WIDTH 16
+/* If a request is authorized rather than carried out by the host, this is the
+ * elevated privilege mask granted to the requesting function.
+ */
+#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_OFST 12
+#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_LEN 4
+#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_LBN 96
+#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_PROXY_CONFIGURE
+ * Enable/disable authorization of MCDI requests from unprivileged functions by
+ * a designated admin function
+ */
+#define MC_CMD_PROXY_CONFIGURE 0x58
+#undef MC_CMD_0x58_PRIVILEGE_CTG
+
+#define MC_CMD_0x58_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PROXY_CONFIGURE_IN msgrequest */
+#define MC_CMD_PROXY_CONFIGURE_IN_LEN 108
+#define MC_CMD_PROXY_CONFIGURE_IN_FLAGS_OFST 0
+#define MC_CMD_PROXY_CONFIGURE_IN_FLAGS_LEN 4
+#define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_OFST 0
+#define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_LBN 0
+#define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_WIDTH 1
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * of blocks, each of the size STATUS_BLOCK_SIZE.
+ */
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_OFST 4
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LEN 8
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LO_OFST 4
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LO_LEN 4
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LO_LBN 32
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LO_WIDTH 32
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_OFST 8
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_LEN 4
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_LBN 64
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_WIDTH 32
+/* Must be a power of 2 */
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_OFST 12
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_LEN 4
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * of blocks, each of the size REQUEST_BLOCK_SIZE.
+ */
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_OFST 16
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LEN 8
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LO_OFST 16
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LO_LEN 4
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LO_LBN 128
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LO_WIDTH 32
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_OFST 20
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_LEN 4
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_LBN 160
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_WIDTH 32
+/* Must be a power of 2 */
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_OFST 24
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_LEN 4
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * of blocks, each of the size REPLY_BLOCK_SIZE. This buffer is only needed if
+ * host intends to complete proxied operations by using MC_CMD_PROXY_CMD.
+ */
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_OFST 28
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LEN 8
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LO_OFST 28
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LO_LEN 4
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LO_LBN 224
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LO_WIDTH 32
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_OFST 32
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_LEN 4
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_LBN 256
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_WIDTH 32
+/* Must be a power of 2, or zero if this buffer is not provided */
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_OFST 36
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_LEN 4
+/* Applies to all three buffers */
+#define MC_CMD_PROXY_CONFIGURE_IN_NUM_BLOCKS_OFST 40
+#define MC_CMD_PROXY_CONFIGURE_IN_NUM_BLOCKS_LEN 4
+/* A bit mask defining which MCDI operations may be proxied */
+#define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_OFST 44
+#define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_LEN 64
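/*
 * Illustrative sketch (editor's example): each MC_PROXY_STATUS_BUFFER record
 * is MC_PROXY_STATUS_BUFFER_LEN (16) bytes in the status buffer registered
 * via MC_CMD_PROXY_CONFIGURE above, and can be decoded field by field from
 * the little-endian byte offsets. The struct and helper names below are
 * hypothetical, not part of the driver.
 */
#include <stdint.h>

static uint16_t mcdi_get_word(const uint8_t *p, unsigned int ofst)
{
        return (uint16_t)(p[ofst] | (p[ofst + 1] << 8));
}

static uint32_t mcdi_get_dword(const uint8_t *p, unsigned int ofst)
{
        return (uint32_t)p[ofst] | ((uint32_t)p[ofst + 1] << 8) |
               ((uint32_t)p[ofst + 2] << 16) | ((uint32_t)p[ofst + 3] << 24);
}

struct proxy_status {           /* host-side view of one record */
        uint32_t handle;        /* 0 = MC_PROXY_STATUS_BUFFER_HANDLE_INVALID */
        uint16_t pf, vf, rid;   /* requesting function and its RID */
        uint16_t status;        /* an MC_CMD_PROXY_COMPLETE status value */
        uint32_t granted_privileges;
};

static void parse_proxy_status(const uint8_t *rec, struct proxy_status *out)
{
        out->handle = mcdi_get_dword(rec, MC_PROXY_STATUS_BUFFER_HANDLE_OFST);
        out->pf = mcdi_get_word(rec, MC_PROXY_STATUS_BUFFER_PF_OFST);
        out->vf = mcdi_get_word(rec, MC_PROXY_STATUS_BUFFER_VF_OFST);
        out->rid = mcdi_get_word(rec, MC_PROXY_STATUS_BUFFER_RID_OFST);
        out->status = mcdi_get_word(rec, MC_PROXY_STATUS_BUFFER_STATUS_OFST);
        out->granted_privileges =
                mcdi_get_dword(rec, MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_OFST);
}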
+
+/* MC_CMD_PROXY_CONFIGURE_EXT_IN msgrequest */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_LEN 112
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_FLAGS_OFST 0
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_FLAGS_LEN 4
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_OFST 0
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_LBN 0
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_WIDTH 1
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * of blocks, each of the size STATUS_BLOCK_SIZE.
+ */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_OFST 4
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LEN 8
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LO_OFST 4
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LO_LEN 4
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LO_LBN 32
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LO_WIDTH 32
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_OFST 8
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_LEN 4
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_LBN 64
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_WIDTH 32
+/* Must be a power of 2 */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BLOCK_SIZE_OFST 12
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BLOCK_SIZE_LEN 4
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * of blocks, each of the size REQUEST_BLOCK_SIZE.
+ */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_OFST 16
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LEN 8
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LO_OFST 16
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LO_LEN 4
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LO_LBN 128
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LO_WIDTH 32
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_OFST 20
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_LEN 4
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_LBN 160
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_WIDTH 32
+/* Must be a power of 2 */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BLOCK_SIZE_OFST 24
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BLOCK_SIZE_LEN 4
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * of blocks, each of the size REPLY_BLOCK_SIZE.
This buffer is only needed if + * host intends to complete proxied operations by using MC_CMD_PROXY_CMD. + */ +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_OFST 28 +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LEN 8 +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LO_OFST 28 +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LO_LEN 4 +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LO_LBN 224 +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LO_WIDTH 32 +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_OFST 32 +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_LEN 4 +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_LBN 256 +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_WIDTH 32 +/* Must be a power of 2, or zero if this buffer is not provided */ +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BLOCK_SIZE_OFST 36 +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BLOCK_SIZE_LEN 4 +/* Applies to all three buffers */ +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_NUM_BLOCKS_OFST 40 +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_NUM_BLOCKS_LEN 4 +/* A bit mask defining which MCDI operations may be proxied */ +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ALLOWED_MCDI_MASK_OFST 44 +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ALLOWED_MCDI_MASK_LEN 64 +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_RESERVED_OFST 108 +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_RESERVED_LEN 4 + +/* MC_CMD_PROXY_CONFIGURE_OUT msgresponse */ +#define MC_CMD_PROXY_CONFIGURE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_PROXY_COMPLETE + * Tells FW that a requested proxy operation has either been completed (by + * using MC_CMD_PROXY_CMD) or authorized/declined. May only be sent by the + * function that enabled proxying/authorization (by using + * MC_CMD_PROXY_CONFIGURE). + */ +#define MC_CMD_PROXY_COMPLETE 0x5f +#undef MC_CMD_0x5f_PRIVILEGE_CTG + +#define MC_CMD_0x5f_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_PROXY_COMPLETE_IN msgrequest */ +#define MC_CMD_PROXY_COMPLETE_IN_LEN 12 +#define MC_CMD_PROXY_COMPLETE_IN_BLOCK_INDEX_OFST 0 +#define MC_CMD_PROXY_COMPLETE_IN_BLOCK_INDEX_LEN 4 +#define MC_CMD_PROXY_COMPLETE_IN_STATUS_OFST 4 +#define MC_CMD_PROXY_COMPLETE_IN_STATUS_LEN 4 +/* enum: The operation has been completed by using MC_CMD_PROXY_CMD, the reply + * is stored in the REPLY_BUFF. + */ +#define MC_CMD_PROXY_COMPLETE_IN_COMPLETE 0x0 +/* enum: The operation has been authorized. The originating function may now + * try again. + */ +#define MC_CMD_PROXY_COMPLETE_IN_AUTHORIZED 0x1 +/* enum: The operation has been declined. */ +#define MC_CMD_PROXY_COMPLETE_IN_DECLINED 0x2 +/* enum: The authorization failed because the relevant application did not + * respond in time. + */ +#define MC_CMD_PROXY_COMPLETE_IN_TIMEDOUT 0x3 +#define MC_CMD_PROXY_COMPLETE_IN_HANDLE_OFST 8 +#define MC_CMD_PROXY_COMPLETE_IN_HANDLE_LEN 4 + +/* MC_CMD_PROXY_COMPLETE_OUT msgresponse */ +#define MC_CMD_PROXY_COMPLETE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_ALLOC_BUFTBL_CHUNK + * Allocate a set of buffer table entries using the specified owner ID. This + * operation allocates the required buffer table entries (and fails if it + * cannot do so). The buffer table entries will initially be zeroed. 
+ */ +#define MC_CMD_ALLOC_BUFTBL_CHUNK 0x87 +#undef MC_CMD_0x87_PRIVILEGE_CTG + +#define MC_CMD_0x87_PRIVILEGE_CTG SRIOV_CTG_ONLOAD + +/* MC_CMD_ALLOC_BUFTBL_CHUNK_IN msgrequest */ +#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_LEN 8 +/* Owner ID to use */ +#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_OFST 0 +#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_LEN 4 +/* Size of buffer table pages to use, in bytes (note that only a few values are + * legal on any specific hardware). + */ +#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_OFST 4 +#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_LEN 4 + +/* MC_CMD_ALLOC_BUFTBL_CHUNK_OUT msgresponse */ +#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_LEN 12 +#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_OFST 0 +#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_LEN 4 +#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_OFST 4 +#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_LEN 4 +/* Buffer table IDs for use in DMA descriptors. */ +#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_OFST 8 +#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_LEN 4 + + +/***********************************/ +/* MC_CMD_PROGRAM_BUFTBL_ENTRIES + * Reprogram a set of buffer table entries in the specified chunk. + */ +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES 0x88 +#undef MC_CMD_0x88_PRIVILEGE_CTG + +#define MC_CMD_0x88_PRIVILEGE_CTG SRIOV_CTG_ONLOAD + +/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN msgrequest */ +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMIN 20 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX 268 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX_MCDI2 268 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LEN(num) (12+8*(num)) +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_NUM(len) (((len)-12)/8) +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_OFST 0 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_LEN 4 +/* ID */ +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_OFST 4 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_LEN 4 +/* Num entries */ +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 8 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_LEN 4 +/* Buffer table entry address */ +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_OFST 12 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LEN 8 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_OFST 12 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_LEN 4 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_LBN 96 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_WIDTH 32 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_OFST 16 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_LEN 4 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_LBN 128 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_WIDTH 32 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MINNUM 1 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM 32 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM_MCDI2 32 + +/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT msgresponse */ +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_FREE_BUFTBL_CHUNK + */ +#define MC_CMD_FREE_BUFTBL_CHUNK 0x89 +#undef MC_CMD_0x89_PRIVILEGE_CTG + +#define MC_CMD_0x89_PRIVILEGE_CTG SRIOV_CTG_ONLOAD + +/* MC_CMD_FREE_BUFTBL_CHUNK_IN msgrequest */ +#define MC_CMD_FREE_BUFTBL_CHUNK_IN_LEN 4 +#define MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_OFST 0 +#define MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_LEN 4 + +/* MC_CMD_FREE_BUFTBL_CHUNK_OUT msgresponse */ +#define MC_CMD_FREE_BUFTBL_CHUNK_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_FILTER_OP + * Multiplexed MCDI call for filter operations + 
*/ +#define MC_CMD_FILTER_OP 0x8a +#undef MC_CMD_0x8a_PRIVILEGE_CTG + +#define MC_CMD_0x8a_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_FILTER_OP_IN msgrequest */ +#define MC_CMD_FILTER_OP_IN_LEN 108 +/* identifies the type of operation requested */ +#define MC_CMD_FILTER_OP_IN_OP_OFST 0 +#define MC_CMD_FILTER_OP_IN_OP_LEN 4 +/* enum: single-recipient filter insert */ +#define MC_CMD_FILTER_OP_IN_OP_INSERT 0x0 +/* enum: single-recipient filter remove */ +#define MC_CMD_FILTER_OP_IN_OP_REMOVE 0x1 +/* enum: multi-recipient filter subscribe */ +#define MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE 0x2 +/* enum: multi-recipient filter unsubscribe */ +#define MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE 0x3 +/* enum: replace one recipient with another (warning - the filter handle may + * change) + */ +#define MC_CMD_FILTER_OP_IN_OP_REPLACE 0x4 +/* filter handle (for remove / unsubscribe operations) */ +#define MC_CMD_FILTER_OP_IN_HANDLE_OFST 4 +#define MC_CMD_FILTER_OP_IN_HANDLE_LEN 8 +#define MC_CMD_FILTER_OP_IN_HANDLE_LO_OFST 4 +#define MC_CMD_FILTER_OP_IN_HANDLE_LO_LEN 4 +#define MC_CMD_FILTER_OP_IN_HANDLE_LO_LBN 32 +#define MC_CMD_FILTER_OP_IN_HANDLE_LO_WIDTH 32 +#define MC_CMD_FILTER_OP_IN_HANDLE_HI_OFST 8 +#define MC_CMD_FILTER_OP_IN_HANDLE_HI_LEN 4 +#define MC_CMD_FILTER_OP_IN_HANDLE_HI_LBN 64 +#define MC_CMD_FILTER_OP_IN_HANDLE_HI_WIDTH 32 +/* The port ID associated with the v-adaptor which should contain this filter. + */ +#define MC_CMD_FILTER_OP_IN_PORT_ID_OFST 12 +#define MC_CMD_FILTER_OP_IN_PORT_ID_LEN 4 +/* fields to include in match criteria */ +#define MC_CMD_FILTER_OP_IN_MATCH_FIELDS_OFST 16 +#define MC_CMD_FILTER_OP_IN_MATCH_FIELDS_LEN 4 +#define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_OFST 16 +#define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_LBN 0 +#define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_DST_IP_OFST 16 +#define MC_CMD_FILTER_OP_IN_MATCH_DST_IP_LBN 1 +#define MC_CMD_FILTER_OP_IN_MATCH_DST_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC_OFST 16 +#define MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC_LBN 2 +#define MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT_OFST 16 +#define MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT_LBN 3 +#define MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_OFST 16 +#define MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_LBN 4 +#define MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_DST_PORT_OFST 16 +#define MC_CMD_FILTER_OP_IN_MATCH_DST_PORT_LBN 5 +#define MC_CMD_FILTER_OP_IN_MATCH_DST_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE_OFST 16 +#define MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE_LBN 6 +#define MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN_OFST 16 +#define MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN_LBN 7 +#define MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN_OFST 16 +#define MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN_LBN 8 +#define MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO_OFST 16 +#define MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO_LBN 9 +#define MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF0_OFST 16 +#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF0_LBN 10 +#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF0_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF1_OFST 16 +#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF1_LBN 11 +#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF1_WIDTH 1 +#define 
MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_IPV4_MCAST_DST_OFST 16 +#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_IPV4_MCAST_DST_LBN 29 +#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_IPV4_MCAST_DST_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_OFST 16 +#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30 +#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_OFST 16 +#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN 31 +#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1 +/* receive destination */ +#define MC_CMD_FILTER_OP_IN_RX_DEST_OFST 20 +#define MC_CMD_FILTER_OP_IN_RX_DEST_LEN 4 +/* enum: drop packets */ +#define MC_CMD_FILTER_OP_IN_RX_DEST_DROP 0x0 +/* enum: receive to host */ +#define MC_CMD_FILTER_OP_IN_RX_DEST_HOST 0x1 +/* enum: receive to MC */ +#define MC_CMD_FILTER_OP_IN_RX_DEST_MC 0x2 +/* enum: loop back to TXDP 0 */ +#define MC_CMD_FILTER_OP_IN_RX_DEST_TX0 0x3 +/* enum: loop back to TXDP 1 */ +#define MC_CMD_FILTER_OP_IN_RX_DEST_TX1 0x4 +/* receive queue handle (for multiple queue modes, this is the base queue) */ +#define MC_CMD_FILTER_OP_IN_RX_QUEUE_OFST 24 +#define MC_CMD_FILTER_OP_IN_RX_QUEUE_LEN 4 +/* receive mode */ +#define MC_CMD_FILTER_OP_IN_RX_MODE_OFST 28 +#define MC_CMD_FILTER_OP_IN_RX_MODE_LEN 4 +/* enum: receive to just the specified queue */ +#define MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE 0x0 +/* enum: receive to multiple queues using RSS context */ +#define MC_CMD_FILTER_OP_IN_RX_MODE_RSS 0x1 +/* enum: receive to multiple queues using .1p mapping */ +#define MC_CMD_FILTER_OP_IN_RX_MODE_DOT1P_MAPPING 0x2 +/* enum: install a filter entry that will never match; for test purposes only + */ +#define MC_CMD_FILTER_OP_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000 +/* RSS context (for RX_MODE_RSS) or .1p mapping handle (for + * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or + * MC_CMD_DOT1P_MAPPING_ALLOC. 
+ */ +#define MC_CMD_FILTER_OP_IN_RX_CONTEXT_OFST 32 +#define MC_CMD_FILTER_OP_IN_RX_CONTEXT_LEN 4 +/* transmit domain (reserved; set to 0) */ +#define MC_CMD_FILTER_OP_IN_TX_DOMAIN_OFST 36 +#define MC_CMD_FILTER_OP_IN_TX_DOMAIN_LEN 4 +/* transmit destination (either set the MAC and/or PM bits for explicit + * control, or set this field to TX_DEST_DEFAULT for sensible default + * behaviour) + */ +#define MC_CMD_FILTER_OP_IN_TX_DEST_OFST 40 +#define MC_CMD_FILTER_OP_IN_TX_DEST_LEN 4 +/* enum: request default behaviour (based on filter type) */ +#define MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT 0xffffffff +#define MC_CMD_FILTER_OP_IN_TX_DEST_MAC_OFST 40 +#define MC_CMD_FILTER_OP_IN_TX_DEST_MAC_LBN 0 +#define MC_CMD_FILTER_OP_IN_TX_DEST_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_TX_DEST_PM_OFST 40 +#define MC_CMD_FILTER_OP_IN_TX_DEST_PM_LBN 1 +#define MC_CMD_FILTER_OP_IN_TX_DEST_PM_WIDTH 1 +/* source MAC address to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_IN_SRC_MAC_OFST 44 +#define MC_CMD_FILTER_OP_IN_SRC_MAC_LEN 6 +/* source port to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_IN_SRC_PORT_OFST 50 +#define MC_CMD_FILTER_OP_IN_SRC_PORT_LEN 2 +/* destination MAC address to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_IN_DST_MAC_OFST 52 +#define MC_CMD_FILTER_OP_IN_DST_MAC_LEN 6 +/* destination port to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_IN_DST_PORT_OFST 58 +#define MC_CMD_FILTER_OP_IN_DST_PORT_LEN 2 +/* Ethernet type to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_IN_ETHER_TYPE_OFST 60 +#define MC_CMD_FILTER_OP_IN_ETHER_TYPE_LEN 2 +/* Inner VLAN tag to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_IN_INNER_VLAN_OFST 62 +#define MC_CMD_FILTER_OP_IN_INNER_VLAN_LEN 2 +/* Outer VLAN tag to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_IN_OUTER_VLAN_OFST 64 +#define MC_CMD_FILTER_OP_IN_OUTER_VLAN_LEN 2 +/* IP protocol to match (in low byte; set high byte to 0) */ +#define MC_CMD_FILTER_OP_IN_IP_PROTO_OFST 66 +#define MC_CMD_FILTER_OP_IN_IP_PROTO_LEN 2 +/* Firmware defined register 0 to match (reserved; set to 0) */ +#define MC_CMD_FILTER_OP_IN_FWDEF0_OFST 68 +#define MC_CMD_FILTER_OP_IN_FWDEF0_LEN 4 +/* Firmware defined register 1 to match (reserved; set to 0) */ +#define MC_CMD_FILTER_OP_IN_FWDEF1_OFST 72 +#define MC_CMD_FILTER_OP_IN_FWDEF1_LEN 4 +/* source IP address to match (as bytes in network order; set last 12 bytes to + * 0 for IPv4 address) + */ +#define MC_CMD_FILTER_OP_IN_SRC_IP_OFST 76 +#define MC_CMD_FILTER_OP_IN_SRC_IP_LEN 16 +/* destination IP address to match (as bytes in network order; set last 12 + * bytes to 0 for IPv4 address) + */ +#define MC_CMD_FILTER_OP_IN_DST_IP_OFST 92 +#define MC_CMD_FILTER_OP_IN_DST_IP_LEN 16 + +/* MC_CMD_FILTER_OP_EXT_IN msgrequest: Extension to MC_CMD_FILTER_OP_IN to + * include handling of VXLAN/NVGRE encapsulated frame filtering (which is + * supported on Medford only). 
+ */ +#define MC_CMD_FILTER_OP_EXT_IN_LEN 172 +/* identifies the type of operation requested */ +#define MC_CMD_FILTER_OP_EXT_IN_OP_OFST 0 +#define MC_CMD_FILTER_OP_EXT_IN_OP_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_FILTER_OP_IN/OP */ +/* filter handle (for remove / unsubscribe operations) */ +#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_OFST 4 +#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_LEN 8 +#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_LO_OFST 4 +#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_LO_LEN 4 +#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_LO_LBN 32 +#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_LO_WIDTH 32 +#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_HI_OFST 8 +#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_HI_LEN 4 +#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_HI_LBN 64 +#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_HI_WIDTH 32 +/* The port ID associated with the v-adaptor which should contain this filter. + */ +#define MC_CMD_FILTER_OP_EXT_IN_PORT_ID_OFST 12 +#define MC_CMD_FILTER_OP_EXT_IN_PORT_ID_LEN 4 +/* fields to include in match criteria */ +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FIELDS_OFST 16 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FIELDS_LEN 4 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_OFST 16 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_LBN 0 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_OFST 16 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_LBN 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_MAC_OFST 16 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_MAC_LBN 2 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_PORT_OFST 16 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_PORT_LBN 3 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC_OFST 16 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC_LBN 4 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT_OFST 16 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT_LBN 5 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_OFST 16 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN 6 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_INNER_VLAN_OFST 16 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_INNER_VLAN_LBN 7 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_INNER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN_OFST 16 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN_LBN 8 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_OFST 16 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN 9 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FWDEF0_OFST 16 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FWDEF0_LBN 10 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FWDEF0_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID_OFST 16 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID_LBN 11 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_IP_OFST 16 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_IP_LBN 12 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_IP_OFST 16 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_IP_LBN 13 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_MAC_OFST 16 +#define 
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_MAC_LBN 14
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_PORT_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_PORT_LBN 15
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_MAC_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_MAC_LBN 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_PORT_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_PORT_LBN 17
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ETHER_TYPE_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ETHER_TYPE_LBN 18
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ETHER_TYPE_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_INNER_VLAN_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_INNER_VLAN_LBN 19
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_INNER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_OUTER_VLAN_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_OUTER_VLAN_LBN 20
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_OUTER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_IP_PROTO_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_IP_PROTO_LBN 21
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_IP_PROTO_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF0_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF0_LBN 22
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF0_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF1_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF1_LBN 23
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF1_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN 24
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN 25
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_IPV4_MCAST_DST_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_IPV4_MCAST_DST_LBN 29
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_IPV4_MCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN 31
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1
+/* receive destination */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_OFST 20
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_LEN 4
+/* enum: drop packets */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_DROP 0x0
+/* enum: receive to host */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_HOST 0x1
+/* enum: receive to MC */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_MC 0x2
+/* enum: loop back to TXDP 0 */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX0 0x3
+/* enum: loop back to TXDP 1 */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX1 0x4
+/* receive queue handle (for multiple queue modes, this is the base queue) */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_QUEUE_OFST 24
+#define MC_CMD_FILTER_OP_EXT_IN_RX_QUEUE_LEN 4
+/* receive mode */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_OFST 28
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_LEN 4
+/* enum: receive to just the specified queue */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_SIMPLE 0x0
+/* enum: receive to multiple queues using RSS context */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_RSS 0x1
+/* enum: receive to multiple queues using .1p mapping */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_DOT1P_MAPPING 0x2
+/* enum: install a filter entry that will never match; for test purposes only
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000
+/* RSS context (for RX_MODE_RSS) or .1p mapping handle (for
+ * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or
+ * MC_CMD_DOT1P_MAPPING_ALLOC.
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_CONTEXT_OFST 32
+#define MC_CMD_FILTER_OP_EXT_IN_RX_CONTEXT_LEN 4
+/* transmit domain (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DOMAIN_OFST 36
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DOMAIN_LEN 4
+/* transmit destination (either set the MAC and/or PM bits for explicit
+ * control, or set this field to TX_DEST_DEFAULT for sensible default
+ * behaviour)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_OFST 40
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_LEN 4
+/* enum: request default behaviour (based on filter type) */
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_DEFAULT 0xffffffff
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_OFST 40
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_LBN 0
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_PM_OFST 40
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_PM_LBN 1
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_PM_WIDTH 1
+/* source MAC address to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_MAC_OFST 44
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_MAC_LEN 6
+/* source port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_PORT_OFST 50
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_PORT_LEN 2
+/* destination MAC address to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_DST_MAC_OFST 52
+#define MC_CMD_FILTER_OP_EXT_IN_DST_MAC_LEN 6
+/* destination port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_DST_PORT_OFST 58
+#define MC_CMD_FILTER_OP_EXT_IN_DST_PORT_LEN 2
+/* Ethernet type to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_ETHER_TYPE_OFST 60
+#define MC_CMD_FILTER_OP_EXT_IN_ETHER_TYPE_LEN 2
+/* Inner VLAN tag to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_INNER_VLAN_OFST 62
+#define MC_CMD_FILTER_OP_EXT_IN_INNER_VLAN_LEN 2
+/* Outer VLAN tag to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_OUTER_VLAN_OFST 64
+#define MC_CMD_FILTER_OP_EXT_IN_OUTER_VLAN_LEN 2
+/* IP protocol to match (in low byte; set high byte to 0) */
+#define MC_CMD_FILTER_OP_EXT_IN_IP_PROTO_OFST 66
+#define MC_CMD_FILTER_OP_EXT_IN_IP_PROTO_LEN 2
+/* Firmware defined register 0 to match (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_EXT_IN_FWDEF0_OFST 68
+#define MC_CMD_FILTER_OP_EXT_IN_FWDEF0_LEN 4
+/* VNI (for VXLAN/Geneve, when IP protocol is UDP) or VSID (for NVGRE, when IP
+ * protocol is GRE) to match (as bytes in network order; set last byte to 0 for
+ * VXLAN/NVGRE, or 1 for Geneve)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_OR_VSID_OFST 72
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_OR_VSID_LEN 4
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_OFST 72
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_LBN 0
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_WIDTH 24
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_OFST 72
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_LBN 24
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_WIDTH 8
+/* enum: Match VXLAN traffic with this VNI */
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN 0x0
+/* enum: Match Geneve traffic with this VNI */
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE 0x1
+/* enum: Reserved for experimental development use */
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_EXPERIMENTAL 0xfe
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_OFST 72
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_LBN 0
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_WIDTH 24
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_OFST 72
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_LBN 24
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_WIDTH 8
+/* enum: Match NVGRE traffic with this VSID */
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_NVGRE 0x0
+/* source IP address to match (as bytes in network order; set last 12 bytes to
+ * 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_IP_OFST 76
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_IP_LEN 16
+/* destination IP address to match (as bytes in network order; set last 12
+ * bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_DST_IP_OFST 92
+#define MC_CMD_FILTER_OP_EXT_IN_DST_IP_LEN 16
+/* VXLAN/NVGRE inner frame source MAC address to match (as bytes in network
+ * order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_MAC_OFST 108
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_MAC_LEN 6
+/* VXLAN/NVGRE inner frame source port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_PORT_OFST 114
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_PORT_LEN 2
+/* VXLAN/NVGRE inner frame destination MAC address to match (as bytes in
+ * network order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_MAC_OFST 116
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_MAC_LEN 6
+/* VXLAN/NVGRE inner frame destination port to match (as bytes in network
+ * order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_PORT_OFST 122
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_PORT_LEN 2
+/* VXLAN/NVGRE inner frame Ethernet type to match (as bytes in network order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_ETHER_TYPE_OFST 124
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_ETHER_TYPE_LEN 2
+/* VXLAN/NVGRE inner frame Inner VLAN tag to match (as bytes in network order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_INNER_VLAN_OFST 126
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_INNER_VLAN_LEN 2
+/* VXLAN/NVGRE inner frame Outer VLAN tag to match (as bytes in network order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_OUTER_VLAN_OFST 128
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_OUTER_VLAN_LEN 2
+/* VXLAN/NVGRE inner frame IP protocol to match (in low byte; set high byte to
+ * 0)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_IP_PROTO_OFST 130
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_IP_PROTO_LEN 2
+/* VXLAN/NVGRE inner frame Firmware defined register 0 to match (reserved; set
+ * to 0)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF0_OFST 132
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF0_LEN 4
+/* VXLAN/NVGRE inner frame Firmware defined register 1 to match (reserved; set
+ * to 0)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF1_OFST 136
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF1_LEN 4
+/* VXLAN/NVGRE inner frame source IP address to match (as bytes in network
+ * order; set last 12 bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_IP_OFST 140
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_IP_LEN 16
+/* VXLAN/NVGRE inner frame destination IP address to match (as bytes in network
+ * order; set last 12 bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_IP_OFST 156
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_IP_LEN 16
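+
+/* Usage sketch (illustrative only). VNI_OR_VSID packs a 24-bit value and an
+ * 8-bit type into one dword; assuming the MCDI_POPULATE_DWORD_2() helper
+ * from mcdi.h, a VXLAN VNI would be encoded as:
+ *
+ *   MCDI_POPULATE_DWORD_2(inbuf, FILTER_OP_EXT_IN_VNI_OR_VSID,
+ *                         FILTER_OP_EXT_IN_VNI_VALUE, vni,
+ *                         FILTER_OP_EXT_IN_VNI_TYPE,
+ *                         MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN);
+ */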
+
+/* MC_CMD_FILTER_OP_V3_IN msgrequest: FILTER_OP extension to support additional
+ * filter actions for EF100. Some of these actions are also supported on EF10,
+ * for Intel's DPDK (Data Plane Development Kit, dpdk.org) via its rte_flow
+ * API. In the latter case, this extension is only useful with the sfc_efx
+ * driver included as part of DPDK, used in conjunction with the dpdk datapath
+ * firmware variant.
+ */
+#define MC_CMD_FILTER_OP_V3_IN_LEN 180
+/* identifies the type of operation requested */
+#define MC_CMD_FILTER_OP_V3_IN_OP_OFST 0
+#define MC_CMD_FILTER_OP_V3_IN_OP_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_FILTER_OP_IN/OP */
+/* filter handle (for remove / unsubscribe operations) */
+#define MC_CMD_FILTER_OP_V3_IN_HANDLE_OFST 4
+#define MC_CMD_FILTER_OP_V3_IN_HANDLE_LEN 8
+#define MC_CMD_FILTER_OP_V3_IN_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_V3_IN_HANDLE_LO_LEN 4
+#define MC_CMD_FILTER_OP_V3_IN_HANDLE_LO_LBN 32
+#define MC_CMD_FILTER_OP_V3_IN_HANDLE_LO_WIDTH 32
+#define MC_CMD_FILTER_OP_V3_IN_HANDLE_HI_OFST 8
+#define MC_CMD_FILTER_OP_V3_IN_HANDLE_HI_LEN 4
+#define MC_CMD_FILTER_OP_V3_IN_HANDLE_HI_LBN 64
+#define MC_CMD_FILTER_OP_V3_IN_HANDLE_HI_WIDTH 32
+/* The port ID associated with the v-adaptor which should contain this filter.
+ */
+#define MC_CMD_FILTER_OP_V3_IN_PORT_ID_OFST 12
+#define MC_CMD_FILTER_OP_V3_IN_PORT_ID_LEN 4
+/* fields to include in match criteria */
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_FIELDS_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_FIELDS_LEN 4
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_IP_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_IP_LBN 0
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_IP_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_IP_LBN 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_MAC_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_MAC_LBN 2
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_PORT_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_PORT_LBN 3
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_MAC_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_MAC_LBN 4
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_PORT_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_PORT_LBN 5
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_ETHER_TYPE_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_ETHER_TYPE_LBN 6
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_ETHER_TYPE_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_INNER_VLAN_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_INNER_VLAN_LBN 7
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_INNER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_OUTER_VLAN_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_OUTER_VLAN_LBN 8
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_OUTER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IP_PROTO_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IP_PROTO_LBN 9
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IP_PROTO_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_FWDEF0_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_FWDEF0_LBN 10
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_FWDEF0_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_VNI_OR_VSID_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_VNI_OR_VSID_LBN 11
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_VNI_OR_VSID_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_IP_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_IP_LBN 12
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_IP_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_IP_LBN 13
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_MAC_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_MAC_LBN 14
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_PORT_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_PORT_LBN 15
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_MAC_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_MAC_LBN 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_PORT_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_PORT_LBN 17
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_ETHER_TYPE_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_ETHER_TYPE_LBN 18
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_ETHER_TYPE_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_INNER_VLAN_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_INNER_VLAN_LBN 19
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_INNER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_OUTER_VLAN_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_OUTER_VLAN_LBN 20
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_OUTER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_IP_PROTO_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_IP_PROTO_LBN 21
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_IP_PROTO_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF0_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF0_LBN 22
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF0_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF1_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF1_LBN 23
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF1_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN 24
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN 25
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_IPV4_MCAST_DST_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_IPV4_MCAST_DST_LBN 29
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_IPV4_MCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_MCAST_DST_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_UCAST_DST_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_UCAST_DST_LBN 31
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1
+/* receive destination */
+#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_OFST 20
+#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_LEN 4
+/* enum: drop packets */
+#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_DROP 0x0
+/* enum: receive to host */
+#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_HOST 0x1
+/* enum: receive to MC */
+#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_MC 0x2
+/* enum: loop back to TXDP 0 */
+#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_TX0 0x3
+/* enum: loop back to TXDP 1 */
+#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_TX1 0x4
+/* receive queue handle (for multiple queue modes, this is the base queue) */
+#define MC_CMD_FILTER_OP_V3_IN_RX_QUEUE_OFST 24
+#define MC_CMD_FILTER_OP_V3_IN_RX_QUEUE_LEN 4
+/* receive mode */
+#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_OFST 28
+#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_LEN 4
+/* enum: receive to just the specified queue */
+#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_SIMPLE 0x0
+/* enum: receive to multiple queues using RSS context */
+#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_RSS 0x1
+/* enum: receive to multiple queues using .1p mapping */
+#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_DOT1P_MAPPING 0x2
+/* enum: install a filter entry that will never match; for test purposes only
+ */
+#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000
+/* RSS context (for RX_MODE_RSS) or .1p mapping handle (for
+ * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or
+ * MC_CMD_DOT1P_MAPPING_ALLOC.
+ */
+#define MC_CMD_FILTER_OP_V3_IN_RX_CONTEXT_OFST 32
+#define MC_CMD_FILTER_OP_V3_IN_RX_CONTEXT_LEN 4
+/* transmit domain (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_V3_IN_TX_DOMAIN_OFST 36
+#define MC_CMD_FILTER_OP_V3_IN_TX_DOMAIN_LEN 4
+/* transmit destination (either set the MAC and/or PM bits for explicit
+ * control, or set this field to TX_DEST_DEFAULT for sensible default
+ * behaviour)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_OFST 40
+#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_LEN 4
+/* enum: request default behaviour (based on filter type) */
+#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_DEFAULT 0xffffffff
+#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_MAC_OFST 40
+#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_MAC_LBN 0
+#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_PM_OFST 40
+#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_PM_LBN 1
+#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_PM_WIDTH 1
+/* source MAC address to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_V3_IN_SRC_MAC_OFST 44
+#define MC_CMD_FILTER_OP_V3_IN_SRC_MAC_LEN 6
+/* source port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_V3_IN_SRC_PORT_OFST 50
+#define MC_CMD_FILTER_OP_V3_IN_SRC_PORT_LEN 2
+/* destination MAC address to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_V3_IN_DST_MAC_OFST 52
+#define MC_CMD_FILTER_OP_V3_IN_DST_MAC_LEN 6
+/* destination port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_V3_IN_DST_PORT_OFST 58
+#define MC_CMD_FILTER_OP_V3_IN_DST_PORT_LEN 2
+/* Ethernet type to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_V3_IN_ETHER_TYPE_OFST 60
+#define MC_CMD_FILTER_OP_V3_IN_ETHER_TYPE_LEN 2
+/* Inner VLAN tag to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_V3_IN_INNER_VLAN_OFST 62
+#define MC_CMD_FILTER_OP_V3_IN_INNER_VLAN_LEN 2
+/* Outer VLAN tag to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_V3_IN_OUTER_VLAN_OFST 64
+#define MC_CMD_FILTER_OP_V3_IN_OUTER_VLAN_LEN 2
+/* IP protocol to match (in low byte; set high byte to 0) */
+#define MC_CMD_FILTER_OP_V3_IN_IP_PROTO_OFST 66
+#define MC_CMD_FILTER_OP_V3_IN_IP_PROTO_LEN 2
+/* Firmware defined register 0 to match (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_V3_IN_FWDEF0_OFST 68
+#define MC_CMD_FILTER_OP_V3_IN_FWDEF0_LEN 4
+/* VNI (for VXLAN/Geneve, when IP protocol is UDP) or VSID (for NVGRE, when IP
+ * protocol is GRE) to match (as bytes in network order; set last byte to 0 for
+ * VXLAN/NVGRE, or 1 for Geneve)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_VNI_OR_VSID_OFST 72
+#define MC_CMD_FILTER_OP_V3_IN_VNI_OR_VSID_LEN 4
+#define MC_CMD_FILTER_OP_V3_IN_VNI_VALUE_OFST 72
+#define MC_CMD_FILTER_OP_V3_IN_VNI_VALUE_LBN 0
+#define MC_CMD_FILTER_OP_V3_IN_VNI_VALUE_WIDTH 24
+#define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_OFST 72
+#define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_LBN 24
+#define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_WIDTH 8
+/* enum: Match VXLAN traffic with this VNI */
+#define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_VXLAN 0x0
+/* enum: Match Geneve traffic with this VNI */
+#define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_GENEVE 0x1
+/* enum: Reserved for experimental development use */
+#define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_EXPERIMENTAL 0xfe
+#define MC_CMD_FILTER_OP_V3_IN_VSID_VALUE_OFST 72
+#define MC_CMD_FILTER_OP_V3_IN_VSID_VALUE_LBN 0
+#define MC_CMD_FILTER_OP_V3_IN_VSID_VALUE_WIDTH 24
+#define MC_CMD_FILTER_OP_V3_IN_VSID_TYPE_OFST 72
+#define MC_CMD_FILTER_OP_V3_IN_VSID_TYPE_LBN 24
+#define MC_CMD_FILTER_OP_V3_IN_VSID_TYPE_WIDTH 8
+/* enum: Match NVGRE traffic with this VSID */
+#define MC_CMD_FILTER_OP_V3_IN_VSID_TYPE_NVGRE 0x0
+/* source IP address to match (as bytes in network order; set last 12 bytes to
+ * 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_SRC_IP_OFST 76
+#define MC_CMD_FILTER_OP_V3_IN_SRC_IP_LEN 16
+/* destination IP address to match (as bytes in network order; set last 12
+ * bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_DST_IP_OFST 92
+#define MC_CMD_FILTER_OP_V3_IN_DST_IP_LEN 16
+/* VXLAN/NVGRE inner frame source MAC address to match (as bytes in network
+ * order)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_MAC_OFST 108
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_MAC_LEN 6
+/* VXLAN/NVGRE inner frame source port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_PORT_OFST 114
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_PORT_LEN 2
+/* VXLAN/NVGRE inner frame destination MAC address to match (as bytes in
+ * network order)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_MAC_OFST 116
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_MAC_LEN 6
+/* VXLAN/NVGRE inner frame destination port to match (as bytes in network
+ * order)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_PORT_OFST 122
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_PORT_LEN 2
+/* VXLAN/NVGRE inner frame Ethernet type to match (as bytes in network order)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_ETHER_TYPE_OFST 124
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_ETHER_TYPE_LEN 2
+/* VXLAN/NVGRE inner frame Inner VLAN tag to match (as bytes in network order)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_INNER_VLAN_OFST 126
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_INNER_VLAN_LEN 2
+/* VXLAN/NVGRE inner frame Outer VLAN tag to match (as bytes in network order)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_OUTER_VLAN_OFST 128
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_OUTER_VLAN_LEN 2
+/* VXLAN/NVGRE inner frame IP protocol to match (in low byte; set high byte to
+ * 0)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_IP_PROTO_OFST 130
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_IP_PROTO_LEN 2
+/* VXLAN/NVGRE inner frame Firmware defined register 0 to match (reserved; set
+ * to 0)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_FWDEF0_OFST 132
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_FWDEF0_LEN 4
+/* VXLAN/NVGRE inner frame Firmware defined register 1 to match (reserved; set
+ * to 0)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_FWDEF1_OFST 136
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_FWDEF1_LEN 4
+/* VXLAN/NVGRE inner frame source IP address to match (as bytes in network
+ * order; set last 12 bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_IP_OFST 140
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_IP_LEN 16
+/* VXLAN/NVGRE inner frame destination IP address to match (as bytes in network
+ * order; set last 12 bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_IP_OFST 156
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_IP_LEN 16
+/* Flags controlling mutations of the packet and/or metadata when the filter is
+ * matched. The user_mark and user_flag fields' logic is as follows: if
+ * (req.MATCH_BITOR_FLAG == 1) user_flag = req.MATCH_SET_FLAG bit_or user_flag;
+ * else user_flag = req.MATCH_SET_FLAG; if (req.MATCH_SET_MARK == 0) user_mark
+ * = 0; else if (req.MATCH_BITOR_MARK == 1) user_mark = req.MATCH_SET_MARK
+ * bit_or user_mark; else user_mark = req.MATCH_SET_MARK; N.B. These flags
+ * overlap with the MATCH_ACTION field, which is deprecated in favour of this
+ * field. For the cases where these flags induce a valid encoding of the
+ * MATCH_ACTION field, the semantics agree.
+ */
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_FLAGS_OFST 172
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_FLAGS_LEN 4
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SET_FLAG_OFST 172
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SET_FLAG_LBN 0
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SET_FLAG_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SET_MARK_OFST 172
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SET_MARK_LBN 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SET_MARK_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_BITOR_FLAG_OFST 172
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_BITOR_FLAG_LBN 2
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_BITOR_FLAG_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_BITOR_MARK_OFST 172
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_BITOR_MARK_LBN 3
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_BITOR_MARK_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_STRIP_VLAN_OFST 172
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_STRIP_VLAN_LBN 4
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_STRIP_VLAN_WIDTH 1
+/* Deprecated: the overlapping MATCH_ACTION_FLAGS field exposes all of the
+ * functionality of this field in an ABI-backwards-compatible manner, and
+ * should be used instead. Any future extensions should be made to the
+ * MATCH_ACTION_FLAGS field, and not to this field. Set an action for all
+ * packets matching this filter. The DPDK driver and (on EF10) dpdk f/w variant
+ * use their own specific delivery structures, which are documented in the DPDK
+ * Firmware Driver Interface (SF-119419-TC). Requesting anything other than
+ * MATCH_ACTION_NONE on an EF10 NIC running another f/w variant will cause the
+ * filter insertion to fail with ENOTSUP.
+ */
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_OFST 172
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_LEN 4
+/* enum: do nothing extra */
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_NONE 0x0
+/* enum: Set the match flag in the packet prefix for packets matching the
+ * filter (only with dpdk firmware, otherwise fails with ENOTSUP). Used to
+ * support the DPDK rte_flow "FLAG" action.
+ */
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_FLAG 0x1
+/* enum: Insert MATCH_MARK_VALUE into the packet prefix for packets matching
+ * the filter (only with dpdk firmware, otherwise fails with ENOTSUP). Used to
+ * support the DPDK rte_flow "MARK" action.
+ */
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_MARK 0x2
+/* the mark value for MATCH_ACTION_MARK. Requesting a value larger than the
+ * maximum (obtained from MC_CMD_GET_CAPABILITIES_V5/FILTER_ACTION_MARK_MAX)
+ * will cause the filter insertion to fail with EINVAL.
+ */
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_MARK_VALUE_OFST 176
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_MARK_VALUE_LEN 4
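+
+/* The MATCH_ACTION_FLAGS rules above, restated as plain C for readability
+ * (illustrative only; "req." fields abbreviated as in the comment):
+ *
+ *   if (req.MATCH_BITOR_FLAG)
+ *           user_flag |= req.MATCH_SET_FLAG;
+ *   else
+ *           user_flag = req.MATCH_SET_FLAG;
+ *   if (!req.MATCH_SET_MARK)
+ *           user_mark = 0;
+ *   else if (req.MATCH_BITOR_MARK)
+ *           user_mark |= req.MATCH_SET_MARK;
+ *   else
+ *           user_mark = req.MATCH_SET_MARK;
+ */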
+
+/* MC_CMD_FILTER_OP_OUT msgresponse */
+#define MC_CMD_FILTER_OP_OUT_LEN 12
+/* identifies the type of operation requested */
+#define MC_CMD_FILTER_OP_OUT_OP_OFST 0
+#define MC_CMD_FILTER_OP_OUT_OP_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_FILTER_OP_IN/OP */
+/* Returned filter handle (for insert / subscribe operations). Note that these
+ * handles should be considered opaque to the host, although a value of
+ * 0xFFFFFFFF_FFFFFFFF is guaranteed never to be a valid handle.
+ */
+#define MC_CMD_FILTER_OP_OUT_HANDLE_OFST 4
+#define MC_CMD_FILTER_OP_OUT_HANDLE_LEN 8
+#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_LEN 4
+#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_LBN 32
+#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_WIDTH 32
+#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_OFST 8
+#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_LEN 4
+#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_LBN 64
+#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_WIDTH 32
+/* enum: guaranteed invalid filter handle (low 32 bits) */
+#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_INVALID 0xffffffff
+/* enum: guaranteed invalid filter handle (high 32 bits) */
+#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_INVALID 0xffffffff
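+
+/* Usage sketch (illustrative only). Because 0xFFFFFFFF_FFFFFFFF is never a
+ * valid handle, a caller can pre-initialise its record of the handle to
+ * "invalid" and reassemble the real one from the response; MCDI_DWORD() is
+ * the dword accessor from mcdi.h:
+ *
+ *   u64 handle = ((u64)MC_CMD_FILTER_OP_OUT_HANDLE_HI_INVALID << 32) |
+ *                MC_CMD_FILTER_OP_OUT_HANDLE_LO_INVALID;
+ *
+ *   handle = ((u64)MCDI_DWORD(outbuf, FILTER_OP_OUT_HANDLE_HI) << 32) |
+ *            MCDI_DWORD(outbuf, FILTER_OP_OUT_HANDLE_LO);
+ */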
+
+/* MC_CMD_FILTER_OP_EXT_OUT msgresponse */
+#define MC_CMD_FILTER_OP_EXT_OUT_LEN 12
+/* identifies the type of operation requested */
+#define MC_CMD_FILTER_OP_EXT_OUT_OP_OFST 0
+#define MC_CMD_FILTER_OP_EXT_OUT_OP_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_FILTER_OP_EXT_IN/OP */
+/* Returned filter handle (for insert / subscribe operations). Note that these
+ * handles should be considered opaque to the host, although a value of
+ * 0xFFFFFFFF_FFFFFFFF is guaranteed never to be a valid handle.
+ */
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_OFST 4
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LEN 8
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LO_LEN 4
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LO_LBN 32
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LO_WIDTH 32
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_HI_OFST 8
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_HI_LEN 4
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_HI_LBN 64
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_HI_WIDTH 32
+/* Enum values, see field(s): */
+/* MC_CMD_FILTER_OP_OUT/HANDLE */
+
+
+/***********************************/
+/* MC_CMD_GET_PARSER_DISP_INFO
+ * Get information related to the parser-dispatcher subsystem
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO 0xe4
+#undef MC_CMD_0xe4_PRIVILEGE_CTG
+
+#define MC_CMD_0xe4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PARSER_DISP_INFO_IN msgrequest */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_LEN 4
+/* identifies the type of operation requested */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_OFST 0
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_LEN 4
+/* enum: read the list of supported RX filter matches */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES 0x1
+/* enum: read flags indicating restrictions on filter insertion for the calling
+ * client
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_RESTRICTIONS 0x2
+/* enum: read properties relating to security rules (Medford-only; for use by
+ * SolarSecure apps, not directly by drivers. See SF-114946-SW.)
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SECURITY_RULE_INFO 0x3
+/* enum: read the list of supported RX filter matches for VXLAN/NVGRE
+ * encapsulated frames, which follow a different match sequence to normal
+ * frames (Medford only)
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_ENCAP_RX_MATCHES 0x4
+/* enum: read the list of supported matches for the encapsulation detection
+ * rules inserted by MC_CMD_VNIC_ENCAP_RULE_ADD. (ef100 and later)
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_VNIC_ENCAP_MATCHES 0x5
+/* enum: read the supported encapsulation types for the VNIC */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_VNIC_ENCAP_TYPES 0x6
+
+/* MC_CMD_GET_PARSER_DISP_INFO_OUT msgresponse */
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMIN 8
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX 252
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LEN(num) (8+4*(num))
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_NUM(len) (((len)-8)/4)
+/* identifies the type of operation requested */
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_OP_OFST 0
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_OP_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
+/* number of supported match types */
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES_OFST 4
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES_LEN 4
+/* array of supported match types (valid MATCH_FIELDS values for
+ * MC_CMD_FILTER_OP) sorted in decreasing priority order
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_OFST 8
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_LEN 4
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MINNUM 0
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM 61
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM_MCDI2 253
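+
+/* Usage sketch (illustrative only). The _LEN(num)/_NUM(len) macro pair above
+ * converts between entry count and byte length, so the caller recovers the
+ * number of entries from the response length reported by efx_mcdi_rpc():
+ *
+ *   unsigned int count =
+ *       MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_NUM(outlen);
+ *
+ * e.g. a 16-byte response (8-byte fixed part plus two dwords) gives
+ * count == 2.
+ */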
+
+/* MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT msgresponse */
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_LEN 8
+/* identifies the type of operation requested */
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_OP_OFST 0
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_OP_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
+/* bitfield of filter insertion restrictions */
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_RESTRICTION_FLAGS_OFST 4
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_RESTRICTION_FLAGS_LEN 4
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_OFST 4
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_LBN 0
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_WIDTH 1
+
+/* MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT msgresponse:
+ * GET_PARSER_DISP_INFO response format for OP_GET_SECURITY_RULE_INFO.
+ * (Medford-only; for use by SolarSecure apps, not directly by drivers. See
+ * SF-114946-SW.) NOTE - this message definition is provisional. It has not yet
+ * been used in any released code and may change during development. This note
+ * will be removed once it is regarded as stable.
+ */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_LEN 36
+/* identifies the type of operation requested */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_OP_OFST 0
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_OP_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
+/* a version number representing the set of rule lookups that are implemented
+ * by the currently running firmware
+ */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_RULES_VERSION_OFST 4
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_RULES_VERSION_LEN 4
+/* enum: implements lookup sequences described in SF-114946-SW draft C */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_RULES_VERSION_SF_114946_SW_C 0x0
+/* the number of nodes in the subnet map */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_MAP_NUM_NODES_OFST 8
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_MAP_NUM_NODES_LEN 4
+/* the number of entries in one subnet map node */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_MAP_NUM_ENTRIES_PER_NODE_OFST 12
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_MAP_NUM_ENTRIES_PER_NODE_LEN 4
+/* minimum valid value for a subnet ID in a subnet map leaf */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_ID_MIN_OFST 16
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_ID_MIN_LEN 4
+/* maximum valid value for a subnet ID in a subnet map leaf */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_ID_MAX_OFST 20
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_ID_MAX_LEN 4
+/* the number of entries in the local and remote port range maps */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_TREE_NUM_ENTRIES_OFST 24
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_TREE_NUM_ENTRIES_LEN 4
+/* minimum valid value for a portrange ID in a port range map leaf */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_ID_MIN_OFST 28
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_ID_MIN_LEN 4
+/* maximum valid value for a portrange ID in a port range map leaf */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_ID_MAX_OFST 32
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_ID_MAX_LEN 4
+
+/* MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT msgresponse: This response is
+ * returned if a MC_CMD_GET_PARSER_DISP_INFO_IN request is sent with OP value
+ * OP_GET_SUPPORTED_VNIC_ENCAP_MATCHES. It contains information about the
+ * supported match types that can be used in the encapsulation detection rules
+ * inserted by MC_CMD_VNIC_ENCAP_RULE_ADD.
+ */
+#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_LENMIN 8
+#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_LENMAX 252
+#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_LEN(num) (8+4*(num))
+#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_SUPPORTED_MATCHES_NUM(len) (((len)-8)/4)
+/* The op code OP_GET_SUPPORTED_VNIC_ENCAP_MATCHES is returned. */
+#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_OP_OFST 0
+#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_OP_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
+/* number of supported match types */
+#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_NUM_SUPPORTED_MATCHES_OFST 4
+#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_NUM_SUPPORTED_MATCHES_LEN 4
+/* array of supported match types (valid MATCH_FLAGS values for
+ * MC_CMD_VNIC_ENCAP_RULE_ADD) sorted in decreasing priority order
+ */
+#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_SUPPORTED_MATCHES_OFST 8
+#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_SUPPORTED_MATCHES_LEN 4
+#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_SUPPORTED_MATCHES_MINNUM 0
+#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_SUPPORTED_MATCHES_MAXNUM 61
+#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_SUPPORTED_MATCHES_MAXNUM_MCDI2 253
+
+/* MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT msgresponse: Returns
+ * the supported encapsulation types for the VNIC
+ */
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_LEN 8
+/* The op code OP_GET_SUPPORTED_VNIC_ENCAP_TYPES is returned */
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_OP_OFST 0
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_OP_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_ENCAP_TYPES_SUPPORTED_OFST 4
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_ENCAP_TYPES_SUPPORTED_LEN 4
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_ENCAP_TYPE_VXLAN_OFST 4
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_ENCAP_TYPE_VXLAN_LBN 0
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_ENCAP_TYPE_VXLAN_WIDTH 1
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_ENCAP_TYPE_NVGRE_OFST 4
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_ENCAP_TYPE_NVGRE_LBN 1
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_ENCAP_TYPE_NVGRE_WIDTH 1
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_ENCAP_TYPE_GENEVE_OFST 4
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_ENCAP_TYPE_GENEVE_LBN 2
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_ENCAP_TYPE_GENEVE_WIDTH 1
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_ENCAP_TYPE_L2GRE_OFST 4
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_ENCAP_TYPE_L2GRE_LBN 3
+#define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_ENCAP_TYPE_L2GRE_WIDTH 1
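+
+/* Usage sketch (illustrative only). ENCAP_TYPES_SUPPORTED is a bitmask, so an
+ * individual capability such as VXLAN can be tested with its _LBN value:
+ *
+ *   u32 types = MCDI_DWORD(outbuf,
+ *       GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_ENCAP_TYPES_SUPPORTED);
+ *   bool vxlan_ok = types &
+ *       BIT(MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_ENCAP_TYPE_VXLAN_LBN);
+ */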
+
+
+/***********************************/
+/* MC_CMD_PARSER_DISP_RW
+ * Direct read/write of parser-dispatcher state (DICPUs and LUE) for debugging.
+ * Please note that this interface is only of use to debug tools which have
+ * knowledge of firmware and hardware data structures; nothing here is intended
+ * for use by normal driver code. Note that although this command is in the
+ * Admin privilege group, in tamperproof adapters, only read operations are
+ * permitted.
+ */
+#define MC_CMD_PARSER_DISP_RW 0xe5
+#undef MC_CMD_0xe5_PRIVILEGE_CTG
+
+#define MC_CMD_0xe5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PARSER_DISP_RW_IN msgrequest */
+#define MC_CMD_PARSER_DISP_RW_IN_LEN 32
+/* identifies the target of the operation */
+#define MC_CMD_PARSER_DISP_RW_IN_TARGET_OFST 0
+#define MC_CMD_PARSER_DISP_RW_IN_TARGET_LEN 4
+/* enum: RX dispatcher CPU */
+#define MC_CMD_PARSER_DISP_RW_IN_RX_DICPU 0x0
+/* enum: TX dispatcher CPU */
+#define MC_CMD_PARSER_DISP_RW_IN_TX_DICPU 0x1
+/* enum: Lookup engine (with original metadata format). Deprecated; used only
+ * by cmdclient as a fallback for very old Huntington firmware, and not
+ * supported in firmware beyond v6.4.0.1005. Use LUE_VERSIONED_METADATA
+ * instead.
+ */
+#define MC_CMD_PARSER_DISP_RW_IN_LUE 0x2
+/* enum: Lookup engine (with requested metadata format) */
+#define MC_CMD_PARSER_DISP_RW_IN_LUE_VERSIONED_METADATA 0x3
+/* enum: RX0 dispatcher CPU (alias for RX_DICPU; Medford has 2 RX DICPUs) */
+#define MC_CMD_PARSER_DISP_RW_IN_RX0_DICPU 0x0
+/* enum: RX1 dispatcher CPU (only valid for Medford) */
+#define MC_CMD_PARSER_DISP_RW_IN_RX1_DICPU 0x4
+/* enum: Miscellaneous other state (only valid for Medford) */
+#define MC_CMD_PARSER_DISP_RW_IN_MISC_STATE 0x5
+/* identifies the type of operation requested */
+#define MC_CMD_PARSER_DISP_RW_IN_OP_OFST 4
+#define MC_CMD_PARSER_DISP_RW_IN_OP_LEN 4
+/* enum: Read a word of DICPU DMEM or a LUE entry */
+#define MC_CMD_PARSER_DISP_RW_IN_READ 0x0
+/* enum: Write a word of DICPU DMEM or a LUE entry. Not permitted on
+ * tamperproof adapters.
+ */
+#define MC_CMD_PARSER_DISP_RW_IN_WRITE 0x1
+/* enum: Read-modify-write a word of DICPU DMEM (not valid for LUE). Not
+ * permitted on tamperproof adapters.
+ */
+#define MC_CMD_PARSER_DISP_RW_IN_RMW 0x2
+/* data memory address (DICPU targets) or LUE index (LUE targets) */
+#define MC_CMD_PARSER_DISP_RW_IN_ADDRESS_OFST 8
+#define MC_CMD_PARSER_DISP_RW_IN_ADDRESS_LEN 4
+/* selector (for MISC_STATE target) */
+#define MC_CMD_PARSER_DISP_RW_IN_SELECTOR_OFST 8
+#define MC_CMD_PARSER_DISP_RW_IN_SELECTOR_LEN 4
+/* enum: Port to datapath mapping */
+#define MC_CMD_PARSER_DISP_RW_IN_PORT_DP_MAPPING 0x1
+/* value to write (for DMEM writes) */
+#define MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_OFST 12
+#define MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_LEN 4
+/* XOR value (for DMEM read-modify-writes: new = (old & mask) ^ value) */
+#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_OFST 12
+#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_LEN 4
+/* AND mask (for DMEM read-modify-writes: new = (old & mask) ^ value) */
+#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_OFST 16
+#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_LEN 4
+/* metadata format (for LUE reads using LUE_VERSIONED_METADATA) */
+#define MC_CMD_PARSER_DISP_RW_IN_LUE_READ_METADATA_VERSION_OFST 12
+#define MC_CMD_PARSER_DISP_RW_IN_LUE_READ_METADATA_VERSION_LEN 4
+/* value to write (for LUE writes) */
+#define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_OFST 12
+#define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_LEN 20
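+
+/* The read-modify-write rule above ("new = (old & mask) ^ value") yields
+ * set, clear and toggle from one primitive; for a single bit
+ * (illustrative only):
+ *
+ *   new = (old & ~bit) ^ bit;     set 'bit'
+ *   new = (old & ~bit) ^ 0;       clear 'bit'
+ *   new = (old & ~0u) ^ bit;      toggle 'bit'
+ */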
+
+/* MC_CMD_PARSER_DISP_RW_OUT msgresponse */
+#define MC_CMD_PARSER_DISP_RW_OUT_LEN 52
+/* value read (for DMEM reads) */
+#define MC_CMD_PARSER_DISP_RW_OUT_DMEM_READ_VALUE_OFST 0
+#define MC_CMD_PARSER_DISP_RW_OUT_DMEM_READ_VALUE_LEN 4
+/* value read (for LUE reads) */
+#define MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_OFST 0
+#define MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_LEN 20
+/* up to 8 32-bit words of additional soft state from the LUE manager (the
+ * exact content is firmware-dependent and intended only for debug use)
+ */
+#define MC_CMD_PARSER_DISP_RW_OUT_LUE_MGR_STATE_OFST 20
+#define MC_CMD_PARSER_DISP_RW_OUT_LUE_MGR_STATE_LEN 32
+/* datapath(s) used for each port (for MISC_STATE PORT_DP_MAPPING selector) */
+#define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_OFST 0
+#define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_LEN 4
+#define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_NUM 4
+#define MC_CMD_PARSER_DISP_RW_OUT_DP0 0x1 /* enum */
+#define MC_CMD_PARSER_DISP_RW_OUT_DP1 0x2 /* enum */
+
+
+/***********************************/
+/* MC_CMD_GET_PF_COUNT
+ * Get number of PFs on the device.
+ */
+#define MC_CMD_GET_PF_COUNT 0xb6
+#undef MC_CMD_0xb6_PRIVILEGE_CTG
+
+#define MC_CMD_0xb6_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PF_COUNT_IN msgrequest */
+#define MC_CMD_GET_PF_COUNT_IN_LEN 0
+
+/* MC_CMD_GET_PF_COUNT_OUT msgresponse */
+#define MC_CMD_GET_PF_COUNT_OUT_LEN 1
+/* Identifies the number of PFs on the device. */
+#define MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_OFST 0
+#define MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_LEN 1
+
+
+/***********************************/
+/* MC_CMD_SET_PF_COUNT
+ * Set number of PFs on the device.
+ */
+#define MC_CMD_SET_PF_COUNT 0xb7
+
+/* MC_CMD_SET_PF_COUNT_IN msgrequest */
+#define MC_CMD_SET_PF_COUNT_IN_LEN 4
+/* New number of PFs on the device. */
+#define MC_CMD_SET_PF_COUNT_IN_PF_COUNT_OFST 0
+#define MC_CMD_SET_PF_COUNT_IN_PF_COUNT_LEN 4
+
+/* MC_CMD_SET_PF_COUNT_OUT msgresponse */
+#define MC_CMD_SET_PF_COUNT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_PORT_ASSIGNMENT
+ * Get port assignment for current PCI function.
+ */
+#define MC_CMD_GET_PORT_ASSIGNMENT 0xb8
+#undef MC_CMD_0xb8_PRIVILEGE_CTG
+
+#define MC_CMD_0xb8_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PORT_ASSIGNMENT_IN msgrequest */
+#define MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN 0
+
+/* MC_CMD_GET_PORT_ASSIGNMENT_OUT msgresponse */
+#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN 4
+/* Identifies the port assignment for this function. On EF100, it is possible
+ * for the function to have no network port assigned (either because it is not
+ * yet configured, or assigning a port to a given function personality makes no
+ * sense - e.g. virtio-blk), in which case the return value is NULL_PORT.
+ */
+#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_PORT_OFST 0
+#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_PORT_LEN 4
+/* enum: Special value to indicate no port is assigned to a function. */
+#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_NULL_PORT 0xffffffff
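+
+/* Usage sketch (illustrative only). On EF100 a function may legitimately
+ * have no network port, so callers should test for NULL_PORT before using
+ * the returned value:
+ *
+ *   u32 port = MCDI_DWORD(outbuf, GET_PORT_ASSIGNMENT_OUT_PORT);
+ *
+ *   if (port == MC_CMD_GET_PORT_ASSIGNMENT_OUT_NULL_PORT)
+ *           return -ENOENT;      (or otherwise treat as "no port assigned")
+ */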
+
+
+/***********************************/
+/* MC_CMD_SET_PORT_ASSIGNMENT
+ * Set port assignment for current PCI function.
+ */
+#define MC_CMD_SET_PORT_ASSIGNMENT 0xb9
+#undef MC_CMD_0xb9_PRIVILEGE_CTG
+
+#define MC_CMD_0xb9_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_PORT_ASSIGNMENT_IN msgrequest */
+#define MC_CMD_SET_PORT_ASSIGNMENT_IN_LEN 4
+/* Identifies the port assignment for this function. */
+#define MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_OFST 0
+#define MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_LEN 4
+
+/* MC_CMD_SET_PORT_ASSIGNMENT_OUT msgresponse */
+#define MC_CMD_SET_PORT_ASSIGNMENT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_ALLOC_VIS
+ * Allocate VIs for current PCI function.
+ */
+#define MC_CMD_ALLOC_VIS 0x8b
+#undef MC_CMD_0x8b_PRIVILEGE_CTG
+
+#define MC_CMD_0x8b_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_ALLOC_VIS_IN msgrequest */
+#define MC_CMD_ALLOC_VIS_IN_LEN 8
+/* The minimum number of VIs that is acceptable */
+#define MC_CMD_ALLOC_VIS_IN_MIN_VI_COUNT_OFST 0
+#define MC_CMD_ALLOC_VIS_IN_MIN_VI_COUNT_LEN 4
+/* The maximum number of VIs that would be useful */
+#define MC_CMD_ALLOC_VIS_IN_MAX_VI_COUNT_OFST 4
+#define MC_CMD_ALLOC_VIS_IN_MAX_VI_COUNT_LEN 4
+
+/* MC_CMD_ALLOC_VIS_OUT msgresponse: Huntington-compatible VI_ALLOC request.
+ * Use extended version in new code.
+ */
+#define MC_CMD_ALLOC_VIS_OUT_LEN 8
+/* The number of VIs allocated on this function */
+#define MC_CMD_ALLOC_VIS_OUT_VI_COUNT_OFST 0
+#define MC_CMD_ALLOC_VIS_OUT_VI_COUNT_LEN 4
+/* The base absolute VI number allocated to this function. Required to
+ * correctly interpret wakeup events.
+ */
+#define MC_CMD_ALLOC_VIS_OUT_VI_BASE_OFST 4
+#define MC_CMD_ALLOC_VIS_OUT_VI_BASE_LEN 4
+
+/* MC_CMD_ALLOC_VIS_EXT_OUT msgresponse */
+#define MC_CMD_ALLOC_VIS_EXT_OUT_LEN 12
+/* The number of VIs allocated on this function */
+#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_COUNT_OFST 0
+#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_COUNT_LEN 4
+/* The base absolute VI number allocated to this function. Required to
+ * correctly interpret wakeup events.
+ */
+#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_BASE_OFST 4
+#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_BASE_LEN 4
+/* Function's port vi_shift value (always 0 on Huntington) */
+#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_SHIFT_OFST 8
+#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_SHIFT_LEN 4
+
+
+/***********************************/
+/* MC_CMD_FREE_VIS
+ * Free VIs for current PCI function. Any linked PIO buffers will be unlinked,
+ * but not freed.
+ */
+#define MC_CMD_FREE_VIS 0x8c
+#undef MC_CMD_0x8c_PRIVILEGE_CTG
+
+#define MC_CMD_0x8c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_FREE_VIS_IN msgrequest */
+#define MC_CMD_FREE_VIS_IN_LEN 0
+
+/* MC_CMD_FREE_VIS_OUT msgresponse */
+#define MC_CMD_FREE_VIS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_SRIOV_CFG
+ * Get SRIOV config for this PF.
+ */
+#define MC_CMD_GET_SRIOV_CFG 0xba
+#undef MC_CMD_0xba_PRIVILEGE_CTG
+
+#define MC_CMD_0xba_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_SRIOV_CFG_IN msgrequest */
+#define MC_CMD_GET_SRIOV_CFG_IN_LEN 0
+
+/* MC_CMD_GET_SRIOV_CFG_OUT msgresponse */
+#define MC_CMD_GET_SRIOV_CFG_OUT_LEN 20
+/* Number of VFs currently enabled. */
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_CURRENT_OFST 0
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_CURRENT_LEN 4
+/* Max number of VFs before sriov stride and offset may need to be changed. */
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_MAX_OFST 4
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_MAX_LEN 4
+#define MC_CMD_GET_SRIOV_CFG_OUT_FLAGS_OFST 8
+#define MC_CMD_GET_SRIOV_CFG_OUT_FLAGS_LEN 4
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_OFST 8
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_LBN 0
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_WIDTH 1
+/* RID offset of first VF from PF. */
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_OFFSET_OFST 12
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_OFFSET_LEN 4
+/* RID offset of each subsequent VF from the previous. */
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_STRIDE_OFST 16
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_STRIDE_LEN 4
+
+
+/***********************************/
+/* MC_CMD_SET_SRIOV_CFG
+ * Set SRIOV config for this PF.
+ */
+#define MC_CMD_SET_SRIOV_CFG 0xbb
+#undef MC_CMD_0xbb_PRIVILEGE_CTG
+
+#define MC_CMD_0xbb_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_SRIOV_CFG_IN msgrequest */
+#define MC_CMD_SET_SRIOV_CFG_IN_LEN 20
+/* Number of VFs currently enabled. */
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_OFST 0
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_LEN 4
+/* Max number of VFs before sriov stride and offset may need to be changed. */
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_OFST 4
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_LEN 4
+#define MC_CMD_SET_SRIOV_CFG_IN_FLAGS_OFST 8
+#define MC_CMD_SET_SRIOV_CFG_IN_FLAGS_LEN 4
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_OFST 8
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_LBN 0
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_WIDTH 1
+/* RID offset of first VF from PF, or 0 for no change, or
+ * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate an offset.
+ */
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_OFST 12
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_LEN 4
+/* RID offset of each subsequent VF from the previous, 0 for no change, or
+ * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate a stride.
+ */
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_OFST 16
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_LEN 4
+
+/* MC_CMD_SET_SRIOV_CFG_OUT msgresponse */
+#define MC_CMD_SET_SRIOV_CFG_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_VI_ALLOC_INFO
+ * Get information about number of VI's and base VI number allocated to this
+ * function. This message is not available to dynamic clients created by
+ * MC_CMD_CLIENT_ALLOC.
+ */
+#define MC_CMD_GET_VI_ALLOC_INFO 0x8d
+#undef MC_CMD_0x8d_PRIVILEGE_CTG
+
+#define MC_CMD_0x8d_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_VI_ALLOC_INFO_IN msgrequest */
+#define MC_CMD_GET_VI_ALLOC_INFO_IN_LEN 0
+
+/* MC_CMD_GET_VI_ALLOC_INFO_OUT msgresponse */
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_LEN 12
+/* The number of VIs allocated on this function */
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_OFST 0
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_LEN 4
+/* The base absolute VI number allocated to this function. Required to
+ * correctly interpret wakeup events.
+ */
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_OFST 4
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_LEN 4
+/* Function's port vi_shift value (always 0 on Huntington) */
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_OFST 8
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_LEN 4
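+
+/* Usage sketch (illustrative only). The response is three dwords, typically
+ * unpacked with the MCDI_DWORD() accessor from mcdi.h; VI_BASE is what lets
+ * the driver map absolute VI numbers in wakeup events back to its own
+ * queues:
+ *
+ *   unsigned int vi_count = MCDI_DWORD(outbuf, GET_VI_ALLOC_INFO_OUT_VI_COUNT);
+ *   unsigned int vi_base = MCDI_DWORD(outbuf, GET_VI_ALLOC_INFO_OUT_VI_BASE);
+ *   unsigned int vi_shift = MCDI_DWORD(outbuf, GET_VI_ALLOC_INFO_OUT_VI_SHIFT);
+ */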
+
+
+/***********************************/
+/* MC_CMD_DUMP_VI_STATE
+ * For CmdClient use. Dump pertinent information on a specific absolute VI. The
+ * VI must be owned by the calling client or one of its ancestors; usership of
+ * the VI (as set by MC_CMD_SET_VI_USER) is not sufficient.
+ */
+#define MC_CMD_DUMP_VI_STATE 0x8e
+#undef MC_CMD_0x8e_PRIVILEGE_CTG
+
+#define MC_CMD_0x8e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_DUMP_VI_STATE_IN msgrequest */
+#define MC_CMD_DUMP_VI_STATE_IN_LEN 4
+/* The VI number to query. */
+#define MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_OFST 0
+#define MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_LEN 4
+
+/* MC_CMD_DUMP_VI_STATE_OUT msgresponse */
+#define MC_CMD_DUMP_VI_STATE_OUT_LEN 100
+/* The PF part of the function owning this VI. */
+#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_PF_OFST 0
+#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_PF_LEN 2
+/* The VF part of the function owning this VI. */
+#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_VF_OFST 2
+#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_VF_LEN 2
+/* Base of VIs allocated to this function. */
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_BASE_OFST 4
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_BASE_LEN 2
+/* Count of VIs allocated to the owner function. */
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_COUNT_OFST 6
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_COUNT_LEN 2
+/* Base interrupt vector allocated to this function. */
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_BASE_OFST 8
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_BASE_LEN 2
+/* Number of interrupt vectors allocated to this function. */
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_COUNT_OFST 10
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_COUNT_LEN 2
+/* Raw evq ptr table data. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_OFST 12
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LO_OFST 12
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LO_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LO_LBN 96
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LO_WIDTH 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_HI_OFST 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_HI_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_HI_LBN 128
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_HI_WIDTH 32
+/* Raw evq timer table data. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_OFST 20
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LO_OFST 20
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LO_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LO_LBN 160
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LO_WIDTH 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_OFST 24
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_LBN 192
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_WIDTH 32
+/* Combined metadata field. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_OFST 28
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_OFST 28
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_LBN 0
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_WIDTH 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_OFST 28
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_LBN 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_OFST 28
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_LBN 24
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_WIDTH 8
+/* TXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_OFST 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LO_OFST 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LO_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LO_LBN 256
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LO_WIDTH 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_HI_OFST 36
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_HI_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_HI_LBN 288
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_HI_WIDTH 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_OFST 40
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LO_OFST 40
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LO_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LO_LBN 320
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LO_WIDTH 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_HI_OFST 44
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_HI_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_HI_LBN 352
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_HI_WIDTH 32
+/* TXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_OFST 48
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LO_OFST 48
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LO_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LO_LBN 384
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LO_WIDTH 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_HI_OFST 52
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_HI_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_HI_LBN 416
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_HI_WIDTH 32
+/* Combined metadata field. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_OFST 56
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LO_OFST 56
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LO_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LO_LBN 448
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LO_WIDTH 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_HI_OFST 60
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_HI_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_HI_LBN 480
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_HI_WIDTH 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_OFST 56
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_LBN 0
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_WIDTH 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_OFST 56
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_LBN 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_OFST 56
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_LBN 24
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_OFST 56
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_LBN 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_OFST 56
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_LBN 40
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_WIDTH 24
+/* RXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_OFST 64
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LO_OFST 64
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LO_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LO_LBN 512
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LO_WIDTH 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_HI_OFST 68
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_HI_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_HI_LBN 544
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_HI_WIDTH 32
+/* RXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_OFST 72
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LO_OFST 72
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LO_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LO_LBN 576
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LO_WIDTH 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_HI_OFST 76
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_HI_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_HI_LBN 608
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_HI_WIDTH 32
+/* Reserved, currently 0. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_OFST 80
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LO_OFST 80
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LO_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LO_LBN 640
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LO_WIDTH 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_HI_OFST 84
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_HI_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_HI_LBN 672
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_HI_WIDTH 32
+/* Combined metadata field. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_OFST 88
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LO_OFST 88
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LO_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LO_LBN 704
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LO_WIDTH 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_HI_OFST 92
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_HI_LEN 4
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_HI_LBN 736
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_HI_WIDTH 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_OFST 88
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_LBN 0
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_WIDTH 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_OFST 88
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_LBN 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_OFST 88
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_LBN 24
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_OFST 88
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_LBN 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_WIDTH 8
+/* Current user, as assigned by MC_CMD_SET_VI_USER. */
+#define MC_CMD_DUMP_VI_STATE_OUT_USER_CLIENT_ID_OFST 96
+#define MC_CMD_DUMP_VI_STATE_OUT_USER_CLIENT_ID_LEN 4
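+/* Usage sketch (illustrative only): dumping the state of one absolute VI
+ * with the mcdi.h helpers; assumes efx, vi and rc are in scope:
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_DUMP_VI_STATE_IN_LEN);
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_DUMP_VI_STATE_OUT_LEN);
+ *	size_t outlen;
+ *
+ *	MCDI_SET_DWORD(inbuf, DUMP_VI_STATE_IN_VI_NUMBER, vi);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_DUMP_VI_STATE, inbuf, sizeof(inbuf),
+ *			  outbuf, sizeof(outbuf), &outlen);
+ *	if (!rc && outlen >= MC_CMD_DUMP_VI_STATE_OUT_LEN) {
+ *		unsigned int owner_pf =
+ *			MCDI_WORD(outbuf, DUMP_VI_STATE_OUT_OWNER_PF);
+ *		u64 evq_ptr_raw =
+ *			MCDI_QWORD(outbuf, DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW);
+ *	}
+ */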
+
+
+/***********************************/
+/* MC_CMD_ALLOC_PIOBUF
+ * Allocate a push I/O buffer for later use with a tx queue.
+ */
+#define MC_CMD_ALLOC_PIOBUF 0x8f
+#undef MC_CMD_0x8f_PRIVILEGE_CTG
+
+#define MC_CMD_0x8f_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_ALLOC_PIOBUF_IN msgrequest */
+#define MC_CMD_ALLOC_PIOBUF_IN_LEN 0
+
+/* MC_CMD_ALLOC_PIOBUF_OUT msgresponse */
+#define MC_CMD_ALLOC_PIOBUF_OUT_LEN 4
+/* Handle for allocated push I/O buffer. */
+#define MC_CMD_ALLOC_PIOBUF_OUT_PIOBUF_HANDLE_OFST 0
+#define MC_CMD_ALLOC_PIOBUF_OUT_PIOBUF_HANDLE_LEN 4
+
+
+/***********************************/
+/* MC_CMD_FREE_PIOBUF
+ * Free a push I/O buffer.
+ */
+#define MC_CMD_FREE_PIOBUF 0x90
+#undef MC_CMD_0x90_PRIVILEGE_CTG
+
+#define MC_CMD_0x90_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_FREE_PIOBUF_IN msgrequest */
+#define MC_CMD_FREE_PIOBUF_IN_LEN 4
+/* Handle for allocated push I/O buffer. */
+#define MC_CMD_FREE_PIOBUF_IN_PIOBUF_HANDLE_OFST 0
+#define MC_CMD_FREE_PIOBUF_IN_PIOBUF_HANDLE_LEN 4
+
+/* MC_CMD_FREE_PIOBUF_OUT msgresponse */
+#define MC_CMD_FREE_PIOBUF_OUT_LEN 0
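+/* Usage sketch (illustrative only): allocating a push I/O buffer and later
+ * freeing it again by handle; assumes efx, rc and outlen in scope:
+ *
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_PIOBUF_OUT_LEN);
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_FREE_PIOBUF_IN_LEN);
+ *	unsigned int handle;
+ *
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_PIOBUF, NULL, 0,
+ *			  outbuf, sizeof(outbuf), &outlen);
+ *	if (!rc)
+ *		handle = MCDI_DWORD(outbuf, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE);
+ *	...
+ *	MCDI_SET_DWORD(inbuf, FREE_PIOBUF_IN_PIOBUF_HANDLE, handle);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_FREE_PIOBUF, inbuf, sizeof(inbuf),
+ *			  NULL, 0, NULL);
+ */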
+
+
+/***********************************/
+/* MC_CMD_GET_VI_TLP_PROCESSING
+ * Get TLP steering and ordering information for a VI. The caller must have the
+ * GRP_FUNC_DMA privilege and must be the currently-assigned user of this VI or
+ * an ancestor of the current user (see MC_CMD_SET_VI_USER).
+ */
+#define MC_CMD_GET_VI_TLP_PROCESSING 0xb0
+#undef MC_CMD_0xb0_PRIVILEGE_CTG
+
+#define MC_CMD_0xb0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_VI_TLP_PROCESSING_IN msgrequest */
+#define MC_CMD_GET_VI_TLP_PROCESSING_IN_LEN 4
+/* VI number to get information for. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_IN_INSTANCE_LEN 4
+
+/* MC_CMD_GET_VI_TLP_PROCESSING_OUT msgresponse */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_LEN 4
+/* Transaction processing steering hint 1 for use with the Rx Queue. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG1_RX_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG1_RX_LEN 1
+/* Transaction processing steering hint 2 for use with the Ev Queue. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG2_EV_OFST 1
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG2_EV_LEN 1
+/* Use Relaxed ordering model for TLPs on this VI. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_RELAXED_ORDERING_LBN 16
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_RELAXED_ORDERING_WIDTH 1
+/* Use ID based ordering for TLPs on this VI. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_ID_BASED_ORDERING_LBN 17
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_ID_BASED_ORDERING_WIDTH 1
+/* Set no snoop bit for TLPs on this VI. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_NO_SNOOP_LBN 18
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_NO_SNOOP_WIDTH 1
+/* Enable TPH for TLPs on this VI. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_LBN 19
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_WIDTH 1
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_DATA_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_DATA_LEN 4
+
+
+/***********************************/
+/* MC_CMD_SET_VI_TLP_PROCESSING
+ * Set TLP steering and ordering information for a VI. The caller must have the
+ * GRP_FUNC_DMA privilege and must be the currently-assigned user of this VI or
+ * an ancestor of the current user (see MC_CMD_SET_VI_USER).
+ */
+#define MC_CMD_SET_VI_TLP_PROCESSING 0xb1
+#undef MC_CMD_0xb1_PRIVILEGE_CTG
+
+#define MC_CMD_0xb1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SET_VI_TLP_PROCESSING_IN msgrequest */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_LEN 8
+/* VI number to set information for. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_INSTANCE_LEN 4
+/* Transaction processing steering hint 1 for use with the Rx Queue. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_LEN 1
+/* Transaction processing steering hint 2 for use with the Ev Queue. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG2_EV_OFST 5
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG2_EV_LEN 1
+/* Use Relaxed ordering model for TLPs on this VI. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_RELAXED_ORDERING_LBN 48
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_RELAXED_ORDERING_WIDTH 1
+/* Use ID based ordering for TLPs on this VI. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_ID_BASED_ORDERING_LBN 49
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_ID_BASED_ORDERING_WIDTH 1
+/* Set the no snoop bit for TLPs on this VI. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_NO_SNOOP_LBN 50
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_NO_SNOOP_WIDTH 1
+/* Enable TPH for TLPs on this VI. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_LBN 51
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_WIDTH 1
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_DATA_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_DATA_LEN 4
+
+/* MC_CMD_SET_VI_TLP_PROCESSING_OUT msgresponse */
+#define MC_CMD_SET_VI_TLP_PROCESSING_OUT_LEN 0
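+/* Usage sketch (illustrative only): a read-modify-write of a VI's TLP
+ * settings via the DATA alias; assumes efx, vi, rc and outlen in scope.
+ * The GET output and SET input carry the same flags at the same bit
+ * positions within their respective DATA dwords:
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_VI_TLP_PROCESSING_IN_LEN);
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_VI_TLP_PROCESSING_OUT_LEN);
+ *	u32 data;
+ *
+ *	MCDI_SET_DWORD(inbuf, GET_VI_TLP_PROCESSING_IN_INSTANCE, vi);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_GET_VI_TLP_PROCESSING, inbuf,
+ *			  MC_CMD_GET_VI_TLP_PROCESSING_IN_LEN,
+ *			  outbuf, sizeof(outbuf), &outlen);
+ *	data = MCDI_DWORD(outbuf, GET_VI_TLP_PROCESSING_OUT_DATA);
+ *	data |= 1U << MC_CMD_GET_VI_TLP_PROCESSING_OUT_RELAXED_ORDERING_LBN;
+ *	MCDI_SET_DWORD(inbuf, SET_VI_TLP_PROCESSING_IN_INSTANCE, vi);
+ *	MCDI_SET_DWORD(inbuf, SET_VI_TLP_PROCESSING_IN_DATA, data);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_SET_VI_TLP_PROCESSING, inbuf,
+ *			  sizeof(inbuf), NULL, 0, NULL);
+ */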
+
+
+/***********************************/
+/* MC_CMD_GET_TLP_PROCESSING_GLOBALS
+ * Get global PCIe steering and transaction processing configuration.
+ */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS 0xbc
+#undef MC_CMD_0xbc_PRIVILEGE_CTG
+
+#define MC_CMD_0xbc_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN msgrequest */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_LEN 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_LEN 4
+/* enum: MISC. */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_MISC 0x0
+/* enum: IDO. */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_IDO 0x1
+/* enum: RO. */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_RO 0x2
+/* enum: TPH Type. */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_TPH_TYPE 0x3
+
+/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT msgresponse */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_LEN 8
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_GLOBAL_CATEGORY_OFST 0
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_GLOBAL_CATEGORY_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */
+/* Amalgamated TLP info word. */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_WORD_OFST 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_WORD_LEN 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_OFST 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_LBN 0
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_OFST 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_LBN 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_WIDTH 31
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_DL_EN_OFST 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_DL_EN_LBN 0
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_DL_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_TX_EN_OFST 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_TX_EN_LBN 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_TX_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_EV_EN_OFST 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_EV_EN_LBN 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_EV_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_RX_EN_OFST 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_RX_EN_LBN 3
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_RX_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_SPARE_OFST 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_SPARE_LBN 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_SPARE_WIDTH 28
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_RXDMA_EN_OFST 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_RXDMA_EN_LBN 0
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_RXDMA_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_TXDMA_EN_OFST 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_TXDMA_EN_LBN 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_TXDMA_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_DL_EN_OFST 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_DL_EN_LBN 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_DL_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_SPARE_OFST 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_SPARE_LBN 3
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_SPARE_WIDTH 29
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_MSIX_OFST 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_MSIX_LBN 0
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_MSIX_WIDTH 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_DL_OFST 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_DL_LBN 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_DL_WIDTH 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_TX_OFST 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_TX_LBN 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_TX_WIDTH 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_EV_OFST 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_EV_LBN 6
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_EV_WIDTH 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_RX_OFST 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_RX_LBN 8
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_RX_WIDTH 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TLP_TYPE_SPARE_OFST 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TLP_TYPE_SPARE_LBN 9
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TLP_TYPE_SPARE_WIDTH 23
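+/* Usage sketch (illustrative only): reading the IDO category of the global
+ * TLP configuration; the meaning of TLP_INFO_WORD depends on the requested
+ * category. Assumes efx, rc and outlen in scope:
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_LEN);
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_LEN);
+ *	u32 info;
+ *
+ *	MCDI_SET_DWORD(inbuf, GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY,
+ *		       MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_IDO);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_GET_TLP_PROCESSING_GLOBALS, inbuf,
+ *			  sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
+ *	info = MCDI_DWORD(outbuf, GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_WORD);
+ */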
+
+
+/***********************************/
+/* MC_CMD_SET_TLP_PROCESSING_GLOBALS
+ * Set global PCIe steering and transaction processing configuration.
+ */
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS 0xbd
+#undef MC_CMD_0xbd_PRIVILEGE_CTG
+
+#define MC_CMD_0xbd_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN msgrequest */
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_LEN 8
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_GET_TLP_PROCESSING_GLOBALS/MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */
+/* Amalgamated TLP info word. */
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_WORD_OFST 4
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_WORD_LEN 4
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_OFST 4
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_LBN 0
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_OFST 4
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_LBN 0
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_TX_EN_OFST 4
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_TX_EN_LBN 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_TX_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_EV_EN_OFST 4
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_EV_EN_LBN 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_EV_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_RX_EN_OFST 4
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_RX_EN_LBN 3
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_RX_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_RXDMA_EN_OFST 4
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_RXDMA_EN_LBN 0
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_RXDMA_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_TXDMA_EN_OFST 4
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_TXDMA_EN_LBN 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_TXDMA_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_DL_EN_OFST 4
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_DL_EN_LBN 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_DL_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_MSIX_OFST 4
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_MSIX_LBN 0
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_MSIX_WIDTH 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_DL_OFST 4
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_DL_LBN 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_DL_WIDTH 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_TX_OFST 4
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_TX_LBN 4
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_TX_WIDTH 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_EV_OFST 4
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_EV_LBN 6
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_EV_WIDTH 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_RX_OFST 4
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_RX_LBN 8
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_RX_WIDTH 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_SPARE_OFST 4
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_SPARE_LBN 10
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_SPARE_WIDTH 22
+
+/* MC_CMD_SET_TLP_PROCESSING_GLOBALS_OUT msgresponse */
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SATELLITE_DOWNLOAD
+ * Download a new set of images to the satellite CPUs from the host.
+ */
+#define MC_CMD_SATELLITE_DOWNLOAD 0x91
+#undef MC_CMD_0x91_PRIVILEGE_CTG
+
+#define MC_CMD_0x91_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_SATELLITE_DOWNLOAD_IN msgrequest: The reset requirements for the CPUs
+ * are subtle, and so downloads must proceed in a number of phases.
+ *
+ * 1) PHASE_RESET with a target of TARGET_ALL and chunk ID/length of 0.
+ *
+ * 2) PHASE_IMEMS for each of the IMEM targets (target IDs 0-11). Each download
+ * may consist of multiple chunks. The final chunk (with CHUNK_ID_LAST) should
+ * be a checksum (a simple 32-bit sum) of the transferred data. An individual
+ * download may be aborted using CHUNK_ID_ABORT.
+ *
+ * 3) PHASE_VECTORS for each of the vector table targets (target IDs 12-15),
+ * similar to PHASE_IMEMS.
+ *
+ * 4) PHASE_READY with a target of TARGET_ALL and chunk ID/length of 0.
+ *
+ * After any error (a requested abort is not considered to be an error) the
+ * sequence must be restarted from PHASE_RESET.
+ */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_LENMIN 20
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_LENMAX 252
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_LENMAX_MCDI2 1020
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_LEN(num) (16+4*(num))
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_NUM(len) (((len)-16)/4)
+/* Download phase. (Note: the IDLE phase is used internally and is never valid
+ * in a command from the host.)
+ */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_OFST 0
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_LEN 4
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IDLE 0x0 /* enum */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_RESET 0x1 /* enum */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IMEMS 0x2 /* enum */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_VECTORS 0x3 /* enum */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_READY 0x4 /* enum */
+/* Target for download. (These match the blob numbers defined in
+ * mc_flash_layout.h.)
+ */
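+/* Usage sketch (illustrative only): enabling ID-based ordering for TX and
+ * RX DMA via the IDO category; assumes efx and rc in scope:
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_LEN);
+ *
+ *	MCDI_SET_DWORD(inbuf, SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY,
+ *		       MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_IDO);
+ *	MCDI_POPULATE_DWORD_2(inbuf,
+ *			      SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_WORD,
+ *			      SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_TX_EN, 1,
+ *			      SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_RX_EN, 1);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_SET_TLP_PROCESSING_GLOBALS, inbuf,
+ *			  sizeof(inbuf), NULL, 0, NULL);
+ */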
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_OFST 4
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_LEN 4
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_TEXT 0x0
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_TEXT 0x1
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDP_TEXT 0x2
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDP_TEXT 0x3
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT 0x4
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT_CFG 0x5
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT 0x6
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT_CFG 0x7
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_PGM 0x8
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_SL_PGM 0x9
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_PGM 0xa
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_SL_PGM 0xb
+/* enum: Valid in phase 3 (PHASE_VECTORS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL0 0xc
+/* enum: Valid in phase 3 (PHASE_VECTORS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL0 0xd
+/* enum: Valid in phase 3 (PHASE_VECTORS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL1 0xe
+/* enum: Valid in phase 3 (PHASE_VECTORS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL1 0xf
+/* enum: Valid in phases 1 (PHASE_RESET) and 4 (PHASE_READY) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_ALL 0xffffffff
+/* Chunk ID, or CHUNK_ID_LAST or CHUNK_ID_ABORT */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_OFST 8
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LEN 4
+/* enum: Last chunk, containing checksum rather than data */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LAST 0xffffffff
+/* enum: Abort download of this item */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_ABORT 0xfffffffe
+/* Length of this chunk in bytes */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_LEN_OFST 12
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_LEN_LEN 4
+/* Data for this chunk */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_OFST 16
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_LEN 4
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MINNUM 1
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MAXNUM 59
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MAXNUM_MCDI2 251
+
+/* MC_CMD_SATELLITE_DOWNLOAD_OUT msgresponse */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_LEN 8
+/* Same as MC_CMD_ERR field, but included as 0 in success cases */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_RESULT_OFST 0
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_RESULT_LEN 4
+/* Extra status information */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_INFO_OFST 4
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_INFO_LEN 4
+/* enum: Code download OK, completed. */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_COMPLETE 0x0
+/* enum: Code download aborted as requested. */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_ABORTED 0x1
+/* enum: Code download OK so far, send next chunk. */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_NEXT_CHUNK 0x2
+/* enum: Download phases out of sequence */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_PHASE 0x100
+/* enum: Bad target for this phase */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_TARGET 0x101
+/* enum: Chunk ID out of sequence */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_ID 0x200
+/* enum: Chunk length zero or too large */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_LEN 0x201
+/* enum: Checksum was incorrect */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHECKSUM 0x300
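+/* Usage sketch (illustrative only): sending one data chunk during
+ * PHASE_IMEMS; the surrounding sequencing follows the numbered steps in
+ * the MC_CMD_SATELLITE_DOWNLOAD_IN comment above. Assumes efx, rc, outlen,
+ * target, chunk_id and a dword-aligned (data, chunk_len) in scope:
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_SATELLITE_DOWNLOAD_IN_LENMAX);
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_SATELLITE_DOWNLOAD_OUT_LEN);
+ *
+ *	MCDI_SET_DWORD(inbuf, SATELLITE_DOWNLOAD_IN_PHASE,
+ *		       MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IMEMS);
+ *	MCDI_SET_DWORD(inbuf, SATELLITE_DOWNLOAD_IN_TARGET, target);
+ *	MCDI_SET_DWORD(inbuf, SATELLITE_DOWNLOAD_IN_CHUNK_ID, chunk_id);
+ *	MCDI_SET_DWORD(inbuf, SATELLITE_DOWNLOAD_IN_CHUNK_LEN, chunk_len);
+ *	memcpy(MCDI_PTR(inbuf, SATELLITE_DOWNLOAD_IN_CHUNK_DATA),
+ *	       data, chunk_len);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_SATELLITE_DOWNLOAD, inbuf,
+ *			  MC_CMD_SATELLITE_DOWNLOAD_IN_LEN(chunk_len / 4),
+ *			  outbuf, sizeof(outbuf), &outlen);
+ *	if (!rc && MCDI_DWORD(outbuf, SATELLITE_DOWNLOAD_OUT_INFO) ==
+ *		   MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_NEXT_CHUNK)
+ *		... send the next chunk, ending with CHUNK_ID_LAST ...
+ */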
+
+
+/***********************************/
+/* MC_CMD_GET_CAPABILITIES
+ * Get device capabilities. This is supplementary to the MC_CMD_GET_BOARD_CFG
+ * command, and intended to reference inherent device capabilities as opposed
+ * to current NVRAM config.
+ */
+#define MC_CMD_GET_CAPABILITIES 0xbe
+#undef MC_CMD_0xbe_PRIVILEGE_CTG
+
+#define MC_CMD_0xbe_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_CAPABILITIES_IN msgrequest */
+#define MC_CMD_GET_CAPABILITIES_IN_LEN 0
+
+/* MC_CMD_GET_CAPABILITIES_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_OUT_LEN 20
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_LEN 4
+#define MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_STRIPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_DRV_ATTACH_PREBOOT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_FORCE_EVENT_MERGING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_SET_MAC_ENHANCED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_QBB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_VLAN_STRIPPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_0_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_TIMESTAMP_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_DISABLE_SCATTER_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_EVB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: DPDK RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_DPDK 0x6
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_SLOW 0x10c
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: DPDK TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_DPDK 0x6
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_OFST 8
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_OFST 8
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_OFST 10
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_OFST 10
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_OUT_LICENSE_CAPABILITIES_LEN 4
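+/* Usage sketch (illustrative only): later firmware appends words to this
+ * response, so a caller should size its buffer for the newest variant it
+ * understands and gate each extension on the returned length (the V2
+ * layout is defined below). Assumes efx, rc and outlen in scope:
+ *
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_V2_OUT_LEN);
+ *	u32 flags1 = 0, flags2 = 0;
+ *
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0,
+ *			  outbuf, sizeof(outbuf), &outlen);
+ *	if (!rc && outlen >= MC_CMD_GET_CAPABILITIES_OUT_LEN)
+ *		flags1 = MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1);
+ *	if (!rc && outlen >= MC_CMD_GET_CAPABILITIES_V2_OUT_LEN)
+ *		flags2 = MCDI_DWORD(outbuf, GET_CAPABILITIES_V2_OUT_FLAGS2);
+ *	if (flags1 & (1U << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN))
+ *		... TSO is available ...
+ */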
+
+/* MC_CMD_GET_CAPABILITIES_V2_IN msgrequest */
+#define MC_CMD_GET_CAPABILITIES_V2_IN_LEN 0
+
+/* MC_CMD_GET_CAPABILITIES_V2_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_LEN 72
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS1_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_STRIPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_QUERY_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_DRV_ATTACH_PREBOOT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_FORCE_EVENT_MERGING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SET_MAC_ENHANCED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_SECURITY_FILTERING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_ADDITIONAL_RSS_MODES_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_QBB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_RSS_LIMITED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_INCLUDE_FCS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VLAN_INSERTION_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_VLAN_STRIPPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_0_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_14_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_TIMESTAMP_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_BATCHING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCAST_FILTER_CHAINING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PM_AND_RXDP_COUNTERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DISABLE_SCATTER_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VXLAN_NVGRE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: DPDK RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_DPDK 0x6
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_SLOW 0x10c
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: DPDK TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_DPDK 0x6
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_REV_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_TYPE_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_REV_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_TYPE_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVQ_TIMER_CTRL_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVQ_TIMER_CTRL_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVENT_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVENT_CUT_THROUGH_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_CUT_THROUGH_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VFIFO_ULL_MODE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_TIMESTAMPING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TIMESTAMP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TIMESTAMP_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_SNIFF_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_SNIFF_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_BACKGROUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_BACKGROUND_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_BACKGROUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_DB_RETURN_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_DB_RETURN_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_DB_RETURN_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_CTPIO_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_CTPIO_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_CTPIO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_SUPPORT_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_SUPPORT_WIDTH 1
MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_BOUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_BOUND_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_BOUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_FLAG_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_FLAG_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_FLAG_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_MARK_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_MARK_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_MARK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_L3XUDP_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_L3XUDP_SUPPORT_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_L3XUDP_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VI_SPREADING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VI_SPREADING_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VI_SPREADING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_HLB_IDLE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_HLB_IDLE_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_HLB_IDLE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_RXQ_NO_CONT_EV_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_RXQ_NO_CONT_EV_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_BUNDLE_UPDATE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_BUNDLE_UPDATE_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_BUNDLE_UPDATE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V3_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V3_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V3_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_DYNAMIC_SENSORS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_DYNAMIC_SENSORS_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_DYNAMIC_SENSORS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1 +/* Number of FATSOv2 contexts per datapath supported by this NIC (when + * TX_TSO_V2 == 1). Not present on older firmware (check the length). 
+ */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2 +/* One byte per PF containing the number of the external port assigned to this + * PF, indexed by PF number. Special values indicate that a PF is either not + * present or not assigned. + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff +/* enum: PF does not exist. */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe +/* enum: PF does exist but is not assigned to any external port. */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_ASSIGNED 0xfd +/* enum: This value indicates that PF is assigned, but it cannot be expressed + * in this field. It is intended for a possible future situation where a more + * complex scheme of PFs to ports mapping is being used. The future driver + * should look for a new field supporting the new scheme. The current/old + * driver should treat this value as PF_NOT_ASSIGNED. + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc +/* One byte per PF containing the number of its VFs, indexed by PF number. A + * special value indicates that a PF is not present. + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_OFST 42 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +/* MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff */ +/* enum: PF does not exist. */ +/* MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe */ +/* Number of VIs available for each external port */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_OFST 58 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_NUM 4 +/* Size of RX descriptor cache expressed as binary logarithm The actual size + * equals (2 ^ RX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DESC_CACHE_SIZE_OFST 66 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DESC_CACHE_SIZE_LEN 1 +/* Size of TX descriptor cache expressed as binary logarithm The actual size + * equals (2 ^ TX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DESC_CACHE_SIZE_OFST 67 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DESC_CACHE_SIZE_LEN 1 +/* Total number of available PIO buffers */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_PIO_BUFFS_OFST 68 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_PIO_BUFFS_LEN 2 +/* Size of a single PIO buffer */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF_OFST 70 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF_LEN 2 + +/* MC_CMD_GET_CAPABILITIES_V3_OUT msgresponse */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_LEN 76 +/* First word of flags. 
*/ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS1_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_STRIPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_STRIPING_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_STRIPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_QUERY_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_QUERY_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_QUERY_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_DRV_ATTACH_PREBOOT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_DRV_ATTACH_PREBOOT_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_DRV_ATTACH_PREBOOT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_FORCE_EVENT_MERGING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_FORCE_EVENT_MERGING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_SET_MAC_ENHANCED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_SET_MAC_ENHANCED_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_SET_MAC_ENHANCED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_SECURITY_FILTERING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_ADDITIONAL_RSS_MODES_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_ADDITIONAL_RSS_MODES_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_ADDITIONAL_RSS_MODES_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_QBB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_QBB_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_QBB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_RSS_LIMITED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_RSS_LIMITED_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_RSS_LIMITED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_INCLUDE_FCS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_INCLUDE_FCS_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_INCLUDE_FCS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VLAN_INSERTION_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VLAN_INSERTION_LBN 19 
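/*
 * Each capability flag above is described by an OFST/LBN/WIDTH triple:
 * OFST is the byte offset of the little-endian FLAGS1 dword within the
 * response, LBN the least-significant bit number of the field inside
 * that dword, and WIDTH its width in bits. A minimal sketch of reading
 * such a field from a raw response buffer (the helper name is
 * illustrative only, not the driver's; the driver itself goes through
 * accessor macros such as EFX_DWORD_FIELD from bitfield.h, and this
 * sketch assumes a little-endian host and WIDTH < 32):
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

static inline unsigned int mcdi_field(const uint8_t *resp, size_t resp_len,
                                      unsigned int ofst, unsigned int lbn,
                                      unsigned int width)
{
        uint32_t dword;

        /* Fields beyond the response length are absent on older firmware */
        if (ofst + 4 > resp_len)
                return 0;
        memcpy(&dword, resp + ofst, sizeof(dword));
        return (dword >> lbn) & ((1u << width) - 1u);
}

/*
 * For example, mcdi_field(resp, resp_len,
 *                         MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_QUERY_OFST,
 *                         MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_QUERY_LBN,
 *                         MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_QUERY_WIDTH)
 * returns 1 when the vadaptor-query capability bit is set.
 */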
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VLAN_INSERTION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_VLAN_STRIPPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_VLAN_STRIPPING_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_VLAN_STRIPPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_0_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_0_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_0_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_14_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_14_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_14_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_TIMESTAMP_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_TIMESTAMP_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_BATCHING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_BATCHING_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_BATCHING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCAST_FILTER_CHAINING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCAST_FILTER_CHAINING_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCAST_FILTER_CHAINING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_PM_AND_RXDP_COUNTERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_PM_AND_RXDP_COUNTERS_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DISABLE_SCATTER_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DISABLE_SCATTER_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VXLAN_NVGRE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VXLAN_NVGRE_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VXLAN_NVGRE_WIDTH 1 +/* RxDPCPU firmware id. 
*/ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DPCPU_FW_ID_OFST 4 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DPCPU_FW_ID_LEN 2 +/* enum: Standard RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP 0x0 +/* enum: Low latency RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_LOW_LATENCY 0x1 +/* enum: Packed stream RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_PACKED_STREAM 0x2 +/* enum: Rules engine RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_RULES_ENGINE 0x5 +/* enum: DPDK RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_DPDK 0x6 +/* enum: BIST RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_BIST 0x10a +/* enum: RXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 +/* enum: RXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 +/* enum: RXDP Test firmware image 3 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 +/* enum: RXDP Test firmware image 4 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 +/* enum: RXDP Test firmware image 5 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_BACKPRESSURE 0x105 +/* enum: RXDP Test firmware image 6 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 +/* enum: RXDP Test firmware image 7 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 +/* enum: RXDP Test firmware image 8 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 +/* enum: RXDP Test firmware image 9 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +/* enum: RXDP Test firmware image 10 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_SLOW 0x10c +/* TxDPCPU firmware id. 
*/ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DPCPU_FW_ID_OFST 6 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DPCPU_FW_ID_LEN 2 +/* enum: Standard TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP 0x0 +/* enum: Low latency TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_LOW_LATENCY 0x1 +/* enum: High packet rate TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_HIGH_PACKET_RATE 0x3 +/* enum: Rules engine TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_RULES_ENGINE 0x5 +/* enum: DPDK TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_DPDK 0x6 +/* enum: BIST TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_BIST 0x12d +/* enum: TXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 +/* enum: TXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 +/* enum: TXDP CSR bus test firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_CSR 0x103 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_REV_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_TYPE_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial RX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: RX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: RX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant RX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +/* enum: Low latency RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 +/* enum: Packed stream RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 +/* enum: RX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development 
only) */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* enum: RX PD firmware parsing but not filtering network overlay tunnel + * encapsulations (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_REV_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_TYPE_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial TX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: TX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: TX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant TX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ +/* enum: TX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* Hardware capabilities of NIC */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_HW_CAPABILITIES_OFST 12 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_HW_CAPABILITIES_LEN 4 +/* Licensed capabilities */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_LICENSE_CAPABILITIES_OFST 16 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_LICENSE_CAPABILITIES_LEN 4 +/* Second word of flags. Not present on older firmware (check the length). 
*/ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS2_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_ENCAP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_ENCAP_LBN 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_ENCAP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVQ_TIMER_CTRL_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVQ_TIMER_CTRL_LBN 2 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVQ_TIMER_CTRL_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVENT_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVENT_CUT_THROUGH_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVENT_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_CUT_THROUGH_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VFIFO_ULL_MODE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VFIFO_ULL_MODE_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VFIFO_ULL_MODE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_V2_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_TIMESTAMPING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_TIMESTAMPING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_TIMESTAMPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TIMESTAMP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TIMESTAMP_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_SNIFF_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_SNIFF_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_BACKGROUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_BACKGROUND_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_BACKGROUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_DB_RETURN_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_DB_RETURN_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_DB_RETURN_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_CTPIO_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_CTPIO_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_CTPIO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_SUPPORT_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_SUPPORT_WIDTH 1 +#define 
MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_BOUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_BOUND_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_BOUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_FLAG_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_FLAG_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_FLAG_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_MARK_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_MARK_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_MARK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_L3XUDP_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_L3XUDP_SUPPORT_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_L3XUDP_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_SPREADING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_SPREADING_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_SPREADING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_HLB_IDLE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_HLB_IDLE_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_HLB_IDLE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_RXQ_NO_CONT_EV_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_RXQ_NO_CONT_EV_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_BUNDLE_UPDATE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_BUNDLE_UPDATE_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_BUNDLE_UPDATE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V3_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V3_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V3_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_DYNAMIC_SENSORS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_DYNAMIC_SENSORS_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_DYNAMIC_SENSORS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1 +/* Number of FATSOv2 contexts per datapath supported by this NIC (when + * TX_TSO_V2 == 1). Not present on older firmware (check the length). 
+ */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2 +/* One byte per PF containing the number of the external port assigned to this + * PF, indexed by PF number. Special values indicate that a PF is either not + * present or not assigned. + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_ACCESS_NOT_PERMITTED 0xff +/* enum: PF does not exist. */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_PRESENT 0xfe +/* enum: PF does exist but is not assigned to any external port. */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_ASSIGNED 0xfd +/* enum: This value indicates that PF is assigned, but it cannot be expressed + * in this field. It is intended for a possible future situation where a more + * complex scheme of PFs to ports mapping is being used. The future driver + * should look for a new field supporting the new scheme. The current/old + * driver should treat this value as PF_NOT_ASSIGNED. + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc +/* One byte per PF containing the number of its VFs, indexed by PF number. A + * special value indicates that a PF is not present. + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VFS_PER_PF_OFST 42 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VFS_PER_PF_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VFS_PER_PF_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +/* MC_CMD_GET_CAPABILITIES_V3_OUT_ACCESS_NOT_PERMITTED 0xff */ +/* enum: PF does not exist. */ +/* MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_PRESENT 0xfe */ +/* Number of VIs available for each external port */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_OFST 58 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_NUM 4 +/* Size of RX descriptor cache expressed as binary logarithm The actual size + * equals (2 ^ RX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DESC_CACHE_SIZE_OFST 66 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DESC_CACHE_SIZE_LEN 1 +/* Size of TX descriptor cache expressed as binary logarithm The actual size + * equals (2 ^ TX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DESC_CACHE_SIZE_OFST 67 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DESC_CACHE_SIZE_LEN 1 +/* Total number of available PIO buffers */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_PIO_BUFFS_OFST 68 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_PIO_BUFFS_LEN 2 +/* Size of a single PIO buffer */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_SIZE_PIO_BUFF_OFST 70 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_SIZE_PIO_BUFF_LEN 2 +/* On chips later than Medford the amount of address space assigned to each VI + * is configurable. This is a global setting that the driver must query to + * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available + * with 8k VI windows. + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_OFST 72 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_LEN 1 +/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k. + * CTPIO is not mapped. 
+ */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K 0x0 +/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K 0x1 +/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K 0x2 +/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1 +/* Number of buffers per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2 + +/* MC_CMD_GET_CAPABILITIES_V4_OUT msgresponse */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_LEN 78 +/* First word of flags. */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS1_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VPORT_RECONFIGURE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VPORT_RECONFIGURE_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VPORT_RECONFIGURE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_STRIPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_STRIPING_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_STRIPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_QUERY_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_QUERY_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_QUERY_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_DRV_ATTACH_PREBOOT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_DRV_ATTACH_PREBOOT_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_DRV_ATTACH_PREBOOT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_FORCE_EVENT_MERGING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_FORCE_EVENT_MERGING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_SET_MAC_ENHANCED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_SET_MAC_ENHANCED_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_SET_MAC_ENHANCED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_SECURITY_FILTERING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_ADDITIONAL_RSS_MODES_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_ADDITIONAL_RSS_MODES_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_ADDITIONAL_RSS_MODES_WIDTH 1 
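/*
 * Turning the VI_WINDOW_MODE byte above (offset 72 in V3 and later
 * responses) into a per-VI BAR stride is a simple mapping. The sketch
 * below is illustrative only (the function name is not the driver's,
 * which derives its VI stride from this field in its own probe code):
 */
#include <stddef.h>
#include <stdint.h>

static inline size_t vi_window_bytes(uint8_t mode)
{
        switch (mode) {
        case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K:
                return 8192;    /* PIO at offset 4k; CTPIO not mapped */
        case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K:
                return 16384;   /* PIO at 4k, CTPIO at 12k */
        case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K:
                return 65536;   /* PIO at 4k, CTPIO at 12k */
        default:
                return 0;       /* unknown mode: treat as unsupported */
        }
}

/*
 * VI n's registers then start at n * vi_window_bytes(mode) within the
 * function's BAR; per the comment above, CTPIO is unavailable with 8k
 * windows.
 */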
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_QBB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_QBB_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_QBB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_RSS_LIMITED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_RSS_LIMITED_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_RSS_LIMITED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_INCLUDE_FCS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_INCLUDE_FCS_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_INCLUDE_FCS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VLAN_INSERTION_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VLAN_INSERTION_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VLAN_INSERTION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_VLAN_STRIPPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_VLAN_STRIPPING_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_VLAN_STRIPPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_0_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_0_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_0_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_14_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_14_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_14_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_TIMESTAMP_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_TIMESTAMP_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_BATCHING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_BATCHING_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_BATCHING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCAST_FILTER_CHAINING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCAST_FILTER_CHAINING_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCAST_FILTER_CHAINING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_PM_AND_RXDP_COUNTERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_PM_AND_RXDP_COUNTERS_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DISABLE_SCATTER_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DISABLE_SCATTER_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VXLAN_NVGRE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VXLAN_NVGRE_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VXLAN_NVGRE_WIDTH 1 +/* RxDPCPU firmware id. 
*/ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DPCPU_FW_ID_OFST 4 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DPCPU_FW_ID_LEN 2 +/* enum: Standard RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP 0x0 +/* enum: Low latency RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_LOW_LATENCY 0x1 +/* enum: Packed stream RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_PACKED_STREAM 0x2 +/* enum: Rules engine RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_RULES_ENGINE 0x5 +/* enum: DPDK RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_DPDK 0x6 +/* enum: BIST RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_BIST 0x10a +/* enum: RXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 +/* enum: RXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 +/* enum: RXDP Test firmware image 3 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 +/* enum: RXDP Test firmware image 4 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 +/* enum: RXDP Test firmware image 5 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_BACKPRESSURE 0x105 +/* enum: RXDP Test firmware image 6 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 +/* enum: RXDP Test firmware image 7 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 +/* enum: RXDP Test firmware image 8 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 +/* enum: RXDP Test firmware image 9 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +/* enum: RXDP Test firmware image 10 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_SLOW 0x10c +/* TxDPCPU firmware id. 
*/ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DPCPU_FW_ID_OFST 6 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DPCPU_FW_ID_LEN 2 +/* enum: Standard TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP 0x0 +/* enum: Low latency TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_LOW_LATENCY 0x1 +/* enum: High packet rate TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_HIGH_PACKET_RATE 0x3 +/* enum: Rules engine TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_RULES_ENGINE 0x5 +/* enum: DPDK TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_DPDK 0x6 +/* enum: BIST TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_BIST 0x12d +/* enum: TXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 +/* enum: TXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 +/* enum: TXDP CSR bus test firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_CSR 0x103 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_REV_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_TYPE_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial RX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: RX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: RX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant RX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +/* enum: Low latency RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 +/* enum: Packed stream RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 +/* enum: RX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development 
only) */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* enum: RX PD firmware parsing but not filtering network overlay tunnel + * encapsulations (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_REV_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_TYPE_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial TX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: TX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: TX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant TX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ +/* enum: TX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* Hardware capabilities of NIC */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_HW_CAPABILITIES_OFST 12 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_HW_CAPABILITIES_LEN 4 +/* Licensed capabilities */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_LICENSE_CAPABILITIES_OFST 16 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_LICENSE_CAPABILITIES_LEN 4 +/* Second word of flags. Not present on older firmware (check the length). 
*/ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS2_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_ENCAP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_ENCAP_LBN 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_ENCAP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVQ_TIMER_CTRL_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVQ_TIMER_CTRL_LBN 2 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVQ_TIMER_CTRL_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVENT_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVENT_CUT_THROUGH_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVENT_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_CUT_THROUGH_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VFIFO_ULL_MODE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VFIFO_ULL_MODE_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VFIFO_ULL_MODE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_V2_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_TIMESTAMPING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_TIMESTAMPING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_TIMESTAMPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TIMESTAMP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TIMESTAMP_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_SNIFF_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_SNIFF_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_BACKGROUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_BACKGROUND_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_BACKGROUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_DB_RETURN_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_DB_RETURN_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_DB_RETURN_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_SUPPORT_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_SUPPORT_WIDTH 1 +#define 
MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_BOUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_BOUND_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_BOUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_FLAG_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_FLAG_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_FLAG_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_MARK_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_MARK_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_MARK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_L3XUDP_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_L3XUDP_SUPPORT_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_L3XUDP_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_SPREADING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_SPREADING_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_SPREADING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_HLB_IDLE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_HLB_IDLE_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_HLB_IDLE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_RXQ_NO_CONT_EV_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_RXQ_NO_CONT_EV_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_BUNDLE_UPDATE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_BUNDLE_UPDATE_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_BUNDLE_UPDATE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V3_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V3_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V3_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_DYNAMIC_SENSORS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_DYNAMIC_SENSORS_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_DYNAMIC_SENSORS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1 +/* Number of FATSOv2 contexts per datapath supported by this NIC (when + * TX_TSO_V2 == 1). Not present on older firmware (check the length). 
+ */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2 +/* One byte per PF containing the number of the external port assigned to this + * PF, indexed by PF number. Special values indicate that a PF is either not + * present or not assigned. + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_ACCESS_NOT_PERMITTED 0xff +/* enum: PF does not exist. */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_PRESENT 0xfe +/* enum: PF does exist but is not assigned to any external port. */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_ASSIGNED 0xfd +/* enum: This value indicates that PF is assigned, but it cannot be expressed + * in this field. It is intended for a possible future situation where a more + * complex scheme of PFs to ports mapping is being used. The future driver + * should look for a new field supporting the new scheme. The current/old + * driver should treat this value as PF_NOT_ASSIGNED. + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc +/* One byte per PF containing the number of its VFs, indexed by PF number. A + * special value indicates that a PF is not present. + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_OFST 42 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +/* MC_CMD_GET_CAPABILITIES_V4_OUT_ACCESS_NOT_PERMITTED 0xff */ +/* enum: PF does not exist. */ +/* MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_PRESENT 0xfe */ +/* Number of VIs available for each external port */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_OFST 58 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_NUM 4 +/* Size of RX descriptor cache expressed as binary logarithm The actual size + * equals (2 ^ RX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DESC_CACHE_SIZE_OFST 66 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DESC_CACHE_SIZE_LEN 1 +/* Size of TX descriptor cache expressed as binary logarithm The actual size + * equals (2 ^ TX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DESC_CACHE_SIZE_OFST 67 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DESC_CACHE_SIZE_LEN 1 +/* Total number of available PIO buffers */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_PIO_BUFFS_OFST 68 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_PIO_BUFFS_LEN 2 +/* Size of a single PIO buffer */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_SIZE_PIO_BUFF_OFST 70 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_SIZE_PIO_BUFF_LEN 2 +/* On chips later than Medford the amount of address space assigned to each VI + * is configurable. This is a global setting that the driver must query to + * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available + * with 8k VI windows. + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_OFST 72 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_LEN 1 +/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k. + * CTPIO is not mapped. 
+ */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_8K 0x0 +/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_16K 0x1 +/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_64K 0x2 +/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1 +/* Number of buffers per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2 +/* Entry count in the MAC stats array, including the final GENERATION_END + * entry. For MAC stats DMA, drivers should allocate a buffer large enough to + * hold at least this many 64-bit stats values, if they wish to receive all + * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the + * stats array returned will be truncated. + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS_OFST 76 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS_LEN 2 + +/* MC_CMD_GET_CAPABILITIES_V5_OUT msgresponse */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_LEN 84 +/* First word of flags. */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS1_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VPORT_RECONFIGURE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VPORT_RECONFIGURE_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VPORT_RECONFIGURE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_STRIPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_STRIPING_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_STRIPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_QUERY_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_QUERY_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_QUERY_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_DRV_ATTACH_PREBOOT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_DRV_ATTACH_PREBOOT_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_DRV_ATTACH_PREBOOT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_FORCE_EVENT_MERGING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_FORCE_EVENT_MERGING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_SET_MAC_ENHANCED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_SET_MAC_ENHANCED_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_SET_MAC_ENHANCED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11 +#define 
MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_SECURITY_FILTERING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_ADDITIONAL_RSS_MODES_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_ADDITIONAL_RSS_MODES_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_ADDITIONAL_RSS_MODES_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_QBB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_QBB_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_QBB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_RSS_LIMITED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_RSS_LIMITED_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_RSS_LIMITED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_INCLUDE_FCS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_INCLUDE_FCS_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_INCLUDE_FCS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VLAN_INSERTION_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VLAN_INSERTION_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VLAN_INSERTION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_VLAN_STRIPPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_VLAN_STRIPPING_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_VLAN_STRIPPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_0_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_0_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_0_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_14_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_14_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_14_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_TIMESTAMP_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_TIMESTAMP_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_BATCHING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_BATCHING_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_BATCHING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCAST_FILTER_CHAINING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCAST_FILTER_CHAINING_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCAST_FILTER_CHAINING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_PM_AND_RXDP_COUNTERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_PM_AND_RXDP_COUNTERS_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DISABLE_SCATTER_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DISABLE_SCATTER_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29 +#define 
MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VXLAN_NVGRE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VXLAN_NVGRE_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VXLAN_NVGRE_WIDTH 1 +/* RxDPCPU firmware id. */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DPCPU_FW_ID_OFST 4 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DPCPU_FW_ID_LEN 2 +/* enum: Standard RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP 0x0 +/* enum: Low latency RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_LOW_LATENCY 0x1 +/* enum: Packed stream RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_PACKED_STREAM 0x2 +/* enum: Rules engine RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_RULES_ENGINE 0x5 +/* enum: DPDK RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_DPDK 0x6 +/* enum: BIST RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_BIST 0x10a +/* enum: RXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 +/* enum: RXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 +/* enum: RXDP Test firmware image 3 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 +/* enum: RXDP Test firmware image 4 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 +/* enum: RXDP Test firmware image 5 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_BACKPRESSURE 0x105 +/* enum: RXDP Test firmware image 6 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 +/* enum: RXDP Test firmware image 7 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 +/* enum: RXDP Test firmware image 8 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 +/* enum: RXDP Test firmware image 9 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +/* enum: RXDP Test firmware image 10 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_SLOW 0x10c +/* TxDPCPU firmware id. 
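+ *
+ * (A plain 16-bit field; a sketch, with hypothetical outbuf as before:
+ *
+ *   u16 txdp_fw_id = le16_to_cpu(*(const __le16 *)
+ *           (outbuf + MC_CMD_GET_CAPABILITIES_V5_OUT_TX_DPCPU_FW_ID_OFST));
+ *
+ * which compares directly against the TXDP_* enum values below.)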
*/ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_DPCPU_FW_ID_OFST 6 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_DPCPU_FW_ID_LEN 2 +/* enum: Standard TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP 0x0 +/* enum: Low latency TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_LOW_LATENCY 0x1 +/* enum: High packet rate TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_HIGH_PACKET_RATE 0x3 +/* enum: Rules engine TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_RULES_ENGINE 0x5 +/* enum: DPDK TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_DPDK 0x6 +/* enum: BIST TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_BIST 0x12d +/* enum: TXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 +/* enum: TXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 +/* enum: TXDP CSR bus test firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_TEST_FW_CSR 0x103 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_REV_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_TYPE_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial RX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: RX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: RX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant RX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +/* enum: Low latency RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 +/* enum: Packed stream RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 +/* enum: RX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development 
only) */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* enum: RX PD firmware parsing but not filtering network overlay tunnel + * encapsulations (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_REV_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_TYPE_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial TX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: TX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: TX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant TX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ +/* enum: TX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* Hardware capabilities of NIC */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_HW_CAPABILITIES_OFST 12 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_HW_CAPABILITIES_LEN 4 +/* Licensed capabilities */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_LICENSE_CAPABILITIES_OFST 16 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_LICENSE_CAPABILITIES_LEN 4 +/* Second word of flags. Not present on older firmware (check the length). 
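+ *
+ * A sketch of testing one bit of this word, again with hypothetical
+ * outbuf/outlen; the _LBN values below give the bit positions, e.g.
+ * TX_TSO_V2 at bit 0:
+ *
+ *   bool tx_tso_v2 = false;
+ *   if (outlen >= MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS2_OFST +
+ *                 MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS2_LEN) {
+ *           u32 flags2 = le32_to_cpu(*(const __le32 *)
+ *                   (outbuf + MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS2_OFST));
+ *           tx_tso_v2 = flags2 &
+ *                       BIT(MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_LBN);
+ *   }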
*/ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS2_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_ENCAP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_ENCAP_LBN 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_ENCAP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVQ_TIMER_CTRL_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVQ_TIMER_CTRL_LBN 2 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVQ_TIMER_CTRL_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVENT_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVENT_CUT_THROUGH_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVENT_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_CUT_THROUGH_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VFIFO_ULL_MODE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VFIFO_ULL_MODE_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VFIFO_ULL_MODE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_V2_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_TIMESTAMPING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_TIMESTAMPING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_TIMESTAMPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TIMESTAMP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TIMESTAMP_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_SNIFF_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_SNIFF_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_BACKGROUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_BACKGROUND_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_BACKGROUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_DB_RETURN_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_DB_RETURN_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_DB_RETURN_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_CTPIO_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_CTPIO_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_CTPIO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_SUPPORT_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_SUPPORT_WIDTH 1 +#define 
MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_BOUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_BOUND_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_BOUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_FLAG_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_FLAG_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_FLAG_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_L3XUDP_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_L3XUDP_SUPPORT_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_L3XUDP_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_SPREADING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_SPREADING_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_SPREADING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_HLB_IDLE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_HLB_IDLE_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_HLB_IDLE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_RXQ_NO_CONT_EV_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_RXQ_NO_CONT_EV_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_BUNDLE_UPDATE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_BUNDLE_UPDATE_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_BUNDLE_UPDATE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V3_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V3_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V3_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_DYNAMIC_SENSORS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_DYNAMIC_SENSORS_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_DYNAMIC_SENSORS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1 +/* Number of FATSOv2 contexts per datapath supported by this NIC (when + * TX_TSO_V2 == 1). Not present on older firmware (check the length). 
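+ *
+ * (The per-PF byte arrays that follow, PFS_TO_PORTS_ASSIGNMENT and
+ * NUM_VFS_PER_PF, index as OFST + i * LEN for i below NUM. A sketch
+ * decoding one entry, with hypothetical outbuf and pf < 16:
+ *
+ *   u8 port = outbuf[MC_CMD_GET_CAPABILITIES_V5_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST + pf];
+ *   bool assigned = port < MC_CMD_GET_CAPABILITIES_V5_OUT_INCOMPATIBLE_ASSIGNMENT;
+ *
+ * since the four special values 0xfc through 0xff all mean "no usable
+ * port" and INCOMPATIBLE_ASSIGNMENT, 0xfc, is the lowest of them.)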
+ */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2 +/* One byte per PF containing the number of the external port assigned to this + * PF, indexed by PF number. Special values indicate that a PF is either not + * present or not assigned. + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_ACCESS_NOT_PERMITTED 0xff +/* enum: PF does not exist. */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_PF_NOT_PRESENT 0xfe +/* enum: PF does exist but is not assigned to any external port. */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_PF_NOT_ASSIGNED 0xfd +/* enum: This value indicates that PF is assigned, but it cannot be expressed + * in this field. It is intended for a possible future situation where a more + * complex scheme of PFs to ports mapping is being used. The future driver + * should look for a new field supporting the new scheme. The current/old + * driver should treat this value as PF_NOT_ASSIGNED. + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc +/* One byte per PF containing the number of its VFs, indexed by PF number. A + * special value indicates that a PF is not present. + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VFS_PER_PF_OFST 42 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VFS_PER_PF_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VFS_PER_PF_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +/* MC_CMD_GET_CAPABILITIES_V5_OUT_ACCESS_NOT_PERMITTED 0xff */ +/* enum: PF does not exist. */ +/* MC_CMD_GET_CAPABILITIES_V5_OUT_PF_NOT_PRESENT 0xfe */ +/* Number of VIs available for each external port */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VIS_PER_PORT_OFST 58 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VIS_PER_PORT_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VIS_PER_PORT_NUM 4 +/* Size of RX descriptor cache expressed as binary logarithm The actual size + * equals (2 ^ RX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DESC_CACHE_SIZE_OFST 66 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DESC_CACHE_SIZE_LEN 1 +/* Size of TX descriptor cache expressed as binary logarithm The actual size + * equals (2 ^ TX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_DESC_CACHE_SIZE_OFST 67 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_DESC_CACHE_SIZE_LEN 1 +/* Total number of available PIO buffers */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_PIO_BUFFS_OFST 68 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_PIO_BUFFS_LEN 2 +/* Size of a single PIO buffer */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_SIZE_PIO_BUFF_OFST 70 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_SIZE_PIO_BUFF_LEN 2 +/* On chips later than Medford the amount of address space assigned to each VI + * is configurable. This is a global setting that the driver must query to + * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available + * with 8k VI windows. + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_OFST 72 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_LEN 1 +/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k. + * CTPIO is not mapped. 
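+ *
+ * (Per the enum comments here and below, PIO sits at offset 4k, 4096
+ * bytes, into a VI's window in every mode, while CTPIO at offset 12k,
+ * 12288 bytes, exists only in the 16k and 64k modes. A sketch for VI
+ * number vi, reusing the hypothetical vi_stride from earlier and a
+ * hypothetical mapped aperture membase:
+ *
+ *   void __iomem *vi_base = membase + vi * vi_stride;
+ *   void __iomem *pio = vi_base + 4096;
+ *   void __iomem *ctpio = vi_stride > 8192 ? vi_base + 12288 : NULL;
+ * )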
+ */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_8K 0x0 +/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_16K 0x1 +/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_64K 0x2 +/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1 +/* Number of buffers per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2 +/* Entry count in the MAC stats array, including the final GENERATION_END + * entry. For MAC stats DMA, drivers should allocate a buffer large enough to + * hold at least this many 64-bit stats values, if they wish to receive all + * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the + * stats array returned will be truncated. + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_NUM_STATS_OFST 76 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_NUM_STATS_LEN 2 +/* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field + * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set. + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_MAX_OFST 80 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_MAX_LEN 4 + +/* MC_CMD_GET_CAPABILITIES_V6_OUT msgresponse */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_LEN 148 +/* First word of flags. 
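+ *
+ * Unlike FLAGS2, this word sits at offset 0 and is present in every
+ * version of the response, so no length check is needed; a sketch testing
+ * the EVB bit, with hypothetical outbuf:
+ *
+ *   u32 flags1 = le32_to_cpu(*(const __le32 *)
+ *           (outbuf + MC_CMD_GET_CAPABILITIES_V6_OUT_FLAGS1_OFST));
+ *   bool evb = flags1 & BIT(MC_CMD_GET_CAPABILITIES_V6_OUT_EVB_LBN);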
*/ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FLAGS1_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VPORT_RECONFIGURE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VPORT_RECONFIGURE_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VPORT_RECONFIGURE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_STRIPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_STRIPING_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_STRIPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VADAPTOR_QUERY_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VADAPTOR_QUERY_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VADAPTOR_QUERY_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_DRV_ATTACH_PREBOOT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_DRV_ATTACH_PREBOOT_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_DRV_ATTACH_PREBOOT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_FORCE_EVENT_MERGING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_FORCE_EVENT_MERGING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_SET_MAC_ENHANCED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_SET_MAC_ENHANCED_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_SET_MAC_ENHANCED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MAC_SECURITY_FILTERING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_ADDITIONAL_RSS_MODES_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_ADDITIONAL_RSS_MODES_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_ADDITIONAL_RSS_MODES_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_QBB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_QBB_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_QBB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_RSS_LIMITED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_RSS_LIMITED_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_RSS_LIMITED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PACKED_STREAM_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PACKED_STREAM_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_INCLUDE_FCS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_INCLUDE_FCS_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_INCLUDE_FCS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_VLAN_INSERTION_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_VLAN_INSERTION_LBN 19 
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_VLAN_INSERTION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_VLAN_STRIPPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_VLAN_STRIPPING_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_VLAN_STRIPPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PREFIX_LEN_0_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PREFIX_LEN_0_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PREFIX_LEN_0_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PREFIX_LEN_14_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PREFIX_LEN_14_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PREFIX_LEN_14_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_TIMESTAMP_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_TIMESTAMP_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_BATCHING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_BATCHING_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_BATCHING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCAST_FILTER_CHAINING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCAST_FILTER_CHAINING_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCAST_FILTER_CHAINING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_PM_AND_RXDP_COUNTERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_PM_AND_RXDP_COUNTERS_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DISABLE_SCATTER_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DISABLE_SCATTER_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVB_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VXLAN_NVGRE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VXLAN_NVGRE_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VXLAN_NVGRE_WIDTH 1 +/* RxDPCPU firmware id. 
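+ *
+ * (For logging, the production ids below map onto short names; a sketch:
+ *
+ *   static const char *rxdp_fw_name(u16 id)
+ *   {
+ *           switch (id) {
+ *           case MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP:
+ *                   return "standard";
+ *           case MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_LOW_LATENCY:
+ *                   return "low-latency";
+ *           case MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_PACKED_STREAM:
+ *                   return "packed-stream";
+ *           case MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_RULES_ENGINE:
+ *                   return "rules-engine";
+ *           case MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_DPDK:
+ *                   return "dpdk";
+ *           default:
+ *                   return "unknown or test";
+ *           }
+ *   }
+ *
+ * where the name strings are illustrative, not taken from the driver.)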
*/ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DPCPU_FW_ID_OFST 4 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DPCPU_FW_ID_LEN 2 +/* enum: Standard RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP 0x0 +/* enum: Low latency RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_LOW_LATENCY 0x1 +/* enum: Packed stream RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_PACKED_STREAM 0x2 +/* enum: Rules engine RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_RULES_ENGINE 0x5 +/* enum: DPDK RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_DPDK 0x6 +/* enum: BIST RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_BIST 0x10a +/* enum: RXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 +/* enum: RXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 +/* enum: RXDP Test firmware image 3 */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 +/* enum: RXDP Test firmware image 4 */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 +/* enum: RXDP Test firmware image 5 */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_BACKPRESSURE 0x105 +/* enum: RXDP Test firmware image 6 */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 +/* enum: RXDP Test firmware image 7 */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 +/* enum: RXDP Test firmware image 8 */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 +/* enum: RXDP Test firmware image 9 */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +/* enum: RXDP Test firmware image 10 */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_SLOW 0x10c +/* TxDPCPU firmware id. 
*/ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_DPCPU_FW_ID_OFST 6 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_DPCPU_FW_ID_LEN 2 +/* enum: Standard TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP 0x0 +/* enum: Low latency TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_LOW_LATENCY 0x1 +/* enum: High packet rate TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_HIGH_PACKET_RATE 0x3 +/* enum: Rules engine TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_RULES_ENGINE 0x5 +/* enum: DPDK TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_DPDK 0x6 +/* enum: BIST TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_BIST 0x12d +/* enum: TXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 +/* enum: TXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 +/* enum: TXDP CSR bus test firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_TEST_FW_CSR 0x103 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_REV_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_TYPE_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial RX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: RX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: RX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant RX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +/* enum: Low latency RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 +/* enum: Packed stream RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 +/* enum: RX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development 
only) */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* enum: RX PD firmware parsing but not filtering network overlay tunnel + * encapsulations (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_REV_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_TYPE_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial TX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: TX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: TX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant TX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ +/* enum: TX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* Hardware capabilities of NIC */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_HW_CAPABILITIES_OFST 12 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_HW_CAPABILITIES_LEN 4 +/* Licensed capabilities */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_LICENSE_CAPABILITIES_OFST 16 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_LICENSE_CAPABILITIES_LEN 4 +/* Second word of flags. Not present on older firmware (check the length). 
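+ *
+ * (Equivalently, outlen >= MC_CMD_GET_CAPABILITIES_V6_OUT_LEN, 148 bytes,
+ * guarantees every V6 field, this word included, is present.)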
*/ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FLAGS2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FLAGS2_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_ENCAP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_ENCAP_LBN 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_ENCAP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVQ_TIMER_CTRL_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVQ_TIMER_CTRL_LBN 2 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVQ_TIMER_CTRL_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVENT_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVENT_CUT_THROUGH_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVENT_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_CUT_THROUGH_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_VFIFO_ULL_MODE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_VFIFO_ULL_MODE_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_VFIFO_ULL_MODE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_EVQ_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_EVQ_V2_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_EVQ_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MAC_TIMESTAMPING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MAC_TIMESTAMPING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MAC_TIMESTAMPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TIMESTAMP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TIMESTAMP_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_SNIFF_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_SNIFF_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCDI_BACKGROUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCDI_BACKGROUND_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCDI_BACKGROUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCDI_DB_RETURN_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCDI_DB_RETURN_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCDI_DB_RETURN_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_CTPIO_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_CTPIO_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_CTPIO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TSA_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TSA_SUPPORT_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TSA_SUPPORT_WIDTH 1 +#define 
MC_CMD_GET_CAPABILITIES_V6_OUT_TSA_BOUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TSA_BOUND_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TSA_BOUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_FLAG_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_FLAG_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_FLAG_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_MARK_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_MARK_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_MARK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_L3XUDP_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_L3XUDP_SUPPORT_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_L3XUDP_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_SPREADING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_SPREADING_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_SPREADING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_HLB_IDLE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_HLB_IDLE_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_HLB_IDLE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_RXQ_NO_CONT_EV_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_RXQ_NO_CONT_EV_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_BUNDLE_UPDATE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_BUNDLE_UPDATE_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_BUNDLE_UPDATE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V3_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V3_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V3_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_DYNAMIC_SENSORS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_DYNAMIC_SENSORS_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_DYNAMIC_SENSORS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1 +/* Number of FATSOv2 contexts per datapath supported by this NIC (when + * TX_TSO_V2 == 1). Not present on older firmware (check the length). 
+ */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2 +/* One byte per PF containing the number of the external port assigned to this + * PF, indexed by PF number. Special values indicate that a PF is either not + * present or not assigned. + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_ACCESS_NOT_PERMITTED 0xff +/* enum: PF does not exist. */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_PF_NOT_PRESENT 0xfe +/* enum: PF does exist but is not assigned to any external port. */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_PF_NOT_ASSIGNED 0xfd +/* enum: This value indicates that PF is assigned, but it cannot be expressed + * in this field. It is intended for a possible future situation where a more + * complex scheme of PFs to ports mapping is being used. The future driver + * should look for a new field supporting the new scheme. The current/old + * driver should treat this value as PF_NOT_ASSIGNED. + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc +/* One byte per PF containing the number of its VFs, indexed by PF number. A + * special value indicates that a PF is not present. + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VFS_PER_PF_OFST 42 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VFS_PER_PF_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VFS_PER_PF_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +/* MC_CMD_GET_CAPABILITIES_V6_OUT_ACCESS_NOT_PERMITTED 0xff */ +/* enum: PF does not exist. */ +/* MC_CMD_GET_CAPABILITIES_V6_OUT_PF_NOT_PRESENT 0xfe */ +/* Number of VIs available for each external port */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VIS_PER_PORT_OFST 58 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VIS_PER_PORT_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VIS_PER_PORT_NUM 4 +/* Size of RX descriptor cache expressed as binary logarithm The actual size + * equals (2 ^ RX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DESC_CACHE_SIZE_OFST 66 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DESC_CACHE_SIZE_LEN 1 +/* Size of TX descriptor cache expressed as binary logarithm The actual size + * equals (2 ^ TX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_DESC_CACHE_SIZE_OFST 67 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_DESC_CACHE_SIZE_LEN 1 +/* Total number of available PIO buffers */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_PIO_BUFFS_OFST 68 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_PIO_BUFFS_LEN 2 +/* Size of a single PIO buffer */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_SIZE_PIO_BUFF_OFST 70 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_SIZE_PIO_BUFF_LEN 2 +/* On chips later than Medford the amount of address space assigned to each VI + * is configurable. This is a global setting that the driver must query to + * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available + * with 8k VI windows. + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_WINDOW_MODE_OFST 72 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_WINDOW_MODE_LEN 1 +/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k. + * CTPIO is not mapped. 
+ */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_WINDOW_MODE_8K 0x0 +/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_WINDOW_MODE_16K 0x1 +/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_WINDOW_MODE_64K 0x2 +/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1 +/* Number of buffers per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2 +/* Entry count in the MAC stats array, including the final GENERATION_END + * entry. For MAC stats DMA, drivers should allocate a buffer large enough to + * hold at least this many 64-bit stats values, if they wish to receive all + * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the + * stats array returned will be truncated. + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MAC_STATS_NUM_STATS_OFST 76 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MAC_STATS_NUM_STATS_LEN 2 +/* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field + * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set. + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_MARK_MAX_OFST 80 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_MARK_MAX_LEN 4 +/* On devices where the INIT_RXQ_WITH_BUFFER_SIZE flag (in + * GET_CAPABILITIES_OUT_V2) is set, drivers have to specify a buffer size when + * they create an RX queue. Due to hardware limitations, only a small number of + * different buffer sizes may be available concurrently. Nonzero entries in + * this array are the sizes of buffers which the system guarantees will be + * available for use. If the list is empty, there are no limitations on + * concurrent buffer sizes. + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST 84 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_GUARANTEED_RX_BUFFER_SIZES_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM 16 + +/* MC_CMD_GET_CAPABILITIES_V7_OUT msgresponse */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_LEN 152 +/* First word of flags. 
*/ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FLAGS1_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VPORT_RECONFIGURE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VPORT_RECONFIGURE_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VPORT_RECONFIGURE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_STRIPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_STRIPING_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_STRIPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VADAPTOR_QUERY_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VADAPTOR_QUERY_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VADAPTOR_QUERY_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_DRV_ATTACH_PREBOOT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_DRV_ATTACH_PREBOOT_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_DRV_ATTACH_PREBOOT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_FORCE_EVENT_MERGING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_FORCE_EVENT_MERGING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_SET_MAC_ENHANCED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_SET_MAC_ENHANCED_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_SET_MAC_ENHANCED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MAC_SECURITY_FILTERING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_ADDITIONAL_RSS_MODES_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_ADDITIONAL_RSS_MODES_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_ADDITIONAL_RSS_MODES_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_QBB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_QBB_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_QBB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_RSS_LIMITED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_RSS_LIMITED_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_RSS_LIMITED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PACKED_STREAM_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PACKED_STREAM_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_INCLUDE_FCS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_INCLUDE_FCS_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_INCLUDE_FCS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_VLAN_INSERTION_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_VLAN_INSERTION_LBN 19 
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_VLAN_INSERTION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_VLAN_STRIPPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_VLAN_STRIPPING_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_VLAN_STRIPPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PREFIX_LEN_0_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PREFIX_LEN_0_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PREFIX_LEN_0_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PREFIX_LEN_14_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PREFIX_LEN_14_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PREFIX_LEN_14_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_TIMESTAMP_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_TIMESTAMP_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_BATCHING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_BATCHING_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_BATCHING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCAST_FILTER_CHAINING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCAST_FILTER_CHAINING_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCAST_FILTER_CHAINING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_PM_AND_RXDP_COUNTERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_PM_AND_RXDP_COUNTERS_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DISABLE_SCATTER_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DISABLE_SCATTER_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVB_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VXLAN_NVGRE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VXLAN_NVGRE_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VXLAN_NVGRE_WIDTH 1 +/* RxDPCPU firmware id. 
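+ * This is a 2-byte little-endian field. Note that every production variant
+ * listed here has an id below 0x100, while the BIST and test images use ids
+ * of 0x100 and above. A minimal read sketch (buffer name illustrative):
+ *
+ *   u16 rx_dpcpu_fw_id = get_unaligned_le16(outbuf +
+ *           MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DPCPU_FW_ID_OFST);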
*/ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DPCPU_FW_ID_OFST 4 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DPCPU_FW_ID_LEN 2 +/* enum: Standard RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP 0x0 +/* enum: Low latency RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_LOW_LATENCY 0x1 +/* enum: Packed stream RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_PACKED_STREAM 0x2 +/* enum: Rules engine RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_RULES_ENGINE 0x5 +/* enum: DPDK RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_DPDK 0x6 +/* enum: BIST RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_BIST 0x10a +/* enum: RXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 +/* enum: RXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 +/* enum: RXDP Test firmware image 3 */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 +/* enum: RXDP Test firmware image 4 */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 +/* enum: RXDP Test firmware image 5 */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_BACKPRESSURE 0x105 +/* enum: RXDP Test firmware image 6 */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 +/* enum: RXDP Test firmware image 7 */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 +/* enum: RXDP Test firmware image 8 */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 +/* enum: RXDP Test firmware image 9 */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +/* enum: RXDP Test firmware image 10 */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_SLOW 0x10c +/* TxDPCPU firmware id. 
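+ * Read in the same way as RX_DPCPU_FW_ID above. The RXPD/TXPD FW_VERSION
+ * words that follow additionally pack two sub-fields, REV in bits 0-11 and
+ * TYPE in bits 12-15; an illustrative decode:
+ *
+ *   u16 ver = get_unaligned_le16(outbuf +
+ *           MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_OFST);
+ *   unsigned int rev = ver & 0xfff;        // REV_LBN 0, REV_WIDTH 12
+ *   unsigned int type = (ver >> 12) & 0xf; // TYPE_LBN 12, TYPE_WIDTH 4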
*/ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_DPCPU_FW_ID_OFST 6 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_DPCPU_FW_ID_LEN 2 +/* enum: Standard TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP 0x0 +/* enum: Low latency TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_LOW_LATENCY 0x1 +/* enum: High packet rate TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_HIGH_PACKET_RATE 0x3 +/* enum: Rules engine TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_RULES_ENGINE 0x5 +/* enum: DPDK TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_DPDK 0x6 +/* enum: BIST TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_BIST 0x12d +/* enum: TXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 +/* enum: TXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 +/* enum: TXDP CSR bus test firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_TEST_FW_CSR 0x103 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_REV_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_TYPE_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial RX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: RX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: RX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant RX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +/* enum: Low latency RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 +/* enum: Packed stream RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 +/* enum: RX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development 
only) */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_REV_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_TYPE_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_DPDK 0xa
+/* enum: TX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length).
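+ * A response shorter than FLAGS2_OFST + FLAGS2_LEN bytes predates this word,
+ * so a driver should treat every FLAGS2 capability as absent in that case.
+ * A minimal sketch, where "outlen" is the MCDI response length in bytes:
+ *
+ *   u32 flags2 = 0;
+ *
+ *   if (outlen >= MC_CMD_GET_CAPABILITIES_V7_OUT_FLAGS2_OFST +
+ *                 MC_CMD_GET_CAPABILITIES_V7_OUT_FLAGS2_LEN)
+ *           flags2 = get_unaligned_le32(outbuf +
+ *                   MC_CMD_GET_CAPABILITIES_V7_OUT_FLAGS2_OFST);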
*/ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FLAGS2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FLAGS2_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_ENCAP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_ENCAP_LBN 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_ENCAP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVQ_TIMER_CTRL_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVQ_TIMER_CTRL_LBN 2 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVQ_TIMER_CTRL_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVENT_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVENT_CUT_THROUGH_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVENT_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_CUT_THROUGH_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_VFIFO_ULL_MODE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_VFIFO_ULL_MODE_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_VFIFO_ULL_MODE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_EVQ_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_EVQ_V2_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_EVQ_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MAC_TIMESTAMPING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MAC_TIMESTAMPING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MAC_TIMESTAMPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TIMESTAMP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TIMESTAMP_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_SNIFF_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_SNIFF_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCDI_BACKGROUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCDI_BACKGROUND_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCDI_BACKGROUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCDI_DB_RETURN_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCDI_DB_RETURN_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCDI_DB_RETURN_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_CTPIO_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_CTPIO_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_CTPIO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TSA_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TSA_SUPPORT_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TSA_SUPPORT_WIDTH 1 +#define 
MC_CMD_GET_CAPABILITIES_V7_OUT_TSA_BOUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TSA_BOUND_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TSA_BOUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_FLAG_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_FLAG_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_FLAG_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_MARK_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_MARK_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_MARK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_L3XUDP_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_L3XUDP_SUPPORT_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_L3XUDP_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VI_SPREADING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VI_SPREADING_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VI_SPREADING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_HLB_IDLE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_HLB_IDLE_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_HLB_IDLE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_RXQ_NO_CONT_EV_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_RXQ_NO_CONT_EV_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_BUNDLE_UPDATE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_BUNDLE_UPDATE_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_BUNDLE_UPDATE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V3_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V3_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V3_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_DYNAMIC_SENSORS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_DYNAMIC_SENSORS_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_DYNAMIC_SENSORS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1 +/* Number of FATSOv2 contexts per datapath supported by this NIC (when + * TX_TSO_V2 == 1). Not present on older firmware (check the length). 
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that the PF is assigned, but the assignment
+ * cannot be expressed in this field. It is intended for a possible future
+ * situation where a more complex PF-to-port mapping scheme is in use. Future
+ * drivers should look for a new field supporting the new scheme; current
+ * drivers should treat this value as PF_NOT_ASSIGNED.
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/* MC_CMD_GET_CAPABILITIES_V7_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/* MC_CMD_GET_CAPABILITIES_V7_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of the RX descriptor cache, expressed as a binary logarithm. The
+ * actual size equals (2 ^ RX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of the TX descriptor cache, expressed as a binary logarithm. The
+ * actual size equals (2 ^ TX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_DESC_CACHE_SIZE_LEN 1
+/* Total number of available PIO buffers */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford, the amount of address space assigned to each
+ * VI is configurable. This is a global setting that the driver must query to
+ * discover the VI-to-address mapping. Cut-through PIO (CTPIO) is not
+ * available with 8k VI windows.
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VI_WINDOW_MODE_OFST 72
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
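+ * Whichever mode is reported, the layout is regular: the window for VI n
+ * starts at n * window_size, PIO (where mapped) sits 4k into the window and
+ * CTPIO (16k and 64k modes only) sits 12k into it. An illustrative
+ * calculation, not a definitive driver implementation:
+ *
+ *   static const unsigned int vi_window_size[] = {
+ *           [MC_CMD_GET_CAPABILITIES_V7_OUT_VI_WINDOW_MODE_8K]  = 0x2000,
+ *           [MC_CMD_GET_CAPABILITIES_V7_OUT_VI_WINDOW_MODE_16K] = 0x4000,
+ *           [MC_CMD_GET_CAPABILITIES_V7_OUT_VI_WINDOW_MODE_64K] = 0x10000,
+ *   };
+ *   size_t vi_base = (size_t)vi * vi_window_size[mode];
+ *   size_t pio_off = vi_base + 0x1000;   // PIO at offset 4k
+ *   size_t ctpio_off = vi_base + 0x3000; // CTPIO at offset 12k; no 8k mode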
+ */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VI_WINDOW_MODE_8K 0x0 +/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VI_WINDOW_MODE_16K 0x1 +/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VI_WINDOW_MODE_64K 0x2 +/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1 +/* Number of buffers per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2 +/* Entry count in the MAC stats array, including the final GENERATION_END + * entry. For MAC stats DMA, drivers should allocate a buffer large enough to + * hold at least this many 64-bit stats values, if they wish to receive all + * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the + * stats array returned will be truncated. + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAC_STATS_NUM_STATS_OFST 76 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAC_STATS_NUM_STATS_LEN 2 +/* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field + * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set. + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_MARK_MAX_OFST 80 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_MARK_MAX_LEN 4 +/* On devices where the INIT_RXQ_WITH_BUFFER_SIZE flag (in + * GET_CAPABILITIES_OUT_V2) is set, drivers have to specify a buffer size when + * they create an RX queue. Due to hardware limitations, only a small number of + * different buffer sizes may be available concurrently. Nonzero entries in + * this array are the sizes of buffers which the system guarantees will be + * available for use. If the list is empty, there are no limitations on + * concurrent buffer sizes. + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST 84 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_GUARANTEED_RX_BUFFER_SIZES_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM 16 +/* Third word of flags. Not present on older firmware (check the length). 
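+ * This word sits at offset 148, immediately after the 16-entry
+ * GUARANTEED_RX_BUFFER_SIZES dword array above (84 + 16 * 4 = 148), so the
+ * presence check reduces to outlen >= 152.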
*/ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FLAGS3_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FLAGS3_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_WOL_ETHERWAKE_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_WOL_ETHERWAKE_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_WOL_ETHERWAKE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_EVEN_SPREADING_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_EVEN_SPREADING_LBN 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_EVEN_SPREADING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_SELECTABLE_TABLE_SIZE_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_SELECTABLE_TABLE_SIZE_LBN 2 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_SELECTABLE_TABLE_SIZE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_SUPPORTED_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VDPA_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VDPA_SUPPORTED_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VDPA_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_UNSOL_EV_CREDIT_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_UNSOL_EV_CREDIT_SUPPORTED_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_UNSOL_EV_CREDIT_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_ENCAPSULATED_MCDI_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_ENCAPSULATED_MCDI_SUPPORTED_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_ENCAPSULATED_MCDI_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EXTERNAL_MAE_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EXTERNAL_MAE_SUPPORTED_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EXTERNAL_MAE_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_STEER_ON_OUTER_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_STEER_ON_OUTER_SUPPORTED_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_STEER_ON_OUTER_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_DYNAMIC_MPORT_JOURNAL_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_DYNAMIC_MPORT_JOURNAL_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_DYNAMIC_MPORT_JOURNAL_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_CLIENT_CMD_VF_PROXY_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_CLIENT_CMD_VF_PROXY_LBN 15 +#define 
MC_CMD_GET_CAPABILITIES_V7_OUT_CLIENT_CMD_VF_PROXY_WIDTH 1 + +/* MC_CMD_GET_CAPABILITIES_V8_OUT msgresponse */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_LEN 160 +/* First word of flags. */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS1_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VPORT_RECONFIGURE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VPORT_RECONFIGURE_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VPORT_RECONFIGURE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_STRIPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_STRIPING_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_STRIPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VADAPTOR_QUERY_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VADAPTOR_QUERY_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VADAPTOR_QUERY_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_DRV_ATTACH_PREBOOT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_DRV_ATTACH_PREBOOT_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_DRV_ATTACH_PREBOOT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_FORCE_EVENT_MERGING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_FORCE_EVENT_MERGING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_SET_MAC_ENHANCED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_SET_MAC_ENHANCED_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_SET_MAC_ENHANCED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MAC_SECURITY_FILTERING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_ADDITIONAL_RSS_MODES_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_ADDITIONAL_RSS_MODES_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_ADDITIONAL_RSS_MODES_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_QBB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_QBB_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_QBB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_RSS_LIMITED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_RSS_LIMITED_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_RSS_LIMITED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PACKED_STREAM_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PACKED_STREAM_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_INCLUDE_FCS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_INCLUDE_FCS_LBN 18 +#define 
MC_CMD_GET_CAPABILITIES_V8_OUT_RX_INCLUDE_FCS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_VLAN_INSERTION_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_VLAN_INSERTION_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_VLAN_INSERTION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_VLAN_STRIPPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_VLAN_STRIPPING_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_VLAN_STRIPPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PREFIX_LEN_0_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PREFIX_LEN_0_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PREFIX_LEN_0_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PREFIX_LEN_14_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PREFIX_LEN_14_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PREFIX_LEN_14_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_TIMESTAMP_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_TIMESTAMP_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_BATCHING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_BATCHING_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_BATCHING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCAST_FILTER_CHAINING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCAST_FILTER_CHAINING_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCAST_FILTER_CHAINING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_PM_AND_RXDP_COUNTERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_PM_AND_RXDP_COUNTERS_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DISABLE_SCATTER_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DISABLE_SCATTER_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVB_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VXLAN_NVGRE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VXLAN_NVGRE_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VXLAN_NVGRE_WIDTH 1 +/* RxDPCPU firmware id. 
*/ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DPCPU_FW_ID_OFST 4 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DPCPU_FW_ID_LEN 2 +/* enum: Standard RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP 0x0 +/* enum: Low latency RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_LOW_LATENCY 0x1 +/* enum: Packed stream RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_PACKED_STREAM 0x2 +/* enum: Rules engine RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_RULES_ENGINE 0x5 +/* enum: DPDK RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_DPDK 0x6 +/* enum: BIST RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_BIST 0x10a +/* enum: RXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 +/* enum: RXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 +/* enum: RXDP Test firmware image 3 */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 +/* enum: RXDP Test firmware image 4 */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 +/* enum: RXDP Test firmware image 5 */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_BACKPRESSURE 0x105 +/* enum: RXDP Test firmware image 6 */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 +/* enum: RXDP Test firmware image 7 */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 +/* enum: RXDP Test firmware image 8 */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 +/* enum: RXDP Test firmware image 9 */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +/* enum: RXDP Test firmware image 10 */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_SLOW 0x10c +/* TxDPCPU firmware id. 
*/ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_DPCPU_FW_ID_OFST 6 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_DPCPU_FW_ID_LEN 2 +/* enum: Standard TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP 0x0 +/* enum: Low latency TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_LOW_LATENCY 0x1 +/* enum: High packet rate TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_HIGH_PACKET_RATE 0x3 +/* enum: Rules engine TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_RULES_ENGINE 0x5 +/* enum: DPDK TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_DPDK 0x6 +/* enum: BIST TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_BIST 0x12d +/* enum: TXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 +/* enum: TXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 +/* enum: TXDP CSR bus test firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_TEST_FW_CSR 0x103 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_REV_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_TYPE_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial RX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: RX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: RX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant RX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +/* enum: Low latency RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 +/* enum: Packed stream RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 +/* enum: RX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development 
only) */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_REV_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_TYPE_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_DPDK 0xa
+/* enum: TX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length).
*/ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS2_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_ENCAP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_ENCAP_LBN 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_ENCAP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVQ_TIMER_CTRL_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVQ_TIMER_CTRL_LBN 2 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVQ_TIMER_CTRL_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVENT_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVENT_CUT_THROUGH_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVENT_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_CUT_THROUGH_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_VFIFO_ULL_MODE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_VFIFO_ULL_MODE_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_VFIFO_ULL_MODE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_EVQ_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_EVQ_V2_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_EVQ_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MAC_TIMESTAMPING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MAC_TIMESTAMPING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MAC_TIMESTAMPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TIMESTAMP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TIMESTAMP_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_SNIFF_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_SNIFF_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCDI_BACKGROUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCDI_BACKGROUND_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCDI_BACKGROUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCDI_DB_RETURN_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCDI_DB_RETURN_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCDI_DB_RETURN_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_CTPIO_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_CTPIO_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_CTPIO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TSA_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TSA_SUPPORT_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TSA_SUPPORT_WIDTH 1 +#define 
MC_CMD_GET_CAPABILITIES_V8_OUT_TSA_BOUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TSA_BOUND_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TSA_BOUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_FLAG_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_FLAG_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_FLAG_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_MARK_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_MARK_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_MARK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_L3XUDP_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_L3XUDP_SUPPORT_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_L3XUDP_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VI_SPREADING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VI_SPREADING_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VI_SPREADING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_HLB_IDLE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_HLB_IDLE_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_HLB_IDLE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_RXQ_NO_CONT_EV_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_RXQ_NO_CONT_EV_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_BUNDLE_UPDATE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_BUNDLE_UPDATE_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_BUNDLE_UPDATE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V3_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V3_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V3_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_DYNAMIC_SENSORS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_DYNAMIC_SENSORS_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_DYNAMIC_SENSORS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1 +/* Number of FATSOv2 contexts per datapath supported by this NIC (when + * TX_TSO_V2 == 1). Not present on older firmware (check the length). 
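+ * A minimal sketch of reading this count safely, assuming "tso_v2_supported"
+ * reflects the TX_TSO_V2 flag and "outlen" is the response length (all names
+ * illustrative):
+ *
+ *   u16 n_tso_contexts = 0;
+ *
+ *   if (tso_v2_supported &&
+ *       outlen >= MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_N_CONTEXTS_OFST +
+ *                 MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_N_CONTEXTS_LEN)
+ *           n_tso_contexts = get_unaligned_le16(outbuf +
+ *                   MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_N_CONTEXTS_OFST);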
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that the PF is assigned, but the assignment
+ * cannot be expressed in this field. It is intended for a possible future
+ * situation where a more complex PF-to-port mapping scheme is in use. Future
+ * drivers should look for a new field supporting the new scheme; current
+ * drivers should treat this value as PF_NOT_ASSIGNED.
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/* MC_CMD_GET_CAPABILITIES_V8_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/* MC_CMD_GET_CAPABILITIES_V8_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of the RX descriptor cache, expressed as a binary logarithm. The
+ * actual size equals (2 ^ RX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of the TX descriptor cache, expressed as a binary logarithm. The
+ * actual size equals (2 ^ TX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_DESC_CACHE_SIZE_LEN 1
+/* Total number of available PIO buffers */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford, the amount of address space assigned to each
+ * VI is configurable. This is a global setting that the driver must query to
+ * discover the VI-to-address mapping. Cut-through PIO (CTPIO) is not
+ * available with 8k VI windows.
+ */
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_VI_WINDOW_MODE_OFST 72
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
+ */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VI_WINDOW_MODE_8K 0x0 +/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VI_WINDOW_MODE_16K 0x1 +/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VI_WINDOW_MODE_64K 0x2 +/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1 +/* Number of buffers per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2 +/* Entry count in the MAC stats array, including the final GENERATION_END + * entry. For MAC stats DMA, drivers should allocate a buffer large enough to + * hold at least this many 64-bit stats values, if they wish to receive all + * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the + * stats array returned will be truncated. + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAC_STATS_NUM_STATS_OFST 76 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAC_STATS_NUM_STATS_LEN 2 +/* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field + * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set. + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_MARK_MAX_OFST 80 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_MARK_MAX_LEN 4 +/* On devices where the INIT_RXQ_WITH_BUFFER_SIZE flag (in + * GET_CAPABILITIES_OUT_V2) is set, drivers have to specify a buffer size when + * they create an RX queue. Due to hardware limitations, only a small number of + * different buffer sizes may be available concurrently. Nonzero entries in + * this array are the sizes of buffers which the system guarantees will be + * available for use. If the list is empty, there are no limitations on + * concurrent buffer sizes. + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST 84 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_GUARANTEED_RX_BUFFER_SIZES_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM 16 +/* Third word of flags. Not present on older firmware (check the length). 
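+ * As with FLAGS2, a shorter response means this word is absent. The
+ * MAC_STATS_NUM_STATS field above also drives DMA buffer sizing: a driver
+ * wanting the complete stats set should allocate at least that many 64-bit
+ * slots. An illustrative sketch (variable names are not part of this API):
+ *
+ *   u16 num_stats = get_unaligned_le16(outbuf +
+ *           MC_CMD_GET_CAPABILITIES_V8_OUT_MAC_STATS_NUM_STATS_OFST);
+ *   size_t stats_dma_len = (size_t)num_stats * sizeof(u64);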
*/ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS3_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS3_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_WOL_ETHERWAKE_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_WOL_ETHERWAKE_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_WOL_ETHERWAKE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_EVEN_SPREADING_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_EVEN_SPREADING_LBN 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_EVEN_SPREADING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_SELECTABLE_TABLE_SIZE_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_SELECTABLE_TABLE_SIZE_LBN 2 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_SELECTABLE_TABLE_SIZE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_SUPPORTED_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VDPA_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VDPA_SUPPORTED_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VDPA_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_UNSOL_EV_CREDIT_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_UNSOL_EV_CREDIT_SUPPORTED_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_UNSOL_EV_CREDIT_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_ENCAPSULATED_MCDI_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_ENCAPSULATED_MCDI_SUPPORTED_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_ENCAPSULATED_MCDI_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EXTERNAL_MAE_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EXTERNAL_MAE_SUPPORTED_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EXTERNAL_MAE_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_STEER_ON_OUTER_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_STEER_ON_OUTER_SUPPORTED_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_STEER_ON_OUTER_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_DYNAMIC_MPORT_JOURNAL_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_DYNAMIC_MPORT_JOURNAL_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_DYNAMIC_MPORT_JOURNAL_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_CLIENT_CMD_VF_PROXY_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_CLIENT_CMD_VF_PROXY_LBN 15 +#define 
MC_CMD_GET_CAPABILITIES_V8_OUT_CLIENT_CMD_VF_PROXY_WIDTH 1 +/* These bits are reserved for communicating test-specific capabilities to + * host-side test software. All production drivers should treat this field as + * opaque. + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_OFST 152 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_LEN 8 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_LO_OFST 152 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_LO_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_LO_LBN 1216 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_LO_WIDTH 32 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_HI_OFST 156 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_HI_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_HI_LBN 1248 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_HI_WIDTH 32 + +/* MC_CMD_GET_CAPABILITIES_V9_OUT msgresponse */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_LEN 184 +/* First word of flags. */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FLAGS1_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VPORT_RECONFIGURE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VPORT_RECONFIGURE_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VPORT_RECONFIGURE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_STRIPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_STRIPING_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_STRIPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VADAPTOR_QUERY_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VADAPTOR_QUERY_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VADAPTOR_QUERY_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_DRV_ATTACH_PREBOOT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_DRV_ATTACH_PREBOOT_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_DRV_ATTACH_PREBOOT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_FORCE_EVENT_MERGING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_FORCE_EVENT_MERGING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_SET_MAC_ENHANCED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_SET_MAC_ENHANCED_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_SET_MAC_ENHANCED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MAC_SECURITY_FILTERING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_ADDITIONAL_RSS_MODES_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_ADDITIONAL_RSS_MODES_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_ADDITIONAL_RSS_MODES_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_QBB_OFST 0 +#define 
MC_CMD_GET_CAPABILITIES_V9_OUT_QBB_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_QBB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_RSS_LIMITED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_RSS_LIMITED_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_RSS_LIMITED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PACKED_STREAM_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PACKED_STREAM_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_INCLUDE_FCS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_INCLUDE_FCS_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_INCLUDE_FCS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_VLAN_INSERTION_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_VLAN_INSERTION_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_VLAN_INSERTION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_VLAN_STRIPPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_VLAN_STRIPPING_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_VLAN_STRIPPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PREFIX_LEN_0_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PREFIX_LEN_0_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PREFIX_LEN_0_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PREFIX_LEN_14_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PREFIX_LEN_14_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PREFIX_LEN_14_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_TIMESTAMP_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_TIMESTAMP_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_BATCHING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_BATCHING_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_BATCHING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCAST_FILTER_CHAINING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCAST_FILTER_CHAINING_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCAST_FILTER_CHAINING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_PM_AND_RXDP_COUNTERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_PM_AND_RXDP_COUNTERS_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DISABLE_SCATTER_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DISABLE_SCATTER_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVB_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VXLAN_NVGRE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VXLAN_NVGRE_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VXLAN_NVGRE_WIDTH 1 +/* RxDPCPU firmware id. 
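+ * Read as a little-endian 16-bit word at the offset below and compared
+ * against the enum values that follow; a sketch (`outbuf` is a
+ * hypothetical pointer to the MCDI response bytes, not part of this
+ * header):
+ *
+ *   uint16_t rx_fw_id =
+ *       outbuf[MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DPCPU_FW_ID_OFST] |
+ *       (outbuf[MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DPCPU_FW_ID_OFST + 1] << 8);
+ *   bool packed_stream =
+ *       (rx_fw_id == MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_PACKED_STREAM);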
*/ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DPCPU_FW_ID_OFST 4 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DPCPU_FW_ID_LEN 2 +/* enum: Standard RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP 0x0 +/* enum: Low latency RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_LOW_LATENCY 0x1 +/* enum: Packed stream RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_PACKED_STREAM 0x2 +/* enum: Rules engine RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_RULES_ENGINE 0x5 +/* enum: DPDK RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_DPDK 0x6 +/* enum: BIST RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_BIST 0x10a +/* enum: RXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 +/* enum: RXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 +/* enum: RXDP Test firmware image 3 */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 +/* enum: RXDP Test firmware image 4 */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 +/* enum: RXDP Test firmware image 5 */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_BACKPRESSURE 0x105 +/* enum: RXDP Test firmware image 6 */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 +/* enum: RXDP Test firmware image 7 */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 +/* enum: RXDP Test firmware image 8 */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 +/* enum: RXDP Test firmware image 9 */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +/* enum: RXDP Test firmware image 10 */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_SLOW 0x10c +/* TxDPCPU firmware id. 
*/ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_DPCPU_FW_ID_OFST 6 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_DPCPU_FW_ID_LEN 2 +/* enum: Standard TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP 0x0 +/* enum: Low latency TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_LOW_LATENCY 0x1 +/* enum: High packet rate TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_HIGH_PACKET_RATE 0x3 +/* enum: Rules engine TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_RULES_ENGINE 0x5 +/* enum: DPDK TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_DPDK 0x6 +/* enum: BIST TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_BIST 0x12d +/* enum: TXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 +/* enum: TXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 +/* enum: TXDP CSR bus test firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_TEST_FW_CSR 0x103 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_REV_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_TYPE_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial RX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: RX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: RX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant RX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +/* enum: Low latency RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 +/* enum: Packed stream RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 +/* enum: RX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development 
only) */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* enum: RX PD firmware parsing but not filtering network overlay tunnel + * encapsulations (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_REV_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_TYPE_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial TX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: TX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: TX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant TX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ +/* enum: TX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* Hardware capabilities of NIC */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_HW_CAPABILITIES_OFST 12 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_HW_CAPABILITIES_LEN 4 +/* Licensed capabilities */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_LICENSE_CAPABILITIES_OFST 16 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_LICENSE_CAPABILITIES_LEN 4 +/* Second word of flags. Not present on older firmware (check the length). 
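+ * A typical use of this word is to test TX_TSO_V2 here and, only when it
+ * is set, go on to read the dependent TX_TSO_V2_N_CONTEXTS field further
+ * down in the response; a sketch (`flags2` and `outbuf` are hypothetical
+ * locals holding this word and the raw response bytes):
+ *
+ *   unsigned int n_tso_contexts = 0;
+ *   if (flags2 & (1u << MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_LBN))
+ *           n_tso_contexts =
+ *       outbuf[MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_N_CONTEXTS_OFST] |
+ *       (outbuf[MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_N_CONTEXTS_OFST + 1] << 8);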
*/ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FLAGS2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FLAGS2_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_ENCAP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_ENCAP_LBN 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_ENCAP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVQ_TIMER_CTRL_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVQ_TIMER_CTRL_LBN 2 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVQ_TIMER_CTRL_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVENT_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVENT_CUT_THROUGH_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVENT_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_CUT_THROUGH_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_VFIFO_ULL_MODE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_VFIFO_ULL_MODE_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_VFIFO_ULL_MODE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_EVQ_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_EVQ_V2_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_EVQ_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MAC_TIMESTAMPING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MAC_TIMESTAMPING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MAC_TIMESTAMPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TIMESTAMP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TIMESTAMP_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_SNIFF_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_SNIFF_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCDI_BACKGROUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCDI_BACKGROUND_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCDI_BACKGROUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCDI_DB_RETURN_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCDI_DB_RETURN_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCDI_DB_RETURN_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_CTPIO_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_CTPIO_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_CTPIO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TSA_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TSA_SUPPORT_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TSA_SUPPORT_WIDTH 1 +#define 
MC_CMD_GET_CAPABILITIES_V9_OUT_TSA_BOUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TSA_BOUND_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TSA_BOUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_FLAG_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_FLAG_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_FLAG_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_MARK_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_MARK_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_MARK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_L3XUDP_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_L3XUDP_SUPPORT_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_L3XUDP_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VI_SPREADING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VI_SPREADING_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VI_SPREADING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_HLB_IDLE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_HLB_IDLE_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_HLB_IDLE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_RXQ_NO_CONT_EV_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_RXQ_NO_CONT_EV_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_BUNDLE_UPDATE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_BUNDLE_UPDATE_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_BUNDLE_UPDATE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V3_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V3_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V3_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_DYNAMIC_SENSORS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_DYNAMIC_SENSORS_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_DYNAMIC_SENSORS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1 +/* Number of FATSOv2 contexts per datapath supported by this NIC (when + * TX_TSO_V2 == 1). Not present on older firmware (check the length). 
+ */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2 +/* One byte per PF containing the number of the external port assigned to this + * PF, indexed by PF number. Special values indicate that a PF is either not + * present or not assigned. + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_ACCESS_NOT_PERMITTED 0xff +/* enum: PF does not exist. */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_PF_NOT_PRESENT 0xfe +/* enum: PF does exist but is not assigned to any external port. */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_PF_NOT_ASSIGNED 0xfd +/* enum: This value indicates that PF is assigned, but it cannot be expressed + * in this field. It is intended for a possible future situation where a more + * complex scheme of PFs to ports mapping is being used. The future driver + * should look for a new field supporting the new scheme. The current/old + * driver should treat this value as PF_NOT_ASSIGNED. + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc +/* One byte per PF containing the number of its VFs, indexed by PF number. A + * special value indicates that a PF is not present. + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VFS_PER_PF_OFST 42 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VFS_PER_PF_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VFS_PER_PF_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +/* MC_CMD_GET_CAPABILITIES_V9_OUT_ACCESS_NOT_PERMITTED 0xff */ +/* enum: PF does not exist. */ +/* MC_CMD_GET_CAPABILITIES_V9_OUT_PF_NOT_PRESENT 0xfe */ +/* Number of VIs available for each external port */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VIS_PER_PORT_OFST 58 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VIS_PER_PORT_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VIS_PER_PORT_NUM 4 +/* Size of RX descriptor cache expressed as binary logarithm The actual size + * equals (2 ^ RX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DESC_CACHE_SIZE_OFST 66 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DESC_CACHE_SIZE_LEN 1 +/* Size of TX descriptor cache expressed as binary logarithm The actual size + * equals (2 ^ TX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_DESC_CACHE_SIZE_OFST 67 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_DESC_CACHE_SIZE_LEN 1 +/* Total number of available PIO buffers */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_PIO_BUFFS_OFST 68 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_PIO_BUFFS_LEN 2 +/* Size of a single PIO buffer */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_SIZE_PIO_BUFF_OFST 70 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_SIZE_PIO_BUFF_LEN 2 +/* On chips later than Medford the amount of address space assigned to each VI + * is configurable. This is a global setting that the driver must query to + * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available + * with 8k VI windows. + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VI_WINDOW_MODE_OFST 72 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VI_WINDOW_MODE_LEN 1 +/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k. + * CTPIO is not mapped. 
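+ * The mode byte maps directly to a VI window stride in bytes; a driver
+ * could translate it roughly as below (a sketch only; `mode` is a
+ * hypothetical local holding the byte read at VI_WINDOW_MODE_OFST):
+ *
+ *   size_t vi_stride;
+ *   switch (mode) {
+ *   case MC_CMD_GET_CAPABILITIES_V9_OUT_VI_WINDOW_MODE_8K:
+ *           vi_stride = 8192; break;
+ *   case MC_CMD_GET_CAPABILITIES_V9_OUT_VI_WINDOW_MODE_16K:
+ *           vi_stride = 16384; break;
+ *   case MC_CMD_GET_CAPABILITIES_V9_OUT_VI_WINDOW_MODE_64K:
+ *           vi_stride = 65536; break;
+ *   default:
+ *           vi_stride = 0; break;   (unknown mode: fail the probe)
+ *   }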
+ */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VI_WINDOW_MODE_8K 0x0 +/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VI_WINDOW_MODE_16K 0x1 +/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VI_WINDOW_MODE_64K 0x2 +/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1 +/* Number of buffers per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2 +/* Entry count in the MAC stats array, including the final GENERATION_END + * entry. For MAC stats DMA, drivers should allocate a buffer large enough to + * hold at least this many 64-bit stats values, if they wish to receive all + * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the + * stats array returned will be truncated. + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAC_STATS_NUM_STATS_OFST 76 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAC_STATS_NUM_STATS_LEN 2 +/* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field + * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set. + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_MARK_MAX_OFST 80 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_MARK_MAX_LEN 4 +/* On devices where the INIT_RXQ_WITH_BUFFER_SIZE flag (in + * GET_CAPABILITIES_OUT_V2) is set, drivers have to specify a buffer size when + * they create an RX queue. Due to hardware limitations, only a small number of + * different buffer sizes may be available concurrently. Nonzero entries in + * this array are the sizes of buffers which the system guarantees will be + * available for use. If the list is empty, there are no limitations on + * concurrent buffer sizes. + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST 84 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_GUARANTEED_RX_BUFFER_SIZES_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM 16 +/* Third word of flags. Not present on older firmware (check the length). 
*/ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FLAGS3_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FLAGS3_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_WOL_ETHERWAKE_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_WOL_ETHERWAKE_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_WOL_ETHERWAKE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_EVEN_SPREADING_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_EVEN_SPREADING_LBN 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_EVEN_SPREADING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_SELECTABLE_TABLE_SIZE_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_SELECTABLE_TABLE_SIZE_LBN 2 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_SELECTABLE_TABLE_SIZE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_SUPPORTED_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VDPA_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VDPA_SUPPORTED_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VDPA_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_UNSOL_EV_CREDIT_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_UNSOL_EV_CREDIT_SUPPORTED_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_UNSOL_EV_CREDIT_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_ENCAPSULATED_MCDI_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_ENCAPSULATED_MCDI_SUPPORTED_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_ENCAPSULATED_MCDI_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EXTERNAL_MAE_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EXTERNAL_MAE_SUPPORTED_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EXTERNAL_MAE_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_STEER_ON_OUTER_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_STEER_ON_OUTER_SUPPORTED_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_STEER_ON_OUTER_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_DYNAMIC_MPORT_JOURNAL_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_DYNAMIC_MPORT_JOURNAL_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_DYNAMIC_MPORT_JOURNAL_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_CLIENT_CMD_VF_PROXY_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_CLIENT_CMD_VF_PROXY_LBN 15 +#define 
MC_CMD_GET_CAPABILITIES_V9_OUT_CLIENT_CMD_VF_PROXY_WIDTH 1 +/* These bits are reserved for communicating test-specific capabilities to + * host-side test software. All production drivers should treat this field as + * opaque. + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_OFST 152 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_LEN 8 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_LO_OFST 152 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_LO_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_LO_LBN 1216 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_LO_WIDTH 32 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_HI_OFST 156 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_HI_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_HI_LBN 1248 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_HI_WIDTH 32 +/* The minimum size (in table entries) of indirection table to be allocated + * from the pool for an RSS context. Note that the table size used must be a + * power of 2. + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MIN_INDIRECTION_TABLE_SIZE_OFST 160 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MIN_INDIRECTION_TABLE_SIZE_LEN 4 +/* The maximum size (in table entries) of indirection table to be allocated + * from the pool for an RSS context. Note that the table size used must be a + * power of 2. + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MAX_INDIRECTION_TABLE_SIZE_OFST 164 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MAX_INDIRECTION_TABLE_SIZE_LEN 4 +/* The maximum number of queues that can be used by an RSS context in exclusive + * mode. In exclusive mode the context has a configurable indirection table and + * a configurable RSS key. + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MAX_INDIRECTION_QUEUES_OFST 168 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MAX_INDIRECTION_QUEUES_LEN 4 +/* The maximum number of queues that can be used by an RSS context in even- + * spreading mode. In even-spreading mode the context has no indirection table + * but it does have a configurable RSS key. + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MAX_EVEN_SPREADING_QUEUES_OFST 172 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MAX_EVEN_SPREADING_QUEUES_LEN 4 +/* The total number of RSS contexts supported. Note that the number of + * available contexts using indirection tables is also limited by the + * availability of indirection table space allocated from a common pool. + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_NUM_CONTEXTS_OFST 176 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_NUM_CONTEXTS_LEN 4 +/* The total amount of indirection table space that can be shared between RSS + * contexts. + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_TABLE_POOL_SIZE_OFST 180 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_TABLE_POOL_SIZE_LEN 4 + +/* MC_CMD_GET_CAPABILITIES_V10_OUT msgresponse */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_LEN 192 +/* First word of flags. 
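+ * Individual capability bits are tested against this word with the
+ * matching _LBN values; a sketch (`read_le32` stands for any
+ * little-endian 32-bit load from the response at a byte offset and is a
+ * hypothetical helper, not one defined by this header):
+ *
+ *   uint32_t flags1 = read_le32(outbuf,
+ *           MC_CMD_GET_CAPABILITIES_V10_OUT_FLAGS1_OFST);
+ *   bool evb = flags1 &
+ *           (1u << MC_CMD_GET_CAPABILITIES_V10_OUT_EVB_LBN);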
*/ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_FLAGS1_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_VPORT_RECONFIGURE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_VPORT_RECONFIGURE_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_VPORT_RECONFIGURE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_STRIPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_STRIPING_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_STRIPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_VADAPTOR_QUERY_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_VADAPTOR_QUERY_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_VADAPTOR_QUERY_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_DRV_ATTACH_PREBOOT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_DRV_ATTACH_PREBOOT_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_DRV_ATTACH_PREBOOT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_FORCE_EVENT_MERGING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_FORCE_EVENT_MERGING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_SET_MAC_ENHANCED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_SET_MAC_ENHANCED_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_SET_MAC_ENHANCED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_MAC_SECURITY_FILTERING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_ADDITIONAL_RSS_MODES_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_ADDITIONAL_RSS_MODES_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_ADDITIONAL_RSS_MODES_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_QBB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_QBB_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_QBB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_RSS_LIMITED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_RSS_LIMITED_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_RSS_LIMITED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PACKED_STREAM_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PACKED_STREAM_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_INCLUDE_FCS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_INCLUDE_FCS_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_INCLUDE_FCS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_VLAN_INSERTION_OFST 0 +#define 
MC_CMD_GET_CAPABILITIES_V10_OUT_TX_VLAN_INSERTION_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_VLAN_INSERTION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_VLAN_STRIPPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_VLAN_STRIPPING_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_VLAN_STRIPPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PREFIX_LEN_0_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PREFIX_LEN_0_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PREFIX_LEN_0_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PREFIX_LEN_14_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PREFIX_LEN_14_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PREFIX_LEN_14_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_TIMESTAMP_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_TIMESTAMP_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_BATCHING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_BATCHING_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_BATCHING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_MCAST_FILTER_CHAINING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_MCAST_FILTER_CHAINING_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_MCAST_FILTER_CHAINING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_PM_AND_RXDP_COUNTERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_PM_AND_RXDP_COUNTERS_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_DISABLE_SCATTER_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_DISABLE_SCATTER_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVB_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_VXLAN_NVGRE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_VXLAN_NVGRE_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_VXLAN_NVGRE_WIDTH 1 +/* RxDPCPU firmware id. 
*/ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_DPCPU_FW_ID_OFST 4 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_DPCPU_FW_ID_LEN 2 +/* enum: Standard RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP 0x0 +/* enum: Low latency RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_LOW_LATENCY 0x1 +/* enum: Packed stream RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_PACKED_STREAM 0x2 +/* enum: Rules engine RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_RULES_ENGINE 0x5 +/* enum: DPDK RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_DPDK 0x6 +/* enum: BIST RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_BIST 0x10a +/* enum: RXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 +/* enum: RXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 +/* enum: RXDP Test firmware image 3 */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 +/* enum: RXDP Test firmware image 4 */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 +/* enum: RXDP Test firmware image 5 */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_TEST_BACKPRESSURE 0x105 +/* enum: RXDP Test firmware image 6 */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 +/* enum: RXDP Test firmware image 7 */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 +/* enum: RXDP Test firmware image 8 */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 +/* enum: RXDP Test firmware image 9 */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +/* enum: RXDP Test firmware image 10 */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_TEST_FW_SLOW 0x10c +/* TxDPCPU firmware id. 
*/ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_DPCPU_FW_ID_OFST 6 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_DPCPU_FW_ID_LEN 2 +/* enum: Standard TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXDP 0x0 +/* enum: Low latency TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXDP_LOW_LATENCY 0x1 +/* enum: High packet rate TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXDP_HIGH_PACKET_RATE 0x3 +/* enum: Rules engine TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXDP_RULES_ENGINE 0x5 +/* enum: DPDK TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXDP_DPDK 0x6 +/* enum: BIST TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXDP_BIST 0x12d +/* enum: TXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 +/* enum: TXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 +/* enum: TXDP CSR bus test firmware */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXDP_TEST_FW_CSR 0x103 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_VERSION_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_VERSION_REV_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_VERSION_TYPE_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial RX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: RX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: RX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant RX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +/* enum: Low latency RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 +/* enum: Packed stream RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 +/* enum: RX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing 
prototype (Medford development only) */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* enum: RX PD firmware parsing but not filtering network overlay tunnel + * encapsulations (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_VERSION_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_VERSION_REV_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_VERSION_TYPE_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial TX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: TX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: TX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant TX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ +/* enum: TX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* Hardware capabilities of NIC */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_HW_CAPABILITIES_OFST 12 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_HW_CAPABILITIES_LEN 4 +/* Licensed capabilities */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_LICENSE_CAPABILITIES_OFST 16 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_LICENSE_CAPABILITIES_LEN 4 +/* Second word of flags. Not present on older firmware (check the length). 
*/ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_FLAGS2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_FLAGS2_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V2_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V2_ENCAP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V2_ENCAP_LBN 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V2_ENCAP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVQ_TIMER_CTRL_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVQ_TIMER_CTRL_LBN 2 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVQ_TIMER_CTRL_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVENT_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVENT_CUT_THROUGH_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVENT_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_CUT_THROUGH_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_VFIFO_ULL_MODE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_VFIFO_ULL_MODE_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_VFIFO_ULL_MODE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_EVQ_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_EVQ_V2_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_EVQ_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_MAC_TIMESTAMPING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_MAC_TIMESTAMPING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_MAC_TIMESTAMPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TIMESTAMP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TIMESTAMP_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_SNIFF_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_SNIFF_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_MCDI_BACKGROUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_MCDI_BACKGROUND_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_MCDI_BACKGROUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_MCDI_DB_RETURN_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_MCDI_DB_RETURN_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_MCDI_DB_RETURN_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_CTPIO_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_CTPIO_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_CTPIO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TSA_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TSA_SUPPORT_LBN 16 +#define 
MC_CMD_GET_CAPABILITIES_V10_OUT_TSA_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TSA_BOUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TSA_BOUND_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TSA_BOUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_FILTER_ACTION_FLAG_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_FILTER_ACTION_FLAG_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_FILTER_ACTION_FLAG_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_FILTER_ACTION_MARK_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_FILTER_ACTION_MARK_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_FILTER_ACTION_MARK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_L3XUDP_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_L3XUDP_SUPPORT_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_L3XUDP_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_VI_SPREADING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_VI_SPREADING_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_VI_SPREADING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_HLB_IDLE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_HLB_IDLE_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_HLB_IDLE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_RXQ_NO_CONT_EV_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_RXQ_NO_CONT_EV_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_BUNDLE_UPDATE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_BUNDLE_UPDATE_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_BUNDLE_UPDATE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V3_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V3_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V3_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_DYNAMIC_SENSORS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_DYNAMIC_SENSORS_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_DYNAMIC_SENSORS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1 +/* Number of FATSOv2 contexts per datapath supported by this NIC (when + * TX_TSO_V2 == 1). Not present on older firmware (check the length). 
+ */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2 +/* One byte per PF containing the number of the external port assigned to this + * PF, indexed by PF number. Special values indicate that a PF is either not + * present or not assigned. + */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_ACCESS_NOT_PERMITTED 0xff +/* enum: PF does not exist. */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_PF_NOT_PRESENT 0xfe +/* enum: PF does exist but is not assigned to any external port. */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_PF_NOT_ASSIGNED 0xfd +/* enum: This value indicates that PF is assigned, but it cannot be expressed + * in this field. It is intended for a possible future situation where a more + * complex scheme of PFs to ports mapping is being used. The future driver + * should look for a new field supporting the new scheme. The current/old + * driver should treat this value as PF_NOT_ASSIGNED. + */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc +/* One byte per PF containing the number of its VFs, indexed by PF number. A + * special value indicates that a PF is not present. + */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_NUM_VFS_PER_PF_OFST 42 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_NUM_VFS_PER_PF_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_NUM_VFS_PER_PF_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +/* MC_CMD_GET_CAPABILITIES_V10_OUT_ACCESS_NOT_PERMITTED 0xff */ +/* enum: PF does not exist. */ +/* MC_CMD_GET_CAPABILITIES_V10_OUT_PF_NOT_PRESENT 0xfe */ +/* Number of VIs available for each external port */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_NUM_VIS_PER_PORT_OFST 58 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_NUM_VIS_PER_PORT_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_NUM_VIS_PER_PORT_NUM 4 +/* Size of RX descriptor cache expressed as binary logarithm The actual size + * equals (2 ^ RX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_DESC_CACHE_SIZE_OFST 66 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_DESC_CACHE_SIZE_LEN 1 +/* Size of TX descriptor cache expressed as binary logarithm The actual size + * equals (2 ^ TX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_DESC_CACHE_SIZE_OFST 67 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_DESC_CACHE_SIZE_LEN 1 +/* Total number of available PIO buffers */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_NUM_PIO_BUFFS_OFST 68 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_NUM_PIO_BUFFS_LEN 2 +/* Size of a single PIO buffer */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_SIZE_PIO_BUFF_OFST 70 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_SIZE_PIO_BUFF_LEN 2 +/* On chips later than Medford the amount of address space assigned to each VI + * is configurable. This is a global setting that the driver must query to + * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available + * with 8k VI windows. + */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_VI_WINDOW_MODE_OFST 72 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_VI_WINDOW_MODE_LEN 1 +/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k. + * CTPIO is not mapped. 
+ */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_VI_WINDOW_MODE_8K 0x0 +/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_VI_WINDOW_MODE_16K 0x1 +/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_VI_WINDOW_MODE_64K 0x2 +/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1 +/* Number of buffers per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2 +/* Entry count in the MAC stats array, including the final GENERATION_END + * entry. For MAC stats DMA, drivers should allocate a buffer large enough to + * hold at least this many 64-bit stats values, if they wish to receive all + * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the + * stats array returned will be truncated. + */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAC_STATS_NUM_STATS_OFST 76 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAC_STATS_NUM_STATS_LEN 2 +/* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field + * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set. + */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_FILTER_ACTION_MARK_MAX_OFST 80 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_FILTER_ACTION_MARK_MAX_LEN 4 +/* On devices where the INIT_RXQ_WITH_BUFFER_SIZE flag (in + * GET_CAPABILITIES_OUT_V2) is set, drivers have to specify a buffer size when + * they create an RX queue. Due to hardware limitations, only a small number of + * different buffer sizes may be available concurrently. Nonzero entries in + * this array are the sizes of buffers which the system guarantees will be + * available for use. If the list is empty, there are no limitations on + * concurrent buffer sizes. + */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST 84 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_GUARANTEED_RX_BUFFER_SIZES_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM 16 +/* Third word of flags. Not present on older firmware (check the length). 
*/ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_FLAGS3_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_FLAGS3_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_WOL_ETHERWAKE_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_WOL_ETHERWAKE_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_WOL_ETHERWAKE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_EVEN_SPREADING_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_EVEN_SPREADING_LBN 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_EVEN_SPREADING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_SELECTABLE_TABLE_SIZE_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_SELECTABLE_TABLE_SIZE_LBN 2 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_SELECTABLE_TABLE_SIZE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAE_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAE_SUPPORTED_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAE_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_VDPA_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_VDPA_SUPPORTED_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_VDPA_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_UNSOL_EV_CREDIT_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_UNSOL_EV_CREDIT_SUPPORTED_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_UNSOL_EV_CREDIT_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_ENCAPSULATED_MCDI_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_ENCAPSULATED_MCDI_SUPPORTED_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_ENCAPSULATED_MCDI_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_EXTERNAL_MAE_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_EXTERNAL_MAE_SUPPORTED_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_EXTERNAL_MAE_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_STEER_ON_OUTER_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_STEER_ON_OUTER_SUPPORTED_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_STEER_ON_OUTER_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_DYNAMIC_MPORT_JOURNAL_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_DYNAMIC_MPORT_JOURNAL_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_DYNAMIC_MPORT_JOURNAL_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_CLIENT_CMD_VF_PROXY_OFST 148 +#define 
MC_CMD_GET_CAPABILITIES_V10_OUT_CLIENT_CMD_VF_PROXY_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_CLIENT_CMD_VF_PROXY_WIDTH 1 +/* These bits are reserved for communicating test-specific capabilities to + * host-side test software. All production drivers should treat this field as + * opaque. + */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TEST_RESERVED_OFST 152 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TEST_RESERVED_LEN 8 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TEST_RESERVED_LO_OFST 152 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TEST_RESERVED_LO_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TEST_RESERVED_LO_LBN 1216 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TEST_RESERVED_LO_WIDTH 32 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TEST_RESERVED_HI_OFST 156 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TEST_RESERVED_HI_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TEST_RESERVED_HI_LBN 1248 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TEST_RESERVED_HI_WIDTH 32 +/* The minimum size (in table entries) of indirection table to be allocated + * from the pool for an RSS context. Note that the table size used must be a + * power of 2. + */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_MIN_INDIRECTION_TABLE_SIZE_OFST 160 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_MIN_INDIRECTION_TABLE_SIZE_LEN 4 +/* The maximum size (in table entries) of indirection table to be allocated + * from the pool for an RSS context. Note that the table size used must be a + * power of 2. + */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_MAX_INDIRECTION_TABLE_SIZE_OFST 164 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_MAX_INDIRECTION_TABLE_SIZE_LEN 4 +/* The maximum number of queues that can be used by an RSS context in exclusive + * mode. In exclusive mode the context has a configurable indirection table and + * a configurable RSS key. + */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_MAX_INDIRECTION_QUEUES_OFST 168 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_MAX_INDIRECTION_QUEUES_LEN 4 +/* The maximum number of queues that can be used by an RSS context in even- + * spreading mode. In even-spreading mode the context has no indirection table + * but it does have a configurable RSS key. + */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_MAX_EVEN_SPREADING_QUEUES_OFST 172 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_MAX_EVEN_SPREADING_QUEUES_LEN 4 +/* The total number of RSS contexts supported. Note that the number of + * available contexts using indirection tables is also limited by the + * availability of indirection table space allocated from a common pool. + */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_NUM_CONTEXTS_OFST 176 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_NUM_CONTEXTS_LEN 4 +/* The total amount of indirection table space that can be shared between RSS + * contexts. + */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_TABLE_POOL_SIZE_OFST 180 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_TABLE_POOL_SIZE_LEN 4 +/* A bitmap of the queue sizes the device can provide, where bit N being set + * indicates that 2**N is a valid size. The device may be limited in the number + * of different queue sizes that can exist simultaneously, so a bit being set + * here does not guarantee that an attempt to create a queue of that size will + * succeed. + */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_SUPPORTED_QUEUE_SIZES_OFST 184 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_SUPPORTED_QUEUE_SIZES_LEN 4 +/* A bitmap of queue sizes that are always available, in the same format as + * SUPPORTED_QUEUE_SIZES. 
Attempting to create a queue with one of these sizes + * will never fail due to unavailability of the requested size. + */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_GUARANTEED_QUEUE_SIZES_OFST 188 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_GUARANTEED_QUEUE_SIZES_LEN 4 + + +/***********************************/ +/* MC_CMD_V2_EXTN + * Encapsulation for a v2 extended command + */ +#define MC_CMD_V2_EXTN 0x7f + +/* MC_CMD_V2_EXTN_IN msgrequest */ +#define MC_CMD_V2_EXTN_IN_LEN 4 +/* the extended command number */ +#define MC_CMD_V2_EXTN_IN_EXTENDED_CMD_LBN 0 +#define MC_CMD_V2_EXTN_IN_EXTENDED_CMD_WIDTH 15 +#define MC_CMD_V2_EXTN_IN_UNUSED_LBN 15 +#define MC_CMD_V2_EXTN_IN_UNUSED_WIDTH 1 +/* the actual length of the encapsulated command (which is not in the v1 + * header) + */ +#define MC_CMD_V2_EXTN_IN_ACTUAL_LEN_LBN 16 +#define MC_CMD_V2_EXTN_IN_ACTUAL_LEN_WIDTH 10 +#define MC_CMD_V2_EXTN_IN_UNUSED2_LBN 26 +#define MC_CMD_V2_EXTN_IN_UNUSED2_WIDTH 2 +/* Type of command/response */ +#define MC_CMD_V2_EXTN_IN_MESSAGE_TYPE_LBN 28 +#define MC_CMD_V2_EXTN_IN_MESSAGE_TYPE_WIDTH 4 +/* enum: MCDI command directed to or response originating from the MC. */ +#define MC_CMD_V2_EXTN_IN_MCDI_MESSAGE_TYPE_MC 0x0 +/* enum: MCDI command directed to a TSA controller. MCDI responses of this type + * are not defined. + */ +#define MC_CMD_V2_EXTN_IN_MCDI_MESSAGE_TYPE_TSA 0x1 +/* enum: MCDI command used for platform management. Typically, these commands + * are used for low-level operations directed at the platform as a whole (e.g. + * MMIO device enumeration) rather than individual functions and use a + * dedicated comms channel (e.g. RPmsg/IPI). May be handled by the same or + * different CPU as MCDI_MESSAGE_TYPE_MC. + */ +#define MC_CMD_V2_EXTN_IN_MCDI_MESSAGE_TYPE_PLATFORM 0x2 + + +/***********************************/ +/* MC_CMD_TCM_BUCKET_ALLOC + * Allocate a pacer bucket (for qau rp or a snapper test) + */ +#define MC_CMD_TCM_BUCKET_ALLOC 0xb2 +#undef MC_CMD_0xb2_PRIVILEGE_CTG + +#define MC_CMD_0xb2_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_TCM_BUCKET_ALLOC_IN msgrequest */ +#define MC_CMD_TCM_BUCKET_ALLOC_IN_LEN 0 + +/* MC_CMD_TCM_BUCKET_ALLOC_OUT msgresponse */ +#define MC_CMD_TCM_BUCKET_ALLOC_OUT_LEN 4 +/* the bucket id */ +#define MC_CMD_TCM_BUCKET_ALLOC_OUT_BUCKET_OFST 0 +#define MC_CMD_TCM_BUCKET_ALLOC_OUT_BUCKET_LEN 4 + + +/***********************************/ +/* MC_CMD_TCM_BUCKET_FREE + * Free a pacer bucket + */ +#define MC_CMD_TCM_BUCKET_FREE 0xb3 +#undef MC_CMD_0xb3_PRIVILEGE_CTG + +#define MC_CMD_0xb3_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_TCM_BUCKET_FREE_IN msgrequest */ +#define MC_CMD_TCM_BUCKET_FREE_IN_LEN 4 +/* the bucket id */ +#define MC_CMD_TCM_BUCKET_FREE_IN_BUCKET_OFST 0 +#define MC_CMD_TCM_BUCKET_FREE_IN_BUCKET_LEN 4 + +/* MC_CMD_TCM_BUCKET_FREE_OUT msgresponse */ +#define MC_CMD_TCM_BUCKET_FREE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_TCM_BUCKET_INIT + * Initialise pacer bucket with a given rate + */ +#define MC_CMD_TCM_BUCKET_INIT 0xb4 +#undef MC_CMD_0xb4_PRIVILEGE_CTG + +#define MC_CMD_0xb4_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_TCM_BUCKET_INIT_IN msgrequest */ +#define MC_CMD_TCM_BUCKET_INIT_IN_LEN 8 +/* the bucket id */ +#define MC_CMD_TCM_BUCKET_INIT_IN_BUCKET_OFST 0 +#define MC_CMD_TCM_BUCKET_INIT_IN_BUCKET_LEN 4 +/* the rate in mbps */ +#define MC_CMD_TCM_BUCKET_INIT_IN_RATE_OFST 4 +#define MC_CMD_TCM_BUCKET_INIT_IN_RATE_LEN 4 + +/* MC_CMD_TCM_BUCKET_INIT_EXT_IN msgrequest */ +#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_LEN 12 +/* the bucket id 
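+ * (a bucket handle previously returned by MC_CMD_TCM_BUCKET_ALLOC).
+ *
+ * A minimal allocate-then-rate-limit sketch, assuming the MCDI helpers
+ * declared in mcdi.h of this patch; efx and rate_mbps are illustrative:
+ *
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_TCM_BUCKET_ALLOC_OUT_LEN);
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_TCM_BUCKET_INIT_IN_LEN);
+ *	size_t outlen;
+ *	int rc;
+ *
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_TCM_BUCKET_ALLOC, NULL, 0,
+ *			  outbuf, sizeof(outbuf), &outlen);
+ *	if (rc)
+ *		return rc;
+ *	MCDI_SET_DWORD(inbuf, TCM_BUCKET_INIT_IN_BUCKET,
+ *		       MCDI_DWORD(outbuf, TCM_BUCKET_ALLOC_OUT_BUCKET));
+ *	MCDI_SET_DWORD(inbuf, TCM_BUCKET_INIT_IN_RATE, rate_mbps);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_TCM_BUCKET_INIT, inbuf, sizeof(inbuf),
+ *			  NULL, 0, NULL);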
*/ +#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_BUCKET_OFST 0 +#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_BUCKET_LEN 4 +/* the rate in mbps */ +#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_RATE_OFST 4 +#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_RATE_LEN 4 +/* the desired maximum fill level */ +#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_MAX_FILL_OFST 8 +#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_MAX_FILL_LEN 4 + +/* MC_CMD_TCM_BUCKET_INIT_OUT msgresponse */ +#define MC_CMD_TCM_BUCKET_INIT_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_TCM_TXQ_INIT + * Initialise txq in pacer with given options or set options + */ +#define MC_CMD_TCM_TXQ_INIT 0xb5 +#undef MC_CMD_0xb5_PRIVILEGE_CTG + +#define MC_CMD_0xb5_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_TCM_TXQ_INIT_IN msgrequest */ +#define MC_CMD_TCM_TXQ_INIT_IN_LEN 28 +/* the txq id */ +#define MC_CMD_TCM_TXQ_INIT_IN_QID_OFST 0 +#define MC_CMD_TCM_TXQ_INIT_IN_QID_LEN 4 +/* the static priority associated with the txq */ +#define MC_CMD_TCM_TXQ_INIT_IN_LABEL_OFST 4 +#define MC_CMD_TCM_TXQ_INIT_IN_LABEL_LEN 4 +/* bitmask of the priority queues this txq is inserted into when inserted. */ +#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_OFST 8 +#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_LEN 4 +#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_OFST 8 +#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_LBN 0 +#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_WIDTH 1 +#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_OFST 8 +#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_LBN 1 +#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_WIDTH 1 +#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_OFST 8 +#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_LBN 2 +#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_WIDTH 1 +/* the reaction point (RP) bucket */ +#define MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_OFST 12 +#define MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_LEN 4 +/* an already reserved bucket (typically set to bucket associated with outer + * vswitch) + */ +#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT1_OFST 16 +#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT1_LEN 4 +/* an already reserved bucket (typically set to bucket associated with inner + * vswitch) + */ +#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT2_OFST 20 +#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT2_LEN 4 +/* the min bucket (typically for ETS/minimum bandwidth) */ +#define MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_OFST 24 +#define MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_LEN 4 + +/* MC_CMD_TCM_TXQ_INIT_EXT_IN msgrequest */ +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LEN 32 +/* the txq id */ +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_QID_OFST 0 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_QID_LEN 4 +/* the static priority associated with the txq */ +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_NORMAL_OFST 4 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_NORMAL_LEN 4 +/* bitmask of the priority queues this txq is inserted into when inserted. 
*/ +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAGS_OFST 8 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAGS_LEN 4 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_OFST 8 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_LBN 0 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_WIDTH 1 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_OFST 8 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_LBN 1 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_WIDTH 1 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_OFST 8 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_LBN 2 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_WIDTH 1 +/* the reaction point (RP) bucket */ +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_RP_BKT_OFST 12 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_RP_BKT_LEN 4 +/* an already reserved bucket (typically set to bucket associated with outer + * vswitch) + */ +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT1_OFST 16 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT1_LEN 4 +/* an already reserved bucket (typically set to bucket associated with inner + * vswitch) + */ +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT2_OFST 20 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT2_LEN 4 +/* the min bucket (typically for ETS/minimum bandwidth) */ +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MIN_BKT_OFST 24 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MIN_BKT_LEN 4 +/* the static priority associated with the txq */ +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_GUARANTEED_OFST 28 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_GUARANTEED_LEN 4 + +/* MC_CMD_TCM_TXQ_INIT_OUT msgresponse */ +#define MC_CMD_TCM_TXQ_INIT_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_LINK_PIOBUF + * Link a push I/O buffer to a TxQ + */ +#define MC_CMD_LINK_PIOBUF 0x92 +#undef MC_CMD_0x92_PRIVILEGE_CTG + +#define MC_CMD_0x92_PRIVILEGE_CTG SRIOV_CTG_ONLOAD + +/* MC_CMD_LINK_PIOBUF_IN msgrequest */ +#define MC_CMD_LINK_PIOBUF_IN_LEN 8 +/* Handle for allocated push I/O buffer. */ +#define MC_CMD_LINK_PIOBUF_IN_PIOBUF_HANDLE_OFST 0 +#define MC_CMD_LINK_PIOBUF_IN_PIOBUF_HANDLE_LEN 4 +/* Function Local Instance (VI) number which has a TxQ allocated to it. */ +#define MC_CMD_LINK_PIOBUF_IN_TXQ_INSTANCE_OFST 4 +#define MC_CMD_LINK_PIOBUF_IN_TXQ_INSTANCE_LEN 4 + +/* MC_CMD_LINK_PIOBUF_OUT msgresponse */ +#define MC_CMD_LINK_PIOBUF_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_UNLINK_PIOBUF + * Unlink a push I/O buffer from a TxQ + */ +#define MC_CMD_UNLINK_PIOBUF 0x93 +#undef MC_CMD_0x93_PRIVILEGE_CTG + +#define MC_CMD_0x93_PRIVILEGE_CTG SRIOV_CTG_ONLOAD + +/* MC_CMD_UNLINK_PIOBUF_IN msgrequest */ +#define MC_CMD_UNLINK_PIOBUF_IN_LEN 4 +/* Function Local Instance (VI) number. */ +#define MC_CMD_UNLINK_PIOBUF_IN_TXQ_INSTANCE_OFST 0 +#define MC_CMD_UNLINK_PIOBUF_IN_TXQ_INSTANCE_LEN 4 + +/* MC_CMD_UNLINK_PIOBUF_OUT msgresponse */ +#define MC_CMD_UNLINK_PIOBUF_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_VSWITCH_ALLOC + * allocate and initialise a v-switch. + */ +#define MC_CMD_VSWITCH_ALLOC 0x94 +#undef MC_CMD_0x94_PRIVILEGE_CTG + +#define MC_CMD_0x94_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_VSWITCH_ALLOC_IN msgrequest */ +#define MC_CMD_VSWITCH_ALLOC_IN_LEN 16 +/* The port to connect to the v-switch's upstream port. */ +#define MC_CMD_VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0 +#define MC_CMD_VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4 +/* The type of v-switch to create. 
+ */
+#define MC_CMD_VSWITCH_ALLOC_IN_TYPE_OFST 4
+#define MC_CMD_VSWITCH_ALLOC_IN_TYPE_LEN 4
+/* enum: VLAN */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VLAN 0x1
+/* enum: VEB */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEB 0x2
+/* enum: VEPA (obsolete) */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEPA 0x3
+/* enum: MUX */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_MUX 0x4
+/* enum: Snapper specific; semantics TBD */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_TEST 0x5
+/* Flags controlling v-port creation */
+#define MC_CMD_VSWITCH_ALLOC_IN_FLAGS_OFST 8
+#define MC_CMD_VSWITCH_ALLOC_IN_FLAGS_LEN 4
+#define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_OFST 8
+#define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
+#define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
+/* The number of VLAN tags to allow for attached v-ports. For VLAN aggregators,
+ * this must be one or greater, and the attached v-ports must have exactly this
+ * number of tags. For other v-switch types, this must be zero or greater, and
+ * is an upper limit on the number of VLAN tags for attached v-ports. An error
+ * will be returned if existing configuration means we can't support attached
+ * v-ports with this number of tags.
+ */
+#define MC_CMD_VSWITCH_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
+#define MC_CMD_VSWITCH_ALLOC_IN_NUM_VLAN_TAGS_LEN 4
+
+/* MC_CMD_VSWITCH_ALLOC_OUT msgresponse */
+#define MC_CMD_VSWITCH_ALLOC_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VSWITCH_FREE
+ * de-allocate a v-switch.
+ */
+#define MC_CMD_VSWITCH_FREE 0x95
+#undef MC_CMD_0x95_PRIVILEGE_CTG
+
+#define MC_CMD_0x95_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VSWITCH_FREE_IN msgrequest */
+#define MC_CMD_VSWITCH_FREE_IN_LEN 4
+/* The port to which the v-switch is connected. */
+#define MC_CMD_VSWITCH_FREE_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VSWITCH_FREE_IN_UPSTREAM_PORT_ID_LEN 4
+
+/* MC_CMD_VSWITCH_FREE_OUT msgresponse */
+#define MC_CMD_VSWITCH_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VSWITCH_QUERY
+ * read some config of v-switch. For now this command is an empty placeholder.
+ * It may be used to check if a v-switch is connected to a given EVB port (if
+ * not, then the command returns ENOENT).
+ */
+#define MC_CMD_VSWITCH_QUERY 0x63
+#undef MC_CMD_0x63_PRIVILEGE_CTG
+
+#define MC_CMD_0x63_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VSWITCH_QUERY_IN msgrequest */
+#define MC_CMD_VSWITCH_QUERY_IN_LEN 4
+/* The port to which the v-switch is connected. */
+#define MC_CMD_VSWITCH_QUERY_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VSWITCH_QUERY_IN_UPSTREAM_PORT_ID_LEN 4
+
+/* MC_CMD_VSWITCH_QUERY_OUT msgresponse */
+#define MC_CMD_VSWITCH_QUERY_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VPORT_ALLOC
+ * allocate a v-port.
+ */
+#define MC_CMD_VPORT_ALLOC 0x96
+#undef MC_CMD_0x96_PRIVILEGE_CTG
+
+#define MC_CMD_0x96_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_ALLOC_IN msgrequest */
+#define MC_CMD_VPORT_ALLOC_IN_LEN 20
+/* The port to which the v-switch is connected. */
+#define MC_CMD_VPORT_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VPORT_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
+/* The type of the new v-port.
+ */
+#define MC_CMD_VPORT_ALLOC_IN_TYPE_OFST 4
+#define MC_CMD_VPORT_ALLOC_IN_TYPE_LEN 4
+/* enum: VLAN (obsolete) */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VLAN 0x1
+/* enum: VEB (obsolete) */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VEB 0x2
+/* enum: VEPA (obsolete) */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VEPA 0x3
+/* enum: A normal v-port receives packets which match a specified MAC and/or
+ * VLAN.
+ */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL 0x4
+/* enum: An expansion v-port receives traffic which does not match any other
+ * v-port.
+ */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_EXPANSION 0x5
+/* enum: A test v-port receives packets which match any filters installed by
+ * its downstream components.
+ */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_TEST 0x6
+/* Flags controlling v-port creation */
+#define MC_CMD_VPORT_ALLOC_IN_FLAGS_OFST 8
+#define MC_CMD_VPORT_ALLOC_IN_FLAGS_LEN 4
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_OFST 8
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT_OFST 8
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT_LBN 1
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT_WIDTH 1
+/* The number of VLAN tags to insert/remove. An error will be returned if
+ * incompatible with the number of VLAN tags specified for the upstream
+ * v-switch.
+ */
+#define MC_CMD_VPORT_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
+#define MC_CMD_VPORT_ALLOC_IN_NUM_VLAN_TAGS_LEN 4
+/* The actual VLAN tags to insert/remove */
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_OFST 16
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_LEN 4
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_OFST 16
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_LBN 0
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_WIDTH 16
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_OFST 16
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_LBN 16
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_WIDTH 16
+
+/* MC_CMD_VPORT_ALLOC_OUT msgresponse */
+#define MC_CMD_VPORT_ALLOC_OUT_LEN 4
+/* The handle of the new v-port */
+#define MC_CMD_VPORT_ALLOC_OUT_VPORT_ID_OFST 0
+#define MC_CMD_VPORT_ALLOC_OUT_VPORT_ID_LEN 4
+
+
+/***********************************/
+/* MC_CMD_VPORT_FREE
+ * de-allocate a v-port.
+ */
+#define MC_CMD_VPORT_FREE 0x97
+#undef MC_CMD_0x97_PRIVILEGE_CTG
+
+#define MC_CMD_0x97_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_FREE_IN msgrequest */
+#define MC_CMD_VPORT_FREE_IN_LEN 4
+/* The handle of the v-port */
+#define MC_CMD_VPORT_FREE_IN_VPORT_ID_OFST 0
+#define MC_CMD_VPORT_FREE_IN_VPORT_ID_LEN 4
+
+/* MC_CMD_VPORT_FREE_OUT msgresponse */
+#define MC_CMD_VPORT_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VADAPTOR_ALLOC
+ * allocate a v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_ALLOC 0x98
+#undef MC_CMD_0x98_PRIVILEGE_CTG
+
+#define MC_CMD_0x98_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VADAPTOR_ALLOC_IN msgrequest */
+#define MC_CMD_VADAPTOR_ALLOC_IN_LEN 30
+/* The port to connect to the v-adaptor's port.
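+ *
+ * A minimal allocation sketch, assuming the MCDI helpers declared in
+ * mcdi.h of this patch; efx and port_id are illustrative, and the
+ * remaining fields are left at zero (no VLAN tags, MAC derived
+ * automatically per the AUTO_MAC enum below):
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_ALLOC_IN_LEN);
+ *
+ *	MCDI_SET_DWORD(inbuf, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
+ *	return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_ALLOC, inbuf, sizeof(inbuf),
+ *			    NULL, 0, NULL);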
*/ +#define MC_CMD_VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0 +#define MC_CMD_VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4 +/* Flags controlling v-adaptor creation */ +#define MC_CMD_VADAPTOR_ALLOC_IN_FLAGS_OFST 8 +#define MC_CMD_VADAPTOR_ALLOC_IN_FLAGS_LEN 4 +#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_OFST 8 +#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_LBN 0 +#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_WIDTH 1 +#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 8 +#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 1 +#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 +/* The number of VLAN tags to strip on receive */ +#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLANS_OFST 12 +#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLANS_LEN 4 +/* The number of VLAN tags to transparently insert/remove. */ +#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLAN_TAGS_OFST 16 +#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLAN_TAGS_LEN 4 +/* The actual VLAN tags to insert/remove */ +#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAGS_OFST 20 +#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAGS_LEN 4 +#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_OFST 20 +#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_LBN 0 +#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_WIDTH 16 +#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_1_OFST 20 +#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_1_LBN 16 +#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_1_WIDTH 16 +/* The MAC address to assign to this v-adaptor */ +#define MC_CMD_VADAPTOR_ALLOC_IN_MACADDR_OFST 24 +#define MC_CMD_VADAPTOR_ALLOC_IN_MACADDR_LEN 6 +/* enum: Derive the MAC address from the upstream port */ +#define MC_CMD_VADAPTOR_ALLOC_IN_AUTO_MAC 0x0 + +/* MC_CMD_VADAPTOR_ALLOC_OUT msgresponse */ +#define MC_CMD_VADAPTOR_ALLOC_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_VADAPTOR_FREE + * de-allocate a v-adaptor. + */ +#define MC_CMD_VADAPTOR_FREE 0x99 +#undef MC_CMD_0x99_PRIVILEGE_CTG + +#define MC_CMD_0x99_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_VADAPTOR_FREE_IN msgrequest */ +#define MC_CMD_VADAPTOR_FREE_IN_LEN 4 +/* The port to which the v-adaptor is connected. */ +#define MC_CMD_VADAPTOR_FREE_IN_UPSTREAM_PORT_ID_OFST 0 +#define MC_CMD_VADAPTOR_FREE_IN_UPSTREAM_PORT_ID_LEN 4 + +/* MC_CMD_VADAPTOR_FREE_OUT msgresponse */ +#define MC_CMD_VADAPTOR_FREE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_VADAPTOR_SET_MAC + * assign a new MAC address to a v-adaptor. + */ +#define MC_CMD_VADAPTOR_SET_MAC 0x5d +#undef MC_CMD_0x5d_PRIVILEGE_CTG + +#define MC_CMD_0x5d_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_VADAPTOR_SET_MAC_IN msgrequest */ +#define MC_CMD_VADAPTOR_SET_MAC_IN_LEN 10 +/* The port to which the v-adaptor is connected. */ +#define MC_CMD_VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID_OFST 0 +#define MC_CMD_VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID_LEN 4 +/* The new MAC address to assign to this v-adaptor */ +#define MC_CMD_VADAPTOR_SET_MAC_IN_MACADDR_OFST 4 +#define MC_CMD_VADAPTOR_SET_MAC_IN_MACADDR_LEN 6 + +/* MC_CMD_VADAPTOR_SET_MAC_OUT msgresponse */ +#define MC_CMD_VADAPTOR_SET_MAC_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_VADAPTOR_GET_MAC + * read the MAC address assigned to a v-adaptor. + */ +#define MC_CMD_VADAPTOR_GET_MAC 0x5e +#undef MC_CMD_0x5e_PRIVILEGE_CTG + +#define MC_CMD_0x5e_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_VADAPTOR_GET_MAC_IN msgrequest */ +#define MC_CMD_VADAPTOR_GET_MAC_IN_LEN 4 +/* The port to which the v-adaptor is connected. 
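+ *
+ * A minimal read sketch, assuming the MCDI helpers declared in mcdi.h of
+ * this patch; efx, port_id and mac are illustrative:
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_GET_MAC_IN_LEN);
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_VADAPTOR_GET_MAC_OUT_LEN);
+ *	size_t outlen;
+ *	int rc;
+ *
+ *	MCDI_SET_DWORD(inbuf, VADAPTOR_GET_MAC_IN_UPSTREAM_PORT_ID, port_id);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_GET_MAC, inbuf, sizeof(inbuf),
+ *			  outbuf, sizeof(outbuf), &outlen);
+ *	if (!rc && outlen >= MC_CMD_VADAPTOR_GET_MAC_OUT_LEN)
+ *		ether_addr_copy(mac,
+ *				MCDI_PTR(outbuf, VADAPTOR_GET_MAC_OUT_MACADDR));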
*/ +#define MC_CMD_VADAPTOR_GET_MAC_IN_UPSTREAM_PORT_ID_OFST 0 +#define MC_CMD_VADAPTOR_GET_MAC_IN_UPSTREAM_PORT_ID_LEN 4 + +/* MC_CMD_VADAPTOR_GET_MAC_OUT msgresponse */ +#define MC_CMD_VADAPTOR_GET_MAC_OUT_LEN 6 +/* The MAC address assigned to this v-adaptor */ +#define MC_CMD_VADAPTOR_GET_MAC_OUT_MACADDR_OFST 0 +#define MC_CMD_VADAPTOR_GET_MAC_OUT_MACADDR_LEN 6 + + +/***********************************/ +/* MC_CMD_VADAPTOR_QUERY + * read some config of v-adaptor. + */ +#define MC_CMD_VADAPTOR_QUERY 0x61 +#undef MC_CMD_0x61_PRIVILEGE_CTG + +#define MC_CMD_0x61_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_VADAPTOR_QUERY_IN msgrequest */ +#define MC_CMD_VADAPTOR_QUERY_IN_LEN 4 +/* The port to which the v-adaptor is connected. */ +#define MC_CMD_VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID_OFST 0 +#define MC_CMD_VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID_LEN 4 + +/* MC_CMD_VADAPTOR_QUERY_OUT msgresponse */ +#define MC_CMD_VADAPTOR_QUERY_OUT_LEN 12 +/* The EVB port flags as defined at MC_CMD_VPORT_ALLOC. */ +#define MC_CMD_VADAPTOR_QUERY_OUT_PORT_FLAGS_OFST 0 +#define MC_CMD_VADAPTOR_QUERY_OUT_PORT_FLAGS_LEN 4 +/* The v-adaptor flags as defined at MC_CMD_VADAPTOR_ALLOC. */ +#define MC_CMD_VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS_OFST 4 +#define MC_CMD_VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS_LEN 4 +/* The number of VLAN tags that may still be added */ +#define MC_CMD_VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_OFST 8 +#define MC_CMD_VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_LEN 4 + + +/***********************************/ +/* MC_CMD_EVB_PORT_ASSIGN + * assign a port to a PCI function. + */ +#define MC_CMD_EVB_PORT_ASSIGN 0x9a +#undef MC_CMD_0x9a_PRIVILEGE_CTG + +#define MC_CMD_0x9a_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_EVB_PORT_ASSIGN_IN msgrequest */ +#define MC_CMD_EVB_PORT_ASSIGN_IN_LEN 8 +/* The port to assign. */ +#define MC_CMD_EVB_PORT_ASSIGN_IN_PORT_ID_OFST 0 +#define MC_CMD_EVB_PORT_ASSIGN_IN_PORT_ID_LEN 4 +/* The target function to modify. */ +#define MC_CMD_EVB_PORT_ASSIGN_IN_FUNCTION_OFST 4 +#define MC_CMD_EVB_PORT_ASSIGN_IN_FUNCTION_LEN 4 +#define MC_CMD_EVB_PORT_ASSIGN_IN_PF_OFST 4 +#define MC_CMD_EVB_PORT_ASSIGN_IN_PF_LBN 0 +#define MC_CMD_EVB_PORT_ASSIGN_IN_PF_WIDTH 16 +#define MC_CMD_EVB_PORT_ASSIGN_IN_VF_OFST 4 +#define MC_CMD_EVB_PORT_ASSIGN_IN_VF_LBN 16 +#define MC_CMD_EVB_PORT_ASSIGN_IN_VF_WIDTH 16 + +/* MC_CMD_EVB_PORT_ASSIGN_OUT msgresponse */ +#define MC_CMD_EVB_PORT_ASSIGN_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_RDWR_A64_REGIONS + * Assign the 64 bit region addresses. + */ +#define MC_CMD_RDWR_A64_REGIONS 0x9b +#undef MC_CMD_0x9b_PRIVILEGE_CTG + +#define MC_CMD_0x9b_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_RDWR_A64_REGIONS_IN msgrequest */ +#define MC_CMD_RDWR_A64_REGIONS_IN_LEN 17 +#define MC_CMD_RDWR_A64_REGIONS_IN_REGION0_OFST 0 +#define MC_CMD_RDWR_A64_REGIONS_IN_REGION0_LEN 4 +#define MC_CMD_RDWR_A64_REGIONS_IN_REGION1_OFST 4 +#define MC_CMD_RDWR_A64_REGIONS_IN_REGION1_LEN 4 +#define MC_CMD_RDWR_A64_REGIONS_IN_REGION2_OFST 8 +#define MC_CMD_RDWR_A64_REGIONS_IN_REGION2_LEN 4 +#define MC_CMD_RDWR_A64_REGIONS_IN_REGION3_OFST 12 +#define MC_CMD_RDWR_A64_REGIONS_IN_REGION3_LEN 4 +/* Write enable bits 0-3, set to write, clear to read. 
+ */
+#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_LBN 128
+#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_WIDTH 4
+#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_BYTE_OFST 16
+#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_BYTE_LEN 1
+
+/* MC_CMD_RDWR_A64_REGIONS_OUT msgresponse: This data is always included
+ * regardless of the state of the write bits in the request.
+ */
+#define MC_CMD_RDWR_A64_REGIONS_OUT_LEN 16
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION0_OFST 0
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION0_LEN 4
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION1_OFST 4
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION1_LEN 4
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION2_OFST 8
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION2_LEN 4
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION3_OFST 12
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION3_LEN 4
+
+
+/***********************************/
+/* MC_CMD_ONLOAD_STACK_ALLOC
+ * Allocate an Onload stack ID.
+ */
+#define MC_CMD_ONLOAD_STACK_ALLOC 0x9c
+#undef MC_CMD_0x9c_PRIVILEGE_CTG
+
+#define MC_CMD_0x9c_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_ONLOAD_STACK_ALLOC_IN msgrequest */
+#define MC_CMD_ONLOAD_STACK_ALLOC_IN_LEN 4
+/* The handle of the owning upstream port */
+#define MC_CMD_ONLOAD_STACK_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_ONLOAD_STACK_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
+
+/* MC_CMD_ONLOAD_STACK_ALLOC_OUT msgresponse */
+#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_LEN 4
+/* The handle of the new Onload stack */
+#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_ONLOAD_STACK_ID_OFST 0
+#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_ONLOAD_STACK_ID_LEN 4
+
+
+/***********************************/
+/* MC_CMD_ONLOAD_STACK_FREE
+ * Free an Onload stack ID.
+ */
+#define MC_CMD_ONLOAD_STACK_FREE 0x9d
+#undef MC_CMD_0x9d_PRIVILEGE_CTG
+
+#define MC_CMD_0x9d_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_ONLOAD_STACK_FREE_IN msgrequest */
+#define MC_CMD_ONLOAD_STACK_FREE_IN_LEN 4
+/* The handle of the Onload stack */
+#define MC_CMD_ONLOAD_STACK_FREE_IN_ONLOAD_STACK_ID_OFST 0
+#define MC_CMD_ONLOAD_STACK_FREE_IN_ONLOAD_STACK_ID_LEN 4
+
+/* MC_CMD_ONLOAD_STACK_FREE_OUT msgresponse */
+#define MC_CMD_ONLOAD_STACK_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_ALLOC
+ * Allocate an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC 0x9e
+#undef MC_CMD_0x9e_PRIVILEGE_CTG
+
+#define MC_CMD_0x9e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_ALLOC_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN 12
+/* The handle of the owning upstream port */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
+/* The type of context to allocate */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_OFST 4
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_LEN 4
+/* enum: Allocate a context for exclusive use. The key and indirection table
+ * must be explicitly configured.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE 0x0
+/* enum: Allocate a context for shared use; this will spread across a range of
+ * queues, but the key and indirection table are pre-configured and may not be
+ * changed. For this mode, NUM_QUEUES must be 2, 4, 8, 16, 32 or 64.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED 0x1
+/* enum: Allocate a context to spread evenly across an arbitrary number of
+ * queues. No indirection table space is allocated for this context. (EF100 and
+ * later)
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EVEN_SPREADING 0x2
+/* Number of queues spanned by this context. For exclusive contexts this must
+ * be in the range 1 to RSS_MAX_INDIRECTION_QUEUES, where
+ * RSS_MAX_INDIRECTION_QUEUES is queried from MC_CMD_GET_CAPABILITIES_V9 or if
+ * V9 is not supported then RSS_MAX_INDIRECTION_QUEUES is 64. Valid entries in
+ * the indirection table will be in the range 0 to NUM_QUEUES-1. For even-
+ * spreading contexts this must be in the range 1 to
+ * RSS_MAX_EVEN_SPREADING_QUEUES as queried from MC_CMD_GET_CAPABILITIES. Note
+ * that specifying NUM_QUEUES = 1 will not perform any spreading but may still
+ * be useful as a way of obtaining the Toeplitz hash.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_NUM_QUEUES_OFST 8
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_NUM_QUEUES_LEN 4
+
+/* MC_CMD_RSS_CONTEXT_ALLOC_V2_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_LEN 16
+/* The handle of the owning upstream port */
+#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_UPSTREAM_PORT_ID_LEN 4
+/* The type of context to allocate */
+#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_TYPE_OFST 4
+#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_TYPE_LEN 4
+/* enum: Allocate a context for exclusive use. The key and indirection table
+ * must be explicitly configured.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_TYPE_EXCLUSIVE 0x0
+/* enum: Allocate a context for shared use; this will spread across a range of
+ * queues, but the key and indirection table are pre-configured and may not be
+ * changed. For this mode, NUM_QUEUES must be 2, 4, 8, 16, 32 or 64.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_TYPE_SHARED 0x1
+/* enum: Allocate a context to spread evenly across an arbitrary number of
+ * queues. No indirection table space is allocated for this context. (EF100 and
+ * later)
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_TYPE_EVEN_SPREADING 0x2
+/* Number of queues spanned by this context. For exclusive contexts this must
+ * be in the range 1 to RSS_MAX_INDIRECTION_QUEUES, where
+ * RSS_MAX_INDIRECTION_QUEUES is queried from MC_CMD_GET_CAPABILITIES_V9 or if
+ * V9 is not supported then RSS_MAX_INDIRECTION_QUEUES is 64. Valid entries in
+ * the indirection table will be in the range 0 to NUM_QUEUES-1. For even-
+ * spreading contexts this must be in the range 1 to
+ * RSS_MAX_EVEN_SPREADING_QUEUES as queried from MC_CMD_GET_CAPABILITIES. Note
+ * that specifying NUM_QUEUES = 1 will not perform any spreading but may still
+ * be useful as a way of obtaining the Toeplitz hash.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_NUM_QUEUES_OFST 8
+#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_NUM_QUEUES_LEN 4
+/* Size of indirection table to be allocated to this context from the pool.
+ * Must be a power of 2. The minimum and maximum table size can be queried
+ * using MC_CMD_GET_CAPABILITIES_V9. If there is not enough space remaining in
+ * the common pool to allocate the requested table size, due to allocating
+ * table space to other RSS contexts, then the command will fail with
+ * MC_CMD_ERR_ENOSPC.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_INDIRECTION_TABLE_SIZE_OFST 12
+#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_INDIRECTION_TABLE_SIZE_LEN 4
+
+/* MC_CMD_RSS_CONTEXT_ALLOC_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN 4
+/* The handle of the new RSS context. This should be considered opaque to the
+ * host, although a value of 0xFFFFFFFF is guaranteed never to be a valid
+ * handle.
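+ *
+ * A minimal exclusive-mode allocation sketch, assuming the MCDI helpers
+ * declared in mcdi.h of this patch; efx, port_id, n_queues and ctx_id
+ * are illustrative:
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN);
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN);
+ *	size_t outlen;
+ *	int rc;
+ *
+ *	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
+ *	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE,
+ *		       MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE);
+ *	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, n_queues);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_ALLOC, inbuf, sizeof(inbuf),
+ *			  outbuf, sizeof(outbuf), &outlen);
+ *	if (!rc && outlen >= MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN)
+ *		ctx_id = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID);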
+ */ +#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_LEN 4 +/* enum: guaranteed invalid RSS context handle value */ +#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_INVALID 0xffffffff + + +/***********************************/ +/* MC_CMD_RSS_CONTEXT_FREE + * Free an RSS context. + */ +#define MC_CMD_RSS_CONTEXT_FREE 0x9f +#undef MC_CMD_0x9f_PRIVILEGE_CTG + +#define MC_CMD_0x9f_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_RSS_CONTEXT_FREE_IN msgrequest */ +#define MC_CMD_RSS_CONTEXT_FREE_IN_LEN 4 +/* The handle of the RSS context */ +#define MC_CMD_RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID_LEN 4 + +/* MC_CMD_RSS_CONTEXT_FREE_OUT msgresponse */ +#define MC_CMD_RSS_CONTEXT_FREE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_RSS_CONTEXT_SET_KEY + * Set the Toeplitz hash key for an RSS context. + */ +#define MC_CMD_RSS_CONTEXT_SET_KEY 0xa0 +#undef MC_CMD_0xa0_PRIVILEGE_CTG + +#define MC_CMD_0xa0_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_RSS_CONTEXT_SET_KEY_IN msgrequest */ +#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN 44 +/* The handle of the RSS context */ +#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID_LEN 4 +/* The 40-byte Toeplitz hash key (TBD endianness issues?) */ +#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_OFST 4 +#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN 40 + +/* MC_CMD_RSS_CONTEXT_SET_KEY_OUT msgresponse */ +#define MC_CMD_RSS_CONTEXT_SET_KEY_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_RSS_CONTEXT_GET_KEY + * Get the Toeplitz hash key for an RSS context. + */ +#define MC_CMD_RSS_CONTEXT_GET_KEY 0xa1 +#undef MC_CMD_0xa1_PRIVILEGE_CTG + +#define MC_CMD_0xa1_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_RSS_CONTEXT_GET_KEY_IN msgrequest */ +#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN 4 +/* The handle of the RSS context */ +#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID_LEN 4 + +/* MC_CMD_RSS_CONTEXT_GET_KEY_OUT msgresponse */ +#define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN 44 +/* The 40-byte Toeplitz hash key (TBD endianness issues?) */ +#define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY_OFST 4 +#define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY_LEN 40 + + +/***********************************/ +/* MC_CMD_RSS_CONTEXT_SET_TABLE + * Set the indirection table for an RSS context. This command should only be + * used with indirection tables containing 128 entries, which is the default + * when the RSS context is allocated without specifying a table size. + */ +#define MC_CMD_RSS_CONTEXT_SET_TABLE 0xa2 +#undef MC_CMD_0xa2_PRIVILEGE_CTG + +#define MC_CMD_0xa2_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_RSS_CONTEXT_SET_TABLE_IN msgrequest */ +#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN 132 +/* The handle of the RSS context */ +#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID_LEN 4 +/* The 128-byte indirection table (1 byte per entry) */ +#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_OFST 4 +#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN 128 + +/* MC_CMD_RSS_CONTEXT_SET_TABLE_OUT msgresponse */ +#define MC_CMD_RSS_CONTEXT_SET_TABLE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_RSS_CONTEXT_GET_TABLE + * Get the indirection table for an RSS context. 
This command should only be + * used with indirection tables containing 128 entries, which is the default + * when the RSS context is allocated without specifying a table size. + */ +#define MC_CMD_RSS_CONTEXT_GET_TABLE 0xa3 +#undef MC_CMD_0xa3_PRIVILEGE_CTG + +#define MC_CMD_0xa3_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_RSS_CONTEXT_GET_TABLE_IN msgrequest */ +#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN 4 +/* The handle of the RSS context */ +#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID_LEN 4 + +/* MC_CMD_RSS_CONTEXT_GET_TABLE_OUT msgresponse */ +#define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN 132 +/* The 128-byte indirection table (1 byte per entry) */ +#define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_OFST 4 +#define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_LEN 128 + + +/***********************************/ +/* MC_CMD_RSS_CONTEXT_WRITE_TABLE + * Write a portion of a selectable-size indirection table for an RSS context. + * This command must be used instead of MC_CMD_RSS_CONTEXT_SET_TABLE if the + * RSS_SELECTABLE_TABLE_SIZE bit is set in MC_CMD_GET_CAPABILITIES. + */ +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE 0x13e +#undef MC_CMD_0x13e_PRIVILEGE_CTG + +#define MC_CMD_0x13e_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN msgrequest */ +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_LENMIN 8 +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_LENMAX 252 +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_LENMAX_MCDI2 1020 +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_LEN(num) (4+4*(num)) +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_NUM(len) (((len)-4)/4) +/* The handle of the RSS context */ +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_RSS_CONTEXT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_RSS_CONTEXT_ID_LEN 4 +/* An array of index-value pairs to be written to the table. Structure is + * MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY. + */ +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_OFST 4 +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_LEN 4 +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_MINNUM 1 +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_MAXNUM 62 +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_MAXNUM_MCDI2 254 + +/* MC_CMD_RSS_CONTEXT_WRITE_TABLE_OUT msgresponse */ +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_OUT_LEN 0 + +/* MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY structuredef */ +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_LEN 4 +/* The index of the table entry to be written. */ +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_INDEX_OFST 0 +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_INDEX_LEN 2 +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_INDEX_LBN 0 +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_INDEX_WIDTH 16 +/* The value to write into the table entry. */ +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_VALUE_OFST 2 +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_VALUE_LEN 2 +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_VALUE_LBN 16 +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_VALUE_WIDTH 16 + + +/***********************************/ +/* MC_CMD_RSS_CONTEXT_READ_TABLE + * Read a portion of a selectable-size indirection table for an RSS context. + * This command must be used instead of MC_CMD_RSS_CONTEXT_GET_TABLE if the + * RSS_SELECTABLE_TABLE_SIZE bit is set in MC_CMD_GET_CAPABILITIES. 
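+ *
+ * A minimal sketch reading the first n entries (n a compile-time
+ * constant), assuming the MCDI helpers declared in mcdi.h of this patch
+ * and that MCDI payloads are little-endian; efx, ctx_id and n are
+ * illustrative:
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_READ_TABLE_IN_LEN(n));
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_LEN(n));
+ *	__le16 *indices = (__le16 *)MCDI_PTR(inbuf,
+ *					     RSS_CONTEXT_READ_TABLE_IN_INDICES);
+ *	size_t outlen;
+ *	unsigned int i;
+ *	int rc;
+ *
+ *	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_READ_TABLE_IN_RSS_CONTEXT_ID, ctx_id);
+ *	for (i = 0; i < n; i++)
+ *		indices[i] = cpu_to_le16(i);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_READ_TABLE, inbuf,
+ *			  MC_CMD_RSS_CONTEXT_READ_TABLE_IN_LEN(n),
+ *			  outbuf, sizeof(outbuf), &outlen);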
+ */ +#define MC_CMD_RSS_CONTEXT_READ_TABLE 0x13f +#undef MC_CMD_0x13f_PRIVILEGE_CTG + +#define MC_CMD_0x13f_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_RSS_CONTEXT_READ_TABLE_IN msgrequest */ +#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_LENMIN 6 +#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_LENMAX 252 +#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_LENMAX_MCDI2 1020 +#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_LEN(num) (4+2*(num)) +#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_NUM(len) (((len)-4)/2) +/* The handle of the RSS context */ +#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_RSS_CONTEXT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_RSS_CONTEXT_ID_LEN 4 +/* An array containing the indices of the entries to be read. */ +#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_OFST 4 +#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_LEN 2 +#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_MINNUM 1 +#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_MAXNUM 124 +#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_MAXNUM_MCDI2 508 + +/* MC_CMD_RSS_CONTEXT_READ_TABLE_OUT msgresponse */ +#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_LENMIN 2 +#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_LENMAX 252 +#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_LEN(num) (0+2*(num)) +#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_NUM(len) (((len)-0)/2) +/* A buffer containing the requested entries read from the table. */ +#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_OFST 0 +#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_LEN 2 +#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_MINNUM 1 +#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_MAXNUM 126 +#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_MAXNUM_MCDI2 510 + + +/***********************************/ +/* MC_CMD_RSS_CONTEXT_SET_FLAGS + * Set various control flags for an RSS context. + */ +#define MC_CMD_RSS_CONTEXT_SET_FLAGS 0xe1 +#undef MC_CMD_0xe1_PRIVILEGE_CTG + +#define MC_CMD_0xe1_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_RSS_CONTEXT_SET_FLAGS_IN msgrequest */ +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN 8 +/* The handle of the RSS context */ +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_LEN 4 +/* Hash control flags. The _EN bits are always supported, but new modes are + * available when ADDITIONAL_RSS_MODES is reported by MC_CMD_GET_CAPABILITIES: + * in this case, the MODE fields may be set to non-zero values, and will take + * effect regardless of the settings of the _EN flags. See the RSS_MODE + * structure for the meaning of the mode bits. Drivers must check the + * capability before trying to set any _MODE fields, as older firmware will + * reject any attempt to set the FLAGS field to a value > 0xff with EINVAL. In + * the case where all the _MODE flags are zero, the _EN flags take effect, + * providing backward compatibility for existing drivers. (Setting all _MODE + * *and* all _EN flags to zero is valid, to disable RSS spreading for that + * particular packet type.) 
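+ *
+ * A minimal sketch using only the legacy _EN flags (all _MODE fields
+ * left at zero), assuming the MCDI helpers declared in mcdi.h of this
+ * patch (MCDI_POPULATE_DWORD_4 et al.); efx and ctx_id are illustrative:
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN);
+ *
+ *	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID, ctx_id);
+ *	MCDI_POPULATE_DWORD_4(inbuf, RSS_CONTEXT_SET_FLAGS_IN_FLAGS,
+ *			      RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN, 1,
+ *			      RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN, 1,
+ *			      RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN, 1,
+ *			      RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN, 1);
+ *	return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_FLAGS, inbuf,
+ *			    sizeof(inbuf), NULL, 0, NULL);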
+ */ +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_OFST 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_LEN 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_OFST 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_LBN 0 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_WIDTH 1 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_OFST 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_LBN 1 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_WIDTH 1 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_OFST 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_LBN 2 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_WIDTH 1 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_OFST 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_LBN 3 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_WIDTH 1 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RESERVED_OFST 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RESERVED_LBN 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RESERVED_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_OFST 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_LBN 8 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE_OFST 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE_LBN 12 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_OFST 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_LBN 16 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_OFST 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_LBN 20 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE_OFST 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE_LBN 24 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_OFST 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_LBN 28 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_WIDTH 4 + +/* MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT msgresponse */ +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_RSS_CONTEXT_GET_FLAGS + * Get various control flags for an RSS context. + */ +#define MC_CMD_RSS_CONTEXT_GET_FLAGS 0xe2 +#undef MC_CMD_0xe2_PRIVILEGE_CTG + +#define MC_CMD_0xe2_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_RSS_CONTEXT_GET_FLAGS_IN msgrequest */ +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_LEN 4 +/* The handle of the RSS context */ +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID_LEN 4 + +/* MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT msgresponse */ +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN 8 +/* Hash control flags. If all _MODE bits are zero (which will always be true + * for older firmware which does not report the ADDITIONAL_RSS_MODES + * capability), the _EN bits report the state. If any _MODE bits are non-zero + * (which will only be true when the firmware reports ADDITIONAL_RSS_MODES) + * then the _EN bits should be disregarded, although the _MODE flags are + * guaranteed to be consistent with the _EN flags for a freshly-allocated RSS + * context and in the case where the _EN flags were used in the SET. 
This + * provides backward compatibility: old drivers will not be attempting to + * derive any meaning from the _MODE bits (and can never set them to any value + * not representable by the _EN bits); new drivers can always determine the + * mode by looking only at the _MODE bits; the value returned by a GET can + * always be used for a SET regardless of old/new driver vs. old/new firmware. + */ +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_LEN 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_OFST 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN 0 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_WIDTH 1 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_OFST 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_LBN 1 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_WIDTH 1 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_OFST 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_LBN 2 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_WIDTH 1 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_OFST 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_LBN 3 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_WIDTH 1 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_RESERVED_OFST 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_RESERVED_LBN 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_RESERVED_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_OFST 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_LBN 8 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_OFST 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN 12 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_OFST 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_LBN 16 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_OFST 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_LBN 20 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_OFST 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN 24 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_OFST 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_LBN 28 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_WIDTH 4 + + +/***********************************/ +/* MC_CMD_DOT1P_MAPPING_ALLOC + * Allocate a .1p mapping. + */ +#define MC_CMD_DOT1P_MAPPING_ALLOC 0xa4 +#undef MC_CMD_0xa4_PRIVILEGE_CTG + +#define MC_CMD_0xa4_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_DOT1P_MAPPING_ALLOC_IN msgrequest */ +#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_LEN 8 +/* The handle of the owning upstream port */ +#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0 +#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4 +/* Number of queues spanned by this mapping, in the range 1-64; valid fixed + * offsets in the mapping table will be in the range 0 to NUM_QUEUES-1, and + * referenced RSS contexts must span no more than this number. 
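+ *
+ * A minimal allocation sketch, assuming the MCDI helpers declared in
+ * mcdi.h of this patch; efx and port_id are illustrative, and eight
+ * queues are requested (one per 802.1p priority):
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_DOT1P_MAPPING_ALLOC_IN_LEN);
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_DOT1P_MAPPING_ALLOC_OUT_LEN);
+ *	size_t outlen;
+ *	int rc;
+ *
+ *	MCDI_SET_DWORD(inbuf, DOT1P_MAPPING_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
+ *	MCDI_SET_DWORD(inbuf, DOT1P_MAPPING_ALLOC_IN_NUM_QUEUES, 8);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_DOT1P_MAPPING_ALLOC, inbuf, sizeof(inbuf),
+ *			  outbuf, sizeof(outbuf), &outlen);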
+ */ +#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_NUM_QUEUES_OFST 4 +#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_NUM_QUEUES_LEN 4 + +/* MC_CMD_DOT1P_MAPPING_ALLOC_OUT msgresponse */ +#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_LEN 4 +/* The handle of the new .1p mapping. This should be considered opaque to the + * host, although a value of 0xFFFFFFFF is guaranteed never to be a valid + * handle. + */ +#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_OFST 0 +#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_LEN 4 +/* enum: guaranteed invalid .1p mapping handle value */ +#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_INVALID 0xffffffff + + +/***********************************/ +/* MC_CMD_DOT1P_MAPPING_FREE + * Free a .1p mapping. + */ +#define MC_CMD_DOT1P_MAPPING_FREE 0xa5 +#undef MC_CMD_0xa5_PRIVILEGE_CTG + +#define MC_CMD_0xa5_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_DOT1P_MAPPING_FREE_IN msgrequest */ +#define MC_CMD_DOT1P_MAPPING_FREE_IN_LEN 4 +/* The handle of the .1p mapping */ +#define MC_CMD_DOT1P_MAPPING_FREE_IN_DOT1P_MAPPING_ID_OFST 0 +#define MC_CMD_DOT1P_MAPPING_FREE_IN_DOT1P_MAPPING_ID_LEN 4 + +/* MC_CMD_DOT1P_MAPPING_FREE_OUT msgresponse */ +#define MC_CMD_DOT1P_MAPPING_FREE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_DOT1P_MAPPING_SET_TABLE + * Set the mapping table for a .1p mapping. + */ +#define MC_CMD_DOT1P_MAPPING_SET_TABLE 0xa6 +#undef MC_CMD_0xa6_PRIVILEGE_CTG + +#define MC_CMD_0xa6_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_DOT1P_MAPPING_SET_TABLE_IN msgrequest */ +#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_LEN 36 +/* The handle of the .1p mapping */ +#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0 +#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_DOT1P_MAPPING_ID_LEN 4 +/* Per-priority mappings (1 32-bit word per entry - an offset or RSS context + * handle) + */ +#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_MAPPING_TABLE_OFST 4 +#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_MAPPING_TABLE_LEN 32 + +/* MC_CMD_DOT1P_MAPPING_SET_TABLE_OUT msgresponse */ +#define MC_CMD_DOT1P_MAPPING_SET_TABLE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_DOT1P_MAPPING_GET_TABLE + * Get the mapping table for a .1p mapping. + */ +#define MC_CMD_DOT1P_MAPPING_GET_TABLE 0xa7 +#undef MC_CMD_0xa7_PRIVILEGE_CTG + +#define MC_CMD_0xa7_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_DOT1P_MAPPING_GET_TABLE_IN msgrequest */ +#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_LEN 4 +/* The handle of the .1p mapping */ +#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0 +#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_DOT1P_MAPPING_ID_LEN 4 + +/* MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT msgresponse */ +#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_LEN 36 +/* Per-priority mappings (1 32-bit word per entry - an offset or RSS context + * handle) + */ +#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_MAPPING_TABLE_OFST 4 +#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_MAPPING_TABLE_LEN 32 + + +/***********************************/ +/* MC_CMD_GET_VECTOR_CFG + * Get Interrupt Vector config for this PF. + */ +#define MC_CMD_GET_VECTOR_CFG 0xbf +#undef MC_CMD_0xbf_PRIVILEGE_CTG + +#define MC_CMD_0xbf_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_VECTOR_CFG_IN msgrequest */ +#define MC_CMD_GET_VECTOR_CFG_IN_LEN 0 + +/* MC_CMD_GET_VECTOR_CFG_OUT msgresponse */ +#define MC_CMD_GET_VECTOR_CFG_OUT_LEN 12 +/* Base absolute interrupt vector number. 
 */
+#define MC_CMD_GET_VECTOR_CFG_OUT_VEC_BASE_OFST 0
+#define MC_CMD_GET_VECTOR_CFG_OUT_VEC_BASE_LEN 4
+/* Number of interrupt vectors allocated to this PF. */
+#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_PF_OFST 4
+#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_PF_LEN 4
+/* Number of interrupt vectors to allocate per VF. */
+#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_VF_OFST 8
+#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_VF_LEN 4
+
+
+/***********************************/
+/* MC_CMD_SET_VECTOR_CFG
+ * Set Interrupt Vector config for this PF.
+ */
+#define MC_CMD_SET_VECTOR_CFG 0xc0
+#undef MC_CMD_0xc0_PRIVILEGE_CTG
+
+#define MC_CMD_0xc0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SET_VECTOR_CFG_IN msgrequest */
+#define MC_CMD_SET_VECTOR_CFG_IN_LEN 12
+/* Base absolute interrupt vector number, or MC_CMD_RESOURCE_INSTANCE_ANY to
+ * let the system find a suitable base.
+ */
+#define MC_CMD_SET_VECTOR_CFG_IN_VEC_BASE_OFST 0
+#define MC_CMD_SET_VECTOR_CFG_IN_VEC_BASE_LEN 4
+/* Number of interrupt vectors allocated to this PF. */
+#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_PF_OFST 4
+#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_PF_LEN 4
+/* Number of interrupt vectors to allocate per VF. */
+#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_VF_OFST 8
+#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_VF_LEN 4
+
+/* MC_CMD_SET_VECTOR_CFG_OUT msgresponse */
+#define MC_CMD_SET_VECTOR_CFG_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VPORT_ADD_MAC_ADDRESS
+ * Add a MAC address to a v-port
+ */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS 0xa8
+#undef MC_CMD_0xa8_PRIVILEGE_CTG
+
+#define MC_CMD_0xa8_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_ADD_MAC_ADDRESS_IN msgrequest */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN 10
+/* The handle of the v-port */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID_OFST 0
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID_LEN 4
+/* MAC address to add */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_OFST 4
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_LEN 6
+
+/* MC_CMD_VPORT_ADD_MAC_ADDRESS_OUT msgresponse */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VPORT_DEL_MAC_ADDRESS
+ * Delete a MAC address from a v-port
+ */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS 0xa9
+#undef MC_CMD_0xa9_PRIVILEGE_CTG
+
+#define MC_CMD_0xa9_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_DEL_MAC_ADDRESS_IN msgrequest */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN 10
+/* The handle of the v-port */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID_OFST 0
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID_LEN 4
+/* MAC address to delete */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_MACADDR_OFST 4
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_MACADDR_LEN 6
+
+/* MC_CMD_VPORT_DEL_MAC_ADDRESS_OUT msgresponse */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VPORT_GET_MAC_ADDRESSES
+ * Get the MAC addresses assigned to a v-port
+ */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES 0xaa
+#undef MC_CMD_0xaa_PRIVILEGE_CTG
+
+#define MC_CMD_0xaa_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_GET_MAC_ADDRESSES_IN msgrequest */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN 4
+/* The handle of the v-port */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID_OFST 0
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID_LEN 4
+
+/* MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT msgresponse */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN 4
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX 250
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX_MCDI2 1018
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LEN(num) (4+6*(num))
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_NUM(len) (((len)-4)/6)
+/* The number of MAC addresses returned */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT_OFST 0
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT_LEN 4
+/* Array of MAC addresses */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_OFST 4
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_LEN 6
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_MINNUM 0
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_MAXNUM 41
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_MAXNUM_MCDI2 169
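+
+/* Usage sketch (illustrative, not part of the protocol definition): reading
+ * the MAC address list for a v-port with the driver's MCDI helpers from
+ * mcdi.h (MCDI_DECLARE_BUF/MCDI_SET_DWORD/MCDI_DWORD/efx_mcdi_rpc). The
+ * function name efx_example_vport_mac_count is hypothetical.
+ *
+ *	static int efx_example_vport_mac_count(struct efx_nic *efx, u32 vport_id)
+ *	{
+ *		MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN);
+ *		MCDI_DECLARE_BUF(outbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX);
+ *		size_t outlen;
+ *		int rc;
+ *
+ *		MCDI_SET_DWORD(inbuf, VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID, vport_id);
+ *		rc = efx_mcdi_rpc(efx, MC_CMD_VPORT_GET_MAC_ADDRESSES, inbuf,
+ *				  sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
+ *		if (rc)
+ *			return rc;
+ *		if (outlen < MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN)
+ *			return -EIO;
+ *		return MCDI_DWORD(outbuf, VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT);
+ *	}
+ */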
+
+
+/***********************************/
+/* MC_CMD_VPORT_RECONFIGURE
+ * Replace VLAN tags and/or MAC addresses of an existing v-port. If the v-port
+ * has already been passed to another function (v-port's user), then that
+ * function will be reset before applying the changes.
+ */
+#define MC_CMD_VPORT_RECONFIGURE 0xeb
+#undef MC_CMD_0xeb_PRIVILEGE_CTG
+
+#define MC_CMD_0xeb_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_RECONFIGURE_IN msgrequest */
+#define MC_CMD_VPORT_RECONFIGURE_IN_LEN 44
+/* The handle of the v-port */
+#define MC_CMD_VPORT_RECONFIGURE_IN_VPORT_ID_OFST 0
+#define MC_CMD_VPORT_RECONFIGURE_IN_VPORT_ID_LEN 4
+/* Flags requesting what should be changed. */
+#define MC_CMD_VPORT_RECONFIGURE_IN_FLAGS_OFST 4
+#define MC_CMD_VPORT_RECONFIGURE_IN_FLAGS_LEN 4
+#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_OFST 4
+#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_LBN 0
+#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_WIDTH 1
+#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_MACADDRS_OFST 4
+#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_MACADDRS_LBN 1
+#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_MACADDRS_WIDTH 1
+/* The number of VLAN tags to insert/remove. An error will be returned if
+ * incompatible with the number of VLAN tags specified for the upstream
+ * v-switch.
+ */
+#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_VLAN_TAGS_OFST 8
+#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_VLAN_TAGS_LEN 4
+/* The actual VLAN tags to insert/remove */
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAGS_OFST 12
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAGS_LEN 4
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_OFST 12
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_LBN 0
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_WIDTH 16
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_OFST 12
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_LBN 16
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_WIDTH 16
+/* The number of MAC addresses to add */
+#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_MACADDRS_OFST 16
+#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_MACADDRS_LEN 4
+/* MAC addresses to add */
+#define MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_OFST 20
+#define MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_LEN 6
+#define MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_NUM 4
+
+/* MC_CMD_VPORT_RECONFIGURE_OUT msgresponse */
+#define MC_CMD_VPORT_RECONFIGURE_OUT_LEN 4
+#define MC_CMD_VPORT_RECONFIGURE_OUT_FLAGS_OFST 0
+#define MC_CMD_VPORT_RECONFIGURE_OUT_FLAGS_LEN 4
+#define MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_OFST 0
+#define MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_LBN 0
+#define MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_WIDTH 1
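+
+/* Usage sketch (illustrative): replacing the single MAC address on a v-port
+ * and checking whether the v-port's user was reset, using the MCDI helpers
+ * from mcdi.h. The function name efx_example_vport_set_mac is hypothetical.
+ *
+ *	static int efx_example_vport_set_mac(struct efx_nic *efx, u32 vport_id,
+ *					     const u8 *mac)
+ *	{
+ *		MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_RECONFIGURE_IN_LEN);
+ *		MCDI_DECLARE_BUF(outbuf, MC_CMD_VPORT_RECONFIGURE_OUT_LEN);
+ *		size_t outlen;
+ *		int rc;
+ *
+ *		MCDI_SET_DWORD(inbuf, VPORT_RECONFIGURE_IN_VPORT_ID, vport_id);
+ *		MCDI_POPULATE_DWORD_1(inbuf, VPORT_RECONFIGURE_IN_FLAGS,
+ *				      VPORT_RECONFIGURE_IN_REPLACE_MACADDRS, 1);
+ *		MCDI_SET_DWORD(inbuf, VPORT_RECONFIGURE_IN_NUM_MACADDRS, 1);
+ *		ether_addr_copy(MCDI_PTR(inbuf, VPORT_RECONFIGURE_IN_MACADDRS), mac);
+ *		rc = efx_mcdi_rpc(efx, MC_CMD_VPORT_RECONFIGURE, inbuf, sizeof(inbuf),
+ *				  outbuf, sizeof(outbuf), &outlen);
+ *		if (rc)
+ *			return rc;
+ *		if (MCDI_DWORD(outbuf, VPORT_RECONFIGURE_OUT_FLAGS) &
+ *		    (1 << MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_LBN))
+ *			netif_dbg(efx, drv, efx->net_dev, "v-port user was reset\n");
+ *		return 0;
+ *	}
+ */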
+
+
+/***********************************/
+/* MC_CMD_EVB_PORT_QUERY
+ * Read configuration of a v-port.
+ */
+#define MC_CMD_EVB_PORT_QUERY 0x62
+#undef MC_CMD_0x62_PRIVILEGE_CTG
+
+#define MC_CMD_0x62_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_EVB_PORT_QUERY_IN msgrequest */
+#define MC_CMD_EVB_PORT_QUERY_IN_LEN 4
+/* The handle of the v-port */
+#define MC_CMD_EVB_PORT_QUERY_IN_PORT_ID_OFST 0
+#define MC_CMD_EVB_PORT_QUERY_IN_PORT_ID_LEN 4
+
+/* MC_CMD_EVB_PORT_QUERY_OUT msgresponse */
+#define MC_CMD_EVB_PORT_QUERY_OUT_LEN 8
+/* The EVB port flags as defined at MC_CMD_VPORT_ALLOC. */
+#define MC_CMD_EVB_PORT_QUERY_OUT_PORT_FLAGS_OFST 0
+#define MC_CMD_EVB_PORT_QUERY_OUT_PORT_FLAGS_LEN 4
+/* The number of VLAN tags that may be used on a v-adaptor connected to this
+ * EVB port.
+ */
+#define MC_CMD_EVB_PORT_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_OFST 4
+#define MC_CMD_EVB_PORT_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_LEN 4
+
+
+/***********************************/
+/* MC_CMD_DUMP_BUFTBL_ENTRIES
+ * Dump buffer table entries, mainly for command client debug use. Dumps
+ * absolute entries, and does not use chunk handles. All entries must be in
+ * range and used for queue page mapping, although the latter restriction
+ * may be lifted in future.
+ */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES 0xab
+#undef MC_CMD_0xab_PRIVILEGE_CTG
+
+#define MC_CMD_0xab_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_DUMP_BUFTBL_ENTRIES_IN msgrequest */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_LEN 8
+/* Index of the first buffer table entry. */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_FIRSTID_OFST 0
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_FIRSTID_LEN 4
+/* Number of buffer table entries to dump. */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 4
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_NUMENTRIES_LEN 4
+
+/* MC_CMD_DUMP_BUFTBL_ENTRIES_OUT msgresponse */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMIN 12
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMAX 252
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LEN(num) (0+12*(num))
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_NUM(len) (((len)-0)/12)
+/* Raw buffer table entries, laid out as BUFTBL_ENTRY. */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_OFST 0
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_LEN 12
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MINNUM 1
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MAXNUM 21
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MAXNUM_MCDI2 85
+
+
+/***********************************/
+/* MC_CMD_SET_RXDP_CONFIG
+ * Set global RXDP configuration settings
+ */
+#define MC_CMD_SET_RXDP_CONFIG 0xc1
+#undef MC_CMD_0xc1_PRIVILEGE_CTG
+
+#define MC_CMD_0xc1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_RXDP_CONFIG_IN msgrequest */
+#define MC_CMD_SET_RXDP_CONFIG_IN_LEN 4
+#define MC_CMD_SET_RXDP_CONFIG_IN_DATA_OFST 0
+#define MC_CMD_SET_RXDP_CONFIG_IN_DATA_LEN 4
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_OFST 0
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_LBN 0
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_WIDTH 1
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_OFST 0
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_LBN 1
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_WIDTH 2
+/* enum: pad to 64 bytes */
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_64 0x0
+/* enum: pad to 128 bytes (Medford only) */
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_128 0x1
+/* enum: pad to 256 bytes (Medford only) */
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_256 0x2
+
+/* MC_CMD_SET_RXDP_CONFIG_OUT msgresponse */
+#define MC_CMD_SET_RXDP_CONFIG_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_RXDP_CONFIG
+ * Get global RXDP configuration settings
+ */
+#define MC_CMD_GET_RXDP_CONFIG 0xc2
+#undef MC_CMD_0xc2_PRIVILEGE_CTG
+
+#define MC_CMD_0xc2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_RXDP_CONFIG_IN msgrequest */
+#define MC_CMD_GET_RXDP_CONFIG_IN_LEN 0
+
+/* MC_CMD_GET_RXDP_CONFIG_OUT msgresponse */
+#define MC_CMD_GET_RXDP_CONFIG_OUT_LEN 4
+#define MC_CMD_GET_RXDP_CONFIG_OUT_DATA_OFST 0
+#define MC_CMD_GET_RXDP_CONFIG_OUT_DATA_LEN 4
+#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_OFST 0
+#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_LBN 0
+#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_WIDTH 1
+#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_OFST 0
+#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_LBN 1
+#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_WIDTH 2
+/* Enum values, see field(s): */
+/* MC_CMD_SET_RXDP_CONFIG/MC_CMD_SET_RXDP_CONFIG_IN/PAD_HOST_LEN */
+
+
+/***********************************/
+/* MC_CMD_GET_CLOCK
+ * Return the system and DPCPU clock frequencies.
+ */
+#define MC_CMD_GET_CLOCK 0xac
+#undef MC_CMD_0xac_PRIVILEGE_CTG
+
+#define MC_CMD_0xac_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_CLOCK_IN msgrequest */
+#define MC_CMD_GET_CLOCK_IN_LEN 0
+
+/* MC_CMD_GET_CLOCK_OUT msgresponse */
+#define MC_CMD_GET_CLOCK_OUT_LEN 8
+/* System frequency, MHz */
+#define MC_CMD_GET_CLOCK_OUT_SYS_FREQ_OFST 0
+#define MC_CMD_GET_CLOCK_OUT_SYS_FREQ_LEN 4
+/* DPCPU frequency, MHz */
+#define MC_CMD_GET_CLOCK_OUT_DPCPU_FREQ_OFST 4
+#define MC_CMD_GET_CLOCK_OUT_DPCPU_FREQ_LEN 4
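+
+/* Usage sketch (illustrative): querying the clock frequencies. A zero-length
+ * request returns the system and DPCPU frequencies in MHz. The function name
+ * efx_example_get_clock is hypothetical.
+ *
+ *	static int efx_example_get_clock(struct efx_nic *efx,
+ *					 unsigned int *sys_mhz,
+ *					 unsigned int *dpcpu_mhz)
+ *	{
+ *		MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLOCK_OUT_LEN);
+ *		size_t outlen;
+ *		int rc;
+ *
+ *		rc = efx_mcdi_rpc(efx, MC_CMD_GET_CLOCK, NULL, 0,
+ *				  outbuf, sizeof(outbuf), &outlen);
+ *		if (rc)
+ *			return rc;
+ *		if (outlen < MC_CMD_GET_CLOCK_OUT_LEN)
+ *			return -EIO;
+ *		*sys_mhz = MCDI_DWORD(outbuf, GET_CLOCK_OUT_SYS_FREQ);
+ *		*dpcpu_mhz = MCDI_DWORD(outbuf, GET_CLOCK_OUT_DPCPU_FREQ);
+ *		return 0;
+ *	}
+ */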
+
+
+/***********************************/
+/* MC_CMD_SET_CLOCK
+ * Control the system and DPCPU clock frequencies. Changes are lost on
+ * reboot.
+ */ +#define MC_CMD_SET_CLOCK 0xad +#undef MC_CMD_0xad_PRIVILEGE_CTG + +#define MC_CMD_0xad_PRIVILEGE_CTG SRIOV_CTG_INSECURE + +/* MC_CMD_SET_CLOCK_IN msgrequest */ +#define MC_CMD_SET_CLOCK_IN_LEN 28 +/* Requested frequency in MHz for system clock domain */ +#define MC_CMD_SET_CLOCK_IN_SYS_FREQ_OFST 0 +#define MC_CMD_SET_CLOCK_IN_SYS_FREQ_LEN 4 +/* enum: Leave the system clock domain frequency unchanged */ +#define MC_CMD_SET_CLOCK_IN_SYS_DOMAIN_DONT_CHANGE 0x0 +/* Requested frequency in MHz for inter-core clock domain */ +#define MC_CMD_SET_CLOCK_IN_ICORE_FREQ_OFST 4 +#define MC_CMD_SET_CLOCK_IN_ICORE_FREQ_LEN 4 +/* enum: Leave the inter-core clock domain frequency unchanged */ +#define MC_CMD_SET_CLOCK_IN_ICORE_DOMAIN_DONT_CHANGE 0x0 +/* Requested frequency in MHz for DPCPU clock domain */ +#define MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_OFST 8 +#define MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_LEN 4 +/* enum: Leave the DPCPU clock domain frequency unchanged */ +#define MC_CMD_SET_CLOCK_IN_DPCPU_DOMAIN_DONT_CHANGE 0x0 +/* Requested frequency in MHz for PCS clock domain */ +#define MC_CMD_SET_CLOCK_IN_PCS_FREQ_OFST 12 +#define MC_CMD_SET_CLOCK_IN_PCS_FREQ_LEN 4 +/* enum: Leave the PCS clock domain frequency unchanged */ +#define MC_CMD_SET_CLOCK_IN_PCS_DOMAIN_DONT_CHANGE 0x0 +/* Requested frequency in MHz for MC clock domain */ +#define MC_CMD_SET_CLOCK_IN_MC_FREQ_OFST 16 +#define MC_CMD_SET_CLOCK_IN_MC_FREQ_LEN 4 +/* enum: Leave the MC clock domain frequency unchanged */ +#define MC_CMD_SET_CLOCK_IN_MC_DOMAIN_DONT_CHANGE 0x0 +/* Requested frequency in MHz for rmon clock domain */ +#define MC_CMD_SET_CLOCK_IN_RMON_FREQ_OFST 20 +#define MC_CMD_SET_CLOCK_IN_RMON_FREQ_LEN 4 +/* enum: Leave the rmon clock domain frequency unchanged */ +#define MC_CMD_SET_CLOCK_IN_RMON_DOMAIN_DONT_CHANGE 0x0 +/* Requested frequency in MHz for vswitch clock domain */ +#define MC_CMD_SET_CLOCK_IN_VSWITCH_FREQ_OFST 24 +#define MC_CMD_SET_CLOCK_IN_VSWITCH_FREQ_LEN 4 +/* enum: Leave the vswitch clock domain frequency unchanged */ +#define MC_CMD_SET_CLOCK_IN_VSWITCH_DOMAIN_DONT_CHANGE 0x0 + +/* MC_CMD_SET_CLOCK_OUT msgresponse */ +#define MC_CMD_SET_CLOCK_OUT_LEN 28 +/* Resulting system frequency in MHz */ +#define MC_CMD_SET_CLOCK_OUT_SYS_FREQ_OFST 0 +#define MC_CMD_SET_CLOCK_OUT_SYS_FREQ_LEN 4 +/* enum: The system clock domain doesn't exist */ +#define MC_CMD_SET_CLOCK_OUT_SYS_DOMAIN_UNSUPPORTED 0x0 +/* Resulting inter-core frequency in MHz */ +#define MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_OFST 4 +#define MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_LEN 4 +/* enum: The inter-core clock domain doesn't exist / isn't used */ +#define MC_CMD_SET_CLOCK_OUT_ICORE_DOMAIN_UNSUPPORTED 0x0 +/* Resulting DPCPU frequency in MHz */ +#define MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_OFST 8 +#define MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_LEN 4 +/* enum: The dpcpu clock domain doesn't exist */ +#define MC_CMD_SET_CLOCK_OUT_DPCPU_DOMAIN_UNSUPPORTED 0x0 +/* Resulting PCS frequency in MHz */ +#define MC_CMD_SET_CLOCK_OUT_PCS_FREQ_OFST 12 +#define MC_CMD_SET_CLOCK_OUT_PCS_FREQ_LEN 4 +/* enum: The PCS clock domain doesn't exist / isn't controlled */ +#define MC_CMD_SET_CLOCK_OUT_PCS_DOMAIN_UNSUPPORTED 0x0 +/* Resulting MC frequency in MHz */ +#define MC_CMD_SET_CLOCK_OUT_MC_FREQ_OFST 16 +#define MC_CMD_SET_CLOCK_OUT_MC_FREQ_LEN 4 +/* enum: The MC clock domain doesn't exist / isn't controlled */ +#define MC_CMD_SET_CLOCK_OUT_MC_DOMAIN_UNSUPPORTED 0x0 +/* Resulting rmon frequency in MHz */ +#define MC_CMD_SET_CLOCK_OUT_RMON_FREQ_OFST 20 +#define MC_CMD_SET_CLOCK_OUT_RMON_FREQ_LEN 4 +/* 
enum: The rmon clock domain doesn't exist / isn't controlled */ +#define MC_CMD_SET_CLOCK_OUT_RMON_DOMAIN_UNSUPPORTED 0x0 +/* Resulting vswitch frequency in MHz */ +#define MC_CMD_SET_CLOCK_OUT_VSWITCH_FREQ_OFST 24 +#define MC_CMD_SET_CLOCK_OUT_VSWITCH_FREQ_LEN 4 +/* enum: The vswitch clock domain doesn't exist / isn't controlled */ +#define MC_CMD_SET_CLOCK_OUT_VSWITCH_DOMAIN_UNSUPPORTED 0x0 + + +/***********************************/ +/* MC_CMD_DPCPU_RPC + * Send an arbitrary DPCPU message. + */ +#define MC_CMD_DPCPU_RPC 0xae +#undef MC_CMD_0xae_PRIVILEGE_CTG + +#define MC_CMD_0xae_PRIVILEGE_CTG SRIOV_CTG_INSECURE + +/* MC_CMD_DPCPU_RPC_IN msgrequest */ +#define MC_CMD_DPCPU_RPC_IN_LEN 36 +#define MC_CMD_DPCPU_RPC_IN_CPU_OFST 0 +#define MC_CMD_DPCPU_RPC_IN_CPU_LEN 4 +/* enum: RxDPCPU0 */ +#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX0 0x0 +/* enum: TxDPCPU0 */ +#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX0 0x1 +/* enum: TxDPCPU1 */ +#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX1 0x2 +/* enum: RxDPCPU1 (Medford only) */ +#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX1 0x3 +/* enum: RxDPCPU (will be for the calling function; for now, just an alias of + * DPCPU_RX0) + */ +#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX 0x80 +/* enum: TxDPCPU (will be for the calling function; for now, just an alias of + * DPCPU_TX0) + */ +#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX 0x81 +/* First 8 bits [39:32] of DATA are consumed by MC-DPCPU protocol and must be + * initialised to zero + */ +#define MC_CMD_DPCPU_RPC_IN_DATA_OFST 4 +#define MC_CMD_DPCPU_RPC_IN_DATA_LEN 32 +#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_OFST 4 +#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_LBN 8 +#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_WIDTH 8 +#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_READ 0x6 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_WRITE 0x7 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_SELF_TEST 0xc /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_CSR_ACCESS 0xe /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_READ 0x46 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_WRITE 0x47 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SELF_TEST 0x4a /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_CSR_ACCESS 0x4c /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SET_MC_REPLAY_CNTXT 0x4d /* enum */ +#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_OFST 4 +#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_LBN 16 +#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_WIDTH 16 +#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_OFST 4 +#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_LBN 16 +#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_WIDTH 16 +#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_COUNT_OFST 4 +#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_COUNT_LBN 48 +#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_COUNT_WIDTH 16 +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_OFST 4 +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_LBN 16 +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_WIDTH 240 +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_OFST 4 +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_LBN 16 +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_WIDTH 16 +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_STOP_RETURN_RESULT 0x0 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_READ 0x1 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE 0x2 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE_READ 0x3 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_PIPELINED_READ 0x4 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_OFST 4 +#define 
MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_LBN 48 +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_WIDTH 16 +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_OFST 4 +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_LBN 64 +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_WIDTH 16 +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_OFST 4 +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_LBN 80 +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_WIDTH 16 +#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_OFST 4 +#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_LBN 16 +#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_WIDTH 16 +#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_CUT_THROUGH 0x1 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD 0x2 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD_FIRST 0x3 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_OFST 4 +#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_LBN 64 +#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_WIDTH 16 +#define MC_CMD_DPCPU_RPC_IN_WDATA_OFST 12 +#define MC_CMD_DPCPU_RPC_IN_WDATA_LEN 24 +/* Register data to write. Only valid in write/write-read. */ +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_DATA_OFST 16 +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_DATA_LEN 4 +/* Register address. */ +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_ADDRESS_OFST 20 +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_ADDRESS_LEN 4 + +/* MC_CMD_DPCPU_RPC_OUT msgresponse */ +#define MC_CMD_DPCPU_RPC_OUT_LEN 36 +#define MC_CMD_DPCPU_RPC_OUT_RC_OFST 0 +#define MC_CMD_DPCPU_RPC_OUT_RC_LEN 4 +/* DATA */ +#define MC_CMD_DPCPU_RPC_OUT_DATA_OFST 4 +#define MC_CMD_DPCPU_RPC_OUT_DATA_LEN 32 +#define MC_CMD_DPCPU_RPC_OUT_HDR_CMD_RESP_ERRCODE_OFST 4 +#define MC_CMD_DPCPU_RPC_OUT_HDR_CMD_RESP_ERRCODE_LBN 32 +#define MC_CMD_DPCPU_RPC_OUT_HDR_CMD_RESP_ERRCODE_WIDTH 16 +#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_COUNT_OFST 4 +#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_COUNT_LBN 48 +#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_COUNT_WIDTH 16 +#define MC_CMD_DPCPU_RPC_OUT_RDATA_OFST 12 +#define MC_CMD_DPCPU_RPC_OUT_RDATA_LEN 24 +#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_1_OFST 12 +#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_1_LEN 4 +#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_2_OFST 16 +#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_2_LEN 4 +#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_3_OFST 20 +#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_3_LEN 4 +#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_4_OFST 24 +#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_4_LEN 4 + + +/***********************************/ +/* MC_CMD_TRIGGER_INTERRUPT + * Trigger an interrupt by prodding the BIU. + */ +#define MC_CMD_TRIGGER_INTERRUPT 0xe3 +#undef MC_CMD_0xe3_PRIVILEGE_CTG + +#define MC_CMD_0xe3_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_TRIGGER_INTERRUPT_IN msgrequest */ +#define MC_CMD_TRIGGER_INTERRUPT_IN_LEN 4 +/* Interrupt level relative to base for function. */ +#define MC_CMD_TRIGGER_INTERRUPT_IN_INTR_LEVEL_OFST 0 +#define MC_CMD_TRIGGER_INTERRUPT_IN_INTR_LEVEL_LEN 4 + +/* MC_CMD_TRIGGER_INTERRUPT_OUT msgresponse */ +#define MC_CMD_TRIGGER_INTERRUPT_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_SHMBOOT_OP + * Special operations to support (for now) shmboot. 
+ */ +#define MC_CMD_SHMBOOT_OP 0xe6 +#undef MC_CMD_0xe6_PRIVILEGE_CTG + +#define MC_CMD_0xe6_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND + +/* MC_CMD_SHMBOOT_OP_IN msgrequest */ +#define MC_CMD_SHMBOOT_OP_IN_LEN 4 +/* Identifies the operation to perform */ +#define MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_OFST 0 +#define MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_LEN 4 +/* enum: Copy slave_data section to the slave core. (Greenport only) */ +#define MC_CMD_SHMBOOT_OP_IN_PUSH_SLAVE_DATA 0x0 + +/* MC_CMD_SHMBOOT_OP_OUT msgresponse */ +#define MC_CMD_SHMBOOT_OP_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_CAP_BLK_READ + * Read multiple 64bit words from capture block memory + */ +#define MC_CMD_CAP_BLK_READ 0xe7 +#undef MC_CMD_0xe7_PRIVILEGE_CTG + +#define MC_CMD_0xe7_PRIVILEGE_CTG SRIOV_CTG_INSECURE + +/* MC_CMD_CAP_BLK_READ_IN msgrequest */ +#define MC_CMD_CAP_BLK_READ_IN_LEN 12 +#define MC_CMD_CAP_BLK_READ_IN_CAP_REG_OFST 0 +#define MC_CMD_CAP_BLK_READ_IN_CAP_REG_LEN 4 +#define MC_CMD_CAP_BLK_READ_IN_ADDR_OFST 4 +#define MC_CMD_CAP_BLK_READ_IN_ADDR_LEN 4 +#define MC_CMD_CAP_BLK_READ_IN_COUNT_OFST 8 +#define MC_CMD_CAP_BLK_READ_IN_COUNT_LEN 4 + +/* MC_CMD_CAP_BLK_READ_OUT msgresponse */ +#define MC_CMD_CAP_BLK_READ_OUT_LENMIN 8 +#define MC_CMD_CAP_BLK_READ_OUT_LENMAX 248 +#define MC_CMD_CAP_BLK_READ_OUT_LENMAX_MCDI2 1016 +#define MC_CMD_CAP_BLK_READ_OUT_LEN(num) (0+8*(num)) +#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_NUM(len) (((len)-0)/8) +#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_OFST 0 +#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LEN 8 +#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LO_OFST 0 +#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LO_LEN 4 +#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LO_LBN 0 +#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LO_WIDTH 32 +#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_HI_OFST 4 +#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_HI_LEN 4 +#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_HI_LBN 32 +#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_HI_WIDTH 32 +#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_MINNUM 1 +#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_MAXNUM 31 +#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_MAXNUM_MCDI2 127 + + +/***********************************/ +/* MC_CMD_DUMP_DO + * Take a dump of the DUT state + */ +#define MC_CMD_DUMP_DO 0xe8 +#undef MC_CMD_0xe8_PRIVILEGE_CTG + +#define MC_CMD_0xe8_PRIVILEGE_CTG SRIOV_CTG_INSECURE + +/* MC_CMD_DUMP_DO_IN msgrequest */ +#define MC_CMD_DUMP_DO_IN_LEN 52 +#define MC_CMD_DUMP_DO_IN_PADDING_OFST 0 +#define MC_CMD_DUMP_DO_IN_PADDING_LEN 4 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_OFST 4 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_LEN 4 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM 0x0 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_DEFAULT 0x1 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_TYPE_OFST 8 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_TYPE_LEN 4 +#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_NVRAM 0x1 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY 0x2 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY_MLI 0x3 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_UART 0x4 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 12 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_LEN 4 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_OFST 16 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_LEN 4 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 12 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_LEN 4 +#define 
MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 16 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_LEN 4 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 12 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_LEN 4 +#define MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_PAGE_SIZE 0x1000 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 16 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_LEN 4 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 20 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_LEN 4 +#define MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_MAX_DEPTH 0x2 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_OFST 12 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_LEN 4 +/* enum: The uart port this command was received over (if using a uart + * transport) + */ +#define MC_CMD_DUMP_DO_IN_UART_PORT_SRC 0xff +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_SIZE_OFST 24 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_SIZE_LEN 4 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_OFST 28 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_LEN 4 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM 0x0 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_NVRAM_DUMP_PARTITION 0x1 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_TYPE_OFST 32 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */ +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 36 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_LEN 4 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_OFST 40 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_LEN 4 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 36 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_LEN 4 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 40 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_LEN 4 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 36 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_LEN 4 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 40 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_LEN 4 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 44 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_LEN 4 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_UART_PORT_OFST 36 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_UART_PORT_LEN 4 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_SIZE_OFST 48 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_SIZE_LEN 4 + +/* MC_CMD_DUMP_DO_OUT msgresponse */ +#define MC_CMD_DUMP_DO_OUT_LEN 4 +#define MC_CMD_DUMP_DO_OUT_DUMPFILE_SIZE_OFST 0 +#define MC_CMD_DUMP_DO_OUT_DUMPFILE_SIZE_LEN 4 + + +/***********************************/ +/* MC_CMD_DUMP_CONFIGURE_UNSOLICITED + * Configure unsolicited dumps + */ +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED 0xe9 +#undef MC_CMD_0xe9_PRIVILEGE_CTG + +#define MC_CMD_0xe9_PRIVILEGE_CTG SRIOV_CTG_INSECURE + +/* MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN msgrequest */ +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_LEN 52 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_ENABLE_OFST 0 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_ENABLE_LEN 4 
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_OFST 4 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC */ +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_TYPE_OFST 8 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */ +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 12 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_LEN 4 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_OFST 16 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_LEN 4 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 12 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_LEN 4 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 16 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_LEN 4 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 12 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_LEN 4 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 16 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_LEN 4 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 20 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_LEN 4 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_OFST 12 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_LEN 4 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_SIZE_OFST 24 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_SIZE_LEN 4 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_OFST 28 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPFILE_DST */ +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_TYPE_OFST 32 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */ +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 36 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_LEN 4 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_OFST 40 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_LEN 4 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 36 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_LEN 4 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 40 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_LEN 4 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 36 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_LEN 4 +#define 
MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 40
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_LEN 4
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 44
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_LEN 4
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_UART_PORT_OFST 36
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_UART_PORT_LEN 4
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_SIZE_OFST 48
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_SIZE_LEN 4
+
+
+/***********************************/
+/* MC_CMD_SET_PSU
+ * Adjusts power supply parameters. This is a warranty-voiding operation.
+ * Returns: ENOENT if the parameter or rail specified does not exist, EINVAL if
+ * the parameter is out of range.
+ */
+#define MC_CMD_SET_PSU 0xea
+#undef MC_CMD_0xea_PRIVILEGE_CTG
+
+#define MC_CMD_0xea_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_SET_PSU_IN msgrequest */
+#define MC_CMD_SET_PSU_IN_LEN 12
+#define MC_CMD_SET_PSU_IN_PARAM_OFST 0
+#define MC_CMD_SET_PSU_IN_PARAM_LEN 4
+#define MC_CMD_SET_PSU_IN_PARAM_SUPPLY_VOLTAGE 0x0 /* enum */
+#define MC_CMD_SET_PSU_IN_RAIL_OFST 4
+#define MC_CMD_SET_PSU_IN_RAIL_LEN 4
+#define MC_CMD_SET_PSU_IN_RAIL_0V9 0x0 /* enum */
+#define MC_CMD_SET_PSU_IN_RAIL_1V2 0x1 /* enum */
+/* desired value, e.g. voltage in mV */
+#define MC_CMD_SET_PSU_IN_VALUE_OFST 8
+#define MC_CMD_SET_PSU_IN_VALUE_LEN 4
+
+/* MC_CMD_SET_PSU_OUT msgresponse */
+#define MC_CMD_SET_PSU_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_FUNCTION_INFO
+ * Get function information: PF and VF number.
+ */
+#define MC_CMD_GET_FUNCTION_INFO 0xec
+#undef MC_CMD_0xec_PRIVILEGE_CTG
+
+#define MC_CMD_0xec_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_FUNCTION_INFO_IN msgrequest */
+#define MC_CMD_GET_FUNCTION_INFO_IN_LEN 0
+
+/* MC_CMD_GET_FUNCTION_INFO_OUT msgresponse */
+#define MC_CMD_GET_FUNCTION_INFO_OUT_LEN 8
+#define MC_CMD_GET_FUNCTION_INFO_OUT_PF_OFST 0
+#define MC_CMD_GET_FUNCTION_INFO_OUT_PF_LEN 4
+#define MC_CMD_GET_FUNCTION_INFO_OUT_VF_OFST 4
+#define MC_CMD_GET_FUNCTION_INFO_OUT_VF_LEN 4
+
+/* MC_CMD_GET_FUNCTION_INFO_OUT_V2 msgresponse */
+#define MC_CMD_GET_FUNCTION_INFO_OUT_V2_LEN 12
+#define MC_CMD_GET_FUNCTION_INFO_OUT_V2_PF_OFST 0
+#define MC_CMD_GET_FUNCTION_INFO_OUT_V2_PF_LEN 4
+#define MC_CMD_GET_FUNCTION_INFO_OUT_V2_VF_OFST 4
+#define MC_CMD_GET_FUNCTION_INFO_OUT_V2_VF_LEN 4
+/* Values from PCIE_INTERFACE enumeration. For NICs with a single interface, or
+ * in the case of a V1 response, this should be HOST_PRIMARY.
+ */
+#define MC_CMD_GET_FUNCTION_INFO_OUT_V2_INTF_OFST 8
+#define MC_CMD_GET_FUNCTION_INFO_OUT_V2_INTF_LEN 4
+
+
+/***********************************/
+/* MC_CMD_ENABLE_OFFLINE_BIST
+ * Enters offline BIST mode. All queues are torn down, chip enters quiescent
+ * mode, calling function gets exclusive MCDI ownership. The only way out is
+ * reboot.
+ */ +#define MC_CMD_ENABLE_OFFLINE_BIST 0xed +#undef MC_CMD_0xed_PRIVILEGE_CTG + +#define MC_CMD_0xed_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND + +/* MC_CMD_ENABLE_OFFLINE_BIST_IN msgrequest */ +#define MC_CMD_ENABLE_OFFLINE_BIST_IN_LEN 0 + +/* MC_CMD_ENABLE_OFFLINE_BIST_OUT msgresponse */ +#define MC_CMD_ENABLE_OFFLINE_BIST_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_UART_SEND_DATA + * Send checksummed[sic] block of data over the uart. Response is a placeholder + * should we wish to make this reliable; currently requests are fire-and- + * forget. + */ +#define MC_CMD_UART_SEND_DATA 0xee +#undef MC_CMD_0xee_PRIVILEGE_CTG + +#define MC_CMD_0xee_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_UART_SEND_DATA_OUT msgrequest */ +#define MC_CMD_UART_SEND_DATA_OUT_LENMIN 16 +#define MC_CMD_UART_SEND_DATA_OUT_LENMAX 252 +#define MC_CMD_UART_SEND_DATA_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_UART_SEND_DATA_OUT_LEN(num) (16+1*(num)) +#define MC_CMD_UART_SEND_DATA_OUT_DATA_NUM(len) (((len)-16)/1) +/* CRC32 over OFFSET, LENGTH, RESERVED, DATA */ +#define MC_CMD_UART_SEND_DATA_OUT_CHECKSUM_OFST 0 +#define MC_CMD_UART_SEND_DATA_OUT_CHECKSUM_LEN 4 +/* Offset at which to write the data */ +#define MC_CMD_UART_SEND_DATA_OUT_OFFSET_OFST 4 +#define MC_CMD_UART_SEND_DATA_OUT_OFFSET_LEN 4 +/* Length of data */ +#define MC_CMD_UART_SEND_DATA_OUT_LENGTH_OFST 8 +#define MC_CMD_UART_SEND_DATA_OUT_LENGTH_LEN 4 +/* Reserved for future use */ +#define MC_CMD_UART_SEND_DATA_OUT_RESERVED_OFST 12 +#define MC_CMD_UART_SEND_DATA_OUT_RESERVED_LEN 4 +#define MC_CMD_UART_SEND_DATA_OUT_DATA_OFST 16 +#define MC_CMD_UART_SEND_DATA_OUT_DATA_LEN 1 +#define MC_CMD_UART_SEND_DATA_OUT_DATA_MINNUM 0 +#define MC_CMD_UART_SEND_DATA_OUT_DATA_MAXNUM 236 +#define MC_CMD_UART_SEND_DATA_OUT_DATA_MAXNUM_MCDI2 1004 + +/* MC_CMD_UART_SEND_DATA_IN msgresponse */ +#define MC_CMD_UART_SEND_DATA_IN_LEN 0 + + +/***********************************/ +/* MC_CMD_UART_RECV_DATA + * Request checksummed[sic] block of data over the uart. Only a placeholder, + * subject to change and not currently implemented. 
+ */
+#define MC_CMD_UART_RECV_DATA 0xef
+#undef MC_CMD_0xef_PRIVILEGE_CTG
+
+#define MC_CMD_0xef_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_UART_RECV_DATA_OUT msgrequest */
+#define MC_CMD_UART_RECV_DATA_OUT_LEN 16
+/* CRC32 over OFFSET, LENGTH, RESERVED */
+#define MC_CMD_UART_RECV_DATA_OUT_CHECKSUM_OFST 0
+#define MC_CMD_UART_RECV_DATA_OUT_CHECKSUM_LEN 4
+/* Offset from which to read the data */
+#define MC_CMD_UART_RECV_DATA_OUT_OFFSET_OFST 4
+#define MC_CMD_UART_RECV_DATA_OUT_OFFSET_LEN 4
+/* Length of data */
+#define MC_CMD_UART_RECV_DATA_OUT_LENGTH_OFST 8
+#define MC_CMD_UART_RECV_DATA_OUT_LENGTH_LEN 4
+/* Reserved for future use */
+#define MC_CMD_UART_RECV_DATA_OUT_RESERVED_OFST 12
+#define MC_CMD_UART_RECV_DATA_OUT_RESERVED_LEN 4
+
+/* MC_CMD_UART_RECV_DATA_IN msgresponse */
+#define MC_CMD_UART_RECV_DATA_IN_LENMIN 16
+#define MC_CMD_UART_RECV_DATA_IN_LENMAX 252
+#define MC_CMD_UART_RECV_DATA_IN_LENMAX_MCDI2 1020
+#define MC_CMD_UART_RECV_DATA_IN_LEN(num) (16+1*(num))
+#define MC_CMD_UART_RECV_DATA_IN_DATA_NUM(len) (((len)-16)/1)
+/* CRC32 over RESERVED1, RESERVED2, RESERVED3, DATA */
+#define MC_CMD_UART_RECV_DATA_IN_CHECKSUM_OFST 0
+#define MC_CMD_UART_RECV_DATA_IN_CHECKSUM_LEN 4
+/* Reserved for future use */
+#define MC_CMD_UART_RECV_DATA_IN_RESERVED1_OFST 4
+#define MC_CMD_UART_RECV_DATA_IN_RESERVED1_LEN 4
+/* Reserved for future use */
+#define MC_CMD_UART_RECV_DATA_IN_RESERVED2_OFST 8
+#define MC_CMD_UART_RECV_DATA_IN_RESERVED2_LEN 4
+/* Reserved for future use */
+#define MC_CMD_UART_RECV_DATA_IN_RESERVED3_OFST 12
+#define MC_CMD_UART_RECV_DATA_IN_RESERVED3_LEN 4
+#define MC_CMD_UART_RECV_DATA_IN_DATA_OFST 16
+#define MC_CMD_UART_RECV_DATA_IN_DATA_LEN 1
+#define MC_CMD_UART_RECV_DATA_IN_DATA_MINNUM 0
+#define MC_CMD_UART_RECV_DATA_IN_DATA_MAXNUM 236
+#define MC_CMD_UART_RECV_DATA_IN_DATA_MAXNUM_MCDI2 1004
+
+
+/***********************************/
+/* MC_CMD_READ_FUSES
+ * Read data programmed into the device One-Time-Programmable (OTP) fuses.
+ */
+#define MC_CMD_READ_FUSES 0xf0
+#undef MC_CMD_0xf0_PRIVILEGE_CTG
+
+#define MC_CMD_0xf0_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_READ_FUSES_IN msgrequest */
+#define MC_CMD_READ_FUSES_IN_LEN 8
+/* Offset in OTP to read */
+#define MC_CMD_READ_FUSES_IN_OFFSET_OFST 0
+#define MC_CMD_READ_FUSES_IN_OFFSET_LEN 4
+/* Length of data to read in bytes */
+#define MC_CMD_READ_FUSES_IN_LENGTH_OFST 4
+#define MC_CMD_READ_FUSES_IN_LENGTH_LEN 4
+
+/* MC_CMD_READ_FUSES_OUT msgresponse */
+#define MC_CMD_READ_FUSES_OUT_LENMIN 4
+#define MC_CMD_READ_FUSES_OUT_LENMAX 252
+#define MC_CMD_READ_FUSES_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_READ_FUSES_OUT_LEN(num) (4+1*(num))
+#define MC_CMD_READ_FUSES_OUT_DATA_NUM(len) (((len)-4)/1)
+/* Length of returned OTP data in bytes */
+#define MC_CMD_READ_FUSES_OUT_LENGTH_OFST 0
+#define MC_CMD_READ_FUSES_OUT_LENGTH_LEN 4
+/* Returned data */
+#define MC_CMD_READ_FUSES_OUT_DATA_OFST 4
+#define MC_CMD_READ_FUSES_OUT_DATA_LEN 1
+#define MC_CMD_READ_FUSES_OUT_DATA_MINNUM 0
+#define MC_CMD_READ_FUSES_OUT_DATA_MAXNUM 248
+#define MC_CMD_READ_FUSES_OUT_DATA_MAXNUM_MCDI2 1016
+
+
+/***********************************/
+/* MC_CMD_KR_TUNE
+ * Get or set KR Serdes RXEQ and TX Driver settings
+ */
+#define MC_CMD_KR_TUNE 0xf1
+#undef MC_CMD_0xf1_PRIVILEGE_CTG
+
+#define MC_CMD_0xf1_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_KR_TUNE_IN msgrequest */
+#define MC_CMD_KR_TUNE_IN_LENMIN 4
+#define MC_CMD_KR_TUNE_IN_LENMAX 252
+#define MC_CMD_KR_TUNE_IN_LENMAX_MCDI2 1020
+#define
MC_CMD_KR_TUNE_IN_LEN(num) (4+4*(num)) +#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_NUM(len) (((len)-4)/4) +/* Requested operation */ +#define MC_CMD_KR_TUNE_IN_KR_TUNE_OP_OFST 0 +#define MC_CMD_KR_TUNE_IN_KR_TUNE_OP_LEN 1 +/* enum: Get current RXEQ settings */ +#define MC_CMD_KR_TUNE_IN_RXEQ_GET 0x0 +/* enum: Override RXEQ settings */ +#define MC_CMD_KR_TUNE_IN_RXEQ_SET 0x1 +/* enum: Get current TX Driver settings */ +#define MC_CMD_KR_TUNE_IN_TXEQ_GET 0x2 +/* enum: Override TX Driver settings */ +#define MC_CMD_KR_TUNE_IN_TXEQ_SET 0x3 +/* enum: Force KR Serdes reset / recalibration */ +#define MC_CMD_KR_TUNE_IN_RECAL 0x4 +/* enum: Start KR Serdes Eye diagram plot on a given lane. Lane must have valid + * signal. + */ +#define MC_CMD_KR_TUNE_IN_START_EYE_PLOT 0x5 +/* enum: Poll KR Serdes Eye diagram plot. Returns one row of BER data. The + * caller should call this command repeatedly after starting eye plot, until no + * more data is returned. + */ +#define MC_CMD_KR_TUNE_IN_POLL_EYE_PLOT 0x6 +/* enum: Read Figure Of Merit (eye quality, higher is better). */ +#define MC_CMD_KR_TUNE_IN_READ_FOM 0x7 +/* enum: Start/stop link training frames */ +#define MC_CMD_KR_TUNE_IN_LINK_TRAIN_RUN 0x8 +/* enum: Issue KR link training command (control training coefficients) */ +#define MC_CMD_KR_TUNE_IN_LINK_TRAIN_CMD 0x9 +/* Align the arguments to 32 bits */ +#define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_OFST 1 +#define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_LEN 3 +/* Arguments specific to the operation */ +#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_OFST 4 +#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_LEN 4 +#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_MINNUM 0 +#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_MAXNUM 62 +#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_MAXNUM_MCDI2 254 + +/* MC_CMD_KR_TUNE_OUT msgresponse */ +#define MC_CMD_KR_TUNE_OUT_LEN 0 + +/* MC_CMD_KR_TUNE_RXEQ_GET_IN msgrequest */ +#define MC_CMD_KR_TUNE_RXEQ_GET_IN_LEN 4 +/* Requested operation */ +#define MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_OP_OFST 0 +#define MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_OP_LEN 1 +/* Align the arguments to 32 bits */ +#define MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_RSVD_OFST 1 +#define MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_RSVD_LEN 3 + +/* MC_CMD_KR_TUNE_RXEQ_GET_OUT msgresponse */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LENMIN 4 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LENMAX 252 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LEN(num) (0+4*(num)) +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_NUM(len) (((len)-0)/4) +/* RXEQ Parameter */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_OFST 0 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LEN 4 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_MINNUM 1 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM 63 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM_MCDI2 255 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_OFST 0 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8 +/* enum: Attenuation (0-15, Huntington) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_ATT 0x0 +/* enum: CTLE Boost (0-15, Huntington) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_BOOST 0x1 +/* enum: Edge DFE Tap1 (Huntington - 0 - max negative, 64 - zero, 127 - max + * positive, Medford - 0-31) + */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP1 0x2 +/* enum: Edge DFE Tap2 (Huntington - 0 - max negative, 32 - zero, 63 - max + * positive, Medford - 0-31) + */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP2 0x3 +/* enum: Edge DFE Tap3 (Huntington - 0 - max negative, 32 - zero, 63 - max + * positive, 
Medford - 0-16)
+ */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP3 0x4
+/* enum: Edge DFE Tap4 (Huntington - 0 - max negative, 32 - zero, 63 - max
+ * positive, Medford - 0-16)
+ */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP4 0x5
+/* enum: Edge DFE Tap5 (Huntington - 0 - max negative, 32 - zero, 63 - max
+ * positive, Medford - 0-16)
+ */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP5 0x6
+/* enum: Edge DFE DLEV (0-128 for Medford) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_DLEV 0x7
+/* enum: Variable Gain Amplifier (0-15, Medford) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_VGA 0x8
+/* enum: CTLE EQ Capacitor (0-15, Medford) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_EQC 0x9
+/* enum: CTLE EQ Resistor (0-7, Medford) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_EQRES 0xa
+/* enum: CTLE gain (0-31, Medford2) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_GAIN 0xb
+/* enum: CTLE pole (0-31, Medford2) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_POLE 0xc
+/* enum: CTLE peaking (0-31, Medford2) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_PEAK 0xd
+/* enum: DFE Tap1 - even path (Medford2 - 6 bit signed (-29 - +29)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP1_EVEN 0xe
+/* enum: DFE Tap1 - odd path (Medford2 - 6 bit signed (-29 - +29)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP1_ODD 0xf
+/* enum: DFE Tap2 (Medford2 - 6 bit signed (-20 - +20)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP2 0x10
+/* enum: DFE Tap3 (Medford2 - 6 bit signed (-20 - +20)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP3 0x11
+/* enum: DFE Tap4 (Medford2 - 6 bit signed (-20 - +20)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP4 0x12
+/* enum: DFE Tap5 (Medford2 - 6 bit signed (-24 - +24)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP5 0x13
+/* enum: DFE Tap6 (Medford2 - 6 bit signed (-24 - +24)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP6 0x14
+/* enum: DFE Tap7 (Medford2 - 6 bit signed (-24 - +24)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP7 0x15
+/* enum: DFE Tap8 (Medford2 - 6 bit signed (-24 - +24)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP8 0x16
+/* enum: DFE Tap9 (Medford2 - 6 bit signed (-24 - +24)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP9 0x17
+/* enum: DFE Tap10 (Medford2 - 6 bit signed (-24 - +24)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP10 0x18
+/* enum: DFE Tap11 (Medford2 - 6 bit signed (-24 - +24)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP11 0x19
+/* enum: DFE Tap12 (Medford2 - 6 bit signed (-24 - +24)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP12 0x1a
+/* enum: I/Q clk offset (Medford2 - 4 bit signed (-5 - +5)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_IQ_OFF 0x1b
+/* enum: Negative h1 polarity data sampler offset calibration code, even path
+ * (Medford2 - 6 bit signed (-29 - +29))
+ */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1N_OFF_EVEN 0x1c
+/* enum: Negative h1 polarity data sampler offset calibration code, odd path
+ * (Medford2 - 6 bit signed (-29 - +29))
+ */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1N_OFF_ODD 0x1d
+/* enum: Positive h1 polarity data sampler offset calibration code, even path
+ * (Medford2 - 6 bit signed (-29 - +29))
+ */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1P_OFF_EVEN 0x1e
+/* enum: Positive h1 polarity data sampler offset calibration code, odd path
+ * (Medford2 - 6 bit signed (-29 - +29))
+ */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1P_OFF_ODD 0x1f
+/* enum: CDR calibration loop code (Medford2) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CDR_PVT 0x20
+/* enum: CDR integral loop code (Medford2) */
+#define
MC_CMD_KR_TUNE_RXEQ_GET_OUT_CDR_INTEG 0x21 +/* enum: CTLE Boost stages - retimer lineside (Medford2 with DS250x retimer - 4 + * stages, 2 bits per stage) + */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_BOOST_RT_LS 0x22 +/* enum: DFE Tap1 - retimer lineside (Medford2 with DS250x retimer (-31 - 31)) + */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP1_RT_LS 0x23 +/* enum: DFE Tap2 - retimer lineside (Medford2 with DS250x retimer (-15 - 15)) + */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP2_RT_LS 0x24 +/* enum: DFE Tap3 - retimer lineside (Medford2 with DS250x retimer (-15 - 15)) + */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP3_RT_LS 0x25 +/* enum: DFE Tap4 - retimer lineside (Medford2 with DS250x retimer (-15 - 15)) + */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP4_RT_LS 0x26 +/* enum: DFE Tap5 - retimer lineside (Medford2 with DS250x retimer (-15 - 15)) + */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP5_RT_LS 0x27 +/* enum: CTLE Boost stages - retimer hostside (Medford2 with DS250x retimer - 4 + * stages, 2 bits per stage) + */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_BOOST_RT_HS 0x28 +/* enum: DFE Tap1 - retimer hostside (Medford2 with DS250x retimer (-31 - 31)) + */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP1_RT_HS 0x29 +/* enum: DFE Tap2 - retimer hostside (Medford2 with DS250x retimer (-15 - 15)) + */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP2_RT_HS 0x2a +/* enum: DFE Tap3 - retimer hostside (Medford2 with DS250x retimer (-15 - 15)) + */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP3_RT_HS 0x2b +/* enum: DFE Tap4 - retimer hostside (Medford2 with DS250x retimer (-15 - 15)) + */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP4_RT_HS 0x2c +/* enum: DFE Tap5 - retimer hostside (Medford2 with DS250x retimer (-15 - 15)) + */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP5_RT_HS 0x2d +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_OFST 0 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 3 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_3 0x3 /* enum */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_ALL 0x4 /* enum */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_OFST 0 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_LBN 11 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_WIDTH 1 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_RESERVED_OFST 0 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_RESERVED_LBN 12 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_RESERVED_WIDTH 4 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_INITIAL_OFST 0 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_INITIAL_LBN 16 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_INITIAL_WIDTH 8 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_OFST 0 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_LBN 24 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8 + +/* MC_CMD_KR_TUNE_RXEQ_SET_IN msgrequest */ +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_LENMIN 8 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_LENMAX 252 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_LENMAX_MCDI2 1020 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_LEN(num) (4+4*(num)) +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_NUM(len) (((len)-4)/4) +/* Requested operation */ +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_OP_OFST 0 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_OP_LEN 1 +/* Align the arguments to 32 bits */ +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_RSVD_OFST 1 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_RSVD_LEN 3 +/* 
RXEQ Parameter */ +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_OFST 4 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_LEN 4 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_MINNUM 1 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_MAXNUM 62 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_MAXNUM_MCDI2 254 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_ID_OFST 4 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_ID_LBN 0 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_ID_WIDTH 8 +/* Enum values, see field(s): */ +/* MC_CMD_KR_TUNE_RXEQ_GET_OUT/PARAM_ID */ +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_LANE_OFST 4 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_LANE_LBN 8 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_LANE_WIDTH 3 +/* Enum values, see field(s): */ +/* MC_CMD_KR_TUNE_RXEQ_GET_OUT/PARAM_LANE */ +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_OFST 4 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_LBN 11 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_WIDTH 1 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED_OFST 4 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED_LBN 12 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED_WIDTH 4 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_INITIAL_OFST 4 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_INITIAL_LBN 16 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_INITIAL_WIDTH 8 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED2_OFST 4 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED2_LBN 24 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED2_WIDTH 8 + +/* MC_CMD_KR_TUNE_RXEQ_SET_OUT msgresponse */ +#define MC_CMD_KR_TUNE_RXEQ_SET_OUT_LEN 0 + +/* MC_CMD_KR_TUNE_TXEQ_GET_IN msgrequest */ +#define MC_CMD_KR_TUNE_TXEQ_GET_IN_LEN 4 +/* Requested operation */ +#define MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_OP_OFST 0 +#define MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_OP_LEN 1 +/* Align the arguments to 32 bits */ +#define MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_RSVD_OFST 1 +#define MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_RSVD_LEN 3 + +/* MC_CMD_KR_TUNE_TXEQ_GET_OUT msgresponse */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LENMIN 4 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LENMAX 252 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LEN(num) (0+4*(num)) +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_NUM(len) (((len)-0)/4) +/* TXEQ Parameter */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_OFST 0 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LEN 4 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_MINNUM 1 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM 63 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM_MCDI2 255 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_OFST 0 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8 +/* enum: TX Amplitude (Huntington, Medford, Medford2) */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV 0x0 +/* enum: De-Emphasis Tap1 Magnitude (0-7) (Huntington) */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_MODE 0x1 +/* enum: De-Emphasis Tap1 Fine */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_DTLEV 0x2 +/* enum: De-Emphasis Tap2 Magnitude (0-6) (Huntington) */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2 0x3 +/* enum: De-Emphasis Tap2 Fine (Huntington) */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2TLEV 0x4 +/* enum: Pre-Emphasis Magnitude (Huntington) */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_E 0x5 +/* enum: Pre-Emphasis Fine (Huntington) */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_ETLEV 0x6 +/* enum: TX Slew Rate Coarse control (Huntington) */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_PREDRV_DLY 0x7 +/* enum: TX Slew Rate Fine control (Huntington) */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_SR_SET 0x8 +/* 
enum: TX Termination Impedance control (Huntington) */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_RT_SET 0x9 +/* enum: TX Amplitude Fine control (Medford) */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV_FINE 0xa +/* enum: Pre-cursor Tap (Medford, Medford2) */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_ADV 0xb +/* enum: Post-cursor Tap (Medford, Medford2) */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_DLY 0xc +/* enum: TX Amplitude (Retimer Lineside) */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV_RT_LS 0xd +/* enum: Pre-cursor Tap (Retimer Lineside) */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_ADV_RT_LS 0xe +/* enum: Post-cursor Tap (Retimer Lineside) */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_DLY_RT_LS 0xf +/* enum: TX Amplitude (Retimer Hostside) */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV_RT_HS 0x10 +/* enum: Pre-cursor Tap (Retimer Hostside) */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_ADV_RT_HS 0x11 +/* enum: Post-cursor Tap (Retimer Hostside) */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_DLY_RT_HS 0x12 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_OFST 0 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 3 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_0 0x0 /* enum */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_1 0x1 /* enum */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_2 0x2 /* enum */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_3 0x3 /* enum */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_ALL 0x4 /* enum */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED_OFST 0 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED_LBN 11 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED_WIDTH 5 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_INITIAL_OFST 0 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_INITIAL_LBN 16 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_INITIAL_WIDTH 8 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED2_OFST 0 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED2_LBN 24 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED2_WIDTH 8 + +/* MC_CMD_KR_TUNE_TXEQ_SET_IN msgrequest */ +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_LENMIN 8 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_LENMAX 252 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_LENMAX_MCDI2 1020 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_LEN(num) (4+4*(num)) +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_NUM(len) (((len)-4)/4) +/* Requested operation */ +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_OP_OFST 0 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_OP_LEN 1 +/* Align the arguments to 32 bits */ +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_RSVD_OFST 1 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_RSVD_LEN 3 +/* TXEQ Parameter */ +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_OFST 4 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_LEN 4 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_MINNUM 1 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_MAXNUM 62 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_MAXNUM_MCDI2 254 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_ID_OFST 4 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_ID_LBN 0 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_ID_WIDTH 8 +/* Enum values, see field(s): */ +/* MC_CMD_KR_TUNE_TXEQ_GET_OUT/PARAM_ID */ +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_LANE_OFST 4 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_LANE_LBN 8 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_LANE_WIDTH 3 +/* Enum values, see field(s): */ +/* MC_CMD_KR_TUNE_TXEQ_GET_OUT/PARAM_LANE */ +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED_OFST 4 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED_LBN 11 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED_WIDTH 5 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_INITIAL_OFST 4 +#define 
MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_INITIAL_LBN 16 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_INITIAL_WIDTH 8 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED2_OFST 4 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED2_LBN 24 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED2_WIDTH 8 + +/* MC_CMD_KR_TUNE_TXEQ_SET_OUT msgresponse */ +#define MC_CMD_KR_TUNE_TXEQ_SET_OUT_LEN 0 + +/* MC_CMD_KR_TUNE_RECAL_IN msgrequest */ +#define MC_CMD_KR_TUNE_RECAL_IN_LEN 4 +/* Requested operation */ +#define MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_OP_OFST 0 +#define MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_OP_LEN 1 +/* Align the arguments to 32 bits */ +#define MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_RSVD_OFST 1 +#define MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_RSVD_LEN 3 + +/* MC_CMD_KR_TUNE_RECAL_OUT msgresponse */ +#define MC_CMD_KR_TUNE_RECAL_OUT_LEN 0 + +/* MC_CMD_KR_TUNE_START_EYE_PLOT_IN msgrequest */ +#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_LEN 8 +/* Requested operation */ +#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_OP_OFST 0 +#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_OP_LEN 1 +/* Align the arguments to 32 bits */ +#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_RSVD_OFST 1 +#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_RSVD_LEN 3 +/* Port-relative lane to scan eye on */ +#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_LANE_OFST 4 +#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_LANE_LEN 4 + +/* MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN msgrequest */ +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LEN 12 +/* Requested operation */ +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_OP_OFST 0 +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_OP_LEN 1 +/* Align the arguments to 32 bits */ +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_RSVD_OFST 1 +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_RSVD_LEN 3 +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_OFST 4 +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_LEN 4 +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_NUM_OFST 4 +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_NUM_LBN 0 +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_NUM_WIDTH 8 +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_ABS_REL_OFST 4 +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_ABS_REL_LBN 31 +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_ABS_REL_WIDTH 1 +/* Scan duration / cycle count */ +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_BER_OFST 8 +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_BER_LEN 4 + +/* MC_CMD_KR_TUNE_START_EYE_PLOT_OUT msgresponse */ +#define MC_CMD_KR_TUNE_START_EYE_PLOT_OUT_LEN 0 + +/* MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN msgrequest */ +#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_LEN 4 +/* Requested operation */ +#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_OP_OFST 0 +#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_OP_LEN 1 +/* Align the arguments to 32 bits */ +#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_RSVD_OFST 1 +#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_RSVD_LEN 3 + +/* MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT msgresponse */ +#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_LENMIN 0 +#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_LENMAX 252 +#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_LEN(num) (0+2*(num)) +#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_NUM(len) (((len)-0)/2) +#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_OFST 0 +#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_LEN 2 +#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MINNUM 0 +#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM 126 +#define 
MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM_MCDI2 510 + +/* MC_CMD_KR_TUNE_READ_FOM_IN msgrequest */ +#define MC_CMD_KR_TUNE_READ_FOM_IN_LEN 8 +/* Requested operation */ +#define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_OP_OFST 0 +#define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_OP_LEN 1 +/* Align the arguments to 32 bits */ +#define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_RSVD_OFST 1 +#define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_RSVD_LEN 3 +#define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_OFST 4 +#define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_LEN 4 +#define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_NUM_OFST 4 +#define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_NUM_LBN 0 +#define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_NUM_WIDTH 8 +#define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_ABS_REL_OFST 4 +#define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_ABS_REL_LBN 31 +#define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_ABS_REL_WIDTH 1 + +/* MC_CMD_KR_TUNE_READ_FOM_OUT msgresponse */ +#define MC_CMD_KR_TUNE_READ_FOM_OUT_LEN 4 +#define MC_CMD_KR_TUNE_READ_FOM_OUT_FOM_OFST 0 +#define MC_CMD_KR_TUNE_READ_FOM_OUT_FOM_LEN 4 + +/* MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN msgrequest */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_LEN 8 +/* Requested operation */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_KR_TUNE_OP_OFST 0 +#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_KR_TUNE_OP_LEN 1 +/* Align the arguments to 32 bits */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_KR_TUNE_RSVD_OFST 1 +#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_KR_TUNE_RSVD_LEN 3 +#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_RUN_OFST 4 +#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_RUN_LEN 4 +#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_STOP 0x0 /* enum */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_START 0x1 /* enum */ + +/* MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN msgrequest */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_LEN 28 +/* Requested operation */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_KR_TUNE_OP_OFST 0 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_KR_TUNE_OP_LEN 1 +/* Align the arguments to 32 bits */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_KR_TUNE_RSVD_OFST 1 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_KR_TUNE_RSVD_LEN 3 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_LANE_OFST 4 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_LANE_LEN 4 +/* Set INITIALIZE state */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_INITIALIZE_OFST 8 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_INITIALIZE_LEN 4 +/* Set PRESET state */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_PRESET_OFST 12 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_PRESET_LEN 4 +/* C(-1) request */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_CM1_OFST 16 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_CM1_LEN 4 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_REQ_HOLD 0x0 /* enum */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_REQ_INCREMENT 0x1 /* enum */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_REQ_DECREMENT 0x2 /* enum */ +/* C(0) request */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_C0_OFST 20 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_C0_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN/CM1 */ +/* C(+1) request */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_CP1_OFST 24 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_CP1_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN/CM1 */ + +/* MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT msgresponse */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_LEN 24 +/* C(-1) status */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CM1_STATUS_OFST 0 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CM1_STATUS_LEN 4 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_STATUS_NOT_UPDATED 0x0 /* enum */ +#define 
MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_STATUS_UPDATED 0x1 /* enum */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_STATUS_MINIMUM 0x2 /* enum */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_STATUS_MAXIMUM 0x3 /* enum */
+/* C(0) status */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_C0_STATUS_OFST 4
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_C0_STATUS_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN/CM1 */
+/* C(+1) status */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CP1_STATUS_OFST 8
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CP1_STATUS_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN/CM1 */
+/* C(-1) value */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CM1_VALUE_OFST 12
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CM1_VALUE_LEN 4
+/* C(0) value */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_C0_VALUE_OFST 16
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_C0_VALUE_LEN 4
+/* C(+1) value */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CP1_VALUE_OFST 20
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CP1_VALUE_LEN 4
+
+
+/***********************************/
+/* MC_CMD_PCIE_TUNE
+ * Get or set PCIE Serdes RXEQ and TX Driver settings
+ */
+#define MC_CMD_PCIE_TUNE 0xf2
+#undef MC_CMD_0xf2_PRIVILEGE_CTG
+
+#define MC_CMD_0xf2_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_PCIE_TUNE_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_IN_LENMIN 4
+#define MC_CMD_PCIE_TUNE_IN_LENMAX 252
+#define MC_CMD_PCIE_TUNE_IN_LENMAX_MCDI2 1020
+#define MC_CMD_PCIE_TUNE_IN_LEN(num) (4+4*(num))
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_NUM(len) (((len)-4)/4)
+/* Requested operation */
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_OP_OFST 0
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_OP_LEN 1
+/* enum: Get current RXEQ settings */
+#define MC_CMD_PCIE_TUNE_IN_RXEQ_GET 0x0
+/* enum: Override RXEQ settings */
+#define MC_CMD_PCIE_TUNE_IN_RXEQ_SET 0x1
+/* enum: Get current TX Driver settings */
+#define MC_CMD_PCIE_TUNE_IN_TXEQ_GET 0x2
+/* enum: Override TX Driver settings */
+#define MC_CMD_PCIE_TUNE_IN_TXEQ_SET 0x3
+/* enum: Start PCIe Serdes Eye diagram plot on a given lane. */
+#define MC_CMD_PCIE_TUNE_IN_START_EYE_PLOT 0x5
+/* enum: Poll PCIe Serdes Eye diagram plot. Returns one row of BER data. The
+ * caller should call this command repeatedly after starting eye plot, until no
+ * more data is returned.
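+ *
+ * A minimal polling sketch (illustrative only, assuming the MCDI helpers
+ * from mcdi.h and an initialised struct efx_nic *efx; each successful
+ * response carries MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_NUM(outlen)
+ * 16-bit samples, and an empty response ends the plot):
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_LEN);
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LENMAX);
+ *	size_t outlen;
+ *	int rc;
+ *
+ *	*MCDI_PTR(inbuf, PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_OP) =
+ *		MC_CMD_PCIE_TUNE_IN_POLL_EYE_PLOT;
+ *	do {
+ *		rc = efx_mcdi_rpc(efx, MC_CMD_PCIE_TUNE, inbuf, sizeof(inbuf),
+ *				  outbuf, sizeof(outbuf), &outlen);
+ *	} while (rc == 0 && outlen != 0);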
+ */ +#define MC_CMD_PCIE_TUNE_IN_POLL_EYE_PLOT 0x6 +/* enum: Enable the SERDES BIST and set it to generate a 200MHz square wave */ +#define MC_CMD_PCIE_TUNE_IN_BIST_SQUARE_WAVE 0x7 +/* Align the arguments to 32 bits */ +#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_OFST 1 +#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_LEN 3 +/* Arguments specific to the operation */ +#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_OFST 4 +#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_LEN 4 +#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_MINNUM 0 +#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_MAXNUM 62 +#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_MAXNUM_MCDI2 254 + +/* MC_CMD_PCIE_TUNE_OUT msgresponse */ +#define MC_CMD_PCIE_TUNE_OUT_LEN 0 + +/* MC_CMD_PCIE_TUNE_RXEQ_GET_IN msgrequest */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_LEN 4 +/* Requested operation */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_OP_OFST 0 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_OP_LEN 1 +/* Align the arguments to 32 bits */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_RSVD_OFST 1 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_RSVD_LEN 3 + +/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT msgresponse */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LENMIN 4 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LENMAX 252 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LEN(num) (0+4*(num)) +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_NUM(len) (((len)-0)/4) +/* RXEQ Parameter */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_OFST 0 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LEN 4 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_MINNUM 1 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM 63 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM_MCDI2 255 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_OFST 0 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8 +/* enum: Attenuation (0-15) */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_ATT 0x0 +/* enum: CTLE Boost (0-15) */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_BOOST 0x1 +/* enum: DFE Tap1 (0 - max negative, 64 - zero, 127 - max positive) */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP1 0x2 +/* enum: DFE Tap2 (0 - max negative, 32 - zero, 63 - max positive) */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP2 0x3 +/* enum: DFE Tap3 (0 - max negative, 32 - zero, 63 - max positive) */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP3 0x4 +/* enum: DFE Tap4 (0 - max negative, 32 - zero, 63 - max positive) */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP4 0x5 +/* enum: DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive) */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP5 0x6 +/* enum: DFE DLev */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_DLEV 0x7 +/* enum: Figure of Merit */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_FOM 0x8 +/* enum: CTLE EQ Capacitor (HF Gain) */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQC 0x9 +/* enum: CTLE EQ Resistor (DC Gain) */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQRES 0xa +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_OFST 0 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 5 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_3 0x3 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_4 0x4 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_5 0x5 /* enum */ 
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_6 0x6 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_7 0x7 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_8 0x8 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_9 0x9 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_10 0xa /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_11 0xb /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_12 0xc /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_13 0xd /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_14 0xe /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_15 0xf /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_ALL 0x10 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_OFST 0 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_LBN 13 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_WIDTH 1 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_OFST 0 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_LBN 14 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_WIDTH 10 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_OFST 0 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_LBN 24 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8 + +/* MC_CMD_PCIE_TUNE_RXEQ_SET_IN msgrequest */ +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LENMIN 8 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LENMAX 252 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LENMAX_MCDI2 1020 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LEN(num) (4+4*(num)) +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_NUM(len) (((len)-4)/4) +/* Requested operation */ +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_OP_OFST 0 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_OP_LEN 1 +/* Align the arguments to 32 bits */ +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_RSVD_OFST 1 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_RSVD_LEN 3 +/* RXEQ Parameter */ +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_OFST 4 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LEN 4 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_MINNUM 1 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_MAXNUM 62 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_MAXNUM_MCDI2 254 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_ID_OFST 4 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_ID_LBN 0 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_ID_WIDTH 8 +/* Enum values, see field(s): */ +/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_ID */ +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LANE_OFST 4 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LANE_LBN 8 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LANE_WIDTH 5 +/* Enum values, see field(s): */ +/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_LANE */ +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_OFST 4 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_LBN 13 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_WIDTH 1 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED_OFST 4 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED_LBN 14 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED_WIDTH 2 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_INITIAL_OFST 4 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_INITIAL_LBN 16 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_INITIAL_WIDTH 8 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED2_OFST 4 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED2_LBN 24 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED2_WIDTH 8 + +/* MC_CMD_PCIE_TUNE_RXEQ_SET_OUT msgresponse */ +#define MC_CMD_PCIE_TUNE_RXEQ_SET_OUT_LEN 0 + +/* MC_CMD_PCIE_TUNE_TXEQ_GET_IN msgrequest */ +#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_LEN 4 +/* Requested operation */ +#define 
MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_OP_OFST 0
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_RSVD_OFST 1
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_RSVD_LEN 3
+
+/* MC_CMD_PCIE_TUNE_TXEQ_GET_OUT msgresponse */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LENMIN 4
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LENMAX 252
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_NUM(len) (((len)-0)/4)
+/* TXEQ Parameter */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_OFST 0
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LEN 4
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_MINNUM 1
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM 63
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM_MCDI2 255
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_OFST 0
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8
+/* enum: TxMargin (PIPE) */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXMARGIN 0x0
+/* enum: TxSwing (PIPE) */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXSWING 0x1
+/* enum: De-emphasis coefficient C(-1) (PIPE) */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CM1 0x2
+/* enum: De-emphasis coefficient C(0) (PIPE) */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_C0 0x3
+/* enum: De-emphasis coefficient C(+1) (PIPE) */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CP1 0x4
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_OFST 0
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 4
+/* Enum values, see field(s): */
+/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_LANE */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_RESERVED_OFST 0
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_RESERVED_LBN 12
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_RESERVED_WIDTH 12
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_OFST 0
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_LBN 24
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8
+
+/* MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LEN 8
+/* Requested operation */
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_OP_OFST 0
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_OFST 1
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_LEN 3
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LANE_OFST 4
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LANE_LEN 4
+
+/* MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT msgresponse */
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT_LEN 0
+
+/* MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_OP_OFST 0
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_RSVD_OFST 1
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_RSVD_LEN 3
+
+/* MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT msgresponse */
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LENMIN 0
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LENMAX 252
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LEN(num) (0+2*(num))
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_NUM(len) (((len)-0)/2)
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_OFST 0
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_LEN 2
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MINNUM 0
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM 126
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM_MCDI2 510
+
+/* MC_CMD_PCIE_TUNE_BIST_SQUARE_WAVE_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_BIST_SQUARE_WAVE_IN_LEN 0
+
+/* MC_CMD_PCIE_TUNE_BIST_SQUARE_WAVE_OUT msgresponse */
+#define MC_CMD_PCIE_TUNE_BIST_SQUARE_WAVE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_LICENSING
+ * Operations on the NVRAM_PARTITION_TYPE_LICENSE application license partition
+ * - not used for V3 licensing
+ */
+#define MC_CMD_LICENSING 0xf3
+#undef MC_CMD_0xf3_PRIVILEGE_CTG
+
+#define MC_CMD_0xf3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSING_IN msgrequest */
+#define MC_CMD_LICENSING_IN_LEN 4
+/* identifies the type of operation requested */
+#define MC_CMD_LICENSING_IN_OP_OFST 0
+#define MC_CMD_LICENSING_IN_OP_LEN 4
+/* enum: re-read and apply licenses after a license key partition update; note
+ * that this operation returns a zero-length response
+ */
+#define MC_CMD_LICENSING_IN_OP_UPDATE_LICENSE 0x0
+/* enum: report counts of installed licenses */
+#define MC_CMD_LICENSING_IN_OP_GET_KEY_STATS 0x1
+
+/* MC_CMD_LICENSING_OUT msgresponse */
+#define MC_CMD_LICENSING_OUT_LEN 28
+/* count of application keys which are valid */
+#define MC_CMD_LICENSING_OUT_VALID_APP_KEYS_OFST 0
+#define MC_CMD_LICENSING_OUT_VALID_APP_KEYS_LEN 4
+/* sum of UNVERIFIABLE_APP_KEYS + WRONG_NODE_APP_KEYS (for compatibility with
+ * MC_CMD_FC_OP_LICENSE)
+ */
+#define MC_CMD_LICENSING_OUT_INVALID_APP_KEYS_OFST 4
+#define MC_CMD_LICENSING_OUT_INVALID_APP_KEYS_LEN 4
+/* count of application keys which are invalid due to being blacklisted */
+#define MC_CMD_LICENSING_OUT_BLACKLISTED_APP_KEYS_OFST 8
+#define MC_CMD_LICENSING_OUT_BLACKLISTED_APP_KEYS_LEN 4
+/* count of application keys which are invalid due to being unverifiable */
+#define MC_CMD_LICENSING_OUT_UNVERIFIABLE_APP_KEYS_OFST 12
+#define MC_CMD_LICENSING_OUT_UNVERIFIABLE_APP_KEYS_LEN 4
+/* count of application keys which are invalid due to being for the wrong node
+ */
+#define MC_CMD_LICENSING_OUT_WRONG_NODE_APP_KEYS_OFST 16
+#define MC_CMD_LICENSING_OUT_WRONG_NODE_APP_KEYS_LEN 4
+/* licensing state (for diagnostics; the exact meaning of the bits in this
+ * field is private to the firmware)
+ */
+#define MC_CMD_LICENSING_OUT_LICENSING_STATE_OFST 20
+#define MC_CMD_LICENSING_OUT_LICENSING_STATE_LEN 4
+/* licensing subsystem self-test report (for manftest) */
+#define MC_CMD_LICENSING_OUT_LICENSING_SELF_TEST_OFST 24
+#define MC_CMD_LICENSING_OUT_LICENSING_SELF_TEST_LEN 4
+/* enum: licensing subsystem self-test failed */
+#define MC_CMD_LICENSING_OUT_SELF_TEST_FAIL 0x0
+/* enum: licensing subsystem self-test passed */
+#define MC_CMD_LICENSING_OUT_SELF_TEST_PASS 0x1
+
+
+/***********************************/
+/* MC_CMD_LICENSING_V3
+ * Operations on the NVRAM_PARTITION_TYPE_LICENSE application license partition
+ * - V3 licensing (Medford)
+ */
+#define MC_CMD_LICENSING_V3 0xd0
+#undef MC_CMD_0xd0_PRIVILEGE_CTG
+
+#define MC_CMD_0xd0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSING_V3_IN msgrequest */
+#define MC_CMD_LICENSING_V3_IN_LEN 4
+/* identifies the type of operation requested */
+#define MC_CMD_LICENSING_V3_IN_OP_OFST 0
+#define MC_CMD_LICENSING_V3_IN_OP_LEN 4
+/* enum: re-read and apply licenses after a license key partition update; note
+ * that this operation returns a zero-length response
+ */
+#define MC_CMD_LICENSING_V3_IN_OP_UPDATE_LICENSE 0x0
+/* enum: report counts of installed licenses. Returns EAGAIN if license
+ * processing (updating) has been started but not yet completed.
+ */
+#define MC_CMD_LICENSING_V3_IN_OP_REPORT_LICENSE 0x1
+
+/* MC_CMD_LICENSING_V3_OUT msgresponse */
+#define MC_CMD_LICENSING_V3_OUT_LEN 88
+/* count of keys which are valid */
+#define MC_CMD_LICENSING_V3_OUT_VALID_KEYS_OFST 0
+#define MC_CMD_LICENSING_V3_OUT_VALID_KEYS_LEN 4
+/* sum of UNVERIFIABLE_KEYS + WRONG_NODE_KEYS (for compatibility with
+ * MC_CMD_FC_OP_LICENSE)
+ */
+#define MC_CMD_LICENSING_V3_OUT_INVALID_KEYS_OFST 4
+#define MC_CMD_LICENSING_V3_OUT_INVALID_KEYS_LEN 4
+/* count of keys which are invalid due to being unverifiable */
+#define MC_CMD_LICENSING_V3_OUT_UNVERIFIABLE_KEYS_OFST 8
+#define MC_CMD_LICENSING_V3_OUT_UNVERIFIABLE_KEYS_LEN 4
+/* count of keys which are invalid due to being for the wrong node */
+#define MC_CMD_LICENSING_V3_OUT_WRONG_NODE_KEYS_OFST 12
+#define MC_CMD_LICENSING_V3_OUT_WRONG_NODE_KEYS_LEN 4
+/* licensing state (for diagnostics; the exact meaning of the bits in this
+ * field is private to the firmware)
+ */
+#define MC_CMD_LICENSING_V3_OUT_LICENSING_STATE_OFST 16
+#define MC_CMD_LICENSING_V3_OUT_LICENSING_STATE_LEN 4
+/* licensing subsystem self-test report (for manftest) */
+#define MC_CMD_LICENSING_V3_OUT_LICENSING_SELF_TEST_OFST 20
+#define MC_CMD_LICENSING_V3_OUT_LICENSING_SELF_TEST_LEN 4
+/* enum: licensing subsystem self-test failed */
+#define MC_CMD_LICENSING_V3_OUT_SELF_TEST_FAIL 0x0
+/* enum: licensing subsystem self-test passed */
+#define MC_CMD_LICENSING_V3_OUT_SELF_TEST_PASS 0x1
+/* bitmask of licensed applications */
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_OFST 24
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_LEN 8
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_LO_OFST 24
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_LO_LEN 4
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_LO_LBN 192
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_LO_WIDTH 32
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_HI_OFST 28
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_HI_LEN 4
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_HI_LBN 224
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_HI_WIDTH 32
+/* reserved for future use */
+#define MC_CMD_LICENSING_V3_OUT_RESERVED_0_OFST 32
+#define MC_CMD_LICENSING_V3_OUT_RESERVED_0_LEN 24
+/* bitmask of licensed features */
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_OFST 56
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_LEN 8
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_LO_OFST 56
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_LO_LEN 4
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_LO_LBN 448
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_LO_WIDTH 32
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_HI_OFST 60
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_HI_LEN 4
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_HI_LBN 480
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_HI_WIDTH 32
+/* reserved for future use */
+#define MC_CMD_LICENSING_V3_OUT_RESERVED_1_OFST 64
+#define MC_CMD_LICENSING_V3_OUT_RESERVED_1_LEN 24
+
+
+/***********************************/
+/* MC_CMD_LICENSING_GET_ID_V3
+ * Get ID and type from the NVRAM_PARTITION_TYPE_LICENSE application license
+ * partition - V3 licensing (Medford)
+ */
+#define MC_CMD_LICENSING_GET_ID_V3 0xd1
+#undef MC_CMD_0xd1_PRIVILEGE_CTG
+
+#define MC_CMD_0xd1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
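+
+/* A minimal read sketch for this command (illustrative only, assuming the
+ * MCDI helpers from mcdi.h and an initialised struct efx_nic *efx; the
+ * LICENSE_ID_LENGTH field gives the number of valid LICENSE_ID bytes):
+ *
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_LICENSING_GET_ID_V3_OUT_LENMAX);
+ *	size_t outlen;
+ *	int rc;
+ *
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_LICENSING_GET_ID_V3, NULL, 0,
+ *			  outbuf, sizeof(outbuf), &outlen);
+ *	if (rc == 0) {
+ *		u32 id_len = MCDI_DWORD(outbuf,
+ *				LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH);
+ *		const u8 *id = MCDI_PTR(outbuf,
+ *				LICENSING_GET_ID_V3_OUT_LICENSE_ID);
+ *	}
+ */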
+
+/* MC_CMD_LICENSING_GET_ID_V3_IN msgrequest */
+#define MC_CMD_LICENSING_GET_ID_V3_IN_LEN 0
+
+/* MC_CMD_LICENSING_GET_ID_V3_OUT msgresponse */
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN 8
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LENMAX 252
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LEN(num) (8+1*(num))
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_NUM(len) (((len)-8)/1)
+/* type of license (e.g. 3) */
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_OFST 0
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_LEN 4
+/* length of the license ID (in bytes) */
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH_OFST 4
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH_LEN 4
+/* the unique license ID of the adapter */
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_OFST 8
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LEN 1
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MINNUM 0
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MAXNUM 244
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MAXNUM_MCDI2 1012
+
+
+/***********************************/
+/* MC_CMD_MC2MC_PROXY
+ * Execute an arbitrary MCDI command on the slave MC of a dual-core device.
+ * This will fail on a single-core system.
+ */
+#define MC_CMD_MC2MC_PROXY 0xf4
+#undef MC_CMD_0xf4_PRIVILEGE_CTG
+
+#define MC_CMD_0xf4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_MC2MC_PROXY_IN msgrequest */
+#define MC_CMD_MC2MC_PROXY_IN_LEN 0
+
+/* MC_CMD_MC2MC_PROXY_OUT msgresponse */
+#define MC_CMD_MC2MC_PROXY_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_LICENSED_APP_STATE
+ * Query the state of an individual licensed application. (Note that the actual
+ * state may be invalidated by the MC_CMD_LICENSING OP_UPDATE_LICENSE operation
+ * or a reboot of the MC.) Not used for V3 licensing
+ */
+#define MC_CMD_GET_LICENSED_APP_STATE 0xf5
+#undef MC_CMD_0xf5_PRIVILEGE_CTG
+
+#define MC_CMD_0xf5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_LICENSED_APP_STATE_IN msgrequest */
+#define MC_CMD_GET_LICENSED_APP_STATE_IN_LEN 4
+/* application ID to query (LICENSED_APP_ID_xxx) */
+#define MC_CMD_GET_LICENSED_APP_STATE_IN_APP_ID_OFST 0
+#define MC_CMD_GET_LICENSED_APP_STATE_IN_APP_ID_LEN 4
+
+/* MC_CMD_GET_LICENSED_APP_STATE_OUT msgresponse */
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN 4
+/* state of this application */
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_STATE_OFST 0
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_STATE_LEN 4
+/* enum: no (or invalid) license is present for the application */
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_NOT_LICENSED 0x0
+/* enum: a valid license is present for the application */
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_LICENSED 0x1
+
+
+/***********************************/
+/* MC_CMD_GET_LICENSED_V3_APP_STATE
+ * Query the state of an individual licensed application. (Note that the actual
+ * state may be invalidated by the MC_CMD_LICENSING_V3 OP_UPDATE_LICENSE
+ * operation or a reboot of the MC.) Used for V3 licensing (Medford)
+ */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE 0xd2
+#undef MC_CMD_0xd2_PRIVILEGE_CTG
+
+#define MC_CMD_0xd2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_LICENSED_V3_APP_STATE_IN msgrequest */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_LEN 8
+/* application ID to query (LICENSED_V3_APPS_xxx) expressed as a single bit
+ * mask
+ */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_OFST 0
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LEN 8
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO_OFST 0
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO_LEN 4
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO_LBN 0
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO_WIDTH 32
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI_OFST 4
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI_LEN 4
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI_LBN 32
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI_WIDTH 32
+
+/* MC_CMD_GET_LICENSED_V3_APP_STATE_OUT msgresponse */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN 4
+/* state of this application */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_STATE_OFST 0
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_STATE_LEN 4
+/* enum: no (or invalid) license is present for the application */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_NOT_LICENSED 0x0
+/* enum: a valid license is present for the application */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LICENSED 0x1
+
+
+/***********************************/
+/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES
+ * Query the state of one or more licensed features. (Note that the actual
+ * state may be invalidated by the MC_CMD_LICENSING_V3 OP_UPDATE_LICENSE
+ * operation or a reboot of the MC.) Used for V3 licensing (Medford)
+ */
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES 0xd3
+#undef MC_CMD_0xd3_PRIVILEGE_CTG
+
+#define MC_CMD_0xd3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN msgrequest */
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_LEN 8
+/* features to query (LICENSED_V3_FEATURES_xxx) expressed as a mask with one or
+ * more bits set
+ */
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_OFST 0
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LEN 8
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LO_OFST 0
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LO_LEN 4
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LO_LBN 0
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LO_WIDTH 32
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_HI_OFST 4
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_HI_LEN 4
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_HI_LBN 32
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_HI_WIDTH 32
+
+/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT msgresponse */
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_LEN 8
+/* states of these features - bit set for licensed, clear for not licensed */
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_OFST 0
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LEN 8
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LO_OFST 0
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LO_LEN 4
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LO_LBN 0
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LO_WIDTH 32
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_HI_OFST 4
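+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_HI_LEN 4
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_HI_LBN 32
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_HI_WIDTH 32
+
+/* The 64-bit FEATURES/STATES fields above are split into _LO/_HI dwords. A
+ * minimal query sketch (illustrative only, assuming the MCDI helpers from
+ * mcdi.h, an initialised struct efx_nic *efx and a caller-supplied
+ * LICENSED_V3_FEATURES_xxx bitmask features_mask):
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_LEN);
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_LEN);
+ *	u64 states;
+ *	size_t outlen;
+ *	int rc;
+ *
+ *	MCDI_SET_QWORD(inbuf, GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES,
+ *		       features_mask);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_GET_LICENSED_V3_FEATURE_STATES, inbuf,
+ *			  sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
+ *	if (rc == 0)
+ *		states = MCDI_QWORD(outbuf,
+ *				GET_LICENSED_V3_FEATURE_STATES_OUT_STATES);
+ */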
+
+
+/***********************************/
+/* MC_CMD_LICENSED_APP_OP
+ * Perform an action for an individual licensed application - not used for V3
+ * licensing.
+ */
+#define MC_CMD_LICENSED_APP_OP 0xf6
+#undef MC_CMD_0xf6_PRIVILEGE_CTG
+
+#define MC_CMD_0xf6_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSED_APP_OP_IN msgrequest */
+#define MC_CMD_LICENSED_APP_OP_IN_LENMIN 8
+#define MC_CMD_LICENSED_APP_OP_IN_LENMAX 252
+#define MC_CMD_LICENSED_APP_OP_IN_LENMAX_MCDI2 1020
+#define MC_CMD_LICENSED_APP_OP_IN_LEN(num) (8+4*(num))
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_NUM(len) (((len)-8)/4)
+/* application ID */
+#define MC_CMD_LICENSED_APP_OP_IN_APP_ID_OFST 0
+#define MC_CMD_LICENSED_APP_OP_IN_APP_ID_LEN 4
+/* the type of operation requested */
+#define MC_CMD_LICENSED_APP_OP_IN_OP_OFST 4
+#define MC_CMD_LICENSED_APP_OP_IN_OP_LEN 4
+/* enum: validate application */
+#define MC_CMD_LICENSED_APP_OP_IN_OP_VALIDATE 0x0
+/* enum: mask application */
+#define MC_CMD_LICENSED_APP_OP_IN_OP_MASK 0x1
+/* arguments specific to this particular operation */
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_OFST 8
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_LEN 4
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_MINNUM 0
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_MAXNUM 61
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_MAXNUM_MCDI2 253
+
+/* MC_CMD_LICENSED_APP_OP_OUT msgresponse */
+#define MC_CMD_LICENSED_APP_OP_OUT_LENMIN 0
+#define MC_CMD_LICENSED_APP_OP_OUT_LENMAX 252
+#define MC_CMD_LICENSED_APP_OP_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_LICENSED_APP_OP_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_NUM(len) (((len)-0)/4)
+/* result specific to this particular operation */
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_OFST 0
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_LEN 4
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MINNUM 0
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MAXNUM 63
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MAXNUM_MCDI2 255
+
+/* MC_CMD_LICENSED_APP_OP_VALIDATE_IN msgrequest */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_LEN 72
+/* application ID */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_APP_ID_OFST 0
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_APP_ID_LEN 4
+/* the type of operation requested */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_OP_OFST 4
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_OP_LEN 4
+/* validation challenge */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_OFST 8
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_LEN 64
+
+/* MC_CMD_LICENSED_APP_OP_VALIDATE_OUT msgresponse */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_LEN 68
+/* feature expiry (time_t) */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_EXPIRY_OFST 0
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_EXPIRY_LEN 4
+/* validation response */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_OFST 4
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_LEN 64
+
+/* MC_CMD_LICENSED_APP_OP_MASK_IN msgrequest */
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_LEN 12
+/* application ID */
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_OFST 0
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_LEN 4
+/* the type of operation requested */
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_OP_OFST 4
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_OP_LEN 4
+/* flag */
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_OFST 8
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_LEN 4
+
+/* MC_CMD_LICENSED_APP_OP_MASK_OUT msgresponse */
+#define MC_CMD_LICENSED_APP_OP_MASK_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_LICENSED_V3_VALIDATE_APP
+ * Perform validation for an individual licensed application - V3 licensing
+ * (Medford)
+ */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP 0xd4
+#undef MC_CMD_0xd4_PRIVILEGE_CTG
+
+#define MC_CMD_0xd4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSED_V3_VALIDATE_APP_IN msgrequest */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_LEN 56
+/* challenge for validation (384 bits) */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_CHALLENGE_OFST 0
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_CHALLENGE_LEN 48
+/* application ID expressed as a single bit mask */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_OFST 48
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LEN 8
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LO_OFST 48
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LO_LEN 4
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LO_LBN 384
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LO_WIDTH 32
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_HI_OFST 52
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_HI_LEN 4
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_HI_LBN 416
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_HI_WIDTH 32
+
+/* MC_CMD_LICENSED_V3_VALIDATE_APP_OUT msgresponse */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_LEN 116
+/* validation response to challenge in the form of ECDSA signature consisting
+ * of two 384-bit integers, r and s, in big-endian order. The signature signs a
+ * SHA-384 digest of a message constructed from the concatenation of the input
+ * message and the remaining fields of this output message, e.g. challenge[48
+ * bytes] ... expiry_time[4 bytes] ...
+ */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_OFST 0
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_LEN 96
+/* application expiry time */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_OFST 96
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_LEN 4
+/* application expiry units */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_OFST 100
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_LEN 4
+/* enum: expiry units are accounting units */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_ACC 0x0
+/* enum: expiry units are calendar days */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_DAYS 0x1
+/* base MAC address of the NIC stored in NVRAM (note that this is a constant
+ * value for a given NIC regardless which function is calling, effectively this
+ * is PF0 base MAC address)
+ */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_BASE_MACADDR_OFST 104
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_BASE_MACADDR_LEN 6
+/* MAC address of v-adaptor associated with the client. If no such v-adaptor
+ * exists, then the field is filled with 0xFF.
+ */ +#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_VADAPTOR_MACADDR_OFST 110 +#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_VADAPTOR_MACADDR_LEN 6 + + +/***********************************/ +/* MC_CMD_LICENSED_V3_MASK_FEATURES + * Mask features - V3 licensing (Medford) + */ +#define MC_CMD_LICENSED_V3_MASK_FEATURES 0xd5 +#undef MC_CMD_0xd5_PRIVILEGE_CTG + +#define MC_CMD_0xd5_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_LICENSED_V3_MASK_FEATURES_IN msgrequest */ +#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_LEN 12 +/* mask to be applied to features to be changed */ +#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_OFST 0 +#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LEN 8 +#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LO_OFST 0 +#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LO_LEN 4 +#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LO_LBN 0 +#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LO_WIDTH 32 +#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_OFST 4 +#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_LEN 4 +#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_LBN 32 +#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_WIDTH 32 +/* whether to turn on or turn off the masked features */ +#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_OFST 8 +#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_LEN 4 +/* enum: turn the features off */ +#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_OFF 0x0 +/* enum: turn the features back on */ +#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_ON 0x1 + +/* MC_CMD_LICENSED_V3_MASK_FEATURES_OUT msgresponse */ +#define MC_CMD_LICENSED_V3_MASK_FEATURES_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_LICENSING_V3_TEMPORARY + * Perform operations to support installation of a single temporary license in + * the adapter, in addition to those found in the licensing partition. See + * SF-116124-SW for an overview of how this could be used. The license is + * stored in MC persistent data and so will survive a MC reboot, but will be + * erased when the adapter is power cycled + */ +#define MC_CMD_LICENSING_V3_TEMPORARY 0xd6 +#undef MC_CMD_0xd6_PRIVILEGE_CTG + +#define MC_CMD_0xd6_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND + +/* MC_CMD_LICENSING_V3_TEMPORARY_IN msgrequest */ +#define MC_CMD_LICENSING_V3_TEMPORARY_IN_LEN 4 +/* operation code */ +#define MC_CMD_LICENSING_V3_TEMPORARY_IN_OP_OFST 0 +#define MC_CMD_LICENSING_V3_TEMPORARY_IN_OP_LEN 4 +/* enum: install a new license, overwriting any existing temporary license. 
+ * This is an asynchronous operation owing to the time taken to validate an + * ECDSA license + */ +#define MC_CMD_LICENSING_V3_TEMPORARY_SET 0x0 +/* enum: clear the license immediately rather than waiting for the next power + * cycle + */ +#define MC_CMD_LICENSING_V3_TEMPORARY_CLEAR 0x1 +/* enum: get the status of the asynchronous MC_CMD_LICENSING_V3_TEMPORARY_SET + * operation + */ +#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS 0x2 + +/* MC_CMD_LICENSING_V3_TEMPORARY_IN_SET msgrequest */ +#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LEN 164 +#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_OP_OFST 0 +#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_OP_LEN 4 +/* ECDSA license and signature */ +#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_OFST 4 +#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_LEN 160 + +/* MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR msgrequest */ +#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_LEN 4 +#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_OP_OFST 0 +#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_OP_LEN 4 + +/* MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS msgrequest */ +#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_LEN 4 +#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_OP_OFST 0 +#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_OP_LEN 4 + +/* MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS msgresponse */ +#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LEN 12 +/* status code */ +#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS_OFST 0 +#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS_LEN 4 +/* enum: finished validating and installing license */ +#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_OK 0x0 +/* enum: license validation and installation in progress */ +#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_IN_PROGRESS 0x1 +/* enum: licensing error. More specific error messages are not provided to + * avoid exposing details of the licensing system to the client + */ +#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_ERROR 0x2 +/* bitmask of licensed features */ +#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_OFST 4 +#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LEN 8 +#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LO_OFST 4 +#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LO_LEN 4 +#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LO_LBN 32 +#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LO_WIDTH 32 +#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_HI_OFST 8 +#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_HI_LEN 4 +#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_HI_LBN 64 +#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_HI_WIDTH 32 + + +/***********************************/ +/* MC_CMD_SET_PORT_SNIFF_CONFIG + * Configure RX port sniffing for the physical port associated with the calling + * function. Only a privileged function may change the port sniffing + * configuration. A copy of all traffic delivered to the host (non-promiscuous + * mode) or all traffic arriving at the port (promiscuous mode) may be + * delivered to a specific queue, or a set of queues with RSS. 
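+ *
+ * A minimal configuration sketch (illustrative only, assuming the MCDI
+ * helpers from mcdi.h, an initialised struct efx_nic *efx and a
+ * caller-supplied RX queue handle rxq; this steers sniffed traffic to a
+ * single queue without RSS):
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_PORT_SNIFF_CONFIG_IN_LEN);
+ *	int rc;
+ *
+ *	MCDI_POPULATE_DWORD_2(inbuf, SET_PORT_SNIFF_CONFIG_IN_FLAGS,
+ *			      SET_PORT_SNIFF_CONFIG_IN_ENABLE, 1,
+ *			      SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS, 0);
+ *	MCDI_SET_DWORD(inbuf, SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE, rxq);
+ *	MCDI_SET_DWORD(inbuf, SET_PORT_SNIFF_CONFIG_IN_RX_MODE,
+ *		       MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE);
+ *	MCDI_SET_DWORD(inbuf, SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT, 0xffffffff);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_SET_PORT_SNIFF_CONFIG, inbuf,
+ *			  sizeof(inbuf), NULL, 0, NULL);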
+ */ +#define MC_CMD_SET_PORT_SNIFF_CONFIG 0xf7 +#undef MC_CMD_0xf7_PRIVILEGE_CTG + +#define MC_CMD_0xf7_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_SET_PORT_SNIFF_CONFIG_IN msgrequest */ +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_LEN 16 +/* configuration flags */ +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0 +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_LEN 4 +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_OFST 0 +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0 +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1 +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_OFST 0 +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_LBN 1 +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_WIDTH 1 +/* receive queue handle (for RSS mode, this is the base queue) */ +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4 +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_LEN 4 +/* receive mode */ +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8 +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_LEN 4 +/* enum: receive to just the specified queue */ +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0 +/* enum: receive to multiple queues using RSS context */ +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS 0x1 +/* RSS context (for RX_MODE_RSS) as returned by MC_CMD_RSS_CONTEXT_ALLOC. Note + * that these handles should be considered opaque to the host, although a value + * of 0xFFFFFFFF is guaranteed never to be a valid handle. + */ +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12 +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_LEN 4 + +/* MC_CMD_SET_PORT_SNIFF_CONFIG_OUT msgresponse */ +#define MC_CMD_SET_PORT_SNIFF_CONFIG_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_PORT_SNIFF_CONFIG + * Obtain the current RX port sniffing configuration for the physical port + * associated with the calling function. Only a privileged function may read + * the configuration. 
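+ *
+ * A minimal read sketch (illustrative only, assuming the MCDI helpers from
+ * mcdi.h and an initialised struct efx_nic *efx):
+ *
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_LEN);
+ *	size_t outlen;
+ *	int rc;
+ *
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_GET_PORT_SNIFF_CONFIG, NULL, 0,
+ *			  outbuf, sizeof(outbuf), &outlen);
+ *	if (rc == 0) {
+ *		u32 flags = MCDI_DWORD(outbuf, GET_PORT_SNIFF_CONFIG_OUT_FLAGS);
+ *		bool enabled = flags &
+ *			BIT(MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN);
+ *	}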
+ */ +#define MC_CMD_GET_PORT_SNIFF_CONFIG 0xf8 +#undef MC_CMD_0xf8_PRIVILEGE_CTG + +#define MC_CMD_0xf8_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_PORT_SNIFF_CONFIG_IN msgrequest */ +#define MC_CMD_GET_PORT_SNIFF_CONFIG_IN_LEN 0 + +/* MC_CMD_GET_PORT_SNIFF_CONFIG_OUT msgresponse */ +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_LEN 16 +/* configuration flags */ +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0 +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_FLAGS_LEN 4 +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_OFST 0 +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0 +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1 +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_OFST 0 +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_LBN 1 +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_WIDTH 1 +/* receiving queue handle (for RSS mode, this is the base queue) */ +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4 +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_LEN 4 +/* receive mode */ +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8 +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_LEN 4 +/* enum: receiving to just the specified queue */ +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0 +/* enum: receiving to multiple queues using RSS context */ +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1 +/* RSS context (for RX_MODE_RSS) */ +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12 +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_LEN 4 + + +/***********************************/ +/* MC_CMD_SET_PARSER_DISP_CONFIG + * Change configuration related to the parser-dispatcher subsystem. + */ +#define MC_CMD_SET_PARSER_DISP_CONFIG 0xf9 +#undef MC_CMD_0xf9_PRIVILEGE_CTG + +#define MC_CMD_0xf9_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_SET_PARSER_DISP_CONFIG_IN msgrequest */ +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LENMIN 12 +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LENMAX 252 +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LENMAX_MCDI2 1020 +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LEN(num) (8+4*(num)) +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_NUM(len) (((len)-8)/4) +/* the type of configuration setting to change */ +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0 +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TYPE_LEN 4 +/* enum: Per-TXQ enable for multicast UDP destination lookup for possible + * internal loopback. (ENTITY is a queue handle, VALUE is a single boolean.) + */ +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TXQ_MCAST_UDP_DST_LOOKUP_EN 0x0 +/* enum: Per-v-adaptor enable for suppression of self-transmissions on the + * internal loopback path. (ENTITY is an EVB_PORT_ID, VALUE is a single + * boolean.) + */ +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VADAPTOR_SUPPRESS_SELF_TX 0x1 +/* handle for the entity to update: queue handle, EVB port ID, etc. 
depending + * on the type of configuration setting being changed + */ +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4 +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_ENTITY_LEN 4 +/* new value: the details depend on the type of configuration setting being + * changed + */ +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_OFST 8 +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_LEN 4 +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_MINNUM 1 +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_MAXNUM 61 +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_MAXNUM_MCDI2 253 + +/* MC_CMD_SET_PARSER_DISP_CONFIG_OUT msgresponse */ +#define MC_CMD_SET_PARSER_DISP_CONFIG_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_PARSER_DISP_CONFIG + * Read configuration related to the parser-dispatcher subsystem. + */ +#define MC_CMD_GET_PARSER_DISP_CONFIG 0xfa +#undef MC_CMD_0xfa_PRIVILEGE_CTG + +#define MC_CMD_0xfa_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_PARSER_DISP_CONFIG_IN msgrequest */ +#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_LEN 8 +/* the type of configuration setting to read */ +#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0 +#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_SET_PARSER_DISP_CONFIG/MC_CMD_SET_PARSER_DISP_CONFIG_IN/TYPE */ +/* handle for the entity to query: queue handle, EVB port ID, etc. depending on + * the type of configuration setting being read + */ +#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4 +#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_LEN 4 + +/* MC_CMD_GET_PARSER_DISP_CONFIG_OUT msgresponse */ +#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMIN 4 +#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMAX 252 +#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LEN(num) (0+4*(num)) +#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_NUM(len) (((len)-0)/4) +/* current value: the details depend on the type of configuration setting being + * read + */ +#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_OFST 0 +#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_LEN 4 +#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MINNUM 1 +#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MAXNUM 63 +#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MAXNUM_MCDI2 255 + + +/***********************************/ +/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG + * Configure TX port sniffing for the physical port associated with the calling + * function. Only a privileged function may change the port sniffing + * configuration. A copy of all traffic transmitted through the port may be + * delivered to a specific queue, or a set of queues with RSS. Note that these + * packets are delivered with transmit timestamps in the packet prefix, not + * receive timestamps, so it is likely that the queue(s) will need to be + * dedicated as TX sniff receivers. 
+ */ +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG 0xfb +#undef MC_CMD_0xfb_PRIVILEGE_CTG + +#define MC_CMD_0xfb_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN msgrequest */ +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_LEN 16 +/* configuration flags */ +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0 +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS_LEN 4 +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_OFST 0 +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0 +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1 +/* receive queue handle (for RSS mode, this is the base queue) */ +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4 +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE_LEN 4 +/* receive mode */ +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8 +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_LEN 4 +/* enum: receive to just the specified queue */ +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0 +/* enum: receive to multiple queues using RSS context */ +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS 0x1 +/* RSS context (for RX_MODE_RSS) as returned by MC_CMD_RSS_CONTEXT_ALLOC. Note + * that these handles should be considered opaque to the host, although a value + * of 0xFFFFFFFF is guaranteed never to be a valid handle. + */ +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12 +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_LEN 4 + +/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT msgresponse */ +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG + * Obtain the current TX port sniffing configuration for the physical port + * associated with the calling function. Only a privileged function may read + * the configuration. + */ +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG 0xfc +#undef MC_CMD_0xfc_PRIVILEGE_CTG + +#define MC_CMD_0xfc_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG_IN msgrequest */ +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_IN_LEN 0 + +/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT msgresponse */ +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_LEN 16 +/* configuration flags */ +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0 +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_FLAGS_LEN 4 +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_OFST 0 +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0 +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1 +/* receiving queue handle (for RSS mode, this is the base queue) */ +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4 +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_LEN 4 +/* receive mode */ +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8 +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_LEN 4 +/* enum: receiving to just the specified queue */ +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0 +/* enum: receiving to multiple queues using RSS context */ +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1 +/* RSS context (for RX_MODE_RSS) */ +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12 +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_LEN 4 + + +/***********************************/ +/* MC_CMD_RMON_STATS_RX_ERRORS + * Per queue rx error stats. 
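+ *
+ * A minimal read sketch for one RX queue (illustrative only, assuming the
+ * MCDI helpers from mcdi.h, an initialised struct efx_nic *efx and a
+ * caller-supplied RX queue handle rxq):
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_RMON_STATS_RX_ERRORS_IN_LEN);
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_RMON_STATS_RX_ERRORS_OUT_LEN);
+ *	size_t outlen;
+ *	int rc;
+ *
+ *	MCDI_SET_DWORD(inbuf, RMON_STATS_RX_ERRORS_IN_RX_QUEUE, rxq);
+ *	MCDI_SET_DWORD(inbuf, RMON_STATS_RX_ERRORS_IN_FLAGS, 0);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_RMON_STATS_RX_ERRORS, inbuf,
+ *			  sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
+ *	if (rc == 0) {
+ *		u32 crc_errors = MCDI_DWORD(outbuf,
+ *				RMON_STATS_RX_ERRORS_OUT_CRC_ERRORS);
+ *	}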
+ */ +#define MC_CMD_RMON_STATS_RX_ERRORS 0xfe +#undef MC_CMD_0xfe_PRIVILEGE_CTG + +#define MC_CMD_0xfe_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_RMON_STATS_RX_ERRORS_IN msgrequest */ +#define MC_CMD_RMON_STATS_RX_ERRORS_IN_LEN 8 +/* The rx queue to get stats for. */ +#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RX_QUEUE_OFST 0 +#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RX_QUEUE_LEN 4 +#define MC_CMD_RMON_STATS_RX_ERRORS_IN_FLAGS_OFST 4 +#define MC_CMD_RMON_STATS_RX_ERRORS_IN_FLAGS_LEN 4 +#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_OFST 4 +#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_LBN 0 +#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_WIDTH 1 + +/* MC_CMD_RMON_STATS_RX_ERRORS_OUT msgresponse */ +#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_LEN 16 +#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_CRC_ERRORS_OFST 0 +#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_CRC_ERRORS_LEN 4 +#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_TRUNC_ERRORS_OFST 4 +#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_TRUNC_ERRORS_LEN 4 +#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_NO_DESC_DROPS_OFST 8 +#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_NO_DESC_DROPS_LEN 4 +#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_ABORT_OFST 12 +#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_ABORT_LEN 4 + + +/***********************************/ +/* MC_CMD_GET_PCIE_RESOURCE_INFO + * Find out about available PCIE resources + */ +#define MC_CMD_GET_PCIE_RESOURCE_INFO 0xfd +#undef MC_CMD_0xfd_PRIVILEGE_CTG + +#define MC_CMD_0xfd_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_PCIE_RESOURCE_INFO_IN msgrequest */ +#define MC_CMD_GET_PCIE_RESOURCE_INFO_IN_LEN 0 + +/* MC_CMD_GET_PCIE_RESOURCE_INFO_OUT msgresponse */ +#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_LEN 28 +/* The maximum number of PFs the device can expose */ +#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PFS_OFST 0 +#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PFS_LEN 4 +/* The maximum number of VFs the device can expose in total */ +#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VFS_OFST 4 +#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VFS_LEN 4 +/* The maximum number of MSI-X vectors the device can provide in total */ +#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VECTORS_OFST 8 +#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VECTORS_LEN 4 +/* the number of MSI-X vectors the device will allocate by default to each PF + */ +#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_PF_VECTORS_OFST 12 +#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_PF_VECTORS_LEN 4 +/* the number of MSI-X vectors the device will allocate by default to each VF + */ +#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_VF_VECTORS_OFST 16 +#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_VF_VECTORS_LEN 4 +/* the maximum number of MSI-X vectors the device can allocate to any one PF */ +#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PF_VECTORS_OFST 20 +#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PF_VECTORS_LEN 4 +/* the maximum number of MSI-X vectors the device can allocate to any one VF */ +#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VF_VECTORS_OFST 24 +#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VF_VECTORS_LEN 4 + + +/***********************************/ +/* MC_CMD_GET_PORT_MODES + * Find out about available port modes + */ +#define MC_CMD_GET_PORT_MODES 0xff +#undef MC_CMD_0xff_PRIVILEGE_CTG + +#define MC_CMD_0xff_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_PORT_MODES_IN msgrequest */ +#define MC_CMD_GET_PORT_MODES_IN_LEN 0 + +/* MC_CMD_GET_PORT_MODES_OUT msgresponse */ +#define MC_CMD_GET_PORT_MODES_OUT_LEN 12 +/* Bitmask of port modes available on 
the board (indexed by TLV_PORT_MODE_*)
+ * that are supported for customer use in production firmware.
+ */
+#define MC_CMD_GET_PORT_MODES_OUT_MODES_OFST 0
+#define MC_CMD_GET_PORT_MODES_OUT_MODES_LEN 4
+/* Default (canonical) board mode */
+#define MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_OFST 4
+#define MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_LEN 4
+/* Current board mode */
+#define MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST 8
+#define MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_LEN 4
+
+/* MC_CMD_GET_PORT_MODES_OUT_V2 msgresponse */
+#define MC_CMD_GET_PORT_MODES_OUT_V2_LEN 16
+/* Bitmask of port modes available on the board (indexed by TLV_PORT_MODE_*)
+ * that are supported for customer use in production firmware.
+ */
+#define MC_CMD_GET_PORT_MODES_OUT_V2_MODES_OFST 0
+#define MC_CMD_GET_PORT_MODES_OUT_V2_MODES_LEN 4
+/* Default (canonical) board mode */
+#define MC_CMD_GET_PORT_MODES_OUT_V2_DEFAULT_MODE_OFST 4
+#define MC_CMD_GET_PORT_MODES_OUT_V2_DEFAULT_MODE_LEN 4
+/* Current board mode */
+#define MC_CMD_GET_PORT_MODES_OUT_V2_CURRENT_MODE_OFST 8
+#define MC_CMD_GET_PORT_MODES_OUT_V2_CURRENT_MODE_LEN 4
+/* Bitmask of engineering port modes available on the board (indexed by
+ * TLV_PORT_MODE_*). A superset of MC_CMD_GET_PORT_MODES_OUT/MODES that
+ * contains all modes implemented in firmware for a particular board. Modes
+ * listed in MODES are considered production modes and should be exposed in
+ * userland tools. Modes listed in ENGINEERING_MODES but not in MODES should
+ * be considered hidden (not to be exposed in userland tools) and for
+ * engineering use only. There are no other semantic differences and any mode
+ * listed in either MODES or ENGINEERING_MODES can be set on the board.
+ */
+#define MC_CMD_GET_PORT_MODES_OUT_V2_ENGINEERING_MODES_OFST 12
+#define MC_CMD_GET_PORT_MODES_OUT_V2_ENGINEERING_MODES_LEN 4
+
+
+/***********************************/
+/* MC_CMD_OVERRIDE_PORT_MODE
+ * Override flash config port mode for subsequent MC reboot(s). Override data
+ * is stored in the persistent data section of DMEM and activated on the next
+ * MC warm reboot. A cold reboot resets the override. It is assumed that a
+ * sufficient number of PFs are available and that port mapping is valid for
+ * the new port mode, as the override does not affect PF configuration.
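+ *
+ * Illustrative (non-normative) sketch of enabling an override, assuming the
+ * MCDI buffer helpers declared in mcdi.h and placeholder variables efx and
+ * new_port_mode:
+ *
+ *   MCDI_DECLARE_BUF(inbuf, MC_CMD_OVERRIDE_PORT_MODE_IN_LEN);
+ *   int rc;
+ *
+ *   MCDI_POPULATE_DWORD_1(inbuf, OVERRIDE_PORT_MODE_IN_FLAGS,
+ *                         OVERRIDE_PORT_MODE_IN_ENABLE, 1);
+ *   MCDI_SET_DWORD(inbuf, OVERRIDE_PORT_MODE_IN_MODE, new_port_mode);
+ *   rc = efx_mcdi_rpc(efx, MC_CMD_OVERRIDE_PORT_MODE,
+ *                     inbuf, sizeof(inbuf), NULL, 0, NULL);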
+ */ +#define MC_CMD_OVERRIDE_PORT_MODE 0x137 +#undef MC_CMD_0x137_PRIVILEGE_CTG + +#define MC_CMD_0x137_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_OVERRIDE_PORT_MODE_IN msgrequest */ +#define MC_CMD_OVERRIDE_PORT_MODE_IN_LEN 8 +#define MC_CMD_OVERRIDE_PORT_MODE_IN_FLAGS_OFST 0 +#define MC_CMD_OVERRIDE_PORT_MODE_IN_FLAGS_LEN 4 +#define MC_CMD_OVERRIDE_PORT_MODE_IN_ENABLE_OFST 0 +#define MC_CMD_OVERRIDE_PORT_MODE_IN_ENABLE_LBN 0 +#define MC_CMD_OVERRIDE_PORT_MODE_IN_ENABLE_WIDTH 1 +/* New mode (TLV_PORT_MODE_*) to set, if override enabled */ +#define MC_CMD_OVERRIDE_PORT_MODE_IN_MODE_OFST 4 +#define MC_CMD_OVERRIDE_PORT_MODE_IN_MODE_LEN 4 + +/* MC_CMD_OVERRIDE_PORT_MODE_OUT msgresponse */ +#define MC_CMD_OVERRIDE_PORT_MODE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_READ_ATB + * Sample voltages on the ATB + */ +#define MC_CMD_READ_ATB 0x100 +#undef MC_CMD_0x100_PRIVILEGE_CTG + +#define MC_CMD_0x100_PRIVILEGE_CTG SRIOV_CTG_INSECURE + +/* MC_CMD_READ_ATB_IN msgrequest */ +#define MC_CMD_READ_ATB_IN_LEN 16 +#define MC_CMD_READ_ATB_IN_SIGNAL_BUS_OFST 0 +#define MC_CMD_READ_ATB_IN_SIGNAL_BUS_LEN 4 +#define MC_CMD_READ_ATB_IN_BUS_CCOM 0x0 /* enum */ +#define MC_CMD_READ_ATB_IN_BUS_CKR 0x1 /* enum */ +#define MC_CMD_READ_ATB_IN_BUS_CPCIE 0x8 /* enum */ +#define MC_CMD_READ_ATB_IN_SIGNAL_EN_BITNO_OFST 4 +#define MC_CMD_READ_ATB_IN_SIGNAL_EN_BITNO_LEN 4 +#define MC_CMD_READ_ATB_IN_SIGNAL_SEL_OFST 8 +#define MC_CMD_READ_ATB_IN_SIGNAL_SEL_LEN 4 +#define MC_CMD_READ_ATB_IN_SETTLING_TIME_US_OFST 12 +#define MC_CMD_READ_ATB_IN_SETTLING_TIME_US_LEN 4 + +/* MC_CMD_READ_ATB_OUT msgresponse */ +#define MC_CMD_READ_ATB_OUT_LEN 4 +#define MC_CMD_READ_ATB_OUT_SAMPLE_MV_OFST 0 +#define MC_CMD_READ_ATB_OUT_SAMPLE_MV_LEN 4 + + +/***********************************/ +/* MC_CMD_GET_WORKAROUNDS + * Read the list of all implemented and all currently enabled workarounds. The + * enums here must correspond with those in MC_CMD_WORKAROUND. + */ +#define MC_CMD_GET_WORKAROUNDS 0x59 +#undef MC_CMD_0x59_PRIVILEGE_CTG + +#define MC_CMD_0x59_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_WORKAROUNDS_OUT msgresponse */ +#define MC_CMD_GET_WORKAROUNDS_OUT_LEN 8 +/* Each workaround is represented by a single bit according to the enums below. + */ +#define MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_OFST 0 +#define MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_LEN 4 +#define MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_OFST 4 +#define MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_LEN 4 +/* enum: Bug 17230 work around. */ +#define MC_CMD_GET_WORKAROUNDS_OUT_BUG17230 0x2 +/* enum: Bug 35388 work around (unsafe EVQ writes). */ +#define MC_CMD_GET_WORKAROUNDS_OUT_BUG35388 0x4 +/* enum: Bug35017 workaround (A64 tables must be identity map) */ +#define MC_CMD_GET_WORKAROUNDS_OUT_BUG35017 0x8 +/* enum: Bug 41750 present (MC_CMD_TRIGGER_INTERRUPT won't work) */ +#define MC_CMD_GET_WORKAROUNDS_OUT_BUG41750 0x10 +/* enum: Bug 42008 present (Interrupts can overtake associated events). Caution + * - before adding code that queries this workaround, remember that there's + * released Monza firmware that doesn't understand MC_CMD_WORKAROUND_BUG42008, + * and will hence (incorrectly) report that the bug doesn't exist. + */ +#define MC_CMD_GET_WORKAROUNDS_OUT_BUG42008 0x20 +/* enum: Bug 26807 features present in firmware (multicast filter chaining) */ +#define MC_CMD_GET_WORKAROUNDS_OUT_BUG26807 0x40 +/* enum: Bug 61265 work around (broken EVQ TMR writes). 
+ */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG61265 0x80
+
+
+/***********************************/
+/* MC_CMD_PRIVILEGE_MASK
+ * Read/set privileges of an arbitrary PCIe function
+ */
+#define MC_CMD_PRIVILEGE_MASK 0x5a
+#undef MC_CMD_0x5a_PRIVILEGE_CTG
+
+#define MC_CMD_0x5a_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_PRIVILEGE_MASK_IN msgrequest */
+#define MC_CMD_PRIVILEGE_MASK_IN_LEN 8
+/* The target function to have its mask read or set, e.g. PF 0 = 0xFFFF0000,
+ * VF 1,3 = 0x00030001
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_OFST 0
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_LEN 4
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_OFST 0
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_LBN 0
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_WIDTH 16
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_OFST 0
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_LBN 16
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_WIDTH 16
+#define MC_CMD_PRIVILEGE_MASK_IN_VF_NULL 0xffff /* enum */
+/* New privilege mask to be set. The mask will only be changed if the MSB is
+ * set to 1.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_NEW_MASK_OFST 4
+#define MC_CMD_PRIVILEGE_MASK_IN_NEW_MASK_LEN 4
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN 0x1 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK 0x2 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD 0x4 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_PTP 0x8 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE_FILTERS 0x10 /* enum */
+/* enum: Deprecated. Equivalent to MAC_SPOOFING_TX combined with CHANGE_MAC. */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING 0x20
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_UNICAST 0x40 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MULTICAST 0x80 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_BROADCAST 0x100 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ALL_MULTICAST 0x200 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_PROMISCUOUS 0x400 /* enum */
+/* enum: Allows setting the TX packets' source MAC address to an arbitrary MAC
+ * address.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING_TX 0x800
+/* enum: Privilege that allows a Function to change the MAC address configured
+ * in its associated vAdapter/vPort.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_CHANGE_MAC 0x1000
+/* enum: Privilege that allows a Function to install filters that specify VLANs
+ * that are not in the permit list for the associated vPort. This privilege is
+ * primarily to support ESX where vPorts are created that restrict traffic to
+ * only a set of permitted VLANs. See the vPort flag FLAG_VLAN_RESTRICT.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_UNRESTRICTED_VLAN 0x2000
+/* enum: Privilege for insecure commands. Commands that belong to this group
+ * are not permitted on secure adapters regardless of the privilege mask.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE 0x4000
+/* enum: Trusted Server Adapter (TSA) / ServerLock. Privilege for
+ * administrator-level operations that are not allowed from the local host once
+ * an adapter has Bound to a remote ServerLock Controller (see doxbox
+ * SF-117064-DG for background).
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN_TSA_UNBOUND 0x8000
+/* enum: Control the Match-Action Engine if present. See mcdi_mae.yml. */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAE 0x10000
+/* enum: This Function/client may call MC_CMD_CLIENT_ALLOC to create new
+ * dynamic client children of itself.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ALLOC_CLIENT 0x20000
+/* enum: A dynamic client with this privilege may perform all the same DMA
+ * operations as the function client from which it is descended.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_FUNC_DMA 0x40000
+/* enum: A client with this privilege may perform DMA as any PCIe function on
+ * the device and to on-device DDR. It allows clients to use TX-DESC2CMPT-DESC
+ * descriptors, and to use TX-SEG-DESC and TX-MEM2MEM-DESC with an address
+ * space override (i.e. with the ADDR_SPC_EN bit set).
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ARBITRARY_DMA 0x80000
+/* enum: Set this bit to indicate that a new privilege mask is to be set,
+ * otherwise the command will only read the existing mask.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_DO_CHANGE 0x80000000
+
+/* MC_CMD_PRIVILEGE_MASK_OUT msgresponse */
+#define MC_CMD_PRIVILEGE_MASK_OUT_LEN 4
+/* For an admin function, all privileges are always reported. */
+#define MC_CMD_PRIVILEGE_MASK_OUT_OLD_MASK_OFST 0
+#define MC_CMD_PRIVILEGE_MASK_OUT_OLD_MASK_LEN 4
+
+
+/***********************************/
+/* MC_CMD_LINK_STATE_MODE
+ * Read/set link state mode of a VF
+ */
+#define MC_CMD_LINK_STATE_MODE 0x5c
+#undef MC_CMD_0x5c_PRIVILEGE_CTG
+
+#define MC_CMD_0x5c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LINK_STATE_MODE_IN msgrequest */
+#define MC_CMD_LINK_STATE_MODE_IN_LEN 8
+/* The target function to have its link state mode read or set; it must be a
+ * VF, e.g. VF 1,3 = 0x00030001
+ */
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_OFST 0
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_LEN 4
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_OFST 0
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_LBN 0
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_WIDTH 16
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_OFST 0
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_LBN 16
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_WIDTH 16
+/* New link state mode to be set */
+#define MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_OFST 4
+#define MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_LEN 4
+#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_AUTO 0x0 /* enum */
+#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_UP 0x1 /* enum */
+#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_DOWN 0x2 /* enum */
+/* enum: Use this value to just read the existing setting without modifying it.
+ */
+#define MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE 0xffffffff
+
+/* MC_CMD_LINK_STATE_MODE_OUT msgresponse */
+#define MC_CMD_LINK_STATE_MODE_OUT_LEN 4
+#define MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_OFST 0
+#define MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_LEN 4
+
+
+/***********************************/
+/* MC_CMD_GET_SNAPSHOT_LENGTH
+ * Obtain the current range of allowable values for the SNAPSHOT_LENGTH
+ * parameter to MC_CMD_INIT_RXQ.
+ */
+#define MC_CMD_GET_SNAPSHOT_LENGTH 0x101
+#undef MC_CMD_0x101_PRIVILEGE_CTG
+
+#define MC_CMD_0x101_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_SNAPSHOT_LENGTH_IN msgrequest */
+#define MC_CMD_GET_SNAPSHOT_LENGTH_IN_LEN 0
+
+/* MC_CMD_GET_SNAPSHOT_LENGTH_OUT msgresponse */
+#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_LEN 8
+/* Minimum acceptable snapshot length. */
+#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MIN_OFST 0
+#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MIN_LEN 4
+/* Maximum acceptable snapshot length.
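+ *
+ * Illustrative (non-normative) sketch reading both bounds, assuming the MCDI
+ * buffer helpers declared in mcdi.h and a placeholder variable efx:
+ *
+ *   MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_SNAPSHOT_LENGTH_OUT_LEN);
+ *   size_t outlen;
+ *   int rc;
+ *
+ *   rc = efx_mcdi_rpc(efx, MC_CMD_GET_SNAPSHOT_LENGTH, NULL, 0,
+ *                     outbuf, sizeof(outbuf), &outlen);
+ *   snaplen_min = MCDI_DWORD(outbuf, GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MIN);
+ *   snaplen_max = MCDI_DWORD(outbuf, GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MAX);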
+ */
+#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MAX_OFST 4
+#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MAX_LEN 4
+
+
+/***********************************/
+/* MC_CMD_FUSE_DIAGS
+ * Additional fuse diagnostics
+ */
+#define MC_CMD_FUSE_DIAGS 0x102
+#undef MC_CMD_0x102_PRIVILEGE_CTG
+
+#define MC_CMD_0x102_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_FUSE_DIAGS_IN msgrequest */
+#define MC_CMD_FUSE_DIAGS_IN_LEN 0
+
+/* MC_CMD_FUSE_DIAGS_OUT msgresponse */
+#define MC_CMD_FUSE_DIAGS_OUT_LEN 48
+/* Total number of mismatched bits between pairs in area 0 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_OFST 0
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_LEN 4
+/* Total number of unexpectedly clear (set in B but not A) bits in area 0 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_OFST 4
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_LEN 4
+/* Total number of unexpectedly clear (set in A but not B) bits in area 0 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_OFST 8
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_LEN 4
+/* Checksum of data after logical OR of pairs in area 0 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_OFST 12
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_LEN 4
+/* Total number of mismatched bits between pairs in area 1 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_OFST 16
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_LEN 4
+/* Total number of unexpectedly clear (set in B but not A) bits in area 1 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_OFST 20
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_LEN 4
+/* Total number of unexpectedly clear (set in A but not B) bits in area 1 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_OFST 24
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_LEN 4
+/* Checksum of data after logical OR of pairs in area 1 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_OFST 28
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_LEN 4
+/* Total number of mismatched bits between pairs in area 2 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_OFST 32
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_LEN 4
+/* Total number of unexpectedly clear (set in B but not A) bits in area 2 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_OFST 36
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_LEN 4
+/* Total number of unexpectedly clear (set in A but not B) bits in area 2 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_OFST 40
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_LEN 4
+/* Checksum of data after logical OR of pairs in area 2 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_OFST 44
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_LEN 4
+
+
+/***********************************/
+/* MC_CMD_PRIVILEGE_MODIFY
+ * Modify the privileges of a set of PCIe functions. Note that this operation
+ * only affects non-admin functions unless the admin privilege itself is
+ * included in one of the masks provided.
+ */
+#define MC_CMD_PRIVILEGE_MODIFY 0x60
+#undef MC_CMD_0x60_PRIVILEGE_CTG
+
+#define MC_CMD_0x60_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PRIVILEGE_MODIFY_IN msgrequest */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_LEN 16
+/* The groups of functions to have their privilege masks modified.
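+ *
+ * Illustrative (non-normative) request granting the ONLOAD privilege to all
+ * VFs, assuming the MCDI buffer helpers declared in mcdi.h and a placeholder
+ * variable efx:
+ *
+ *   MCDI_DECLARE_BUF(inbuf, MC_CMD_PRIVILEGE_MODIFY_IN_LEN);
+ *   int rc;
+ *
+ *   MCDI_SET_DWORD(inbuf, PRIVILEGE_MODIFY_IN_FN_GROUP,
+ *                  MC_CMD_PRIVILEGE_MODIFY_IN_VFS_ONLY);
+ *   MCDI_SET_DWORD(inbuf, PRIVILEGE_MODIFY_IN_FUNCTION, 0);
+ *   MCDI_SET_DWORD(inbuf, PRIVILEGE_MODIFY_IN_ADD_MASK,
+ *                  MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD);
+ *   MCDI_SET_DWORD(inbuf, PRIVILEGE_MODIFY_IN_REMOVE_MASK, 0);
+ *   rc = efx_mcdi_rpc(efx, MC_CMD_PRIVILEGE_MODIFY,
+ *                     inbuf, sizeof(inbuf), NULL, 0, NULL);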
*/ +#define MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_OFST 0 +#define MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_LEN 4 +#define MC_CMD_PRIVILEGE_MODIFY_IN_NONE 0x0 /* enum */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_ALL 0x1 /* enum */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_PFS_ONLY 0x2 /* enum */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_ONLY 0x3 /* enum */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_OF_PF 0x4 /* enum */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_ONE 0x5 /* enum */ +/* For VFS_OF_PF specify the PF, for ONE specify the target function */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_OFST 4 +#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_LEN 4 +#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_OFST 4 +#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_LBN 0 +#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_WIDTH 16 +#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_OFST 4 +#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_LBN 16 +#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_WIDTH 16 +/* Privileges to be added to the target functions. For privilege definitions + * refer to the command MC_CMD_PRIVILEGE_MASK + */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_OFST 8 +#define MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_LEN 4 +/* Privileges to be removed from the target functions. For privilege + * definitions refer to the command MC_CMD_PRIVILEGE_MASK + */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_OFST 12 +#define MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_LEN 4 + +/* MC_CMD_PRIVILEGE_MODIFY_OUT msgresponse */ +#define MC_CMD_PRIVILEGE_MODIFY_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_XPM_READ_BYTES + * Read XPM memory + */ +#define MC_CMD_XPM_READ_BYTES 0x103 +#undef MC_CMD_0x103_PRIVILEGE_CTG + +#define MC_CMD_0x103_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_XPM_READ_BYTES_IN msgrequest */ +#define MC_CMD_XPM_READ_BYTES_IN_LEN 8 +/* Start address (byte) */ +#define MC_CMD_XPM_READ_BYTES_IN_ADDR_OFST 0 +#define MC_CMD_XPM_READ_BYTES_IN_ADDR_LEN 4 +/* Count (bytes) */ +#define MC_CMD_XPM_READ_BYTES_IN_COUNT_OFST 4 +#define MC_CMD_XPM_READ_BYTES_IN_COUNT_LEN 4 + +/* MC_CMD_XPM_READ_BYTES_OUT msgresponse */ +#define MC_CMD_XPM_READ_BYTES_OUT_LENMIN 0 +#define MC_CMD_XPM_READ_BYTES_OUT_LENMAX 252 +#define MC_CMD_XPM_READ_BYTES_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_XPM_READ_BYTES_OUT_LEN(num) (0+1*(num)) +#define MC_CMD_XPM_READ_BYTES_OUT_DATA_NUM(len) (((len)-0)/1) +/* Data */ +#define MC_CMD_XPM_READ_BYTES_OUT_DATA_OFST 0 +#define MC_CMD_XPM_READ_BYTES_OUT_DATA_LEN 1 +#define MC_CMD_XPM_READ_BYTES_OUT_DATA_MINNUM 0 +#define MC_CMD_XPM_READ_BYTES_OUT_DATA_MAXNUM 252 +#define MC_CMD_XPM_READ_BYTES_OUT_DATA_MAXNUM_MCDI2 1020 + + +/***********************************/ +/* MC_CMD_XPM_WRITE_BYTES + * Write XPM memory + */ +#define MC_CMD_XPM_WRITE_BYTES 0x104 +#undef MC_CMD_0x104_PRIVILEGE_CTG + +#define MC_CMD_0x104_PRIVILEGE_CTG SRIOV_CTG_INSECURE + +/* MC_CMD_XPM_WRITE_BYTES_IN msgrequest */ +#define MC_CMD_XPM_WRITE_BYTES_IN_LENMIN 8 +#define MC_CMD_XPM_WRITE_BYTES_IN_LENMAX 252 +#define MC_CMD_XPM_WRITE_BYTES_IN_LENMAX_MCDI2 1020 +#define MC_CMD_XPM_WRITE_BYTES_IN_LEN(num) (8+1*(num)) +#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_NUM(len) (((len)-8)/1) +/* Start address (byte) */ +#define MC_CMD_XPM_WRITE_BYTES_IN_ADDR_OFST 0 +#define MC_CMD_XPM_WRITE_BYTES_IN_ADDR_LEN 4 +/* Count (bytes) */ +#define MC_CMD_XPM_WRITE_BYTES_IN_COUNT_OFST 4 +#define MC_CMD_XPM_WRITE_BYTES_IN_COUNT_LEN 4 +/* Data */ +#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_OFST 8 +#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_LEN 1 +#define 
MC_CMD_XPM_WRITE_BYTES_IN_DATA_MINNUM 0
+#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_MAXNUM 244
+#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_MAXNUM_MCDI2 1012
+
+/* MC_CMD_XPM_WRITE_BYTES_OUT msgresponse */
+#define MC_CMD_XPM_WRITE_BYTES_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_READ_SECTOR
+ * Read XPM sector
+ */
+#define MC_CMD_XPM_READ_SECTOR 0x105
+#undef MC_CMD_0x105_PRIVILEGE_CTG
+
+#define MC_CMD_0x105_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_XPM_READ_SECTOR_IN msgrequest */
+#define MC_CMD_XPM_READ_SECTOR_IN_LEN 8
+/* Sector index */
+#define MC_CMD_XPM_READ_SECTOR_IN_INDEX_OFST 0
+#define MC_CMD_XPM_READ_SECTOR_IN_INDEX_LEN 4
+/* Sector size */
+#define MC_CMD_XPM_READ_SECTOR_IN_SIZE_OFST 4
+#define MC_CMD_XPM_READ_SECTOR_IN_SIZE_LEN 4
+
+/* MC_CMD_XPM_READ_SECTOR_OUT msgresponse */
+#define MC_CMD_XPM_READ_SECTOR_OUT_LENMIN 4
+#define MC_CMD_XPM_READ_SECTOR_OUT_LENMAX 36
+#define MC_CMD_XPM_READ_SECTOR_OUT_LENMAX_MCDI2 36
+#define MC_CMD_XPM_READ_SECTOR_OUT_LEN(num) (4+1*(num))
+#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_NUM(len) (((len)-4)/1)
+/* Sector type */
+#define MC_CMD_XPM_READ_SECTOR_OUT_TYPE_OFST 0
+#define MC_CMD_XPM_READ_SECTOR_OUT_TYPE_LEN 4
+#define MC_CMD_XPM_READ_SECTOR_OUT_BLANK 0x0 /* enum */
+#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_128 0x1 /* enum */
+#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_256 0x2 /* enum */
+#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_DATA 0x3 /* enum */
+#define MC_CMD_XPM_READ_SECTOR_OUT_INVALID 0xff /* enum */
+/* Sector data */
+#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_OFST 4
+#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_LEN 1
+#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_MINNUM 0
+#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_MAXNUM 32
+#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_MAXNUM_MCDI2 32
+
+
+/***********************************/
+/* MC_CMD_XPM_WRITE_SECTOR
+ * Write XPM sector
+ */
+#define MC_CMD_XPM_WRITE_SECTOR 0x106
+#undef MC_CMD_0x106_PRIVILEGE_CTG
+
+#define MC_CMD_0x106_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_XPM_WRITE_SECTOR_IN msgrequest */
+#define MC_CMD_XPM_WRITE_SECTOR_IN_LENMIN 12
+#define MC_CMD_XPM_WRITE_SECTOR_IN_LENMAX 44
+#define MC_CMD_XPM_WRITE_SECTOR_IN_LENMAX_MCDI2 44
+#define MC_CMD_XPM_WRITE_SECTOR_IN_LEN(num) (12+1*(num))
+#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_NUM(len) (((len)-12)/1)
+/* If writing fails due to an uncorrectable error, try up to RETRIES following
+ * sectors (or until no more space is available). If 0, only one write attempt
+ * is made. Note that uncorrectable errors are unlikely, thanks to the XPM
+ * self-repair mechanism.
+ */ +#define MC_CMD_XPM_WRITE_SECTOR_IN_RETRIES_OFST 0 +#define MC_CMD_XPM_WRITE_SECTOR_IN_RETRIES_LEN 1 +#define MC_CMD_XPM_WRITE_SECTOR_IN_RESERVED_OFST 1 +#define MC_CMD_XPM_WRITE_SECTOR_IN_RESERVED_LEN 3 +/* Sector type */ +#define MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_OFST 4 +#define MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_XPM_READ_SECTOR/MC_CMD_XPM_READ_SECTOR_OUT/TYPE */ +/* Sector size */ +#define MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_OFST 8 +#define MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_LEN 4 +/* Sector data */ +#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_OFST 12 +#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_LEN 1 +#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MINNUM 0 +#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MAXNUM 32 +#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MAXNUM_MCDI2 32 + +/* MC_CMD_XPM_WRITE_SECTOR_OUT msgresponse */ +#define MC_CMD_XPM_WRITE_SECTOR_OUT_LEN 4 +/* New sector index */ +#define MC_CMD_XPM_WRITE_SECTOR_OUT_INDEX_OFST 0 +#define MC_CMD_XPM_WRITE_SECTOR_OUT_INDEX_LEN 4 + + +/***********************************/ +/* MC_CMD_XPM_INVALIDATE_SECTOR + * Invalidate XPM sector + */ +#define MC_CMD_XPM_INVALIDATE_SECTOR 0x107 +#undef MC_CMD_0x107_PRIVILEGE_CTG + +#define MC_CMD_0x107_PRIVILEGE_CTG SRIOV_CTG_INSECURE + +/* MC_CMD_XPM_INVALIDATE_SECTOR_IN msgrequest */ +#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_LEN 4 +/* Sector index */ +#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_INDEX_OFST 0 +#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_INDEX_LEN 4 + +/* MC_CMD_XPM_INVALIDATE_SECTOR_OUT msgresponse */ +#define MC_CMD_XPM_INVALIDATE_SECTOR_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_XPM_BLANK_CHECK + * Blank-check XPM memory and report bad locations + */ +#define MC_CMD_XPM_BLANK_CHECK 0x108 +#undef MC_CMD_0x108_PRIVILEGE_CTG + +#define MC_CMD_0x108_PRIVILEGE_CTG SRIOV_CTG_INSECURE + +/* MC_CMD_XPM_BLANK_CHECK_IN msgrequest */ +#define MC_CMD_XPM_BLANK_CHECK_IN_LEN 8 +/* Start address (byte) */ +#define MC_CMD_XPM_BLANK_CHECK_IN_ADDR_OFST 0 +#define MC_CMD_XPM_BLANK_CHECK_IN_ADDR_LEN 4 +/* Count (bytes) */ +#define MC_CMD_XPM_BLANK_CHECK_IN_COUNT_OFST 4 +#define MC_CMD_XPM_BLANK_CHECK_IN_COUNT_LEN 4 + +/* MC_CMD_XPM_BLANK_CHECK_OUT msgresponse */ +#define MC_CMD_XPM_BLANK_CHECK_OUT_LENMIN 4 +#define MC_CMD_XPM_BLANK_CHECK_OUT_LENMAX 252 +#define MC_CMD_XPM_BLANK_CHECK_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_XPM_BLANK_CHECK_OUT_LEN(num) (4+2*(num)) +#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_NUM(len) (((len)-4)/2) +/* Total number of bad (non-blank) locations */ +#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_OFST 0 +#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_LEN 4 +/* Addresses of bad locations (may be less than BAD_COUNT, if all cannot fit + * into MCDI response) + */ +#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_OFST 4 +#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_LEN 2 +#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MINNUM 0 +#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MAXNUM 124 +#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MAXNUM_MCDI2 508 + + +/***********************************/ +/* MC_CMD_XPM_REPAIR + * Blank-check and repair XPM memory + */ +#define MC_CMD_XPM_REPAIR 0x109 +#undef MC_CMD_0x109_PRIVILEGE_CTG + +#define MC_CMD_0x109_PRIVILEGE_CTG SRIOV_CTG_INSECURE + +/* MC_CMD_XPM_REPAIR_IN msgrequest */ +#define MC_CMD_XPM_REPAIR_IN_LEN 8 +/* Start address (byte) */ +#define MC_CMD_XPM_REPAIR_IN_ADDR_OFST 0 +#define MC_CMD_XPM_REPAIR_IN_ADDR_LEN 4 +/* Count (bytes) */ +#define MC_CMD_XPM_REPAIR_IN_COUNT_OFST 4 +#define MC_CMD_XPM_REPAIR_IN_COUNT_LEN 4 
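+
+/* Illustrative (non-normative) invocation of MC_CMD_XPM_REPAIR, assuming the
+ * MCDI buffer helpers declared in mcdi.h and placeholder variables efx, addr
+ * and count:
+ *
+ *   MCDI_DECLARE_BUF(inbuf, MC_CMD_XPM_REPAIR_IN_LEN);
+ *   int rc;
+ *
+ *   MCDI_SET_DWORD(inbuf, XPM_REPAIR_IN_ADDR, addr);
+ *   MCDI_SET_DWORD(inbuf, XPM_REPAIR_IN_COUNT, count);
+ *   rc = efx_mcdi_rpc(efx, MC_CMD_XPM_REPAIR,
+ *                     inbuf, sizeof(inbuf), NULL, 0, NULL);
+ */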
+
+/* MC_CMD_XPM_REPAIR_OUT msgresponse */
+#define MC_CMD_XPM_REPAIR_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_DECODER_TEST
+ * Test XPM memory address decoders for gross manufacturing defects. Can only
+ * be performed on an unprogrammed part.
+ */
+#define MC_CMD_XPM_DECODER_TEST 0x10a
+#undef MC_CMD_0x10a_PRIVILEGE_CTG
+
+#define MC_CMD_0x10a_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_XPM_DECODER_TEST_IN msgrequest */
+#define MC_CMD_XPM_DECODER_TEST_IN_LEN 0
+
+/* MC_CMD_XPM_DECODER_TEST_OUT msgresponse */
+#define MC_CMD_XPM_DECODER_TEST_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_WRITE_TEST
+ * XPM memory write test. Test XPM write logic for gross manufacturing defects
+ * by writing to a dedicated test row. There are 16 locations in the test row
+ * and the test can only be performed on locations that have not been
+ * previously used (i.e. can be run at most 16 times). The test will pick the
+ * first available location to use, or fail with ENOSPC if none are left.
+ */
+#define MC_CMD_XPM_WRITE_TEST 0x10b
+#undef MC_CMD_0x10b_PRIVILEGE_CTG
+
+#define MC_CMD_0x10b_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_XPM_WRITE_TEST_IN msgrequest */
+#define MC_CMD_XPM_WRITE_TEST_IN_LEN 0
+
+/* MC_CMD_XPM_WRITE_TEST_OUT msgresponse */
+#define MC_CMD_XPM_WRITE_TEST_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_EXEC_SIGNED
+ * Check the CMAC of the contents of IMEM and DMEM against the value supplied
+ * and, if correct, begin execution from the start of IMEM. The caller supplies
+ * a key ID, the length of IMEM and DMEM to validate and the expected CMAC.
+ * CMAC computation runs from the start of IMEM, and from the start of DMEM +
+ * 16k, to match flash booting. The command will respond with EINVAL if the
+ * CMAC does not match, otherwise it will respond with success before it jumps
+ * to IMEM.
+ */
+#define MC_CMD_EXEC_SIGNED 0x10c
+#undef MC_CMD_0x10c_PRIVILEGE_CTG
+
+#define MC_CMD_0x10c_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_EXEC_SIGNED_IN msgrequest */
+#define MC_CMD_EXEC_SIGNED_IN_LEN 28
+/* the length of code to include in the CMAC */
+#define MC_CMD_EXEC_SIGNED_IN_CODELEN_OFST 0
+#define MC_CMD_EXEC_SIGNED_IN_CODELEN_LEN 4
+/* the length of data to include in the CMAC */
+#define MC_CMD_EXEC_SIGNED_IN_DATALEN_OFST 4
+#define MC_CMD_EXEC_SIGNED_IN_DATALEN_LEN 4
+/* the XPM sector containing the key to use */
+#define MC_CMD_EXEC_SIGNED_IN_KEYSECTOR_OFST 8
+#define MC_CMD_EXEC_SIGNED_IN_KEYSECTOR_LEN 4
+/* the expected CMAC value */
+#define MC_CMD_EXEC_SIGNED_IN_CMAC_OFST 12
+#define MC_CMD_EXEC_SIGNED_IN_CMAC_LEN 16
+
+/* MC_CMD_EXEC_SIGNED_OUT msgresponse */
+#define MC_CMD_EXEC_SIGNED_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_PREPARE_SIGNED
+ * Prepare to upload a signed image. This will scrub the specified length of
+ * the data region, which must be at least as large as the DATALEN supplied to
+ * MC_CMD_EXEC_SIGNED.
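+ *
+ * Illustrative (non-normative) flow, assuming the MCDI buffer helpers
+ * declared in mcdi.h and a placeholder variable datalen: scrub the data
+ * region, upload the image, then invoke MC_CMD_EXEC_SIGNED.
+ *
+ *   MCDI_DECLARE_BUF(inbuf, MC_CMD_PREPARE_SIGNED_IN_LEN);
+ *   int rc;
+ *
+ *   MCDI_SET_DWORD(inbuf, PREPARE_SIGNED_IN_DATALEN, datalen);
+ *   rc = efx_mcdi_rpc(efx, MC_CMD_PREPARE_SIGNED,
+ *                     inbuf, sizeof(inbuf), NULL, 0, NULL);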
+ */ +#define MC_CMD_PREPARE_SIGNED 0x10d +#undef MC_CMD_0x10d_PRIVILEGE_CTG + +#define MC_CMD_0x10d_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND + +/* MC_CMD_PREPARE_SIGNED_IN msgrequest */ +#define MC_CMD_PREPARE_SIGNED_IN_LEN 4 +/* the length of data area to clear */ +#define MC_CMD_PREPARE_SIGNED_IN_DATALEN_OFST 0 +#define MC_CMD_PREPARE_SIGNED_IN_DATALEN_LEN 4 + +/* MC_CMD_PREPARE_SIGNED_OUT msgresponse */ +#define MC_CMD_PREPARE_SIGNED_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_SET_SECURITY_RULE + * Set blacklist and/or whitelist action for a particular match criteria. + * (Medford-only; for use by SolarSecure apps, not directly by drivers. See + * SF-114946-SW.) NOTE - this message definition is provisional. It has not yet + * been used in any released code and may change during development. This note + * will be removed once it is regarded as stable. + */ +#define MC_CMD_SET_SECURITY_RULE 0x10f +#undef MC_CMD_0x10f_PRIVILEGE_CTG + +#define MC_CMD_0x10f_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND + +/* MC_CMD_SET_SECURITY_RULE_IN msgrequest */ +#define MC_CMD_SET_SECURITY_RULE_IN_LEN 92 +/* fields to include in match criteria */ +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_FIELDS_OFST 0 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_FIELDS_LEN 4 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_IP_OFST 0 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_IP_LBN 0 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_IP_WIDTH 1 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_IP_OFST 0 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_IP_LBN 1 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_IP_WIDTH 1 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_MAC_OFST 0 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_MAC_LBN 2 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_MAC_WIDTH 1 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_PORT_OFST 0 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_PORT_LBN 3 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_PORT_WIDTH 1 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_MAC_OFST 0 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_MAC_LBN 4 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_MAC_WIDTH 1 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_PORT_OFST 0 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_PORT_LBN 5 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_PORT_WIDTH 1 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_ETHER_TYPE_OFST 0 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_ETHER_TYPE_LBN 6 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_ETHER_TYPE_WIDTH 1 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_INNER_VLAN_OFST 0 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_INNER_VLAN_LBN 7 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_INNER_VLAN_WIDTH 1 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_OUTER_VLAN_OFST 0 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_OUTER_VLAN_LBN 8 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_OUTER_VLAN_WIDTH 1 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_IP_PROTO_OFST 0 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_IP_PROTO_LBN 9 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_IP_PROTO_WIDTH 1 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_PHYSICAL_PORT_OFST 0 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_PHYSICAL_PORT_LBN 10 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_PHYSICAL_PORT_WIDTH 1 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_RESERVED_OFST 0 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_RESERVED_LBN 11 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_RESERVED_WIDTH 1 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_SUBNET_ID_OFST 0 +#define 
MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_SUBNET_ID_LBN 12 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_SUBNET_ID_WIDTH 1 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_PORTRANGE_ID_OFST 0 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_PORTRANGE_ID_LBN 13 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_PORTRANGE_ID_WIDTH 1 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_PORTRANGE_ID_OFST 0 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_PORTRANGE_ID_LBN 14 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_PORTRANGE_ID_WIDTH 1 +/* remote MAC address to match (as bytes in network order) */ +#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_MAC_OFST 4 +#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_MAC_LEN 6 +/* remote port to match (as bytes in network order) */ +#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_PORT_OFST 10 +#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_PORT_LEN 2 +/* local MAC address to match (as bytes in network order) */ +#define MC_CMD_SET_SECURITY_RULE_IN_LOCAL_MAC_OFST 12 +#define MC_CMD_SET_SECURITY_RULE_IN_LOCAL_MAC_LEN 6 +/* local port to match (as bytes in network order) */ +#define MC_CMD_SET_SECURITY_RULE_IN_LOCAL_PORT_OFST 18 +#define MC_CMD_SET_SECURITY_RULE_IN_LOCAL_PORT_LEN 2 +/* Ethernet type to match (as bytes in network order) */ +#define MC_CMD_SET_SECURITY_RULE_IN_ETHER_TYPE_OFST 20 +#define MC_CMD_SET_SECURITY_RULE_IN_ETHER_TYPE_LEN 2 +/* Inner VLAN tag to match (as bytes in network order) */ +#define MC_CMD_SET_SECURITY_RULE_IN_INNER_VLAN_OFST 22 +#define MC_CMD_SET_SECURITY_RULE_IN_INNER_VLAN_LEN 2 +/* Outer VLAN tag to match (as bytes in network order) */ +#define MC_CMD_SET_SECURITY_RULE_IN_OUTER_VLAN_OFST 24 +#define MC_CMD_SET_SECURITY_RULE_IN_OUTER_VLAN_LEN 2 +/* IP protocol to match (in low byte; set high byte to 0) */ +#define MC_CMD_SET_SECURITY_RULE_IN_IP_PROTO_OFST 26 +#define MC_CMD_SET_SECURITY_RULE_IN_IP_PROTO_LEN 2 +/* Physical port to match (as little-endian 32-bit value) */ +#define MC_CMD_SET_SECURITY_RULE_IN_PHYSICAL_PORT_OFST 28 +#define MC_CMD_SET_SECURITY_RULE_IN_PHYSICAL_PORT_LEN 4 +/* Reserved; set to 0 */ +#define MC_CMD_SET_SECURITY_RULE_IN_RESERVED_OFST 32 +#define MC_CMD_SET_SECURITY_RULE_IN_RESERVED_LEN 4 +/* remote IP address to match (as bytes in network order; set last 12 bytes to + * 0 for IPv4 address) + */ +#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_IP_OFST 36 +#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_IP_LEN 16 +/* local IP address to match (as bytes in network order; set last 12 bytes to 0 + * for IPv4 address) + */ +#define MC_CMD_SET_SECURITY_RULE_IN_LOCAL_IP_OFST 52 +#define MC_CMD_SET_SECURITY_RULE_IN_LOCAL_IP_LEN 16 +/* remote subnet ID to match (as little-endian 32-bit value); note that remote + * subnets are matched by mapping the remote IP address to a "subnet ID" via a + * data structure which must already have been configured using + * MC_CMD_SUBNET_MAP_SET_NODE appropriately + */ +#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_SUBNET_ID_OFST 68 +#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_SUBNET_ID_LEN 4 +/* remote portrange ID to match (as little-endian 32-bit value); note that + * remote port ranges are matched by mapping the remote port to a "portrange + * ID" via a data structure which must already have been configured using + * MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE + */ +#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_PORTRANGE_ID_OFST 72 +#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_PORTRANGE_ID_LEN 4 +/* local portrange ID to match (as little-endian 32-bit value); note that local + * port ranges are matched by 
mapping the local port to a "portrange ID" via a + * data structure which must already have been configured using + * MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE + */ +#define MC_CMD_SET_SECURITY_RULE_IN_LOCAL_PORTRANGE_ID_OFST 76 +#define MC_CMD_SET_SECURITY_RULE_IN_LOCAL_PORTRANGE_ID_LEN 4 +/* set the action for transmitted packets matching this rule */ +#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_OFST 80 +#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_LEN 4 +/* enum: make no decision */ +#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_NONE 0x0 +/* enum: decide to accept the packet */ +#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_WHITELIST 0x1 +/* enum: decide to drop the packet */ +#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_BLACKLIST 0x2 +/* enum: inform the TSA controller about some sample of packets matching this + * rule (via MC_CMD_TSA_INFO_IN_PKT_SAMPLE messages); may be bitwise-ORed with + * either the WHITELIST or BLACKLIST action + */ +#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_SAMPLE 0x4 +/* enum: do not change the current TX action */ +#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_UNCHANGED 0xffffffff +/* set the action for received packets matching this rule */ +#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_OFST 84 +#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_LEN 4 +/* enum: make no decision */ +#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_NONE 0x0 +/* enum: decide to accept the packet */ +#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_WHITELIST 0x1 +/* enum: decide to drop the packet */ +#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_BLACKLIST 0x2 +/* enum: inform the TSA controller about some sample of packets matching this + * rule (via MC_CMD_TSA_INFO_IN_PKT_SAMPLE messages); may be bitwise-ORed with + * either the WHITELIST or BLACKLIST action + */ +#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_SAMPLE 0x4 +/* enum: do not change the current RX action */ +#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_UNCHANGED 0xffffffff +/* counter ID to associate with this rule; IDs are allocated using + * MC_CMD_SECURITY_RULE_COUNTER_ALLOC + */ +#define MC_CMD_SET_SECURITY_RULE_IN_COUNTER_ID_OFST 88 +#define MC_CMD_SET_SECURITY_RULE_IN_COUNTER_ID_LEN 4 +/* enum: special value for the null counter ID */ +#define MC_CMD_SET_SECURITY_RULE_IN_COUNTER_ID_NONE 0x0 +/* enum: special value to tell the MC to allocate an available counter */ +#define MC_CMD_SET_SECURITY_RULE_IN_COUNTER_ID_SW_AUTO 0xeeeeeeee +/* enum: special value to request use of hardware counter (Medford2 only) */ +#define MC_CMD_SET_SECURITY_RULE_IN_COUNTER_ID_HW 0xffffffff + +/* MC_CMD_SET_SECURITY_RULE_OUT msgresponse */ +#define MC_CMD_SET_SECURITY_RULE_OUT_LEN 32 +/* new reference count for uses of counter ID */ +#define MC_CMD_SET_SECURITY_RULE_OUT_COUNTER_REFCNT_OFST 0 +#define MC_CMD_SET_SECURITY_RULE_OUT_COUNTER_REFCNT_LEN 4 +/* constructed match bits for this rule (as a tracing aid only) */ +#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_MATCH_BITS_OFST 4 +#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_MATCH_BITS_LEN 12 +/* constructed discriminator bits for this rule (as a tracing aid only) */ +#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_DISCRIMINATOR_OFST 16 +#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_DISCRIMINATOR_LEN 4 +/* base location for probes for this rule (as a tracing aid only) */ +#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_PROBE_BASE_OFST 20 +#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_PROBE_BASE_LEN 4 +/* step for probes for this rule (as a tracing aid only) */ +#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_PROBE_STEP_OFST 24 
+#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_PROBE_STEP_LEN 4 +/* ID for reading back the counter */ +#define MC_CMD_SET_SECURITY_RULE_OUT_COUNTER_ID_OFST 28 +#define MC_CMD_SET_SECURITY_RULE_OUT_COUNTER_ID_LEN 4 + + +/***********************************/ +/* MC_CMD_RESET_SECURITY_RULES + * Reset all blacklist and whitelist actions for a particular physical port, or + * all ports. (Medford-only; for use by SolarSecure apps, not directly by + * drivers. See SF-114946-SW.) NOTE - this message definition is provisional. + * It has not yet been used in any released code and may change during + * development. This note will be removed once it is regarded as stable. + */ +#define MC_CMD_RESET_SECURITY_RULES 0x110 +#undef MC_CMD_0x110_PRIVILEGE_CTG + +#define MC_CMD_0x110_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND + +/* MC_CMD_RESET_SECURITY_RULES_IN msgrequest */ +#define MC_CMD_RESET_SECURITY_RULES_IN_LEN 4 +/* index of physical port to reset (or ALL_PHYSICAL_PORTS to reset all) */ +#define MC_CMD_RESET_SECURITY_RULES_IN_PHYSICAL_PORT_OFST 0 +#define MC_CMD_RESET_SECURITY_RULES_IN_PHYSICAL_PORT_LEN 4 +/* enum: special value to reset all physical ports */ +#define MC_CMD_RESET_SECURITY_RULES_IN_ALL_PHYSICAL_PORTS 0xffffffff + +/* MC_CMD_RESET_SECURITY_RULES_OUT msgresponse */ +#define MC_CMD_RESET_SECURITY_RULES_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_SECURITY_RULESET_VERSION + * Return a large hash value representing a "version" of the complete set of + * currently active blacklist / whitelist rules and associated data structures. + * (Medford-only; for use by SolarSecure apps, not directly by drivers. See + * SF-114946-SW.) NOTE - this message definition is provisional. It has not yet + * been used in any released code and may change during development. This note + * will be removed once it is regarded as stable. + */ +#define MC_CMD_GET_SECURITY_RULESET_VERSION 0x111 +#undef MC_CMD_0x111_PRIVILEGE_CTG + +#define MC_CMD_0x111_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_GET_SECURITY_RULESET_VERSION_IN msgrequest */ +#define MC_CMD_GET_SECURITY_RULESET_VERSION_IN_LEN 0 + +/* MC_CMD_GET_SECURITY_RULESET_VERSION_OUT msgresponse */ +#define MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_LENMIN 1 +#define MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_LENMAX 252 +#define MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_LEN(num) (0+1*(num)) +#define MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_VERSION_NUM(len) (((len)-0)/1) +/* Opaque hash value; length may vary depending on the hash scheme used */ +#define MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_VERSION_OFST 0 +#define MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_VERSION_LEN 1 +#define MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_VERSION_MINNUM 1 +#define MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_VERSION_MAXNUM 252 +#define MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_VERSION_MAXNUM_MCDI2 1020 + + +/***********************************/ +/* MC_CMD_SECURITY_RULE_COUNTER_ALLOC + * Allocate counters for use with blacklist / whitelist rules. (Medford-only; + * for use by SolarSecure apps, not directly by drivers. See SF-114946-SW.) + * NOTE - this message definition is provisional. It has not yet been used in + * any released code and may change during development. This note will be + * removed once it is regarded as stable. 
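+ *
+ * Illustrative (non-normative) allocation request, assuming the MCDI buffer
+ * helpers declared in mcdi.h and a placeholder variable efx:
+ *
+ *   MCDI_DECLARE_BUF(inbuf, MC_CMD_SECURITY_RULE_COUNTER_ALLOC_IN_LEN);
+ *   MCDI_DECLARE_BUF(outbuf, MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_LENMAX);
+ *   size_t outlen;
+ *   int rc;
+ *
+ *   MCDI_SET_DWORD(inbuf, SECURITY_RULE_COUNTER_ALLOC_IN_NUM_COUNTERS, 4);
+ *   rc = efx_mcdi_rpc(efx, MC_CMD_SECURITY_RULE_COUNTER_ALLOC,
+ *                     inbuf, sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
+ *   if (rc == 0)
+ *           n = MCDI_DWORD(outbuf,
+ *                          SECURITY_RULE_COUNTER_ALLOC_OUT_NUM_COUNTERS);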
+ */
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC 0x112
+#undef MC_CMD_0x112_PRIVILEGE_CTG
+
+#define MC_CMD_0x112_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_SECURITY_RULE_COUNTER_ALLOC_IN msgrequest */
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_IN_LEN 4
+/* the number of new counter IDs to request */
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_IN_NUM_COUNTERS_OFST 0
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_IN_NUM_COUNTERS_LEN 4
+
+/* MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT msgresponse */
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_LENMIN 4
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_LENMAX 252
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_LEN(num) (4+4*(num))
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_COUNTER_ID_NUM(len) (((len)-4)/4)
+/* the number of new counter IDs allocated (may be less than the number
+ * requested if resources are unavailable)
+ */
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_NUM_COUNTERS_OFST 0
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_NUM_COUNTERS_LEN 4
+/* new counter ID(s) */
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_COUNTER_ID_OFST 4
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_COUNTER_ID_LEN 4
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_COUNTER_ID_MINNUM 0
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_COUNTER_ID_MAXNUM 62
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_COUNTER_ID_MAXNUM_MCDI2 254
+
+
+/***********************************/
+/* MC_CMD_SECURITY_RULE_COUNTER_FREE
+ * Free counters previously allocated for use with blacklist / whitelist
+ * rules. (Medford-only; for use by SolarSecure apps, not directly by drivers.
+ * See SF-114946-SW.) NOTE - this message definition is provisional. It has not
+ * yet been used in any released code and may change during development. This
+ * note will be removed once it is regarded as stable.
+ */
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE 0x113
+#undef MC_CMD_0x113_PRIVILEGE_CTG
+
+#define MC_CMD_0x113_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_SECURITY_RULE_COUNTER_FREE_IN msgrequest */
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_LENMIN 4
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_LENMAX 252
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_LENMAX_MCDI2 1020
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_LEN(num) (4+4*(num))
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_COUNTER_ID_NUM(len) (((len)-4)/4)
+/* the number of counter IDs to free */
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_NUM_COUNTERS_OFST 0
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_NUM_COUNTERS_LEN 4
+/* the counter ID(s) to free */
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_COUNTER_ID_OFST 4
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_COUNTER_ID_LEN 4
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_COUNTER_ID_MINNUM 0
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_COUNTER_ID_MAXNUM 62
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_COUNTER_ID_MAXNUM_MCDI2 254
+
+/* MC_CMD_SECURITY_RULE_COUNTER_FREE_OUT msgresponse */
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SUBNET_MAP_SET_NODE
+ * Atomically update a trie node in the map of subnets to subnet IDs. The
+ * constants in the descriptions of the fields of this message may be retrieved
+ * by the GET_SECURITY_RULE_INFO op of MC_CMD_GET_PARSER_DISP_INFO. (Medford-
+ * only; for use by SolarSecure apps, not directly by drivers. See
+ * SF-114946-SW.) NOTE - this message definition is provisional.
It has not yet + * been used in any released code and may change during development. This note + * will be removed once it is regarded as stable. + */ +#define MC_CMD_SUBNET_MAP_SET_NODE 0x114 +#undef MC_CMD_0x114_PRIVILEGE_CTG + +#define MC_CMD_0x114_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND + +/* MC_CMD_SUBNET_MAP_SET_NODE_IN msgrequest */ +#define MC_CMD_SUBNET_MAP_SET_NODE_IN_LENMIN 6 +#define MC_CMD_SUBNET_MAP_SET_NODE_IN_LENMAX 252 +#define MC_CMD_SUBNET_MAP_SET_NODE_IN_LENMAX_MCDI2 1020 +#define MC_CMD_SUBNET_MAP_SET_NODE_IN_LEN(num) (4+2*(num)) +#define MC_CMD_SUBNET_MAP_SET_NODE_IN_ENTRY_NUM(len) (((len)-4)/2) +/* node to update in the range 0 .. SUBNET_MAP_NUM_NODES-1 */ +#define MC_CMD_SUBNET_MAP_SET_NODE_IN_NODE_ID_OFST 0 +#define MC_CMD_SUBNET_MAP_SET_NODE_IN_NODE_ID_LEN 4 +/* SUBNET_MAP_NUM_ENTRIES_PER_NODE new entries; each entry is either a pointer + * to the next node, expressed as an offset in the trie memory (i.e. node ID + * multiplied by SUBNET_MAP_NUM_ENTRIES_PER_NODE), or a leaf value in the range + * SUBNET_ID_MIN .. SUBNET_ID_MAX + */ +#define MC_CMD_SUBNET_MAP_SET_NODE_IN_ENTRY_OFST 4 +#define MC_CMD_SUBNET_MAP_SET_NODE_IN_ENTRY_LEN 2 +#define MC_CMD_SUBNET_MAP_SET_NODE_IN_ENTRY_MINNUM 1 +#define MC_CMD_SUBNET_MAP_SET_NODE_IN_ENTRY_MAXNUM 124 +#define MC_CMD_SUBNET_MAP_SET_NODE_IN_ENTRY_MAXNUM_MCDI2 508 + +/* MC_CMD_SUBNET_MAP_SET_NODE_OUT msgresponse */ +#define MC_CMD_SUBNET_MAP_SET_NODE_OUT_LEN 0 + +/* PORTRANGE_TREE_ENTRY structuredef */ +#define PORTRANGE_TREE_ENTRY_LEN 4 +/* key for branch nodes (<= key takes left branch, > key takes right branch), + * or magic value for leaf nodes + */ +#define PORTRANGE_TREE_ENTRY_BRANCH_KEY_OFST 0 +#define PORTRANGE_TREE_ENTRY_BRANCH_KEY_LEN 2 +#define PORTRANGE_TREE_ENTRY_LEAF_NODE_KEY 0xffff /* enum */ +#define PORTRANGE_TREE_ENTRY_BRANCH_KEY_LBN 0 +#define PORTRANGE_TREE_ENTRY_BRANCH_KEY_WIDTH 16 +/* final portrange ID for leaf nodes (don't care for branch nodes) */ +#define PORTRANGE_TREE_ENTRY_LEAF_PORTRANGE_ID_OFST 2 +#define PORTRANGE_TREE_ENTRY_LEAF_PORTRANGE_ID_LEN 2 +#define PORTRANGE_TREE_ENTRY_LEAF_PORTRANGE_ID_LBN 16 +#define PORTRANGE_TREE_ENTRY_LEAF_PORTRANGE_ID_WIDTH 16 + + +/***********************************/ +/* MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE + * Atomically update the entire tree mapping remote port ranges to portrange + * IDs. The constants in the descriptions of the fields of this message may be + * retrieved by the GET_SECURITY_RULE_INFO op of MC_CMD_GET_PARSER_DISP_INFO. + * (Medford-only; for use by SolarSecure apps, not directly by drivers. See + * SF-114946-SW.) NOTE - this message definition is provisional. It has not yet + * been used in any released code and may change during development. This note + * will be removed once it is regarded as stable. 
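+ *
+ * Illustrative (non-normative) sketch for a fixed-size tree of N entries,
+ * each a PORTRANGE_TREE_ENTRY packed into one dword of the ENTRIES array,
+ * assuming the MCDI buffer helpers declared in mcdi.h and placeholder
+ * variables efx and entries:
+ *
+ *   MCDI_DECLARE_BUF(inbuf,
+ *                    MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_LEN(N));
+ *   int rc;
+ *
+ *   memcpy(MCDI_PTR(inbuf, REMOTE_PORTRANGE_MAP_SET_TREE_IN_ENTRIES),
+ *          entries,
+ *          N * MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_LEN);
+ *   rc = efx_mcdi_rpc(efx, MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE,
+ *                     inbuf, sizeof(inbuf), NULL, 0, NULL);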
+ */
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE 0x115
+#undef MC_CMD_0x115_PRIVILEGE_CTG
+
+#define MC_CMD_0x115_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN msgrequest */
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_LENMIN 4
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_LENMAX 252
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_LENMAX_MCDI2 1020
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_LEN(num) (0+4*(num))
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_NUM(len) (((len)-0)/4)
+/* PORTRANGE_TREE_NUM_ENTRIES new entries, each laid out as a
+ * PORTRANGE_TREE_ENTRY
+ */
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_OFST 0
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_LEN 4
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_MINNUM 1
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_MAXNUM 63
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_MAXNUM_MCDI2 255
+
+/* MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_OUT msgresponse */
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE
+ * Atomically update the entire tree mapping local port ranges to portrange
+ * IDs. The constants in the descriptions of the fields of this message may be
+ * retrieved by the GET_SECURITY_RULE_INFO op of MC_CMD_GET_PARSER_DISP_INFO.
+ * (Medford-only; for use by SolarSecure apps, not directly by drivers. See
+ * SF-114946-SW.) NOTE - this message definition is provisional. It has not yet
+ * been used in any released code and may change during development. This note
+ * will be removed once it is regarded as stable.
+ */
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE 0x116
+#undef MC_CMD_0x116_PRIVILEGE_CTG
+
+#define MC_CMD_0x116_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN msgrequest */
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN_LENMIN 4
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN_LENMAX 252
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN_LENMAX_MCDI2 1020
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN_LEN(num) (0+4*(num))
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_NUM(len) (((len)-0)/4)
+/* PORTRANGE_TREE_NUM_ENTRIES new entries, each laid out as a
+ * PORTRANGE_TREE_ENTRY
+ */
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_OFST 0
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_LEN 4
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_MINNUM 1
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_MAXNUM 63
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_MAXNUM_MCDI2 255
+
+/* MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_OUT msgresponse */
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_OUT_LEN 0
+
+/* TUNNEL_ENCAP_UDP_PORT_ENTRY structuredef */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_LEN 4
+/* UDP port (the standard ports are named below but any port may be used) */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_OFST 0
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LEN 2
+/* enum: the IANA allocated UDP port for VXLAN */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_VXLAN_UDP_PORT 0x12b5
+/* enum: the IANA allocated UDP port for Geneve */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_GENEVE_UDP_PORT 0x17c1
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LBN 0
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_WIDTH 16
+/* tunnel encapsulation protocol (only those named below are supported) */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_OFST 2
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LEN 2
+/* enum: This port will be used for VXLAN on both IPv4 and IPv6 */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN 0x0
+/* enum: This port will be used for Geneve on both IPv4 and IPv6 */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE 0x1
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LBN 16
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_WIDTH 16
+
+
+/***********************************/
+/* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS
+ * Configure UDP ports for tunnel encapsulation hardware acceleration. The
+ * parser-dispatcher will attempt to parse traffic on these ports as tunnel
+ * encapsulation PDUs and filter them using the tunnel encapsulation filter
+ * chain rather than the standard filter chain. Note that this command can
+ * cause all functions to see a reset. (Available on Medford only.)
+ */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS 0x117
+#undef MC_CMD_0x117_PRIVILEGE_CTG
+
+#define MC_CMD_0x117_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN msgrequest */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMIN 4
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMAX 68
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMAX_MCDI2 68
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LEN(num) (4+4*(num))
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_NUM(len) (((len)-4)/4)
+/* Flags */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS_OFST 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS_LEN 2
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING_OFST 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING_LBN 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING_WIDTH 1
+/* The number of entries in the ENTRIES array */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_OFST 2
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_LEN 2
+/* Entries defining the UDP port to protocol mapping, each laid out as a
+ * TUNNEL_ENCAP_UDP_PORT_ENTRY
+ */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_OFST 4
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_LEN 4
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MINNUM 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM 16
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM_MCDI2 16
+
+/* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT msgresponse */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_LEN 2
+/* Flags */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS_OFST 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS_LEN 2
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_OFST 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_LBN 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_WIDTH 1
+
+
+/***********************************/
+/* MC_CMD_RX_BALANCING
+ * Configure a port upconverter to distribute packets across both RX engines.
+ * Packets are distributed according to a table of destination vFIFOs; the
+ * table index is a hash of the IPv4 source and destination addresses and the
+ * VLAN priority.
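+ *
+ * Illustrative (non-normative) table update, assuming the MCDI buffer
+ * helpers declared in mcdi.h and placeholder variables efx, port, vlan_prio,
+ * src_dst_bit and engine:
+ *
+ *   MCDI_DECLARE_BUF(inbuf, MC_CMD_RX_BALANCING_IN_LEN);
+ *   int rc;
+ *
+ *   MCDI_SET_DWORD(inbuf, RX_BALANCING_IN_PORT, port);
+ *   MCDI_SET_DWORD(inbuf, RX_BALANCING_IN_PRIORITY, vlan_prio);
+ *   MCDI_SET_DWORD(inbuf, RX_BALANCING_IN_SRC_DST, src_dst_bit);
+ *   MCDI_SET_DWORD(inbuf, RX_BALANCING_IN_ENG, engine);
+ *   rc = efx_mcdi_rpc(efx, MC_CMD_RX_BALANCING,
+ *                     inbuf, sizeof(inbuf), NULL, 0, NULL);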
+ */ +#define MC_CMD_RX_BALANCING 0x118 +#undef MC_CMD_0x118_PRIVILEGE_CTG + +#define MC_CMD_0x118_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND + +/* MC_CMD_RX_BALANCING_IN msgrequest */ +#define MC_CMD_RX_BALANCING_IN_LEN 16 +/* The RX port whose upconverter table will be modified */ +#define MC_CMD_RX_BALANCING_IN_PORT_OFST 0 +#define MC_CMD_RX_BALANCING_IN_PORT_LEN 4 +/* The VLAN priority associated to the table index and vFIFO */ +#define MC_CMD_RX_BALANCING_IN_PRIORITY_OFST 4 +#define MC_CMD_RX_BALANCING_IN_PRIORITY_LEN 4 +/* The resulting bit of SRC^DST for indexing the table */ +#define MC_CMD_RX_BALANCING_IN_SRC_DST_OFST 8 +#define MC_CMD_RX_BALANCING_IN_SRC_DST_LEN 4 +/* The RX engine to which the vFIFO in the table entry will point to */ +#define MC_CMD_RX_BALANCING_IN_ENG_OFST 12 +#define MC_CMD_RX_BALANCING_IN_ENG_LEN 4 + +/* MC_CMD_RX_BALANCING_OUT msgresponse */ +#define MC_CMD_RX_BALANCING_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_TSA_BIND + * TSAN - TSAC binding communication protocol. Refer to SF-115479-TC for more + * info in respect to the binding protocol. + */ +#define MC_CMD_TSA_BIND 0x119 +#undef MC_CMD_0x119_PRIVILEGE_CTG + +#define MC_CMD_0x119_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_TSA_BIND_IN msgrequest: Protocol operation code */ +#define MC_CMD_TSA_BIND_IN_LEN 4 +#define MC_CMD_TSA_BIND_IN_OP_OFST 0 +#define MC_CMD_TSA_BIND_IN_OP_LEN 4 +/* enum: Obsolete. Use MC_CMD_SECURE_NIC_INFO_IN_STATUS. */ +#define MC_CMD_TSA_BIND_OP_GET_ID 0x1 +/* enum: Get a binding ticket from the TSAN. The binding ticket is used as part + * of the binding procedure to authorize the binding of an adapter to a TSAID. + * Refer to SF-114946-SW for more information. This sub-command is only + * available over a TLS secure connection between the TSAN and TSAC. + */ +#define MC_CMD_TSA_BIND_OP_GET_TICKET 0x2 +/* enum: Opcode associated with the propagation of a private key that TSAN uses + * as part of post-binding authentication procedure. More specifically, TSAN + * uses this key for a signing operation. TSAC uses the counterpart public key + * to verify the signature. Note - The post-binding authentication occurs when + * the TSAN-TSAC connection terminates and TSAN tries to reconnect. Refer to + * SF-114946-SW for more information. This sub-command is only available over a + * TLS secure connection between the TSAN and TSAC. + */ +#define MC_CMD_TSA_BIND_OP_SET_KEY 0x3 +/* enum: Request an insecure unbinding operation. This sub-command is available + * for any privileged client. + */ +#define MC_CMD_TSA_BIND_OP_UNBIND 0x4 +/* enum: Obsolete. Use MC_CMD_TSA_BIND_OP_SECURE_UNBIND. */ +#define MC_CMD_TSA_BIND_OP_UNBIND_EXT 0x5 +/* enum: Opcode associated with the propagation of the unbinding secret token. + * TSAN persists the unbinding secret token. Refer to SF-115479-TC for more + * information. This sub-command is only available over a TLS secure connection + * between the TSAN and TSAC. + */ +#define MC_CMD_TSA_BIND_OP_SET_UNBINDTOKEN 0x6 +/* enum: Obsolete. Use MC_CMD_TSA_BIND_OP_SECURE_DECOMMISSION. */ +#define MC_CMD_TSA_BIND_OP_DECOMMISSION 0x7 +/* enum: Obsolete. Use MC_CMD_GET_CERTIFICATE. */ +#define MC_CMD_TSA_BIND_OP_GET_CERTIFICATE 0x8 +/* enum: Request a secure unbinding operation using unbinding token. This sub- + * command is available for any privileged client. + */ +#define MC_CMD_TSA_BIND_OP_SECURE_UNBIND 0x9 +/* enum: Request a secure decommissioning operation. This sub-command is + * available for any privileged client. 
+ */
+#define MC_CMD_TSA_BIND_OP_SECURE_DECOMMISSION 0xa
+/* enum: Test facility that allows an adapter to be configured to behave as if
+ * bound to a TSA controller with restricted MCDI administrator operations.
+ * This operation is primarily intended to aid host driver development.
+ */
+#define MC_CMD_TSA_BIND_OP_TEST_MCDI 0xb
+
+/* MC_CMD_TSA_BIND_IN_GET_ID msgrequest: Obsolete. Use
+ * MC_CMD_SECURE_NIC_INFO_IN_STATUS.
+ */
+#define MC_CMD_TSA_BIND_IN_GET_ID_LEN 20
+/* The operation requested. */
+#define MC_CMD_TSA_BIND_IN_GET_ID_OP_OFST 0
+#define MC_CMD_TSA_BIND_IN_GET_ID_OP_LEN 4
+/* Cryptographic nonce that TSAC generates and sends to TSAN. TSAC generates
+ * the nonce every time as part of the TSAN post-binding authentication
+ * procedure when the TSAN-TSAC connection terminates and TSAN needs to
+ * re-connect to the TSAC. Refer to SF-114946-SW for more information.
+ */
+#define MC_CMD_TSA_BIND_IN_GET_ID_NONCE_OFST 4
+#define MC_CMD_TSA_BIND_IN_GET_ID_NONCE_LEN 16
+
+/* MC_CMD_TSA_BIND_IN_GET_TICKET msgrequest */
+#define MC_CMD_TSA_BIND_IN_GET_TICKET_LEN 4
+/* The operation requested. */
+#define MC_CMD_TSA_BIND_IN_GET_TICKET_OP_OFST 0
+#define MC_CMD_TSA_BIND_IN_GET_TICKET_OP_LEN 4
+
+/* MC_CMD_TSA_BIND_IN_SET_KEY msgrequest */
+#define MC_CMD_TSA_BIND_IN_SET_KEY_LENMIN 5
+#define MC_CMD_TSA_BIND_IN_SET_KEY_LENMAX 252
+#define MC_CMD_TSA_BIND_IN_SET_KEY_LENMAX_MCDI2 1020
+#define MC_CMD_TSA_BIND_IN_SET_KEY_LEN(num) (4+1*(num))
+#define MC_CMD_TSA_BIND_IN_SET_KEY_DATKEY_NUM(len) (((len)-4)/1)
+/* The operation requested. */
+#define MC_CMD_TSA_BIND_IN_SET_KEY_OP_OFST 0
+#define MC_CMD_TSA_BIND_IN_SET_KEY_OP_LEN 4
+/* This data blob contains the private key generated by the TSAC. TSAN uses
+ * this key for a signing operation. Note- This private key is used in
+ * conjunction with the post-binding TSAN authentication procedure that occurs
+ * when the TSAN-TSAC connection terminates and TSAN tries to reconnect. Refer
+ * to SF-114946-SW for more information.
+ */
+#define MC_CMD_TSA_BIND_IN_SET_KEY_DATKEY_OFST 4
+#define MC_CMD_TSA_BIND_IN_SET_KEY_DATKEY_LEN 1
+#define MC_CMD_TSA_BIND_IN_SET_KEY_DATKEY_MINNUM 1
+#define MC_CMD_TSA_BIND_IN_SET_KEY_DATKEY_MAXNUM 248
+#define MC_CMD_TSA_BIND_IN_SET_KEY_DATKEY_MAXNUM_MCDI2 1016
+
+/* MC_CMD_TSA_BIND_IN_UNBIND msgrequest: Request an insecure unbinding
+ * operation.
+ */
+#define MC_CMD_TSA_BIND_IN_UNBIND_LEN 10
+/* The operation requested. */
+#define MC_CMD_TSA_BIND_IN_UNBIND_OP_OFST 0
+#define MC_CMD_TSA_BIND_IN_UNBIND_OP_LEN 4
+/* TSAN unique identifier for the network adapter */
+#define MC_CMD_TSA_BIND_IN_UNBIND_TSANID_OFST 4
+#define MC_CMD_TSA_BIND_IN_UNBIND_TSANID_LEN 6
+
+/* MC_CMD_TSA_BIND_IN_UNBIND_EXT msgrequest: Obsolete. Use
+ * MC_CMD_TSA_BIND_IN_SECURE_UNBIND.
+ */
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_LENMIN 93
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_LENMAX 252
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_LENMAX_MCDI2 1020
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_LEN(num) (92+1*(num))
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_SIG_NUM(len) (((len)-92)/1)
+/* The operation requested. */
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_OP_OFST 0
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_OP_LEN 4
+/* TSAN unique identifier for the network adapter */
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_TSANID_OFST 4
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_TSANID_LEN 6
+/* Align the arguments to 32 bits */
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_TSANID_RSVD_OFST 10
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_TSANID_RSVD_LEN 2
+/* This attribute identifies the TSA infrastructure domain. The length of the
+ * TSAID attribute is limited to 64 bytes. This is how the TSA SDK defines the
+ * max length. Note- The TSAID is the Organizational Unit Name field as part
+ * of the root and server certificates.
+ */
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_TSAID_OFST 12
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_TSAID_LEN 1
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_TSAID_NUM 64
+/* Unbinding secret token. The adapter validates this unbinding token by
+ * comparing it against the one stored on the adapter as part of the
+ * MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN msgrequest. Refer to SF-115479-TC for
+ * more information.
+ */
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_UNBINDTOKEN_OFST 76
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_UNBINDTOKEN_LEN 16
+/* This is the signature of the above-mentioned fields- TSANID, TSAID and
+ * UNBINDTOKEN. As per current requirements, the SIG opaque data blob contains
+ * an ECDSA ECC-384 based signature. The ECC curve is secp384r1. The signature
+ * is also ASN-1 encoded. Note- The signature is verified based on the public
+ * key stored in the root certificate that is provisioned on the adapter side.
+ * This key is known as the PUKtsaid. Refer to SF-115479-TC for more
+ * information.
+ */
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_SIG_OFST 92
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_SIG_LEN 1
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_SIG_MINNUM 1
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_SIG_MAXNUM 160
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_SIG_MAXNUM_MCDI2 928
+
+/* MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN msgrequest */
+#define MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN_LEN 20
+/* The operation requested. */
+#define MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN_OP_OFST 0
+#define MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN_OP_LEN 4
+/* Unbinding secret token. TSAN persists the unbinding secret token. Refer to
+ * SF-115479-TC for more information.
+ */
+#define MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN_UNBINDTOKEN_OFST 4
+#define MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN_UNBINDTOKEN_LEN 16
+/* enum: There are situations when the binding process does not complete
+ * successfully due to corruption of the key or other attributes at the
+ * database level (Controller). The adapter can't connect to the controller
+ * anymore. To recover, use the decommission command, which forces the adapter
+ * into the unbound state.
+ */
+#define MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN_ADAPTER_BINDING_FAILURE 0x1
+
+/* MC_CMD_TSA_BIND_IN_DECOMMISSION msgrequest: Obsolete. Use
+ * MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION.
+ */
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_LENMIN 109
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_LENMAX 252
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_LENMAX_MCDI2 1020
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_LEN(num) (108+1*(num))
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_SIG_NUM(len) (((len)-108)/1)
+/* This is the signature of the above-mentioned fields- TSAID, USER and
+ * REASON. As per current requirements, the SIG opaque data blob contains an
+ * ECDSA ECC-384 based signature. The ECC curve is secp384r1. The signature is
+ * also ASN-1 encoded. Note- The signature is verified based on the public key
+ * stored in the root certificate that is provisioned on the adapter side.
+ * This key is known as the PUKtsaid. Refer to SF-115479-TC for more
+ * information.
+ */
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_SIG_OFST 108
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_SIG_LEN 1
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_SIG_MINNUM 1
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_SIG_MAXNUM 144
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_SIG_MAXNUM_MCDI2 912
+/* The operation requested. */
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_OP_OFST 0
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_OP_LEN 4
+/* This attribute identifies the TSA infrastructure domain. The length of the
+ * TSAID attribute is limited to 64 bytes. This is how the TSA SDK defines the
+ * max length. Note- The TSAID is the Organizational Unit Name field as part
+ * of the root and server certificates.
+ */
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_TSAID_OFST 4
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_TSAID_LEN 1
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_TSAID_NUM 64
+/* User ID that comes, as an example, from the Controller. Note- The 33 byte
+ * length of this attribute is the maximum length of a Linux user name plus a
+ * null character.
+ */
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_USER_OFST 68
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_USER_LEN 1
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_USER_NUM 33
+/* Align the arguments to 32 bits */
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_USER_RSVD_OFST 101
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_USER_RSVD_LEN 3
+/* Reason why decommissioning happens. Note- The list of reasons, defined as
+ * part of the enumeration below, can be extended.
+ */
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_REASON_OFST 104
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_REASON_LEN 4
+
+/* MC_CMD_TSA_BIND_IN_GET_CERTIFICATE msgrequest: Obsolete. Use
+ * MC_CMD_GET_CERTIFICATE.
+ */
+#define MC_CMD_TSA_BIND_IN_GET_CERTIFICATE_LEN 8
+/* The operation requested, must be MC_CMD_TSA_BIND_OP_GET_CERTIFICATE. */
+#define MC_CMD_TSA_BIND_IN_GET_CERTIFICATE_OP_OFST 0
+#define MC_CMD_TSA_BIND_IN_GET_CERTIFICATE_OP_LEN 4
+/* Type of the certificate to be retrieved. */
+#define MC_CMD_TSA_BIND_IN_GET_CERTIFICATE_TYPE_OFST 4
+#define MC_CMD_TSA_BIND_IN_GET_CERTIFICATE_TYPE_LEN 4
+#define MC_CMD_TSA_BIND_IN_GET_CERTIFICATE_UNUSED 0x0 /* enum */
+/* enum: Adapter Authentication Certificate (AAC). The AAC is used by the
+ * controller to verify the authenticity of the adapter.
+ */
+#define MC_CMD_TSA_BIND_IN_GET_CERTIFICATE_AAC 0x1
+/* enum: Adapter Authentication Signing Certificate (AASC). The AASC is used by
+ * the controller to verify the validity of AAC.
+ */
+#define MC_CMD_TSA_BIND_IN_GET_CERTIFICATE_AASC 0x2
+
+/* MC_CMD_TSA_BIND_IN_SECURE_UNBIND msgrequest: Request a secure unbinding
+ * operation using unbinding token.
+ */
+#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_LENMIN 97
+#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_LENMAX 200
+#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_LENMAX_MCDI2 200
+#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_LEN(num) (96+1*(num))
+#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_SIG_NUM(len) (((len)-96)/1)
+/* The operation requested, must be MC_CMD_TSA_BIND_OP_SECURE_UNBIND. */
+#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_OP_OFST 0
+#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_OP_LEN 4
+/* Type of the message. (MESSAGE_TYPE_xxx) Must be
+ * MESSAGE_TYPE_TSA_SECURE_UNBIND.
+ */ +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_MESSAGE_TYPE_OFST 4 +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_MESSAGE_TYPE_LEN 4 +/* TSAN unique identifier for the network adapter */ +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_TSANID_OFST 8 +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_TSANID_LEN 6 +/* Align the arguments to 32 bits */ +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_TSANID_RSVD_OFST 14 +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_TSANID_RSVD_LEN 2 +/* A NUL padded US-ASCII string identifying the TSA infrastructure domain. This + * field is for information only, and not used by the firmware. Note- The TSAID + * is the Organizational Unit Name field as part of the root and server + * certificates. + */ +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_TSAID_OFST 16 +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_TSAID_LEN 1 +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_TSAID_NUM 64 +/* Unbinding secret token. The adapter validates this unbinding token by + * comparing it against the one stored on the adapter as part of the + * MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN msgrequest. Refer to SF-115479-TC for + * more information. + */ +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_UNBINDTOKEN_OFST 80 +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_UNBINDTOKEN_LEN 16 +/* The signature computed and encoded as specified by MESSAGE_TYPE. */ +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_SIG_OFST 96 +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_SIG_LEN 1 +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_SIG_MINNUM 1 +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_SIG_MAXNUM 104 +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_SIG_MAXNUM_MCDI2 104 + +/* MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION msgrequest: Request a secure + * decommissioning operation. + */ +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_LENMIN 113 +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_LENMAX 216 +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_LENMAX_MCDI2 216 +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_LEN(num) (112+1*(num)) +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_SIG_NUM(len) (((len)-112)/1) +/* The operation requested, must be MC_CMD_TSA_BIND_OP_SECURE_DECOMMISSION. */ +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_OP_OFST 0 +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_OP_LEN 4 +/* Type of the message. (MESSAGE_TYPE_xxx) Must be + * MESSAGE_TYPE_SECURE_DECOMMISSION. + */ +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_MESSAGE_TYPE_OFST 4 +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_MESSAGE_TYPE_LEN 4 +/* A NUL padded US-ASCII string identifying the TSA infrastructure domain. This + * field is for information only, and not used by the firmware. Note- The TSAID + * is the Organizational Unit Name field as part of the root and server + * certificates. + */ +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_TSAID_OFST 8 +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_TSAID_LEN 1 +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_TSAID_NUM 64 +/* A NUL padded US-ASCII string containing user name of the creator of the + * decommissioning ticket. This field is for information only, and not used by + * the firmware. 
+ */
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_USER_OFST 72
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_USER_LEN 1
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_USER_NUM 36
+/* Reason why decommissioning happens */
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_REASON_OFST 108
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_REASON_LEN 4
+/* enum: There are situations when the binding process does not complete
+ * successfully due to corruption of the key or other attributes at the
+ * database level (Controller). The adapter can't connect to the controller
+ * anymore. To recover, use the decommission command to force the adapter into
+ * the unbound state.
+ */
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_ADAPTER_BINDING_FAILURE 0x1
+/* The signature computed and encoded as specified by MESSAGE_TYPE. */
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_SIG_OFST 112
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_SIG_LEN 1
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_SIG_MINNUM 1
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_SIG_MAXNUM 104
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_SIG_MAXNUM_MCDI2 104
+
+/* MC_CMD_TSA_BIND_IN_TEST_MCDI msgrequest: Test mode that emulates MCDI
+ * interface restrictions of a bound adapter. This operation is intended for
+ * test use on adapters that are not deployed and bound to a TSA Controller.
+ * Using it on a bound adapter will succeed but will not alter the MCDI
+ * privileges as MCDI operations will already be restricted.
+ */
+#define MC_CMD_TSA_BIND_IN_TEST_MCDI_LEN 8
+/* The operation requested must be MC_CMD_TSA_BIND_OP_TEST_MCDI. */
+#define MC_CMD_TSA_BIND_IN_TEST_MCDI_OP_OFST 0
+#define MC_CMD_TSA_BIND_IN_TEST_MCDI_OP_LEN 4
+/* Enable or disable emulation of bound adapter */
+#define MC_CMD_TSA_BIND_IN_TEST_MCDI_CTRL_OFST 4
+#define MC_CMD_TSA_BIND_IN_TEST_MCDI_CTRL_LEN 4
+#define MC_CMD_TSA_BIND_IN_TEST_MCDI_DISABLE 0x0 /* enum */
+#define MC_CMD_TSA_BIND_IN_TEST_MCDI_ENABLE 0x1 /* enum */
+
+/* MC_CMD_TSA_BIND_OUT_GET_ID msgresponse: Obsolete. Use
+ * MC_CMD_SECURE_NIC_INFO_OUT_STATUS.
+ */
+#define MC_CMD_TSA_BIND_OUT_GET_ID_LENMIN 15
+#define MC_CMD_TSA_BIND_OUT_GET_ID_LENMAX 252
+#define MC_CMD_TSA_BIND_OUT_GET_ID_LENMAX_MCDI2 1020
+#define MC_CMD_TSA_BIND_OUT_GET_ID_LEN(num) (14+1*(num))
+#define MC_CMD_TSA_BIND_OUT_GET_ID_SIG_NUM(len) (((len)-14)/1)
+/* The protocol operation code MC_CMD_TSA_BIND_OP_GET_ID that is sent back to
+ * the caller.
+ */
+#define MC_CMD_TSA_BIND_OUT_GET_ID_OP_OFST 0
+#define MC_CMD_TSA_BIND_OUT_GET_ID_OP_LEN 4
+/* Rules engine type. Note- The rules engine type allows TSAC to further
+ * identify the connected endpoint (e.g. TSAN, NIC Emulator) type and take the
+ * proper action accordingly. As an example, TSAC uses the rules engine type to
+ * select the SF key that differs in the case of TSAN vs. NIC Emulator.
+ */
+#define MC_CMD_TSA_BIND_OUT_GET_ID_RULE_ENGINE_OFST 4
+#define MC_CMD_TSA_BIND_OUT_GET_ID_RULE_ENGINE_LEN 4
+/* enum: Hardware rules engine. */
+#define MC_CMD_TSA_BIND_OUT_GET_ID_RULE_ENGINE_TSAN 0x1
+/* enum: NIC emulator rules engine. */
+#define MC_CMD_TSA_BIND_OUT_GET_ID_RULE_ENGINE_NEMU 0x2
+/* enum: SSFE. */
+#define MC_CMD_TSA_BIND_OUT_GET_ID_RULE_ENGINE_SSFE 0x3
+/* TSAN unique identifier for the network adapter */
+#define MC_CMD_TSA_BIND_OUT_GET_ID_TSANID_OFST 8
+#define MC_CMD_TSA_BIND_OUT_GET_ID_TSANID_LEN 6
+/* The signature data blob. The signature is computed against the message
+ * formed by TSAN ID concatenated with the NONCE value.
Refer to SF-115479-TC + * for more information also in respect to the private keys that are used to + * sign the message based on TSAN pre/post-binding authentication procedure. + */ +#define MC_CMD_TSA_BIND_OUT_GET_ID_SIG_OFST 14 +#define MC_CMD_TSA_BIND_OUT_GET_ID_SIG_LEN 1 +#define MC_CMD_TSA_BIND_OUT_GET_ID_SIG_MINNUM 1 +#define MC_CMD_TSA_BIND_OUT_GET_ID_SIG_MAXNUM 238 +#define MC_CMD_TSA_BIND_OUT_GET_ID_SIG_MAXNUM_MCDI2 1006 + +/* MC_CMD_TSA_BIND_OUT_GET_TICKET msgresponse */ +#define MC_CMD_TSA_BIND_OUT_GET_TICKET_LENMIN 5 +#define MC_CMD_TSA_BIND_OUT_GET_TICKET_LENMAX 252 +#define MC_CMD_TSA_BIND_OUT_GET_TICKET_LENMAX_MCDI2 1020 +#define MC_CMD_TSA_BIND_OUT_GET_TICKET_LEN(num) (4+1*(num)) +#define MC_CMD_TSA_BIND_OUT_GET_TICKET_TICKET_NUM(len) (((len)-4)/1) +/* The protocol operation code MC_CMD_TSA_BIND_OP_GET_TICKET that is sent back + * to the caller. + */ +#define MC_CMD_TSA_BIND_OUT_GET_TICKET_OP_OFST 0 +#define MC_CMD_TSA_BIND_OUT_GET_TICKET_OP_LEN 4 +/* The ticket represents the data blob construct that TSAN sends to TSAC as + * part of the binding protocol. From the TSAN perspective the ticket is an + * opaque construct. For more info refer to SF-115479-TC. + */ +#define MC_CMD_TSA_BIND_OUT_GET_TICKET_TICKET_OFST 4 +#define MC_CMD_TSA_BIND_OUT_GET_TICKET_TICKET_LEN 1 +#define MC_CMD_TSA_BIND_OUT_GET_TICKET_TICKET_MINNUM 1 +#define MC_CMD_TSA_BIND_OUT_GET_TICKET_TICKET_MAXNUM 248 +#define MC_CMD_TSA_BIND_OUT_GET_TICKET_TICKET_MAXNUM_MCDI2 1016 + +/* MC_CMD_TSA_BIND_OUT_SET_KEY msgresponse */ +#define MC_CMD_TSA_BIND_OUT_SET_KEY_LEN 4 +/* The protocol operation code MC_CMD_TSA_BIND_OP_SET_KEY that is sent back to + * the caller. + */ +#define MC_CMD_TSA_BIND_OUT_SET_KEY_OP_OFST 0 +#define MC_CMD_TSA_BIND_OUT_SET_KEY_OP_LEN 4 + +/* MC_CMD_TSA_BIND_OUT_UNBIND msgresponse: Response to insecure unbind request. + */ +#define MC_CMD_TSA_BIND_OUT_UNBIND_LEN 8 +/* Same as MC_CMD_ERR field, but included as 0 in success cases */ +#define MC_CMD_TSA_BIND_OUT_UNBIND_RESULT_OFST 0 +#define MC_CMD_TSA_BIND_OUT_UNBIND_RESULT_LEN 4 +/* Extra status information */ +#define MC_CMD_TSA_BIND_OUT_UNBIND_INFO_OFST 4 +#define MC_CMD_TSA_BIND_OUT_UNBIND_INFO_LEN 4 +/* enum: Unbind successful. */ +#define MC_CMD_TSA_BIND_OUT_UNBIND_OK_UNBOUND 0x0 +/* enum: TSANID mismatch */ +#define MC_CMD_TSA_BIND_OUT_UNBIND_ERR_BAD_TSANID 0x1 +/* enum: Unable to remove the binding ticket from persistent storage. */ +#define MC_CMD_TSA_BIND_OUT_UNBIND_ERR_REMOVE_TICKET 0x2 +/* enum: TSAN is not bound to a binding ticket. */ +#define MC_CMD_TSA_BIND_OUT_UNBIND_ERR_NOT_BOUND 0x3 + +/* MC_CMD_TSA_BIND_OUT_UNBIND_EXT msgresponse: Obsolete. Use + * MC_CMD_TSA_BIND_OUT_SECURE_UNBIND. + */ +#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_LEN 8 +/* Same as MC_CMD_ERR field, but included as 0 in success cases */ +#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_RESULT_OFST 0 +#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_RESULT_LEN 4 +/* Extra status information */ +#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_INFO_OFST 4 +#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_INFO_LEN 4 +/* enum: Unbind successful. */ +#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_OK_UNBOUND 0x0 +/* enum: TSANID mismatch */ +#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_ERR_BAD_TSANID 0x1 +/* enum: Unable to remove the binding ticket from persistent storage. */ +#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_ERR_REMOVE_TICKET 0x2 +/* enum: TSAN is not bound to a binding ticket. 
*/ +#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_ERR_NOT_BOUND 0x3 +/* enum: Invalid unbind token */ +#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_ERR_BAD_TOKEN 0x4 +/* enum: Invalid signature */ +#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_ERR_BAD_SIGNATURE 0x5 + +/* MC_CMD_TSA_BIND_OUT_SET_UNBINDTOKEN msgresponse */ +#define MC_CMD_TSA_BIND_OUT_SET_UNBINDTOKEN_LEN 4 +/* The protocol operation code MC_CMD_TSA_BIND_OP_SET_UNBINDTOKEN that is sent + * back to the caller. + */ +#define MC_CMD_TSA_BIND_OUT_SET_UNBINDTOKEN_OP_OFST 0 +#define MC_CMD_TSA_BIND_OUT_SET_UNBINDTOKEN_OP_LEN 4 + +/* MC_CMD_TSA_BIND_OUT_DECOMMISSION msgresponse: Obsolete. Use + * MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION. + */ +#define MC_CMD_TSA_BIND_OUT_DECOMMISSION_LEN 4 +/* The protocol operation code MC_CMD_TSA_BIND_OP_DECOMMISSION that is sent + * back to the caller. + */ +#define MC_CMD_TSA_BIND_OUT_DECOMMISSION_OP_OFST 0 +#define MC_CMD_TSA_BIND_OUT_DECOMMISSION_OP_LEN 4 + +/* MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE msgresponse */ +#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_LENMIN 9 +#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_LENMAX 252 +#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_LENMAX_MCDI2 1020 +#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_LEN(num) (8+1*(num)) +#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_DATA_NUM(len) (((len)-8)/1) +/* The protocol operation code MC_CMD_TSA_BIND_OP_GET_CERTIFICATE that is sent + * back to the caller. + */ +#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_OP_OFST 0 +#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_OP_LEN 4 +/* Type of the certificate. */ +#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_TYPE_OFST 4 +#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_TSA_BIND_IN_GET_CERTIFICATE/TYPE */ +/* The certificate data. */ +#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_DATA_OFST 8 +#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_DATA_LEN 1 +#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_DATA_MINNUM 1 +#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_DATA_MAXNUM 244 +#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_DATA_MAXNUM_MCDI2 1012 + +/* MC_CMD_TSA_BIND_OUT_SECURE_UNBIND msgresponse: Response to secure unbind + * request. + */ +#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_LEN 8 +/* The protocol operation code that is sent back to the caller. */ +#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_OP_OFST 0 +#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_OP_LEN 4 +#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_RESULT_OFST 4 +#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_RESULT_LEN 4 +/* enum: Unbind successful. */ +#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_OK_UNBOUND 0x0 +/* enum: TSANID mismatch */ +#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_ERR_BAD_TSANID 0x1 +/* enum: Unable to remove the binding ticket from persistent storage. */ +#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_ERR_REMOVE_TICKET 0x2 +/* enum: TSAN is not bound to a domain. */ +#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_ERR_NOT_BOUND 0x3 +/* enum: Invalid unbind token */ +#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_ERR_BAD_TOKEN 0x4 +/* enum: Invalid signature */ +#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_ERR_BAD_SIGNATURE 0x5 + +/* MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION msgresponse: Response to secure + * decommission request. + */ +#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_LEN 8 +/* The protocol operation code that is sent back to the caller. 
*/ +#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_OP_OFST 0 +#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_OP_LEN 4 +#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_RESULT_OFST 4 +#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_RESULT_LEN 4 +/* enum: Unbind successful. */ +#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_OK_UNBOUND 0x0 +/* enum: TSANID mismatch */ +#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_ERR_BAD_TSANID 0x1 +/* enum: Unable to remove the binding ticket from persistent storage. */ +#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_ERR_REMOVE_TICKET 0x2 +/* enum: TSAN is not bound to a domain. */ +#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_ERR_NOT_BOUND 0x3 +/* enum: Invalid unbind token */ +#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_ERR_BAD_TOKEN 0x4 +/* enum: Invalid signature */ +#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_ERR_BAD_SIGNATURE 0x5 + +/* MC_CMD_TSA_BIND_OUT_TEST_MCDI msgrequest */ +#define MC_CMD_TSA_BIND_OUT_TEST_MCDI_LEN 4 +/* The protocol operation code MC_CMD_TSA_BIND_OP_TEST_MCDI that is sent back + * to the caller. + */ +#define MC_CMD_TSA_BIND_OUT_TEST_MCDI_OP_OFST 0 +#define MC_CMD_TSA_BIND_OUT_TEST_MCDI_OP_LEN 4 + + +/***********************************/ +/* MC_CMD_MANAGE_SECURITY_RULESET_CACHE + * Manage the persistent NVRAM cache of security rules created with + * MC_CMD_SET_SECURITY_RULE. Note that the cache is not automatically updated + * as rules are added or removed; the active ruleset must be explicitly + * committed to the cache. The cache may also be explicitly invalidated, + * without affecting the currently active ruleset. When the cache is valid, it + * will be loaded at power on or MC reboot, instead of the default ruleset. + * Rollback of the currently active ruleset to the cached version (when it is + * valid) is also supported. (Medford-only; for use by SolarSecure apps, not + * directly by drivers. See SF-114946-SW.) NOTE - The only sub-operation + * allowed in an adapter bound to a TSA controller from the local host is + * OP_GET_CACHED_VERSION. All other sub-operations are prohibited. + */ +#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE 0x11a +#undef MC_CMD_0x11a_PRIVILEGE_CTG + +#define MC_CMD_0x11a_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN msgrequest */ +#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_LEN 4 +/* the operation to perform */ +#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_OFST 0 +#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_LEN 4 +/* enum: reports the ruleset version that is cached in persistent storage but + * performs no other action + */ +#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_GET_CACHED_VERSION 0x0 +/* enum: rolls back the active state to the cached version. (May fail with + * ENOENT if there is no valid cached version.) 
+ */ +#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_ROLLBACK 0x1 +/* enum: commits the active state to the persistent cache */ +#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_COMMIT 0x2 +/* enum: invalidates the persistent cache without affecting the active state */ +#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_INVALIDATE 0x3 + +/* MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT msgresponse */ +#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_LENMIN 5 +#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_LENMAX 252 +#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_LEN(num) (4+1*(num)) +#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_VERSION_NUM(len) (((len)-4)/1) +/* indicates whether the persistent cache is valid (after completion of the + * requested operation in the case of rollback, commit, or invalidate) + */ +#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_STATE_OFST 0 +#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_STATE_LEN 4 +/* enum: persistent cache is invalid (the VERSION field will be empty in this + * case) + */ +#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_STATE_INVALID 0x0 +/* enum: persistent cache is valid */ +#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_STATE_VALID 0x1 +/* cached ruleset version (after completion of the requested operation, in the + * case of rollback, commit, or invalidate) as an opaque hash value in the same + * form as MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_VERSION + */ +#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_VERSION_OFST 4 +#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_VERSION_LEN 1 +#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_VERSION_MINNUM 1 +#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_VERSION_MAXNUM 248 +#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_VERSION_MAXNUM_MCDI2 1016 + + +/***********************************/ +/* MC_CMD_NVRAM_PRIVATE_APPEND + * Append a single TLV to the MC_USAGE_TLV partition. Returns MC_CMD_ERR_EEXIST + * if the tag is already present. + */ +#define MC_CMD_NVRAM_PRIVATE_APPEND 0x11c +#undef MC_CMD_0x11c_PRIVILEGE_CTG + +#define MC_CMD_0x11c_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND + +/* MC_CMD_NVRAM_PRIVATE_APPEND_IN msgrequest */ +#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENMIN 9 +#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENMAX 252 +#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENMAX_MCDI2 1020 +#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LEN(num) (8+1*(num)) +#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_NUM(len) (((len)-8)/1) +/* The tag to be appended */ +#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_TAG_OFST 0 +#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_TAG_LEN 4 +/* The length of the data */ +#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENGTH_OFST 4 +#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENGTH_LEN 4 +/* The data to be contained in the TLV structure */ +#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_OFST 8 +#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_LEN 1 +#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_MINNUM 1 +#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_MAXNUM 244 +#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_MAXNUM_MCDI2 1012 + +/* MC_CMD_NVRAM_PRIVATE_APPEND_OUT msgresponse */ +#define MC_CMD_NVRAM_PRIVATE_APPEND_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_XPM_VERIFY_CONTENTS + * Verify that the contents of the XPM memory is correct (Medford only). This + * is used during manufacture to check that the XPM memory has been programmed + * correctly at ATE. 
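+ *
+ * For illustration only, a caller could run this check and read back the
+ * signature length as sketched below, using the MCDI helpers from mcdi.h in
+ * this patch ("efx", "data_type", "sig_len" and "rc" are hypothetical local
+ * variables, not part of this definition):
+ *
+ *     MCDI_DECLARE_BUF(inbuf, MC_CMD_XPM_VERIFY_CONTENTS_IN_LEN);
+ *     MCDI_DECLARE_BUF(outbuf, MC_CMD_XPM_VERIFY_CONTENTS_OUT_LENMAX);
+ *     size_t outlen;
+ *     int rc;
+ *
+ *     MCDI_SET_DWORD(inbuf, XPM_VERIFY_CONTENTS_IN_DATA_TYPE, data_type);
+ *     rc = efx_mcdi_rpc(efx, MC_CMD_XPM_VERIFY_CONTENTS, inbuf,
+ *                       sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
+ *     if (!rc)
+ *         sig_len = MCDI_DWORD(outbuf, XPM_VERIFY_CONTENTS_OUT_SIG_LENGTH);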
+ */ +#define MC_CMD_XPM_VERIFY_CONTENTS 0x11b +#undef MC_CMD_0x11b_PRIVILEGE_CTG + +#define MC_CMD_0x11b_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_XPM_VERIFY_CONTENTS_IN msgrequest */ +#define MC_CMD_XPM_VERIFY_CONTENTS_IN_LEN 4 +/* Data type to be checked */ +#define MC_CMD_XPM_VERIFY_CONTENTS_IN_DATA_TYPE_OFST 0 +#define MC_CMD_XPM_VERIFY_CONTENTS_IN_DATA_TYPE_LEN 4 + +/* MC_CMD_XPM_VERIFY_CONTENTS_OUT msgresponse */ +#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LENMIN 12 +#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LENMAX 252 +#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LEN(num) (12+1*(num)) +#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_NUM(len) (((len)-12)/1) +/* Number of sectors found (test builds only) */ +#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_SECTORS_OFST 0 +#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_SECTORS_LEN 4 +/* Number of bytes found (test builds only) */ +#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_BYTES_OFST 4 +#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_BYTES_LEN 4 +/* Length of signature */ +#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIG_LENGTH_OFST 8 +#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIG_LENGTH_LEN 4 +/* Signature */ +#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_OFST 12 +#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_LEN 1 +#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_MINNUM 0 +#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_MAXNUM 240 +#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_MAXNUM_MCDI2 1008 + + +/***********************************/ +/* MC_CMD_SET_EVQ_TMR + * Update the timer load, timer reload and timer mode values for a given EVQ. + * The requested timer values (in TMR_LOAD_REQ_NS and TMR_RELOAD_REQ_NS) will + * be rounded up to the granularity supported by the hardware, then truncated + * to the range supported by the hardware. The resulting value after the + * rounding and truncation will be returned to the caller (in TMR_LOAD_ACT_NS + * and TMR_RELOAD_ACT_NS). + */ +#define MC_CMD_SET_EVQ_TMR 0x120 +#undef MC_CMD_0x120_PRIVILEGE_CTG + +#define MC_CMD_0x120_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_SET_EVQ_TMR_IN msgrequest */ +#define MC_CMD_SET_EVQ_TMR_IN_LEN 16 +/* Function-relative queue instance */ +#define MC_CMD_SET_EVQ_TMR_IN_INSTANCE_OFST 0 +#define MC_CMD_SET_EVQ_TMR_IN_INSTANCE_LEN 4 +/* Requested value for timer load (in nanoseconds) */ +#define MC_CMD_SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS_OFST 4 +#define MC_CMD_SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS_LEN 4 +/* Requested value for timer reload (in nanoseconds) */ +#define MC_CMD_SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS_OFST 8 +#define MC_CMD_SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS_LEN 4 +/* Timer mode. 
Meanings as per EVQ_TMR_REG.TC_TIMER_VAL */ +#define MC_CMD_SET_EVQ_TMR_IN_TMR_MODE_OFST 12 +#define MC_CMD_SET_EVQ_TMR_IN_TMR_MODE_LEN 4 +#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_DIS 0x0 /* enum */ +#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_IMMED_START 0x1 /* enum */ +#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_TRIG_START 0x2 /* enum */ +#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_INT_HLDOFF 0x3 /* enum */ + +/* MC_CMD_SET_EVQ_TMR_OUT msgresponse */ +#define MC_CMD_SET_EVQ_TMR_OUT_LEN 8 +/* Actual value for timer load (in nanoseconds) */ +#define MC_CMD_SET_EVQ_TMR_OUT_TMR_LOAD_ACT_NS_OFST 0 +#define MC_CMD_SET_EVQ_TMR_OUT_TMR_LOAD_ACT_NS_LEN 4 +/* Actual value for timer reload (in nanoseconds) */ +#define MC_CMD_SET_EVQ_TMR_OUT_TMR_RELOAD_ACT_NS_OFST 4 +#define MC_CMD_SET_EVQ_TMR_OUT_TMR_RELOAD_ACT_NS_LEN 4 + + +/***********************************/ +/* MC_CMD_GET_EVQ_TMR_PROPERTIES + * Query properties about the event queue timers. + */ +#define MC_CMD_GET_EVQ_TMR_PROPERTIES 0x122 +#undef MC_CMD_0x122_PRIVILEGE_CTG + +#define MC_CMD_0x122_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_EVQ_TMR_PROPERTIES_IN msgrequest */ +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_IN_LEN 0 + +/* MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT msgresponse */ +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN 36 +/* Reserved for future use. */ +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_FLAGS_OFST 0 +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_FLAGS_LEN 4 +/* For timers updated via writes to EVQ_TMR_REG, this is the time interval (in + * nanoseconds) for each increment of the timer load/reload count. The + * requested duration of a timer is this value multiplied by the timer + * load/reload count. + */ +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_NS_PER_COUNT_OFST 4 +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_NS_PER_COUNT_LEN 4 +/* For timers updated via writes to EVQ_TMR_REG, this is the maximum value + * allowed for timer load/reload counts. + */ +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_MAX_COUNT_OFST 8 +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_MAX_COUNT_LEN 4 +/* For timers updated via writes to EVQ_TMR_REG, timer load/reload counts not a + * multiple of this step size will be rounded in an implementation defined + * manner. + */ +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_STEP_OFST 12 +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_STEP_LEN 4 +/* Maximum timer duration (in nanoseconds) for timers updated via MCDI. Only + * meaningful if MC_CMD_SET_EVQ_TMR is implemented. + */ +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_MAX_NS_OFST 16 +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_MAX_NS_LEN 4 +/* Timer durations requested via MCDI that are not a multiple of this step size + * will be rounded up. Only meaningful if MC_CMD_SET_EVQ_TMR is implemented. + */ +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_STEP_NS_OFST 20 +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_STEP_NS_LEN 4 +/* For timers updated using the bug35388 workaround, this is the time interval + * (in nanoseconds) for each increment of the timer load/reload count. The + * requested duration of a timer is this value multiplied by the timer + * load/reload count. This field is only meaningful if the bug35388 workaround + * is enabled. + */ +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_NS_PER_COUNT_OFST 24 +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_NS_PER_COUNT_LEN 4 +/* For timers updated using the bug35388 workaround, this is the maximum value + * allowed for timer load/reload counts. 
This field is only meaningful if the + * bug35388 workaround is enabled. + */ +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_MAX_COUNT_OFST 28 +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_MAX_COUNT_LEN 4 +/* For timers updated using the bug35388 workaround, timer load/reload counts + * not a multiple of this step size will be rounded in an implementation + * defined manner. This field is only meaningful if the bug35388 workaround is + * enabled. + */ +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_STEP_OFST 32 +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_STEP_LEN 4 + + +/***********************************/ +/* MC_CMD_ALLOCATE_TX_VFIFO_CP + * When we use the TX_vFIFO_ULL mode, we can allocate common pools using the + * non used switch buffers. + */ +#define MC_CMD_ALLOCATE_TX_VFIFO_CP 0x11d +#undef MC_CMD_0x11d_PRIVILEGE_CTG + +#define MC_CMD_0x11d_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_ALLOCATE_TX_VFIFO_CP_IN msgrequest */ +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_LEN 20 +/* Desired instance. Must be set to a specific instance, which is a function + * local queue index. The calling client must be the currently-assigned user of + * this VI (see MC_CMD_SET_VI_USER). + */ +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INSTANCE_OFST 0 +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INSTANCE_LEN 4 +/* Will the common pool be used as TX_vFIFO_ULL (1) */ +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_MODE_OFST 4 +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_MODE_LEN 4 +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_ENABLED 0x1 /* enum */ +/* enum: Using this interface without TX_vFIFO_ULL is not supported for now */ +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_DISABLED 0x0 +/* Number of buffers to reserve for the common pool */ +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_SIZE_OFST 8 +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_SIZE_LEN 4 +/* TX datapath to which the Common Pool is connected to. */ +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INGRESS_OFST 12 +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INGRESS_LEN 4 +/* enum: Extracts information from function */ +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE -0x1 +/* Network port or RX Engine to which the common pool connects. */ +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_EGRESS_OFST 16 +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_EGRESS_LEN 4 +/* enum: Extracts information from function */ +/* MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE -0x1 */ +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT0 0x0 /* enum */ +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT1 0x1 /* enum */ +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT2 0x2 /* enum */ +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT3 0x3 /* enum */ +/* enum: To enable Switch loopback with Rx engine 0 */ +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_RX_ENGINE0 0x4 +/* enum: To enable Switch loopback with Rx engine 1 */ +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_RX_ENGINE1 0x5 + +/* MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT msgresponse */ +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_LEN 4 +/* ID of the common pool allocated */ +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_CP_ID_OFST 0 +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_CP_ID_LEN 4 + + +/***********************************/ +/* MC_CMD_ALLOCATE_TX_VFIFO_VFIFO + * When we use the TX_vFIFO_ULL mode, we can allocate vFIFOs using the + * previously allocated common pools. 
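+ *
+ * A sketch for illustration only: attaching a vFIFO to a previously
+ * allocated common pool with the MCDI helpers from mcdi.h in this patch.
+ * "efx", "cp_id", "vfifo_id" and "rc" are hypothetical; MODE is set to 1 to
+ * request TX_vFIFO_ULL use, per the field description below; error handling
+ * is omitted:
+ *
+ *     MCDI_DECLARE_BUF(inbuf, MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_LEN);
+ *     MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_LEN);
+ *     size_t outlen;
+ *     int rc;
+ *
+ *     MCDI_SET_DWORD(inbuf, ALLOCATE_TX_VFIFO_VFIFO_IN_CP, cp_id);
+ *     MCDI_SET_DWORD(inbuf, ALLOCATE_TX_VFIFO_VFIFO_IN_EGRESS,
+ *                    MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_USE_CP_VALUE);
+ *     MCDI_SET_DWORD(inbuf, ALLOCATE_TX_VFIFO_VFIFO_IN_SIZE,
+ *                    MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_NO_MINIMUM);
+ *     MCDI_SET_DWORD(inbuf, ALLOCATE_TX_VFIFO_VFIFO_IN_MODE, 1);
+ *     MCDI_SET_DWORD(inbuf, ALLOCATE_TX_VFIFO_VFIFO_IN_PRIORITY,
+ *                    MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_LOWEST_AVAILABLE);
+ *     rc = efx_mcdi_rpc(efx, MC_CMD_ALLOCATE_TX_VFIFO_VFIFO, inbuf,
+ *                       sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
+ *     if (!rc)
+ *         vfifo_id = MCDI_DWORD(outbuf, ALLOCATE_TX_VFIFO_VFIFO_OUT_VID);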
+ */ +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO 0x11e +#undef MC_CMD_0x11e_PRIVILEGE_CTG + +#define MC_CMD_0x11e_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN msgrequest */ +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_LEN 20 +/* Common pool previously allocated to which the new vFIFO will be associated + */ +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_CP_OFST 0 +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_CP_LEN 4 +/* Port or RX engine to associate the vFIFO egress */ +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_EGRESS_OFST 4 +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_EGRESS_LEN 4 +/* enum: Extracts information from common pool */ +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_USE_CP_VALUE -0x1 +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT0 0x0 /* enum */ +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT1 0x1 /* enum */ +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT2 0x2 /* enum */ +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT3 0x3 /* enum */ +/* enum: To enable Switch loopback with Rx engine 0 */ +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_RX_ENGINE0 0x4 +/* enum: To enable Switch loopback with Rx engine 1 */ +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_RX_ENGINE1 0x5 +/* Minimum number of buffers that the pool must have */ +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_SIZE_OFST 8 +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_SIZE_LEN 4 +/* enum: Do not check the space available */ +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_NO_MINIMUM 0x0 +/* Will the vFIFO be used as TX_vFIFO_ULL */ +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_MODE_OFST 12 +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_MODE_LEN 4 +/* Network priority of the vFIFO,if applicable */ +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PRIORITY_OFST 16 +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PRIORITY_LEN 4 +/* enum: Search for the lowest unused priority */ +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_LOWEST_AVAILABLE -0x1 + +/* MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT msgresponse */ +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_LEN 8 +/* Short vFIFO ID */ +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_VID_OFST 0 +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_VID_LEN 4 +/* Network priority of the vFIFO */ +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_PRIORITY_OFST 4 +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_PRIORITY_LEN 4 + + +/***********************************/ +/* MC_CMD_TEARDOWN_TX_VFIFO_VF + * This interface clears the configuration of the given vFIFO and leaves it + * ready to be re-used. + */ +#define MC_CMD_TEARDOWN_TX_VFIFO_VF 0x11f +#undef MC_CMD_0x11f_PRIVILEGE_CTG + +#define MC_CMD_0x11f_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_TEARDOWN_TX_VFIFO_VF_IN msgrequest */ +#define MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_LEN 4 +/* Short vFIFO ID */ +#define MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_VFIFO_OFST 0 +#define MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_VFIFO_LEN 4 + +/* MC_CMD_TEARDOWN_TX_VFIFO_VF_OUT msgresponse */ +#define MC_CMD_TEARDOWN_TX_VFIFO_VF_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_DEALLOCATE_TX_VFIFO_CP + * This interface clears the configuration of the given common pool and leaves + * it ready to be re-used. 
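+ *
+ * For illustration only (variable names hypothetical), a pool could be
+ * released once its vFIFOs have been torn down with
+ * MC_CMD_TEARDOWN_TX_VFIFO_VF:
+ *
+ *     MCDI_DECLARE_BUF(inbuf, MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_LEN);
+ *     int rc;
+ *
+ *     MCDI_SET_DWORD(inbuf, DEALLOCATE_TX_VFIFO_CP_IN_POOL_ID, cp_id);
+ *     rc = efx_mcdi_rpc(efx, MC_CMD_DEALLOCATE_TX_VFIFO_CP, inbuf,
+ *                       sizeof(inbuf), NULL, 0, NULL);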
+ */ +#define MC_CMD_DEALLOCATE_TX_VFIFO_CP 0x121 +#undef MC_CMD_0x121_PRIVILEGE_CTG + +#define MC_CMD_0x121_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN msgrequest */ +#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_LEN 4 +/* Common pool ID given when pool allocated */ +#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_POOL_ID_OFST 0 +#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_POOL_ID_LEN 4 + +/* MC_CMD_DEALLOCATE_TX_VFIFO_CP_OUT msgresponse */ +#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_REKEY + * This request causes the NIC to generate a new per-NIC key and program it + * into the write-once memory. During the process all flash partitions that are + * protected with a CMAC are verified with the old per-NIC key and then signed + * with the new per-NIC key. If the NIC has already reached its rekey limit the + * REKEY op will return MC_CMD_ERR_ERANGE. The REKEY op may block until + * completion or it may return 0 and continue processing, therefore the caller + * must poll at least once to confirm that the rekeying has completed. The POLL + * operation returns MC_CMD_ERR_EBUSY if the rekey process is still running + * otherwise it will return the result of the last completed rekey operation, + * or 0 if there has not been a previous rekey. + */ +#define MC_CMD_REKEY 0x123 +#undef MC_CMD_0x123_PRIVILEGE_CTG + +#define MC_CMD_0x123_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND + +/* MC_CMD_REKEY_IN msgrequest */ +#define MC_CMD_REKEY_IN_LEN 4 +/* the type of operation requested */ +#define MC_CMD_REKEY_IN_OP_OFST 0 +#define MC_CMD_REKEY_IN_OP_LEN 4 +/* enum: Start the rekeying operation */ +#define MC_CMD_REKEY_IN_OP_REKEY 0x0 +/* enum: Poll for completion of the rekeying operation */ +#define MC_CMD_REKEY_IN_OP_POLL 0x1 + +/* MC_CMD_REKEY_OUT msgresponse */ +#define MC_CMD_REKEY_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS + * This interface allows the host to find out how many common pool buffers are + * not yet assigned. + */ +#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS 0x124 +#undef MC_CMD_0x124_PRIVILEGE_CTG + +#define MC_CMD_0x124_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_IN msgrequest */ +#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_IN_LEN 0 + +/* MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT msgresponse */ +#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_LEN 8 +/* Available buffers for the ENG to NET vFIFOs. */ +#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_NET_OFST 0 +#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_NET_LEN 4 +/* Available buffers for the ENG to ENG and NET to ENG vFIFOs. */ +#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_ENG_OFST 4 +#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_ENG_LEN 4 + + +/***********************************/ +/* MC_CMD_SET_SECURITY_FUSES + * Change the security level of the adapter by setting bits in the write-once + * memory. The firmware maps each flag in the message to a set of one or more + * hardware-defined or software-defined bits and sets these bits in the write- + * once memory. For Medford the hardware-defined bits are defined in + * SF-112079-PS 5.3, the software-defined bits are defined in xpm.h. Returns 0 + * if all of the required bits were set and returns MC_CMD_ERR_EIO if any of + * the required bits were not set. 
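+ *
+ * As a hedged sketch (local names hypothetical, and note that the fuses are
+ * write-once), requesting secure-boot enforcement could look like this with
+ * the MCDI helpers from mcdi.h in this patch:
+ *
+ *     MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_SECURITY_FUSES_IN_LEN);
+ *     int rc;
+ *
+ *     MCDI_POPULATE_DWORD_1(inbuf, SET_SECURITY_FUSES_IN_FLAGS,
+ *                           SET_SECURITY_FUSES_IN_SECURE_BOOT, 1);
+ *     rc = efx_mcdi_rpc(efx, MC_CMD_SET_SECURITY_FUSES, inbuf,
+ *                       sizeof(inbuf), NULL, 0, NULL);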
+ */
+#define MC_CMD_SET_SECURITY_FUSES 0x126
+#undef MC_CMD_0x126_PRIVILEGE_CTG
+
+#define MC_CMD_0x126_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_SET_SECURITY_FUSES_IN msgrequest */
+#define MC_CMD_SET_SECURITY_FUSES_IN_LEN 4
+/* Flags specifying what type of security features are being set */
+#define MC_CMD_SET_SECURITY_FUSES_IN_FLAGS_OFST 0
+#define MC_CMD_SET_SECURITY_FUSES_IN_FLAGS_LEN 4
+#define MC_CMD_SET_SECURITY_FUSES_IN_SECURE_BOOT_OFST 0
+#define MC_CMD_SET_SECURITY_FUSES_IN_SECURE_BOOT_LBN 0
+#define MC_CMD_SET_SECURITY_FUSES_IN_SECURE_BOOT_WIDTH 1
+#define MC_CMD_SET_SECURITY_FUSES_IN_REJECT_TEST_SIGNED_OFST 0
+#define MC_CMD_SET_SECURITY_FUSES_IN_REJECT_TEST_SIGNED_LBN 1
+#define MC_CMD_SET_SECURITY_FUSES_IN_REJECT_TEST_SIGNED_WIDTH 1
+#define MC_CMD_SET_SECURITY_FUSES_IN_SOFT_CONFIG_OFST 0
+#define MC_CMD_SET_SECURITY_FUSES_IN_SOFT_CONFIG_LBN 31
+#define MC_CMD_SET_SECURITY_FUSES_IN_SOFT_CONFIG_WIDTH 1
+
+/* MC_CMD_SET_SECURITY_FUSES_OUT msgresponse */
+#define MC_CMD_SET_SECURITY_FUSES_OUT_LEN 0
+
+/* MC_CMD_SET_SECURITY_FUSES_V2_OUT msgresponse */
+#define MC_CMD_SET_SECURITY_FUSES_V2_OUT_LEN 4
+/* Flags specifying which security features are enforced on the NIC after the
+ * flags in the request have been applied. See
+ * MC_CMD_SET_SECURITY_FUSES_IN/FLAGS for flag definitions.
+ */
+#define MC_CMD_SET_SECURITY_FUSES_V2_OUT_FLAGS_OFST 0
+#define MC_CMD_SET_SECURITY_FUSES_V2_OUT_FLAGS_LEN 4
+
+
+/***********************************/
+/* MC_CMD_TSA_INFO
+ * Messages sent from a TSA adapter to a TSA controller. This command is only
+ * valid when the MCDI header has MESSAGE_TYPE set to MCDI_MESSAGE_TYPE_TSA.
+ * This command is not sent by the driver to the MC; it is sent from the MC to
+ * a TSA controller, being treated more like an alert message than a command;
+ * hence the MC does not expect a response in return. Doxbox reference
+ * SF-117371-SW
+ */
+#define MC_CMD_TSA_INFO 0x127
+#undef MC_CMD_0x127_PRIVILEGE_CTG
+
+#define MC_CMD_0x127_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_TSA_INFO_IN msgrequest */
+#define MC_CMD_TSA_INFO_IN_LEN 4
+#define MC_CMD_TSA_INFO_IN_OP_HDR_OFST 0
+#define MC_CMD_TSA_INFO_IN_OP_HDR_LEN 4
+#define MC_CMD_TSA_INFO_IN_OP_OFST 0
+#define MC_CMD_TSA_INFO_IN_OP_LBN 0
+#define MC_CMD_TSA_INFO_IN_OP_WIDTH 16
+/* enum: Information about a recently discovered local IP address of the
+ * adapter
+ */
+#define MC_CMD_TSA_INFO_OP_LOCAL_IP 0x1
+/* enum: Information about a sampled packet that either did not match any
+ * black/white-list filters and was allowed by the default filter, or did not
+ * match any black/white-list filters and was denied by the default filter
+ */
+#define MC_CMD_TSA_INFO_OP_PKT_SAMPLE 0x2
+/* enum: Information about an unbind or decommission attempt. */
+#define MC_CMD_TSA_INFO_OP_UNBIND 0x3
+
+/* MC_CMD_TSA_INFO_IN_LOCAL_IP msgrequest:
+ *
+ * The TSA controller maintains a list of IP addresses valid for each port of a
+ * TSA adapter. The TSA controller requires information from the adapter
+ * in order to learn new IP addresses assigned to a physical port and to
+ * identify those that are no longer assigned to the physical port. For this
+ * purpose, the TSA adapter snoops ARP replies, gratuitous ARP requests and ARP
+ * probe packets seen on each physical port. This definition describes the
+ * format of the notification message sent from a TSA adapter to a TSA
+ * controller relating to a change in IP address
+ * assignment for a port. Doxbox reference SF-117371.
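+ *
+ * For illustration only, a receiver of this notification could unpack the
+ * META word using the LBN/WIDTH constants defined below (plain C; "meta" is
+ * a hypothetical host-order copy of the dword at offset 4):
+ *
+ *     port = (meta >> MC_CMD_TSA_INFO_IN_LOCAL_IP_META_PORT_INDEX_LBN) &
+ *            ((1u << MC_CMD_TSA_INFO_IN_LOCAL_IP_META_PORT_INDEX_WIDTH) - 1);
+ *     reason = (meta >> MC_CMD_TSA_INFO_IN_LOCAL_IP_META_REASON_LBN) &
+ *              ((1u << MC_CMD_TSA_INFO_IN_LOCAL_IP_META_REASON_WIDTH) - 1);
+ *     is_ipv4 = (meta >> MC_CMD_TSA_INFO_IN_LOCAL_IP_META_IPV4_LBN) & 1;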
+ *
+ * There may be a possibility of combining multiple notifications in a single
+ * message in future. When that happens, a new flag can be defined using the
+ * reserved bits to describe the extended format of this notification.
+ */
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_LEN 18
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_OP_HDR_OFST 0
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_OP_HDR_LEN 4
+/* Additional metadata describing the IP address information such as source of
+ * information retrieval, type of IP address, physical port number.
+ */
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_META_OFST 4
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_META_LEN 4
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_META_PORT_INDEX_OFST 4
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_META_PORT_INDEX_LBN 0
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_META_PORT_INDEX_WIDTH 8
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_RESERVED_OFST 4
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_RESERVED_LBN 8
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_RESERVED_WIDTH 8
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_META_REASON_OFST 4
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_META_REASON_LBN 16
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_META_REASON_WIDTH 8
+/* enum: ARP reply sent out of the physical port */
+#define MC_CMD_TSA_INFO_IP_REASON_TX_ARP 0x0
+/* enum: ARP probe packet received on the physical port */
+#define MC_CMD_TSA_INFO_IP_REASON_RX_ARP_PROBE 0x1
+/* enum: Gratuitous ARP packet received on the physical port */
+#define MC_CMD_TSA_INFO_IP_REASON_RX_GRATUITOUS_ARP 0x2
+/* enum: DHCP ACK packet received on the physical port */
+#define MC_CMD_TSA_INFO_IP_REASON_RX_DHCP_ACK 0x3
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_META_IPV4_OFST 4
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_META_IPV4_LBN 24
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_META_IPV4_WIDTH 1
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_RESERVED1_OFST 4
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_RESERVED1_LBN 25
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_RESERVED1_WIDTH 7
+/* IPv4 address retrieved from the sampled packets. This field is relevant only
+ * when META_IPV4 is set to 1.
+ */
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_IPV4_ADDR_OFST 8
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_IPV4_ADDR_LEN 4
+/* Target MAC address retrieved from the sampled packet. */
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_MAC_ADDR_OFST 12
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_MAC_ADDR_LEN 1
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_MAC_ADDR_NUM 6
+
+/* MC_CMD_TSA_INFO_IN_PKT_SAMPLE msgrequest:
+ *
+ * It is desirable for the TSA controller to learn the traffic pattern of
+ * packets seen at the network port being monitored. In order to learn about
+ * the traffic pattern, the TSA controller may want to sample packets seen at
+ * the network port. Based on the packet samples that the TSA controller
+ * receives from the adapter, the controller may choose to configure additional
+ * black-list or white-list rules to allow or block packets as required.
+ *
+ * Although the entire sampled packet as seen on the network port is available
+ * to the MC, the length of the sampled packet sent to the controller is
+ * restricted by the MCDI payload size. Besides, the TSA controller does not
+ * require the entire packet to make decisions about filter updates. Hence the
+ * packet sample being passed to the controller is truncated to 128 bytes.
+ * This length is large enough to hold the Ethernet header, IP header and
+ * maximum length of supported L4 protocol headers (IPv4 only, but can hold
+ * IPv6 header too, if required in future).
+ * + * The intention is that any future changes to this message format that are not + * backwards compatible will be defined with a new operation code. + */ +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_LEN 136 +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_OP_HDR_OFST 0 +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_OP_HDR_LEN 4 +/* Additional metadata describing the sampled packet */ +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_OFST 4 +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_LEN 4 +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_PORT_INDEX_OFST 4 +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_PORT_INDEX_LBN 0 +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_PORT_INDEX_WIDTH 8 +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_DIRECTION_OFST 4 +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_DIRECTION_LBN 8 +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_DIRECTION_WIDTH 1 +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_RESERVED_OFST 4 +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_RESERVED_LBN 9 +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_RESERVED_WIDTH 7 +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_ACTION_MASK_OFST 4 +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_ACTION_MASK_LBN 16 +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_ACTION_MASK_WIDTH 4 +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_ACTION_ALLOW_OFST 4 +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_ACTION_ALLOW_LBN 16 +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_ACTION_ALLOW_WIDTH 1 +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_ACTION_DENY_OFST 4 +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_ACTION_DENY_LBN 17 +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_ACTION_DENY_WIDTH 1 +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_ACTION_COUNT_OFST 4 +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_ACTION_COUNT_LBN 18 +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_ACTION_COUNT_WIDTH 1 +/* 128-byte raw prefix of the sampled packet which includes the ethernet + * header, IP header and L4 protocol header (only IPv4 supported initially). + * This provides the controller enough information about the packet sample to + * report traffic patterns seen on a network port and to make decisions + * concerning rule-set updates. + */ +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_PACKET_DATA_OFST 8 +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_PACKET_DATA_LEN 1 +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_PACKET_DATA_NUM 128 + +/* MC_CMD_TSA_INFO_IN_UNBIND msgrequest: Information about an unbind or + * decommission attempt. The purpose of this event is to let the controller + * know about unbind and decommission attempts (both successful and failed) + * received from the adapter host. The event is not sent if the unbind or + * decommission request was received from the controller. + */ +#define MC_CMD_TSA_INFO_IN_UNBIND_LEN 12 +#define MC_CMD_TSA_INFO_IN_UNBIND_OP_HDR_OFST 0 +#define MC_CMD_TSA_INFO_IN_UNBIND_OP_HDR_LEN 4 +#define MC_CMD_TSA_INFO_IN_UNBIND_OP_OFST 0 +#define MC_CMD_TSA_INFO_IN_UNBIND_OP_LBN 0 +#define MC_CMD_TSA_INFO_IN_UNBIND_OP_WIDTH 16 +/* Type of the unbind attempt. */ +#define MC_CMD_TSA_INFO_IN_UNBIND_TYPE_OFST 4 +#define MC_CMD_TSA_INFO_IN_UNBIND_TYPE_LEN 4 +/* enum: This event is sent because MC_CMD_TSA_BIND_OP_SECURE_UNBIND was + * received from the adapter local host. + */ +#define MC_CMD_TSA_INFO_UNBIND_TYPE_SECURE_UNBIND 0x1 +/* enum: This event is sent because MC_CMD_TSA_BIND_OP_SECURE_DECOMMISSION was + * received from the adapter local host. + */ +#define MC_CMD_TSA_INFO_UNBIND_TYPE_SECURE_DECOMMISSION 0x2 +/* Result of the attempt. 
*/
+#define MC_CMD_TSA_INFO_IN_UNBIND_RESULT_OFST 8
+#define MC_CMD_TSA_INFO_IN_UNBIND_RESULT_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_TSA_BIND/MC_CMD_TSA_BIND_OUT_SECURE_UNBIND/RESULT */
+
+/* MC_CMD_TSA_INFO_OUT msgresponse */
+#define MC_CMD_TSA_INFO_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_HOST_INFO
+ * Commands to apply or retrieve host-related information from an adapter.
+ * Doxbox reference SF-117371-SW
+ */
+#define MC_CMD_HOST_INFO 0x128
+#undef MC_CMD_0x128_PRIVILEGE_CTG
+
+#define MC_CMD_0x128_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_HOST_INFO_IN msgrequest */
+#define MC_CMD_HOST_INFO_IN_LEN 4
+/* sub-operation code info */
+#define MC_CMD_HOST_INFO_IN_OP_HDR_OFST 0
+#define MC_CMD_HOST_INFO_IN_OP_HDR_LEN 4
+#define MC_CMD_HOST_INFO_IN_OP_OFST 0
+#define MC_CMD_HOST_INFO_IN_OP_LBN 0
+#define MC_CMD_HOST_INFO_IN_OP_WIDTH 16
+/* enum: Read a 16-byte unique host identifier from the adapter. This UUID
+ * helps to identify the host that an adapter is plugged into. This identifier
+ * is ideally the system UUID retrieved and set by the UEFI driver. If the UEFI
+ * driver is unable to extract the system UUID, it would still set a random
+ * 16-byte value into each supported SF adapter plugged into it. Host UUIDs may
+ * change if the system is power-cycled; however, they persist across adapter
+ * resets. If the host UUID was not set on an adapter, due to an unsupported
+ * version of the UEFI driver, then this command returns an error. Doxbox
+ * reference - SF-117371-SW section 'Host UUID'.
+ */
+#define MC_CMD_HOST_INFO_OP_GET_UUID 0x0
+/* enum: Set a 16-byte unique host identifier on the adapter to identify the
+ * host that the adapter is plugged into. See MC_CMD_HOST_INFO_OP_GET_UUID for
+ * further details.
+ */
+#define MC_CMD_HOST_INFO_OP_SET_UUID 0x1
+
+/* MC_CMD_HOST_INFO_IN_GET_UUID msgrequest */
+#define MC_CMD_HOST_INFO_IN_GET_UUID_LEN 4
+/* sub-operation code info */
+#define MC_CMD_HOST_INFO_IN_GET_UUID_OP_HDR_OFST 0
+#define MC_CMD_HOST_INFO_IN_GET_UUID_OP_HDR_LEN 4
+
+/* MC_CMD_HOST_INFO_OUT_GET_UUID msgresponse */
+#define MC_CMD_HOST_INFO_OUT_GET_UUID_LEN 16
+/* 16-byte host UUID read out of the adapter. See MC_CMD_HOST_INFO_OP_GET_UUID
+ * for further details.
+ */
+#define MC_CMD_HOST_INFO_OUT_GET_UUID_HOST_UUID_OFST 0
+#define MC_CMD_HOST_INFO_OUT_GET_UUID_HOST_UUID_LEN 1
+#define MC_CMD_HOST_INFO_OUT_GET_UUID_HOST_UUID_NUM 16
+
+/* MC_CMD_HOST_INFO_IN_SET_UUID msgrequest */
+#define MC_CMD_HOST_INFO_IN_SET_UUID_LEN 20
+/* sub-operation code info */
+#define MC_CMD_HOST_INFO_IN_SET_UUID_OP_HDR_OFST 0
+#define MC_CMD_HOST_INFO_IN_SET_UUID_OP_HDR_LEN 4
+/* 16-byte host UUID set on the adapter. See MC_CMD_HOST_INFO_OP_GET_UUID for
+ * further details.
+ */
+#define MC_CMD_HOST_INFO_IN_SET_UUID_HOST_UUID_OFST 4
+#define MC_CMD_HOST_INFO_IN_SET_UUID_HOST_UUID_LEN 1
+#define MC_CMD_HOST_INFO_IN_SET_UUID_HOST_UUID_NUM 16
+
+/* MC_CMD_HOST_INFO_OUT_SET_UUID msgresponse */
+#define MC_CMD_HOST_INFO_OUT_SET_UUID_LEN 0
+
+
+/***********************************/
+/* MC_CMD_TSAN_INFO
+ * Get TSA adapter information. TSA controllers query each TSA adapter to learn
+ * some configuration parameters of each adapter.
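+ *
+ * As an illustrative sketch only (assuming the usual MCDI helpers from
+ * mcdi.h), a caller might issue the GET_CFG sub-operation roughly as:
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_TSAN_INFO_IN_GET_CFG_LEN);
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_TSAN_INFO_OUT_GET_CFG_LEN);
+ *	size_t outlen;
+ *	int rc;
+ *
+ *	MCDI_SET_DWORD(inbuf, TSAN_INFO_IN_GET_CFG_OP_HDR,
+ *		       MC_CMD_TSAN_INFO_OP_GET_CFG);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_TSAN_INFO, inbuf, sizeof(inbuf),
+ *			  outbuf, sizeof(outbuf), &outlen);
+ *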
Doxbox reference SF-117371-SW + * section 'Adapter Information' + */ +#define MC_CMD_TSAN_INFO 0x129 +#undef MC_CMD_0x129_PRIVILEGE_CTG + +#define MC_CMD_0x129_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_TSAN_INFO_IN msgrequest */ +#define MC_CMD_TSAN_INFO_IN_LEN 4 +/* sub-operation code info */ +#define MC_CMD_TSAN_INFO_IN_OP_HDR_OFST 0 +#define MC_CMD_TSAN_INFO_IN_OP_HDR_LEN 4 +#define MC_CMD_TSAN_INFO_IN_OP_OFST 0 +#define MC_CMD_TSAN_INFO_IN_OP_LBN 0 +#define MC_CMD_TSAN_INFO_IN_OP_WIDTH 16 +/* enum: Read configuration parameters and IDs that uniquely identify an + * adapter. The parameters include - host identification, adapter + * identification string and number of physical ports on the adapter. + */ +#define MC_CMD_TSAN_INFO_OP_GET_CFG 0x0 + +/* MC_CMD_TSAN_INFO_IN_GET_CFG msgrequest */ +#define MC_CMD_TSAN_INFO_IN_GET_CFG_LEN 4 +/* sub-operation code info */ +#define MC_CMD_TSAN_INFO_IN_GET_CFG_OP_HDR_OFST 0 +#define MC_CMD_TSAN_INFO_IN_GET_CFG_OP_HDR_LEN 4 + +/* MC_CMD_TSAN_INFO_OUT_GET_CFG msgresponse */ +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_LEN 26 +/* Information about the configuration parameters returned in this response. */ +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_CONFIG_WORD_OFST 0 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_CONFIG_WORD_LEN 4 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_CAP_FLAGS_OFST 0 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_CAP_FLAGS_LBN 0 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_CAP_FLAGS_WIDTH 16 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_FLAG_HOST_UUID_VALID_OFST 0 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_FLAG_HOST_UUID_VALID_LBN 0 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_FLAG_HOST_UUID_VALID_WIDTH 1 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_NUM_PORTS_OFST 0 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_NUM_PORTS_LBN 16 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_NUM_PORTS_WIDTH 8 +/* 16-byte host UUID read out of the adapter. See MC_CMD_HOST_INFO_OP_GET_UUID + * for further details. + */ +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_HOST_UUID_OFST 4 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_HOST_UUID_LEN 1 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_HOST_UUID_NUM 16 +/* A unique identifier per adapter. The base MAC address of the card is used + * for this purpose. + */ +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_GUID_OFST 20 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_GUID_LEN 1 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_GUID_NUM 6 + +/* MC_CMD_TSAN_INFO_OUT_GET_CFG_V2 msgresponse */ +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_LEN 36 +/* Information about the configuration parameters returned in this response. */ +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_CONFIG_WORD_OFST 0 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_CONFIG_WORD_LEN 4 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_CAP_FLAGS_OFST 0 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_CAP_FLAGS_LBN 0 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_CAP_FLAGS_WIDTH 16 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_FLAG_HOST_UUID_VALID_OFST 0 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_FLAG_HOST_UUID_VALID_LBN 0 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_FLAG_HOST_UUID_VALID_WIDTH 1 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_NUM_PORTS_OFST 0 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_NUM_PORTS_LBN 16 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_NUM_PORTS_WIDTH 8 +/* 16-byte host UUID read out of the adapter. See MC_CMD_HOST_INFO_OP_GET_UUID + * for further details. + */ +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_HOST_UUID_OFST 4 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_HOST_UUID_LEN 1 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_HOST_UUID_NUM 16 +/* A unique identifier per adapter. 
The base MAC address of the card is used + * for this purpose. + */ +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_GUID_OFST 20 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_GUID_LEN 1 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_GUID_NUM 6 +/* Unused bytes, defined for 32-bit alignment of new fields. */ +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_UNUSED_OFST 26 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_UNUSED_LEN 2 +/* Maximum number of TSA statistics counters in each direction of dataflow + * supported on the card. Note that the statistics counters are always + * allocated in pairs, i.e. a counter ID is associated with one Tx and one Rx + * counter. + */ +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_MAX_STATS_OFST 28 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_MAX_STATS_LEN 4 +/* Width of each statistics counter (represented in bits). This gives an + * indication of wrap point to the user. + */ +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_STATS_WIDTH_OFST 32 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_STATS_WIDTH_LEN 4 + + +/***********************************/ +/* MC_CMD_TSA_STATISTICS + * TSA adapter statistics operations. + */ +#define MC_CMD_TSA_STATISTICS 0x130 +#undef MC_CMD_0x130_PRIVILEGE_CTG + +#define MC_CMD_0x130_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND + +/* MC_CMD_TSA_STATISTICS_IN msgrequest */ +#define MC_CMD_TSA_STATISTICS_IN_LEN 4 +/* TSA statistics sub-operation code */ +#define MC_CMD_TSA_STATISTICS_IN_OP_CODE_OFST 0 +#define MC_CMD_TSA_STATISTICS_IN_OP_CODE_LEN 4 +/* enum: Get the configuration parameters that describe the TSA statistics + * layout on the adapter. + */ +#define MC_CMD_TSA_STATISTICS_OP_GET_CONFIG 0x0 +/* enum: Read and/or clear TSA statistics counters. */ +#define MC_CMD_TSA_STATISTICS_OP_READ_CLEAR 0x1 + +/* MC_CMD_TSA_STATISTICS_IN_GET_CONFIG msgrequest */ +#define MC_CMD_TSA_STATISTICS_IN_GET_CONFIG_LEN 4 +/* TSA statistics sub-operation code */ +#define MC_CMD_TSA_STATISTICS_IN_GET_CONFIG_OP_CODE_OFST 0 +#define MC_CMD_TSA_STATISTICS_IN_GET_CONFIG_OP_CODE_LEN 4 + +/* MC_CMD_TSA_STATISTICS_OUT_GET_CONFIG msgresponse */ +#define MC_CMD_TSA_STATISTICS_OUT_GET_CONFIG_LEN 8 +/* Maximum number of TSA statistics counters in each direction of dataflow + * supported on the card. Note that the statistics counters are always + * allocated in pairs, i.e. a counter ID is associated with one Tx and one Rx + * counter. + */ +#define MC_CMD_TSA_STATISTICS_OUT_GET_CONFIG_MAX_STATS_OFST 0 +#define MC_CMD_TSA_STATISTICS_OUT_GET_CONFIG_MAX_STATS_LEN 4 +/* Width of each statistics counter (represented in bits). This gives an + * indication of wrap point to the user. 
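+ * For example, a reported width of 32 means each counter wraps at 2^32,
+ * i.e. after 4294967296 increments.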
+ */
+#define MC_CMD_TSA_STATISTICS_OUT_GET_CONFIG_STATS_WIDTH_OFST 4
+#define MC_CMD_TSA_STATISTICS_OUT_GET_CONFIG_STATS_WIDTH_LEN 4
+
+/* MC_CMD_TSA_STATISTICS_IN_READ_CLEAR msgrequest */
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_LENMIN 20
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_LENMAX 252
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_LENMAX_MCDI2 1020
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_LEN(num) (16+4*(num))
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_COUNTER_ID_NUM(len) (((len)-16)/4)
+/* TSA statistics sub-operation code */
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_OP_CODE_OFST 0
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_OP_CODE_LEN 4
+/* Parameters describing the statistics operation */
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_FLAGS_OFST 4
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_FLAGS_LEN 4
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_READ_OFST 4
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_READ_LBN 0
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_READ_WIDTH 1
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_CLEAR_OFST 4
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_CLEAR_LBN 1
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_CLEAR_WIDTH 1
+/* Counter ID list specification type */
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_MODE_OFST 8
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_MODE_LEN 4
+/* enum: The statistics counters are specified as an unordered list of
+ * individual counter IDs.
+ */
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_LIST 0x0
+/* enum: The statistics counters are specified as a range of consecutive
+ * counter IDs.
+ */
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_RANGE 0x1
+/* Number of statistics counters */
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_NUM_STATS_OFST 12
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_NUM_STATS_LEN 4
+/* Counter IDs to be read/cleared. When mode is set to LIST, this entry holds a
+ * list of counter IDs to be operated on. When mode is set to RANGE, this entry
+ * holds a single counter ID representing the start of the range of counter IDs
+ * to be operated on.
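+ * For example, with MODE set to RANGE, NUM_STATS set to 4 and
+ * COUNTER_ID[0] set to 8, counters 8, 9, 10 and 11 are operated on.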
+ */ +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_COUNTER_ID_OFST 16 +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_COUNTER_ID_LEN 4 +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_COUNTER_ID_MINNUM 1 +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_COUNTER_ID_MAXNUM 59 +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_COUNTER_ID_MAXNUM_MCDI2 251 + +/* MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR msgresponse */ +#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_LENMIN 24 +#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_LENMAX 248 +#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_LENMAX_MCDI2 1016 +#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_LEN(num) (8+16*(num)) +#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_STATS_COUNTERS_NUM(len) (((len)-8)/16) +/* Number of statistics counters returned in this response */ +#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_NUM_STATS_OFST 0 +#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_NUM_STATS_LEN 4 +/* MC_TSA_STATISTICS_ENTRY Note that this field is expected to start at a + * 64-bit aligned offset + */ +#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_STATS_COUNTERS_OFST 8 +#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_STATS_COUNTERS_LEN 16 +#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_STATS_COUNTERS_MINNUM 1 +#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_STATS_COUNTERS_MAXNUM 15 +#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_STATS_COUNTERS_MAXNUM_MCDI2 63 + +/* MC_TSA_STATISTICS_ENTRY structuredef */ +#define MC_TSA_STATISTICS_ENTRY_LEN 16 +/* Tx statistics counter */ +#define MC_TSA_STATISTICS_ENTRY_TX_STAT_OFST 0 +#define MC_TSA_STATISTICS_ENTRY_TX_STAT_LEN 8 +#define MC_TSA_STATISTICS_ENTRY_TX_STAT_LO_OFST 0 +#define MC_TSA_STATISTICS_ENTRY_TX_STAT_LO_LEN 4 +#define MC_TSA_STATISTICS_ENTRY_TX_STAT_LO_LBN 0 +#define MC_TSA_STATISTICS_ENTRY_TX_STAT_LO_WIDTH 32 +#define MC_TSA_STATISTICS_ENTRY_TX_STAT_HI_OFST 4 +#define MC_TSA_STATISTICS_ENTRY_TX_STAT_HI_LEN 4 +#define MC_TSA_STATISTICS_ENTRY_TX_STAT_HI_LBN 32 +#define MC_TSA_STATISTICS_ENTRY_TX_STAT_HI_WIDTH 32 +#define MC_TSA_STATISTICS_ENTRY_TX_STAT_LBN 0 +#define MC_TSA_STATISTICS_ENTRY_TX_STAT_WIDTH 64 +/* Rx statistics counter */ +#define MC_TSA_STATISTICS_ENTRY_RX_STAT_OFST 8 +#define MC_TSA_STATISTICS_ENTRY_RX_STAT_LEN 8 +#define MC_TSA_STATISTICS_ENTRY_RX_STAT_LO_OFST 8 +#define MC_TSA_STATISTICS_ENTRY_RX_STAT_LO_LEN 4 +#define MC_TSA_STATISTICS_ENTRY_RX_STAT_LO_LBN 64 +#define MC_TSA_STATISTICS_ENTRY_RX_STAT_LO_WIDTH 32 +#define MC_TSA_STATISTICS_ENTRY_RX_STAT_HI_OFST 12 +#define MC_TSA_STATISTICS_ENTRY_RX_STAT_HI_LEN 4 +#define MC_TSA_STATISTICS_ENTRY_RX_STAT_HI_LBN 96 +#define MC_TSA_STATISTICS_ENTRY_RX_STAT_HI_WIDTH 32 +#define MC_TSA_STATISTICS_ENTRY_RX_STAT_LBN 64 +#define MC_TSA_STATISTICS_ENTRY_RX_STAT_WIDTH 64 + + +/***********************************/ +/* MC_CMD_ERASE_INITIAL_NIC_SECRET + * This request causes the NIC to find the initial NIC secret (programmed + * during ATE) in XPM memory and if and only if the NIC has already been + * rekeyed with MC_CMD_REKEY, erase it. This is used by manftest after + * installing TSA binding certificates. See SF-117631-TC. + */ +#define MC_CMD_ERASE_INITIAL_NIC_SECRET 0x131 +#undef MC_CMD_0x131_PRIVILEGE_CTG + +#define MC_CMD_0x131_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND + +/* MC_CMD_ERASE_INITIAL_NIC_SECRET_IN msgrequest */ +#define MC_CMD_ERASE_INITIAL_NIC_SECRET_IN_LEN 0 + +/* MC_CMD_ERASE_INITIAL_NIC_SECRET_OUT msgresponse */ +#define MC_CMD_ERASE_INITIAL_NIC_SECRET_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_TSA_CONFIG + * TSA adapter configuration operations. 
This command is used to prepare the + * NIC for TSA binding. + */ +#define MC_CMD_TSA_CONFIG 0x64 +#undef MC_CMD_0x64_PRIVILEGE_CTG + +#define MC_CMD_0x64_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_TSA_CONFIG_IN msgrequest */ +#define MC_CMD_TSA_CONFIG_IN_LEN 4 +/* TSA configuration sub-operation code */ +#define MC_CMD_TSA_CONFIG_IN_OP_OFST 0 +#define MC_CMD_TSA_CONFIG_IN_OP_LEN 4 +/* enum: Append a single item to the tsa_config partition. Items will be + * encrypted unless they are declared as non-sensitive. Returns + * MC_CMD_ERR_EEXIST if the tag is already present. + */ +#define MC_CMD_TSA_CONFIG_OP_APPEND 0x1 +/* enum: Reset the tsa_config partition to a clean state. */ +#define MC_CMD_TSA_CONFIG_OP_RESET 0x2 +/* enum: Read back a configured item from tsa_config partition. Returns + * MC_CMD_ERR_ENOENT if the item doesn't exist, or MC_CMD_ERR_EPERM if the item + * is declared as sensitive (i.e. is encrypted). + */ +#define MC_CMD_TSA_CONFIG_OP_READ 0x3 + +/* MC_CMD_TSA_CONFIG_IN_APPEND msgrequest */ +#define MC_CMD_TSA_CONFIG_IN_APPEND_LENMIN 12 +#define MC_CMD_TSA_CONFIG_IN_APPEND_LENMAX 252 +#define MC_CMD_TSA_CONFIG_IN_APPEND_LENMAX_MCDI2 1020 +#define MC_CMD_TSA_CONFIG_IN_APPEND_LEN(num) (12+1*(num)) +#define MC_CMD_TSA_CONFIG_IN_APPEND_DATA_NUM(len) (((len)-12)/1) +/* TSA configuration sub-operation code. The value shall be + * MC_CMD_TSA_CONFIG_OP_APPEND. + */ +#define MC_CMD_TSA_CONFIG_IN_APPEND_OP_OFST 0 +#define MC_CMD_TSA_CONFIG_IN_APPEND_OP_LEN 4 +/* The tag to be appended */ +#define MC_CMD_TSA_CONFIG_IN_APPEND_TAG_OFST 4 +#define MC_CMD_TSA_CONFIG_IN_APPEND_TAG_LEN 4 +/* The length of the data in bytes */ +#define MC_CMD_TSA_CONFIG_IN_APPEND_LENGTH_OFST 8 +#define MC_CMD_TSA_CONFIG_IN_APPEND_LENGTH_LEN 4 +/* The item data */ +#define MC_CMD_TSA_CONFIG_IN_APPEND_DATA_OFST 12 +#define MC_CMD_TSA_CONFIG_IN_APPEND_DATA_LEN 1 +#define MC_CMD_TSA_CONFIG_IN_APPEND_DATA_MINNUM 0 +#define MC_CMD_TSA_CONFIG_IN_APPEND_DATA_MAXNUM 240 +#define MC_CMD_TSA_CONFIG_IN_APPEND_DATA_MAXNUM_MCDI2 1008 + +/* MC_CMD_TSA_CONFIG_OUT_APPEND msgresponse */ +#define MC_CMD_TSA_CONFIG_OUT_APPEND_LEN 0 + +/* MC_CMD_TSA_CONFIG_IN_RESET msgrequest */ +#define MC_CMD_TSA_CONFIG_IN_RESET_LEN 4 +/* TSA configuration sub-operation code. The value shall be + * MC_CMD_TSA_CONFIG_OP_RESET. + */ +#define MC_CMD_TSA_CONFIG_IN_RESET_OP_OFST 0 +#define MC_CMD_TSA_CONFIG_IN_RESET_OP_LEN 4 + +/* MC_CMD_TSA_CONFIG_OUT_RESET msgresponse */ +#define MC_CMD_TSA_CONFIG_OUT_RESET_LEN 0 + +/* MC_CMD_TSA_CONFIG_IN_READ msgrequest */ +#define MC_CMD_TSA_CONFIG_IN_READ_LEN 8 +/* TSA configuration sub-operation code. The value shall be + * MC_CMD_TSA_CONFIG_OP_READ. + */ +#define MC_CMD_TSA_CONFIG_IN_READ_OP_OFST 0 +#define MC_CMD_TSA_CONFIG_IN_READ_OP_LEN 4 +/* The tag to be read */ +#define MC_CMD_TSA_CONFIG_IN_READ_TAG_OFST 4 +#define MC_CMD_TSA_CONFIG_IN_READ_TAG_LEN 4 + +/* MC_CMD_TSA_CONFIG_OUT_READ msgresponse */ +#define MC_CMD_TSA_CONFIG_OUT_READ_LENMIN 8 +#define MC_CMD_TSA_CONFIG_OUT_READ_LENMAX 252 +#define MC_CMD_TSA_CONFIG_OUT_READ_LENMAX_MCDI2 1020 +#define MC_CMD_TSA_CONFIG_OUT_READ_LEN(num) (8+1*(num)) +#define MC_CMD_TSA_CONFIG_OUT_READ_DATA_NUM(len) (((len)-8)/1) +/* The tag that was read */ +#define MC_CMD_TSA_CONFIG_OUT_READ_TAG_OFST 0 +#define MC_CMD_TSA_CONFIG_OUT_READ_TAG_LEN 4 +/* The length of the data in bytes */ +#define MC_CMD_TSA_CONFIG_OUT_READ_LENGTH_OFST 4 +#define MC_CMD_TSA_CONFIG_OUT_READ_LENGTH_LEN 4 +/* The data of the item. 
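+ * For example, reading back a 6-byte item yields LENGTH = 6 and a total
+ * response length of MC_CMD_TSA_CONFIG_OUT_READ_LEN(6) = 14 bytes.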
*/ +#define MC_CMD_TSA_CONFIG_OUT_READ_DATA_OFST 8 +#define MC_CMD_TSA_CONFIG_OUT_READ_DATA_LEN 1 +#define MC_CMD_TSA_CONFIG_OUT_READ_DATA_MINNUM 0 +#define MC_CMD_TSA_CONFIG_OUT_READ_DATA_MAXNUM 244 +#define MC_CMD_TSA_CONFIG_OUT_READ_DATA_MAXNUM_MCDI2 1012 + +/* MC_TSA_IPV4_ITEM structuredef */ +#define MC_TSA_IPV4_ITEM_LEN 8 +/* Additional metadata describing the IP address information such as the + * physical port number the address is being used on. Unused space in this + * field is reserved for future expansion. + */ +#define MC_TSA_IPV4_ITEM_IPV4_ADDR_META_OFST 0 +#define MC_TSA_IPV4_ITEM_IPV4_ADDR_META_LEN 4 +#define MC_TSA_IPV4_ITEM_PORT_IDX_OFST 0 +#define MC_TSA_IPV4_ITEM_PORT_IDX_LBN 0 +#define MC_TSA_IPV4_ITEM_PORT_IDX_WIDTH 8 +#define MC_TSA_IPV4_ITEM_IPV4_ADDR_META_LBN 0 +#define MC_TSA_IPV4_ITEM_IPV4_ADDR_META_WIDTH 32 +/* The IPv4 address in little endian byte order. */ +#define MC_TSA_IPV4_ITEM_IPV4_ADDR_OFST 4 +#define MC_TSA_IPV4_ITEM_IPV4_ADDR_LEN 4 +#define MC_TSA_IPV4_ITEM_IPV4_ADDR_LBN 32 +#define MC_TSA_IPV4_ITEM_IPV4_ADDR_WIDTH 32 + + +/***********************************/ +/* MC_CMD_TSA_IPADDR + * TSA operations relating to the monitoring and expiry of local IP addresses + * discovered by the controller. These commands are sent from a TSA controller + * to a TSA adapter. + */ +#define MC_CMD_TSA_IPADDR 0x65 +#undef MC_CMD_0x65_PRIVILEGE_CTG + +#define MC_CMD_0x65_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND + +/* MC_CMD_TSA_IPADDR_IN msgrequest */ +#define MC_CMD_TSA_IPADDR_IN_LEN 4 +/* Header containing information to identify which sub-operation of this + * command to perform. The header contains a 16-bit op-code. Unused space in + * this field is reserved for future expansion. + */ +#define MC_CMD_TSA_IPADDR_IN_OP_HDR_OFST 0 +#define MC_CMD_TSA_IPADDR_IN_OP_HDR_LEN 4 +#define MC_CMD_TSA_IPADDR_IN_OP_OFST 0 +#define MC_CMD_TSA_IPADDR_IN_OP_LBN 0 +#define MC_CMD_TSA_IPADDR_IN_OP_WIDTH 16 +/* enum: Request that the adapter verifies that the IPv4 addresses supplied are + * still in use by the host by sending ARP probes to the host. The MC does not + * wait for a response to the probes and sends an MCDI response to the + * controller once the probes have been sent to the host. The response to the + * probes (if there are any) will be forwarded to the controller using + * MC_CMD_TSA_INFO alerts. + */ +#define MC_CMD_TSA_IPADDR_OP_VALIDATE_IPV4 0x1 +/* enum: Notify the adapter that one or more IPv4 addresses are no longer valid + * for the host of the adapter. The adapter should remove the IPv4 addresses + * from its local cache. + */ +#define MC_CMD_TSA_IPADDR_OP_REMOVE_IPV4 0x2 + +/* MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4 msgrequest */ +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_LENMIN 16 +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_LENMAX 248 +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_LENMAX_MCDI2 1016 +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_LEN(num) (8+8*(num)) +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_IPV4_ITEM_NUM(len) (((len)-8)/8) +/* Header containing information to identify which sub-operation of this + * command to perform. The header contains a 16-bit op-code. Unused space in + * this field is reserved for future expansion. + */ +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_OP_HDR_OFST 0 +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_OP_HDR_LEN 4 +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_OP_OFST 0 +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_OP_LBN 0 +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_OP_WIDTH 16 +/* Number of IPv4 addresses to validate. 
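+ * For example, validating the two addresses 192.0.2.1 and 192.0.2.2
+ * (each carried in an 8-byte MC_TSA_IPV4_ITEM) uses NUM_ITEMS = 2 and a
+ * total request length of MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_LEN(2) = 24
+ * bytes.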
*/ +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_NUM_ITEMS_OFST 4 +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_NUM_ITEMS_LEN 4 +/* The IPv4 addresses to validate, in struct MC_TSA_IPV4_ITEM format. */ +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_IPV4_ITEM_OFST 8 +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_IPV4_ITEM_LEN 8 +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_IPV4_ITEM_LO_OFST 8 +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_IPV4_ITEM_LO_LEN 4 +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_IPV4_ITEM_LO_LBN 64 +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_IPV4_ITEM_LO_WIDTH 32 +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_IPV4_ITEM_HI_OFST 12 +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_IPV4_ITEM_HI_LEN 4 +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_IPV4_ITEM_HI_LBN 96 +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_IPV4_ITEM_HI_WIDTH 32 +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_IPV4_ITEM_MINNUM 1 +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_IPV4_ITEM_MAXNUM 30 +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_IPV4_ITEM_MAXNUM_MCDI2 126 + +/* MC_CMD_TSA_IPADDR_OUT_VALIDATE_IPV4 msgresponse */ +#define MC_CMD_TSA_IPADDR_OUT_VALIDATE_IPV4_LEN 0 + +/* MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4 msgrequest */ +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_LENMIN 16 +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_LENMAX 248 +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_LENMAX_MCDI2 1016 +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_LEN(num) (8+8*(num)) +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_IPV4_ITEM_NUM(len) (((len)-8)/8) +/* Header containing information to identify which sub-operation of this + * command to perform. The header contains a 16-bit op-code. Unused space in + * this field is reserved for future expansion. + */ +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_OP_HDR_OFST 0 +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_OP_HDR_LEN 4 +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_OP_OFST 0 +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_OP_LBN 0 +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_OP_WIDTH 16 +/* Number of IPv4 addresses to remove. */ +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_NUM_ITEMS_OFST 4 +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_NUM_ITEMS_LEN 4 +/* The IPv4 addresses that have expired, in struct MC_TSA_IPV4_ITEM format. */ +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_IPV4_ITEM_OFST 8 +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_IPV4_ITEM_LEN 8 +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_IPV4_ITEM_LO_OFST 8 +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_IPV4_ITEM_LO_LEN 4 +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_IPV4_ITEM_LO_LBN 64 +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_IPV4_ITEM_LO_WIDTH 32 +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_IPV4_ITEM_HI_OFST 12 +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_IPV4_ITEM_HI_LEN 4 +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_IPV4_ITEM_HI_LBN 96 +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_IPV4_ITEM_HI_WIDTH 32 +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_IPV4_ITEM_MINNUM 1 +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_IPV4_ITEM_MAXNUM 30 +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_IPV4_ITEM_MAXNUM_MCDI2 126 + +/* MC_CMD_TSA_IPADDR_OUT_REMOVE_IPV4 msgresponse */ +#define MC_CMD_TSA_IPADDR_OUT_REMOVE_IPV4_LEN 0 + + +/***********************************/ +/* MC_CMD_SECURE_NIC_INFO + * Get secure NIC information. While many of the features reported by these + * commands are related to TSA, they must be supported in firmware where TSA is + * disabled. 
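+ *
+ * An illustrative verification flow (a sketch, not normative): the host
+ * generates a random 16-byte CHALLENGE, issues
+ * MC_CMD_SECURE_NIC_INFO_IN_STATUS, checks that the CHALLENGE echoed in
+ * the response matches, and then validates MSG_SIGNATURE against a
+ * certificate chain retrieved with MC_CMD_GET_CERTIFICATE.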
+ */ +#define MC_CMD_SECURE_NIC_INFO 0x132 +#undef MC_CMD_0x132_PRIVILEGE_CTG + +#define MC_CMD_0x132_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_SECURE_NIC_INFO_IN msgrequest */ +#define MC_CMD_SECURE_NIC_INFO_IN_LEN 4 +/* sub-operation code info */ +#define MC_CMD_SECURE_NIC_INFO_IN_OP_HDR_OFST 0 +#define MC_CMD_SECURE_NIC_INFO_IN_OP_HDR_LEN 4 +#define MC_CMD_SECURE_NIC_INFO_IN_OP_OFST 0 +#define MC_CMD_SECURE_NIC_INFO_IN_OP_LBN 0 +#define MC_CMD_SECURE_NIC_INFO_IN_OP_WIDTH 16 +/* enum: Get the status of various security settings, all signed along with a + * challenge chosen by the host. + */ +#define MC_CMD_SECURE_NIC_INFO_OP_STATUS 0x0 + +/* MC_CMD_SECURE_NIC_INFO_IN_STATUS msgrequest */ +#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_LEN 24 +/* sub-operation code, must be MC_CMD_SECURE_NIC_INFO_OP_STATUS */ +#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_OP_HDR_OFST 0 +#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_OP_HDR_LEN 4 +/* Type of key to be used to sign response. */ +#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_KEY_TYPE_OFST 4 +#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_KEY_TYPE_LEN 4 +#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_UNUSED 0x0 /* enum */ +/* enum: Solarflare adapter authentication key, installed by Manftest. */ +#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_SF_ADAPTER_AUTH 0x1 +/* enum: TSA binding key, installed after adapter is bound to a TSA controller. + * This is not supported in firmware which does not support TSA. + */ +#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_TSA_BINDING 0x2 +/* enum: Customer adapter authentication key. Installed by the customer in the + * field, but otherwise similar to the Solarflare adapter authentication key. + */ +#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_CUSTOMER_ADAPTER_AUTH 0x3 +/* Random challenge generated by the host. */ +#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_CHALLENGE_OFST 8 +#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_CHALLENGE_LEN 16 + +/* MC_CMD_SECURE_NIC_INFO_OUT_STATUS msgresponse */ +#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_LEN 420 +/* Length of the signature in MSG_SIGNATURE. */ +#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_MSG_SIGNATURE_LEN_OFST 0 +#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_MSG_SIGNATURE_LEN_LEN 4 +/* Signature over the message, starting at MESSAGE_TYPE and continuing to the + * end of the MCDI response, allowing the message format to be extended. The + * signature uses ECDSA 384 encoding in ASN.1 format. It has variable length, + * with a maximum of 384 bytes. + */ +#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_MSG_SIGNATURE_OFST 4 +#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_MSG_SIGNATURE_LEN 384 +/* Enum value indicating the type of response. This protects against chosen + * message attacks. The enum values are random rather than sequential to make + * it unlikely that values will be reused should other commands in a different + * namespace need to create signed messages. + */ +#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_MESSAGE_TYPE_OFST 388 +#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_MESSAGE_TYPE_LEN 4 +/* enum: Message type value for the response to a + * MC_CMD_SECURE_NIC_INFO_IN_STATUS message. + */ +#define MC_CMD_SECURE_NIC_INFO_STATUS 0xdb4 +/* The challenge provided by the host in the MC_CMD_SECURE_NIC_INFO_IN_STATUS + * message + */ +#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_CHALLENGE_OFST 392 +#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_CHALLENGE_LEN 16 +/* The first 32 bits of XPM memory, which include security and flag bits, die + * ID and chip ID revision. 
The meaning of these bits is defined in
+ * mc/include/mc/xpm.h in the firmwaresrc repository.
+ */
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_XPM_STATUS_BITS_OFST 408
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_XPM_STATUS_BITS_LEN 4
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_FIRMWARE_VERSION_A_OFST 412
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_FIRMWARE_VERSION_A_LEN 2
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_FIRMWARE_VERSION_B_OFST 414
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_FIRMWARE_VERSION_B_LEN 2
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_FIRMWARE_VERSION_C_OFST 416
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_FIRMWARE_VERSION_C_LEN 2
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_FIRMWARE_VERSION_D_OFST 418
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_FIRMWARE_VERSION_D_LEN 2
+
+
+/***********************************/
+/* MC_CMD_TSA_TEST
+ * A simple ping-pong command just to test the adapter<>controller MCDI
+ * communication channel. This command makes no changes to the TSA adapter's
+ * internal state. It is used by the controller just to verify that the MCDI
+ * communication channel is working fine. This command takes no additional
+ * parameters in request or response.
+ */
+#define MC_CMD_TSA_TEST 0x125
+#undef MC_CMD_0x125_PRIVILEGE_CTG
+
+#define MC_CMD_0x125_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_TSA_TEST_IN msgrequest */
+#define MC_CMD_TSA_TEST_IN_LEN 0
+
+/* MC_CMD_TSA_TEST_OUT msgresponse */
+#define MC_CMD_TSA_TEST_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_TSA_RULESET_OVERRIDE
+ * Override the TSA ruleset that is currently active on the adapter. This
+ * operation does not modify the ruleset itself. This operation provides a
+ * mechanism to apply an allow-all or deny-all operation on all packets,
+ * thereby completely ignoring the rule-set configured on the adapter. The main
+ * purpose of this operation is to provide a deterministic state to the TSA
+ * firewall during rule-set transitions.
+ */
+#define MC_CMD_TSA_RULESET_OVERRIDE 0x12a
+#undef MC_CMD_0x12a_PRIVILEGE_CTG
+
+#define MC_CMD_0x12a_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_TSA_RULESET_OVERRIDE_IN msgrequest */
+#define MC_CMD_TSA_RULESET_OVERRIDE_IN_LEN 4
+/* The override state to apply. */
+#define MC_CMD_TSA_RULESET_OVERRIDE_IN_STATE_OFST 0
+#define MC_CMD_TSA_RULESET_OVERRIDE_IN_STATE_LEN 4
+/* enum: No override in place - the existing ruleset is in operation. */
+#define MC_CMD_TSA_RULESET_OVERRIDE_NONE 0x0
+/* enum: Block all packets seen on all datapath channels except those packets
+ * required for basic configuration of the TSA NIC such as ARPs and TSA-
+ * communication traffic. Such exceptional traffic is handled differently
+ * compared to TSA rulesets.
+ */
+#define MC_CMD_TSA_RULESET_OVERRIDE_BLOCK 0x1
+/* enum: Allow all packets through all datapath channels. The TSA adapter
+ * behaves like a normal NIC without any firewalls.
+ */
+#define MC_CMD_TSA_RULESET_OVERRIDE_ALLOW 0x2
+
+/* MC_CMD_TSA_RULESET_OVERRIDE_OUT msgresponse */
+#define MC_CMD_TSA_RULESET_OVERRIDE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_TSAC_REQUEST
+ * Generic command to send requests from a TSA controller to a TSA adapter.
+ * Specific usage is determined by the TYPE field.
+ */
+#define MC_CMD_TSAC_REQUEST 0x12b
+#undef MC_CMD_0x12b_PRIVILEGE_CTG
+
+#define MC_CMD_0x12b_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_TSAC_REQUEST_IN msgrequest */
+#define MC_CMD_TSAC_REQUEST_IN_LEN 4
+/* The type of request from the controller.
*/
+#define MC_CMD_TSAC_REQUEST_IN_TYPE_OFST 0
+#define MC_CMD_TSAC_REQUEST_IN_TYPE_LEN 4
+/* enum: Request the adapter to resend localIP information from its cache. The
+ * command does not return any IP address information; IP addresses are sent as
+ * TSA notifications as described in MC_CMD_TSA_INFO_IN_LOCAL_IP.
+ */
+#define MC_CMD_TSAC_REQUEST_LOCALIP 0x0
+
+/* MC_CMD_TSAC_REQUEST_OUT msgresponse */
+#define MC_CMD_TSAC_REQUEST_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SUC_VERSION
+ * Get the version of the SUC
+ */
+#define MC_CMD_SUC_VERSION 0x134
+#undef MC_CMD_0x134_PRIVILEGE_CTG
+
+#define MC_CMD_0x134_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SUC_VERSION_IN msgrequest */
+#define MC_CMD_SUC_VERSION_IN_LEN 0
+
+/* MC_CMD_SUC_VERSION_OUT msgresponse */
+#define MC_CMD_SUC_VERSION_OUT_LEN 24
+/* The SUC firmware version as four numbers - a.b.c.d */
+#define MC_CMD_SUC_VERSION_OUT_VERSION_OFST 0
+#define MC_CMD_SUC_VERSION_OUT_VERSION_LEN 4
+#define MC_CMD_SUC_VERSION_OUT_VERSION_NUM 4
+/* The date, in seconds since the Unix epoch, when the firmware image was
+ * built.
+ */
+#define MC_CMD_SUC_VERSION_OUT_BUILD_DATE_OFST 16
+#define MC_CMD_SUC_VERSION_OUT_BUILD_DATE_LEN 4
+/* The ID of the SUC chip. This is specific to the platform but typically
+ * indicates family, memory sizes etc. See SF-116728-SW for further details.
+ */
+#define MC_CMD_SUC_VERSION_OUT_CHIP_ID_OFST 20
+#define MC_CMD_SUC_VERSION_OUT_CHIP_ID_LEN 4
+
+/* MC_CMD_SUC_BOOT_VERSION_IN msgrequest: Get the version of the SUC boot
+ * loader.
+ */
+#define MC_CMD_SUC_BOOT_VERSION_IN_LEN 4
+#define MC_CMD_SUC_BOOT_VERSION_IN_MAGIC_OFST 0
+#define MC_CMD_SUC_BOOT_VERSION_IN_MAGIC_LEN 4
+/* enum: Requests the SUC boot version. */
+#define MC_CMD_SUC_VERSION_GET_BOOT_VERSION 0xb007700b
+
+/* MC_CMD_SUC_BOOT_VERSION_OUT msgresponse */
+#define MC_CMD_SUC_BOOT_VERSION_OUT_LEN 4
+/* The SUC boot version */
+#define MC_CMD_SUC_BOOT_VERSION_OUT_VERSION_OFST 0
+#define MC_CMD_SUC_BOOT_VERSION_OUT_VERSION_LEN 4
+
+
+/***********************************/
+/* MC_CMD_SUC_MANFTEST
+ * Operations to support manftest on SUC based systems.
+ */
+#define MC_CMD_SUC_MANFTEST 0x135
+#undef MC_CMD_0x135_PRIVILEGE_CTG
+
+#define MC_CMD_0x135_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_SUC_MANFTEST_IN msgrequest */
+#define MC_CMD_SUC_MANFTEST_IN_LEN 4
+/* The manftest operation to be performed. */
+#define MC_CMD_SUC_MANFTEST_IN_OP_OFST 0
+#define MC_CMD_SUC_MANFTEST_IN_OP_LEN 4
+/* enum: Read serial number and use count. */
+#define MC_CMD_SUC_MANFTEST_WEAROUT_READ 0x0
+/* enum: Update use count on wearout adapter. */
+#define MC_CMD_SUC_MANFTEST_WEAROUT_UPDATE 0x1
+/* enum: Start an ADC calibration. */
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_START 0x2
+/* enum: Read the status of an ADC calibration. */
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS 0x3
+/* enum: Read the results of an ADC calibration. */
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT 0x4
+/* enum: Read the PCIe configuration. */
+#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ 0x5
+/* enum: Write the PCIe configuration. */
+#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE 0x6
+/* enum: Write FRU information to SUC. The FRU information is taken from the
+ * FRU_INFORMATION partition. Attempts to write to read-only FRUs are rejected.
+ */
+#define MC_CMD_SUC_MANFTEST_FRU_WRITE 0x7
+/* enum: Read UDID Vendor Specific ID from SUC persistent storage.
*/ +#define MC_CMD_SUC_MANFTEST_SMBUS_ID_READ 0x8 +/* enum: Write UDID Vendor Specific ID to SUC persistent storage for use in + * SMBus ARP. + */ +#define MC_CMD_SUC_MANFTEST_SMBUS_ID_WRITE 0x9 + +/* MC_CMD_SUC_MANFTEST_OUT msgresponse */ +#define MC_CMD_SUC_MANFTEST_OUT_LEN 0 + +/* MC_CMD_SUC_MANFTEST_WEAROUT_READ_IN msgrequest */ +#define MC_CMD_SUC_MANFTEST_WEAROUT_READ_IN_LEN 4 +/* The manftest operation to be performed. This must be + * MC_CMD_SUC_MANFTEST_WEAROUT_READ. + */ +#define MC_CMD_SUC_MANFTEST_WEAROUT_READ_IN_OP_OFST 0 +#define MC_CMD_SUC_MANFTEST_WEAROUT_READ_IN_OP_LEN 4 + +/* MC_CMD_SUC_MANFTEST_WEAROUT_READ_OUT msgresponse */ +#define MC_CMD_SUC_MANFTEST_WEAROUT_READ_OUT_LEN 20 +/* The serial number of the wearout adapter, see SF-112717-PR for format. */ +#define MC_CMD_SUC_MANFTEST_WEAROUT_READ_OUT_SERIAL_NUMBER_OFST 0 +#define MC_CMD_SUC_MANFTEST_WEAROUT_READ_OUT_SERIAL_NUMBER_LEN 16 +/* The use count of the wearout adapter. */ +#define MC_CMD_SUC_MANFTEST_WEAROUT_READ_OUT_USE_COUNT_OFST 16 +#define MC_CMD_SUC_MANFTEST_WEAROUT_READ_OUT_USE_COUNT_LEN 4 + +/* MC_CMD_SUC_MANFTEST_WEAROUT_UPDATE_IN msgrequest */ +#define MC_CMD_SUC_MANFTEST_WEAROUT_UPDATE_IN_LEN 4 +/* The manftest operation to be performed. This must be + * MC_CMD_SUC_MANFTEST_WEAROUT_UPDATE. + */ +#define MC_CMD_SUC_MANFTEST_WEAROUT_UPDATE_IN_OP_OFST 0 +#define MC_CMD_SUC_MANFTEST_WEAROUT_UPDATE_IN_OP_LEN 4 + +/* MC_CMD_SUC_MANFTEST_WEAROUT_UPDATE_OUT msgresponse */ +#define MC_CMD_SUC_MANFTEST_WEAROUT_UPDATE_OUT_LEN 0 + +/* MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_START_IN msgrequest */ +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_START_IN_LEN 4 +/* The manftest operation to be performed. This must be + * MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_START. + */ +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_START_IN_OP_OFST 0 +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_START_IN_OP_LEN 4 + +/* MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_START_OUT msgresponse */ +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_START_OUT_LEN 0 + +/* MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_IN msgrequest */ +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_IN_LEN 4 +/* The manftest operation to be performed. This must be + * MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS. + */ +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_IN_OP_OFST 0 +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_IN_OP_LEN 4 + +/* MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT msgresponse */ +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_LEN 4 +/* The combined status of the calibration operation. 
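+ * For example, a FLAGS value of 0x45 decodes as CALIBRATING = 1,
+ * FAILED = 0, RESULT = 1 and INDEX = 1; a value of 0x2 means the
+ * calibration has finished and failed.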
*/ +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_FLAGS_OFST 0 +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_FLAGS_LEN 4 +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_CALIBRATING_OFST 0 +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_CALIBRATING_LBN 0 +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_CALIBRATING_WIDTH 1 +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_FAILED_OFST 0 +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_FAILED_LBN 1 +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_FAILED_WIDTH 1 +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_RESULT_OFST 0 +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_RESULT_LBN 2 +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_RESULT_WIDTH 4 +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_INDEX_OFST 0 +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_INDEX_LBN 6 +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_INDEX_WIDTH 2 + +/* MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT_IN msgrequest */ +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT_IN_LEN 4 +/* The manftest operation to be performed. This must be + * MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT. + */ +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT_IN_OP_OFST 0 +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT_IN_OP_LEN 4 + +/* MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT_OUT msgresponse */ +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT_OUT_LEN 12 +/* The set of calibration results. */ +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT_OUT_VALUE_OFST 0 +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT_OUT_VALUE_LEN 4 +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT_OUT_VALUE_NUM 3 + +/* MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_IN msgrequest */ +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_IN_LEN 4 +/* The manftest operation to be performed. This must be + * MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ. + */ +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_IN_OP_OFST 0 +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_IN_OP_LEN 4 + +/* MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_OUT msgresponse */ +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_OUT_LEN 4 +/* The PCIe vendor ID. */ +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_OUT_VENDOR_ID_OFST 0 +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_OUT_VENDOR_ID_LEN 2 +/* The PCIe device ID. */ +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_OUT_DEVICE_ID_OFST 2 +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_OUT_DEVICE_ID_LEN 2 + +/* MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_IN msgrequest */ +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_IN_LEN 8 +/* The manftest operation to be performed. This must be + * MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE. + */ +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_IN_OP_OFST 0 +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_IN_OP_LEN 4 +/* The PCIe vendor ID. */ +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_IN_VENDOR_ID_OFST 4 +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_IN_VENDOR_ID_LEN 2 +/* The PCIe device ID. */ +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_IN_DEVICE_ID_OFST 6 +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_IN_DEVICE_ID_LEN 2 + +/* MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_OUT msgresponse */ +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_OUT_LEN 0 + +/* MC_CMD_SUC_MANFTEST_FRU_WRITE_IN msgrequest */ +#define MC_CMD_SUC_MANFTEST_FRU_WRITE_IN_LEN 4 +/* The manftest operation to be performed. 
This must be
+ * MC_CMD_SUC_MANFTEST_FRU_WRITE.
+ */
+#define MC_CMD_SUC_MANFTEST_FRU_WRITE_IN_OP_OFST 0
+#define MC_CMD_SUC_MANFTEST_FRU_WRITE_IN_OP_LEN 4
+
+/* MC_CMD_SUC_MANFTEST_FRU_WRITE_OUT msgresponse */
+#define MC_CMD_SUC_MANFTEST_FRU_WRITE_OUT_LEN 0
+
+/* MC_CMD_SUC_MANFTEST_SMBUS_ID_READ_IN msgrequest */
+#define MC_CMD_SUC_MANFTEST_SMBUS_ID_READ_IN_LEN 4
+/* The manftest operation to be performed. This must be
+ * MC_CMD_SUC_MANFTEST_SMBUS_ID_READ.
+ */
+#define MC_CMD_SUC_MANFTEST_SMBUS_ID_READ_IN_OP_OFST 0
+#define MC_CMD_SUC_MANFTEST_SMBUS_ID_READ_IN_OP_LEN 4
+
+/* MC_CMD_SUC_MANFTEST_SMBUS_ID_READ_OUT msgresponse */
+#define MC_CMD_SUC_MANFTEST_SMBUS_ID_READ_OUT_LEN 4
+/* The SMBus ID. */
+#define MC_CMD_SUC_MANFTEST_SMBUS_ID_READ_OUT_SMBUS_ID_OFST 0
+#define MC_CMD_SUC_MANFTEST_SMBUS_ID_READ_OUT_SMBUS_ID_LEN 4
+
+/* MC_CMD_SUC_MANFTEST_SMBUS_ID_WRITE_IN msgrequest */
+#define MC_CMD_SUC_MANFTEST_SMBUS_ID_WRITE_IN_LEN 8
+/* The manftest operation to be performed. This must be
+ * MC_CMD_SUC_MANFTEST_SMBUS_ID_WRITE.
+ */
+#define MC_CMD_SUC_MANFTEST_SMBUS_ID_WRITE_IN_OP_OFST 0
+#define MC_CMD_SUC_MANFTEST_SMBUS_ID_WRITE_IN_OP_LEN 4
+/* The SMBus ID. */
+#define MC_CMD_SUC_MANFTEST_SMBUS_ID_WRITE_IN_SMBUS_ID_OFST 4
+#define MC_CMD_SUC_MANFTEST_SMBUS_ID_WRITE_IN_SMBUS_ID_LEN 4
+
+/* MC_CMD_SUC_MANFTEST_SMBUS_ID_WRITE_OUT msgresponse */
+#define MC_CMD_SUC_MANFTEST_SMBUS_ID_WRITE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_CERTIFICATE
+ * Request a certificate.
+ */
+#define MC_CMD_GET_CERTIFICATE 0x12c
+#undef MC_CMD_0x12c_PRIVILEGE_CTG
+
+#define MC_CMD_0x12c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_CERTIFICATE_IN msgrequest */
+#define MC_CMD_GET_CERTIFICATE_IN_LEN 8
+/* Type of the certificate to be retrieved. */
+#define MC_CMD_GET_CERTIFICATE_IN_TYPE_OFST 0
+#define MC_CMD_GET_CERTIFICATE_IN_TYPE_LEN 4
+#define MC_CMD_GET_CERTIFICATE_IN_UNUSED 0x0 /* enum */
+#define MC_CMD_GET_CERTIFICATE_IN_AAC 0x1 /* enum */
+/* enum: Adapter Authentication Certificate (AAC). The AAC is unique to each
+ * adapter and is used to verify its authenticity. It is installed by Manftest.
+ */
+#define MC_CMD_GET_CERTIFICATE_IN_ADAPTER_AUTH 0x1
+#define MC_CMD_GET_CERTIFICATE_IN_AASC 0x2 /* enum */
+/* enum: Adapter Authentication Signing Certificate (AASC). The AASC is shared
+ * by a group of adapters (typically a purchase order) and is used to verify
+ * the validity of the AAC along with the SF root certificate. It is installed
+ * by Manftest.
+ */
+#define MC_CMD_GET_CERTIFICATE_IN_ADAPTER_AUTH_SIGNING 0x2
+#define MC_CMD_GET_CERTIFICATE_IN_CUSTOMER_AAC 0x3 /* enum */
+/* enum: Customer Adapter Authentication Certificate (Customer AAC). The
+ * Customer AAC is unique to each adapter and is used to verify its
+ * authenticity in cases where either the AAC is not installed or a customer
+ * desires to use their own certificate chain. It is installed by the customer.
+ */
+#define MC_CMD_GET_CERTIFICATE_IN_CUSTOMER_ADAPTER_AUTH 0x3
+#define MC_CMD_GET_CERTIFICATE_IN_CUSTOMER_AASC 0x4 /* enum */
+/* enum: Customer Adapter Authentication Signing Certificate (Customer AASC).
+ * The Customer AASC is shared by a group of adapters and is used to verify the
+ * validity of the Customer AAC along with the customer's root certificate. It
+ * is installed by the customer.
+ */
+#define MC_CMD_GET_CERTIFICATE_IN_CUSTOMER_ADAPTER_AUTH_SIGNING 0x4
+/* Offset, measured in bytes, relative to the start of the certificate data
+ * from which the certificate is to be retrieved.
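+ *
+ * A certificate may be larger than a single MCDI response, so a caller
+ * can fetch it in chunks. An illustrative sketch only (assuming the
+ * usual MCDI helpers from mcdi.h):
+ *
+ *	offset = 0;
+ *	do {
+ *		MCDI_SET_DWORD(inbuf, GET_CERTIFICATE_IN_TYPE, type);
+ *		MCDI_SET_DWORD(inbuf, GET_CERTIFICATE_IN_OFFSET, offset);
+ *		(issue MC_CMD_GET_CERTIFICATE and append the DATA bytes)
+ *		offset += MC_CMD_GET_CERTIFICATE_OUT_DATA_NUM(outlen);
+ *	} while (offset < MCDI_DWORD(outbuf, GET_CERTIFICATE_OUT_TOTAL_LENGTH));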
+ */ +#define MC_CMD_GET_CERTIFICATE_IN_OFFSET_OFST 4 +#define MC_CMD_GET_CERTIFICATE_IN_OFFSET_LEN 4 + +/* MC_CMD_GET_CERTIFICATE_OUT msgresponse */ +#define MC_CMD_GET_CERTIFICATE_OUT_LENMIN 13 +#define MC_CMD_GET_CERTIFICATE_OUT_LENMAX 252 +#define MC_CMD_GET_CERTIFICATE_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_GET_CERTIFICATE_OUT_LEN(num) (12+1*(num)) +#define MC_CMD_GET_CERTIFICATE_OUT_DATA_NUM(len) (((len)-12)/1) +/* Type of the certificate. */ +#define MC_CMD_GET_CERTIFICATE_OUT_TYPE_OFST 0 +#define MC_CMD_GET_CERTIFICATE_OUT_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_GET_CERTIFICATE_IN/TYPE */ +/* Offset, measured in bytes, relative to the start of the certificate data + * from which data in this message starts. + */ +#define MC_CMD_GET_CERTIFICATE_OUT_OFFSET_OFST 4 +#define MC_CMD_GET_CERTIFICATE_OUT_OFFSET_LEN 4 +/* Total length of the certificate data. */ +#define MC_CMD_GET_CERTIFICATE_OUT_TOTAL_LENGTH_OFST 8 +#define MC_CMD_GET_CERTIFICATE_OUT_TOTAL_LENGTH_LEN 4 +/* The certificate data. */ +#define MC_CMD_GET_CERTIFICATE_OUT_DATA_OFST 12 +#define MC_CMD_GET_CERTIFICATE_OUT_DATA_LEN 1 +#define MC_CMD_GET_CERTIFICATE_OUT_DATA_MINNUM 1 +#define MC_CMD_GET_CERTIFICATE_OUT_DATA_MAXNUM 240 +#define MC_CMD_GET_CERTIFICATE_OUT_DATA_MAXNUM_MCDI2 1008 + + +/***********************************/ +/* MC_CMD_GET_NIC_GLOBAL + * Get a global value which applies to all PCI functions + */ +#define MC_CMD_GET_NIC_GLOBAL 0x12d +#undef MC_CMD_0x12d_PRIVILEGE_CTG + +#define MC_CMD_0x12d_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_NIC_GLOBAL_IN msgrequest */ +#define MC_CMD_GET_NIC_GLOBAL_IN_LEN 4 +/* Key to request value for, see enum values in MC_CMD_SET_NIC_GLOBAL. If the + * given key is unknown to the current firmware, the call will fail with + * ENOENT. + */ +#define MC_CMD_GET_NIC_GLOBAL_IN_KEY_OFST 0 +#define MC_CMD_GET_NIC_GLOBAL_IN_KEY_LEN 4 + +/* MC_CMD_GET_NIC_GLOBAL_OUT msgresponse */ +#define MC_CMD_GET_NIC_GLOBAL_OUT_LEN 4 +/* Value of requested key, see key descriptions below. */ +#define MC_CMD_GET_NIC_GLOBAL_OUT_VALUE_OFST 0 +#define MC_CMD_GET_NIC_GLOBAL_OUT_VALUE_LEN 4 + + +/***********************************/ +/* MC_CMD_SET_NIC_GLOBAL + * Set a global value which applies to all PCI functions. Most global values + * can only be changed under specific conditions, and this call will return an + * appropriate error otherwise (see key descriptions). + */ +#define MC_CMD_SET_NIC_GLOBAL 0x12e +#undef MC_CMD_0x12e_PRIVILEGE_CTG + +#define MC_CMD_0x12e_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_SET_NIC_GLOBAL_IN msgrequest */ +#define MC_CMD_SET_NIC_GLOBAL_IN_LEN 8 +/* Key to change value of. Firmware will return ENOENT for keys it doesn't know + * about. + */ +#define MC_CMD_SET_NIC_GLOBAL_IN_KEY_OFST 0 +#define MC_CMD_SET_NIC_GLOBAL_IN_KEY_LEN 4 +/* enum: Request switching the datapath firmware sub-variant. Currently only + * useful when running the DPDK f/w variant. See key values below, and the DPDK + * section of the EF10 Driver Writers Guide. Note that any driver attaching + * with the SUBVARIANT_AWARE flag cleared is implicitly considered as a request + * to switch back to the default sub-variant, and will thus reset this value. + * If a sub-variant switch happens, all other PCI functions will get their + * resources reset (they will see an MC reboot). + */ +#define MC_CMD_SET_NIC_GLOBAL_IN_FIRMWARE_SUBVARIANT 0x1 +/* New value to set, see key descriptions above. 
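+ * For illustration only (assuming the usual MCDI helpers from mcdi.h), a
+ * request to select the no-TX-checksum sub-variant might be built as:
+ *
+ *	MCDI_SET_DWORD(inbuf, SET_NIC_GLOBAL_IN_KEY,
+ *		       MC_CMD_SET_NIC_GLOBAL_IN_FIRMWARE_SUBVARIANT);
+ *	MCDI_SET_DWORD(inbuf, SET_NIC_GLOBAL_IN_VALUE,
+ *		       MC_CMD_SET_NIC_GLOBAL_IN_FW_SUBVARIANT_NO_TX_CSUM);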
*/
+#define MC_CMD_SET_NIC_GLOBAL_IN_VALUE_OFST 4
+#define MC_CMD_SET_NIC_GLOBAL_IN_VALUE_LEN 4
+/* enum: Only if KEY = FIRMWARE_SUBVARIANT. Default sub-variant with support
+ * for maximum features for the current f/w variant. A request from a
+ * privileged function to set this particular value will always succeed.
+ */
+#define MC_CMD_SET_NIC_GLOBAL_IN_FW_SUBVARIANT_DEFAULT 0x0
+/* enum: Only if KEY = FIRMWARE_SUBVARIANT. Increases packet rate at the cost
+ * of not supporting any TX checksum offloads. Only supported when running some
+ * f/w variants, others will return ENOTSUP (as reported by the homonymous bit
+ * in MC_CMD_GET_CAPABILITIES_V2). Can only be set when no other drivers are
+ * attached, and the calling driver must have no resources allocated. See the
+ * DPDK section of the EF10 Driver Writers Guide for a more detailed
+ * description with possible error codes.
+ */
+#define MC_CMD_SET_NIC_GLOBAL_IN_FW_SUBVARIANT_NO_TX_CSUM 0x1
+
+
+/***********************************/
+/* MC_CMD_LTSSM_TRACE_POLL
+ * Medford2 hardware has support for logging all LTSSM state transitions to a
+ * hardware buffer. When built with WITH_LTSSM_TRACE=1, the firmware will
+ * periodically dump the contents of this hardware buffer to an internal
+ * firmware buffer for later extraction.
+ */
+#define MC_CMD_LTSSM_TRACE_POLL 0x12f
+#undef MC_CMD_0x12f_PRIVILEGE_CTG
+
+#define MC_CMD_0x12f_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_LTSSM_TRACE_POLL_IN msgrequest: Read transitions from the firmware
+ * internal buffer.
+ */
+#define MC_CMD_LTSSM_TRACE_POLL_IN_LEN 4
+/* The maximum number of rows that the caller can accept. The format of each
+ * row is defined in MC_CMD_LTSSM_TRACE_POLL_OUT.
+ */
+#define MC_CMD_LTSSM_TRACE_POLL_IN_MAX_ROW_COUNT_OFST 0
+#define MC_CMD_LTSSM_TRACE_POLL_IN_MAX_ROW_COUNT_LEN 4
+
+/* MC_CMD_LTSSM_TRACE_POLL_OUT msgresponse */
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_LENMIN 16
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_LENMAX 248
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_LENMAX_MCDI2 1016
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_LEN(num) (8+8*(num))
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_ROWS_NUM(len) (((len)-8)/8)
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_FLAGS_OFST 0
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_FLAGS_LEN 4
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_HW_BUFFER_OVERFLOW_OFST 0
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_HW_BUFFER_OVERFLOW_LBN 0
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_HW_BUFFER_OVERFLOW_WIDTH 1
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_FW_BUFFER_OVERFLOW_OFST 0
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_FW_BUFFER_OVERFLOW_LBN 1
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_FW_BUFFER_OVERFLOW_WIDTH 1
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_CONTINUES_OFST 0
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_CONTINUES_LBN 31
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_CONTINUES_WIDTH 1
+/* The number of rows present in this response.
*/
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_ROW_COUNT_OFST 4
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_ROW_COUNT_LEN 4
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_ROWS_OFST 8
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_ROWS_LEN 8
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_ROWS_LO_OFST 8
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_ROWS_LO_LEN 4
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_ROWS_LO_LBN 64
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_ROWS_LO_WIDTH 32
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_ROWS_HI_OFST 12
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_ROWS_HI_LEN 4
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_ROWS_HI_LBN 96
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_ROWS_HI_WIDTH 32
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_ROWS_MINNUM 0
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_ROWS_MAXNUM 30
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_ROWS_MAXNUM_MCDI2 126
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_LTSSM_STATE_OFST 8
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_LTSSM_STATE_LBN 0
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_LTSSM_STATE_WIDTH 6
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_RDLH_LINK_UP_OFST 8
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_RDLH_LINK_UP_LBN 6
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_RDLH_LINK_UP_WIDTH 1
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_WAKE_N_OFST 8
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_WAKE_N_LBN 7
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_WAKE_N_WIDTH 1
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_TIMESTAMP_PS_OFST 8
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_TIMESTAMP_PS_LBN 8
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_TIMESTAMP_PS_WIDTH 24
+/* The time of the LTSSM transition. Times are reported as fractional
+ * microseconds since MC boot (wrapping at 2^32us). The fractional part is
+ * reported in picoseconds, with 0 <= TIMESTAMP_PS < 1000000. The timestamp in
+ * seconds is (TIMESTAMP_US + TIMESTAMP_PS / 1000000) / 1000000.
+ */
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_TIMESTAMP_US_OFST 12
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_TIMESTAMP_US_LEN 4
+
+
+/***********************************/
+/* MC_CMD_TELEMETRY_ENABLE
+ * This command enables telemetry processing of packets, allowing a remote host
+ * to gather information and analytics on traffic passing through the card.
+ * Enabling telemetry will have a performance cost. Not supported on all
+ * hardware and datapath variants. As of writing, only supported on Medford2
+ * running the full-featured firmware variant.
+ */
+#define MC_CMD_TELEMETRY_ENABLE 0x138
+#undef MC_CMD_0x138_PRIVILEGE_CTG
+
+#define MC_CMD_0x138_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_TELEMETRY_ENABLE_IN msgrequest */
+#define MC_CMD_TELEMETRY_ENABLE_IN_LEN 4
+#define MC_CMD_TELEMETRY_ENABLE_IN_STATE_OFST 0
+#define MC_CMD_TELEMETRY_ENABLE_IN_STATE_LEN 4
+/* enum: Disables telemetry functionality, returning the card to the default
+ * behaviour of the configured datapath variant.
+ */
+#define MC_CMD_TELEMETRY_ENABLE_IN_DISABLE 0x0
+/* enum: Enables telemetry functionality on the currently configured datapath
+ * variant if supported.
+ */
+#define MC_CMD_TELEMETRY_ENABLE_IN_ENABLE 0x1
+
+/* MC_CMD_TELEMETRY_ENABLE_OUT msgresponse */
+#define MC_CMD_TELEMETRY_ENABLE_OUT_LEN 0
+
+/* TELEMETRY_CONFIG structuredef */
+#define TELEMETRY_CONFIG_LEN 36
+/* Bitfields to identify the list of config parameters included in the command.
+ * A bit-value of 1 indicates that the relevant config parameter field is
+ * valid; 0 indicates invalid and the config parameter field must be ignored by
+ * firmware. Firmware may however apply some default values for certain
+ * parameters.
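+ *
+ * For example, a FLAGS value of 0x3 marks only METRICS_COLLECTOR_IP and
+ * METRICS_COLLECTOR_PORT as valid, leaving firmware free to apply its
+ * defaults for MONITOR_TIMEOUT_MS and MAX_METRICS_COUNT.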
+ */ +#define TELEMETRY_CONFIG_FLAGS_OFST 0 +#define TELEMETRY_CONFIG_FLAGS_LEN 4 +#define TELEMETRY_CONFIG_METRICS_COLLECTOR_IP_VALID_OFST 0 +#define TELEMETRY_CONFIG_METRICS_COLLECTOR_IP_VALID_LBN 0 +#define TELEMETRY_CONFIG_METRICS_COLLECTOR_IP_VALID_WIDTH 1 +#define TELEMETRY_CONFIG_METRICS_COLLECTOR_PORT_VALID_OFST 0 +#define TELEMETRY_CONFIG_METRICS_COLLECTOR_PORT_VALID_LBN 1 +#define TELEMETRY_CONFIG_METRICS_COLLECTOR_PORT_VALID_WIDTH 1 +#define TELEMETRY_CONFIG_MONITOR_TIMEOUT_MS_VALID_OFST 0 +#define TELEMETRY_CONFIG_MONITOR_TIMEOUT_MS_VALID_LBN 2 +#define TELEMETRY_CONFIG_MONITOR_TIMEOUT_MS_VALID_WIDTH 1 +#define TELEMETRY_CONFIG_MAX_METRICS_COUNT_VALID_OFST 0 +#define TELEMETRY_CONFIG_MAX_METRICS_COUNT_VALID_LBN 3 +#define TELEMETRY_CONFIG_MAX_METRICS_COUNT_VALID_WIDTH 1 +#define TELEMETRY_CONFIG_RESERVED1_OFST 0 +#define TELEMETRY_CONFIG_RESERVED1_LBN 4 +#define TELEMETRY_CONFIG_RESERVED1_WIDTH 28 +#define TELEMETRY_CONFIG_FLAGS_LBN 0 +#define TELEMETRY_CONFIG_FLAGS_WIDTH 32 +/* Collector IPv4/IPv6 address to which latency measurements are forwarded from + * the adapter (as bytes in network order; set last 12 bytes to 0 for IPv4 + * address). + */ +#define TELEMETRY_CONFIG_METRICS_COLLECTOR_IP_OFST 4 +#define TELEMETRY_CONFIG_METRICS_COLLECTOR_IP_LEN 16 +#define TELEMETRY_CONFIG_METRICS_COLLECTOR_IP_LBN 32 +#define TELEMETRY_CONFIG_METRICS_COLLECTOR_IP_WIDTH 128 +/* Collector Port number to which latency measurements are forwarded from the + * adapter (as bytes in network order). + */ +#define TELEMETRY_CONFIG_METRICS_COLLECTOR_PORT_OFST 20 +#define TELEMETRY_CONFIG_METRICS_COLLECTOR_PORT_LEN 2 +#define TELEMETRY_CONFIG_METRICS_COLLECTOR_PORT_LBN 160 +#define TELEMETRY_CONFIG_METRICS_COLLECTOR_PORT_WIDTH 16 +/* Unused - set to 0. */ +#define TELEMETRY_CONFIG_RESERVED2_OFST 22 +#define TELEMETRY_CONFIG_RESERVED2_LEN 2 +#define TELEMETRY_CONFIG_RESERVED2_LBN 176 +#define TELEMETRY_CONFIG_RESERVED2_WIDTH 16 +/* MAC address of the collector (as bytes in network order). */ +#define TELEMETRY_CONFIG_METRICS_COLLECTOR_MAC_ADDR_OFST 24 +#define TELEMETRY_CONFIG_METRICS_COLLECTOR_MAC_ADDR_LEN 6 +#define TELEMETRY_CONFIG_METRICS_COLLECTOR_MAC_ADDR_LBN 192 +#define TELEMETRY_CONFIG_METRICS_COLLECTOR_MAC_ADDR_WIDTH 48 +/* Maximum number of latency measurements to be made on a telemetry flow. */ +#define TELEMETRY_CONFIG_MAX_METRICS_COUNT_OFST 30 +#define TELEMETRY_CONFIG_MAX_METRICS_COUNT_LEN 2 +#define TELEMETRY_CONFIG_MAX_METRICS_COUNT_LBN 240 +#define TELEMETRY_CONFIG_MAX_METRICS_COUNT_WIDTH 16 +/* Maximum duration for which a telemetry flow is monitored (in millisecs). */ +#define TELEMETRY_CONFIG_MONITOR_TIMEOUT_MS_OFST 32 +#define TELEMETRY_CONFIG_MONITOR_TIMEOUT_MS_LEN 4 +#define TELEMETRY_CONFIG_MONITOR_TIMEOUT_MS_LBN 256 +#define TELEMETRY_CONFIG_MONITOR_TIMEOUT_MS_WIDTH 32 + + +/***********************************/ +/* MC_CMD_TELEMETRY_CONFIG + * This top-level command includes various sub-opcodes that are used to apply + * (and read-back) telemetry related configuration parameters on the NIC. + * Reference - SF-120569-SW Telemetry Firmware Design. + */ +#define MC_CMD_TELEMETRY_CONFIG 0x139 +#undef MC_CMD_0x139_PRIVILEGE_CTG + +#define MC_CMD_0x139_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_TELEMETRY_CONFIG_IN msgrequest */ +#define MC_CMD_TELEMETRY_CONFIG_IN_LEN 4 +/* Telemetry configuration sub-operation code */ +#define MC_CMD_TELEMETRY_CONFIG_IN_OP_OFST 0 +#define MC_CMD_TELEMETRY_CONFIG_IN_OP_LEN 4 +/* enum: Configure parameters for telemetry measurements. 
+
+
+/***********************************/
+/* MC_CMD_TELEMETRY_CONFIG
+ * This top-level command includes various sub-opcodes that are used to apply
+ * (and read back) telemetry-related configuration parameters on the NIC.
+ * Reference - SF-120569-SW Telemetry Firmware Design.
+ */
+#define MC_CMD_TELEMETRY_CONFIG 0x139
+#undef MC_CMD_0x139_PRIVILEGE_CTG
+
+#define MC_CMD_0x139_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_TELEMETRY_CONFIG_IN msgrequest */
+#define MC_CMD_TELEMETRY_CONFIG_IN_LEN 4
+/* Telemetry configuration sub-operation code */
+#define MC_CMD_TELEMETRY_CONFIG_IN_OP_OFST 0
+#define MC_CMD_TELEMETRY_CONFIG_IN_OP_LEN 4
+/* enum: Configure parameters for telemetry measurements. */
+#define MC_CMD_TELEMETRY_CONFIG_OP_SET 0x1
+/* enum: Read current values of parameters for telemetry measurements. */
+#define MC_CMD_TELEMETRY_CONFIG_OP_GET 0x2
+
+/* MC_CMD_TELEMETRY_CONFIG_IN_SET msgrequest: This command configures the
+ * parameters necessary for tcp-latency measurements. The adapter adds a filter
+ * for every new tcp flow seen in both tx and rx directions and tracks the
+ * telemetry measurements related to the flow in a tracking table. Entries in
+ * the tracking table live until N measurements have been made on the flow or
+ * the flow has been in the tracking table for the maximum configured duration.
+ * Telemetry measurements in this command refer to tcp-latency measurements for
+ * data-to-ack latency as well as data-to-data latency. All telemetry
+ * measurements are bundled into a UDP packet and forwarded to a collector
+ * whose IP address is configured using this command.
+ */
+#define MC_CMD_TELEMETRY_CONFIG_IN_SET_LEN 40
+/* Telemetry configuration sub-operation code. Must be set to
+ * MC_CMD_TELEMETRY_CONFIG_OP_SET.
+ */
+#define MC_CMD_TELEMETRY_CONFIG_IN_SET_OP_OFST 0
+#define MC_CMD_TELEMETRY_CONFIG_IN_SET_OP_LEN 4
+/* struct of type TELEMETRY_CONFIG. */
+#define MC_CMD_TELEMETRY_CONFIG_IN_SET_PARAMETERS_OFST 4
+#define MC_CMD_TELEMETRY_CONFIG_IN_SET_PARAMETERS_LEN 36
+
+/* MC_CMD_TELEMETRY_CONFIG_OUT_SET msgresponse */
+#define MC_CMD_TELEMETRY_CONFIG_OUT_SET_LEN 0
+
+/* MC_CMD_TELEMETRY_CONFIG_IN_GET msgrequest: This command reads out the
+ * current values of the config parameters necessary for tcp-latency
+ * measurements. See MC_CMD_TELEMETRY_CONFIG_IN_SET for more information about
+ * the configuration parameters.
+ */
+#define MC_CMD_TELEMETRY_CONFIG_IN_GET_LEN 4
+/* Telemetry configuration sub-operation code. Must be set to
+ * MC_CMD_TELEMETRY_CONFIG_OP_GET.
+ */
+#define MC_CMD_TELEMETRY_CONFIG_IN_GET_OP_OFST 0
+#define MC_CMD_TELEMETRY_CONFIG_IN_GET_OP_LEN 4
+
+/* MC_CMD_TELEMETRY_CONFIG_OUT_GET msgresponse */
+#define MC_CMD_TELEMETRY_CONFIG_OUT_GET_LEN 36
+/* struct of type TELEMETRY_CONFIG. */
+#define MC_CMD_TELEMETRY_CONFIG_OUT_GET_PARAMETERS_OFST 0
+#define MC_CMD_TELEMETRY_CONFIG_OUT_GET_PARAMETERS_LEN 36
+
+
+/***********************************/
+/* MC_CMD_GET_RX_PREFIX_ID
+ * This command is part of the mechanism for configuring the format of the RX
+ * packet prefix. It takes as input a bitmask of the fields the host would like
+ * to be in the prefix. If the hardware supports RX prefixes with that
+ * combination of fields, then this command returns a list of prefix-ids,
+ * opaque identifiers suitable for use in the RX_PREFIX_ID field of an
+ * MC_CMD_INIT_RXQ_V5_IN message. If the combination of fields is not
+ * supported, returns ENOTSUP. If the firmware can't create any new prefix-ids
+ * due to resource constraints, returns ENOSPC.
+ */
+#define MC_CMD_GET_RX_PREFIX_ID 0x13b
+#undef MC_CMD_0x13b_PRIVILEGE_CTG
+
+#define MC_CMD_0x13b_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_RX_PREFIX_ID_IN msgrequest */
+#define MC_CMD_GET_RX_PREFIX_ID_IN_LEN 8
+/* Field bitmask.
*/ +#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_OFST 0 +#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_LEN 8 +#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_LO_OFST 0 +#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_LO_LEN 4 +#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_LO_LBN 0 +#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_LO_WIDTH 32 +#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_HI_OFST 4 +#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_HI_LEN 4 +#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_HI_LBN 32 +#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_HI_WIDTH 32 +#define MC_CMD_GET_RX_PREFIX_ID_IN_LENGTH_OFST 0 +#define MC_CMD_GET_RX_PREFIX_ID_IN_LENGTH_LBN 0 +#define MC_CMD_GET_RX_PREFIX_ID_IN_LENGTH_WIDTH 1 +#define MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_VALID_OFST 0 +#define MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_VALID_LBN 1 +#define MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_VALID_WIDTH 1 +#define MC_CMD_GET_RX_PREFIX_ID_IN_USER_FLAG_OFST 0 +#define MC_CMD_GET_RX_PREFIX_ID_IN_USER_FLAG_LBN 2 +#define MC_CMD_GET_RX_PREFIX_ID_IN_USER_FLAG_WIDTH 1 +#define MC_CMD_GET_RX_PREFIX_ID_IN_CLASS_OFST 0 +#define MC_CMD_GET_RX_PREFIX_ID_IN_CLASS_LBN 3 +#define MC_CMD_GET_RX_PREFIX_ID_IN_CLASS_WIDTH 1 +#define MC_CMD_GET_RX_PREFIX_ID_IN_PARTIAL_TSTAMP_OFST 0 +#define MC_CMD_GET_RX_PREFIX_ID_IN_PARTIAL_TSTAMP_LBN 4 +#define MC_CMD_GET_RX_PREFIX_ID_IN_PARTIAL_TSTAMP_WIDTH 1 +#define MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_OFST 0 +#define MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_LBN 5 +#define MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_WIDTH 1 +#define MC_CMD_GET_RX_PREFIX_ID_IN_USER_MARK_OFST 0 +#define MC_CMD_GET_RX_PREFIX_ID_IN_USER_MARK_LBN 6 +#define MC_CMD_GET_RX_PREFIX_ID_IN_USER_MARK_WIDTH 1 +#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_MPORT_OFST 0 +#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_MPORT_LBN 7 +#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_MPORT_WIDTH 1 +#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_VPORT_OFST 0 +#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_VPORT_LBN 7 +#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_VPORT_WIDTH 1 +#define MC_CMD_GET_RX_PREFIX_ID_IN_CSUM_FRAME_OFST 0 +#define MC_CMD_GET_RX_PREFIX_ID_IN_CSUM_FRAME_LBN 8 +#define MC_CMD_GET_RX_PREFIX_ID_IN_CSUM_FRAME_WIDTH 1 +#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIP_TCI_OFST 0 +#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIP_TCI_LBN 9 +#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIP_TCI_WIDTH 1 +#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIPPED_OFST 0 +#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIPPED_LBN 10 +#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIPPED_WIDTH 1 +#define MC_CMD_GET_RX_PREFIX_ID_IN_VSWITCH_STATUS_OFST 0 +#define MC_CMD_GET_RX_PREFIX_ID_IN_VSWITCH_STATUS_LBN 11 +#define MC_CMD_GET_RX_PREFIX_ID_IN_VSWITCH_STATUS_WIDTH 1 + +/* MC_CMD_GET_RX_PREFIX_ID_OUT msgresponse */ +#define MC_CMD_GET_RX_PREFIX_ID_OUT_LENMIN 8 +#define MC_CMD_GET_RX_PREFIX_ID_OUT_LENMAX 252 +#define MC_CMD_GET_RX_PREFIX_ID_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_GET_RX_PREFIX_ID_OUT_LEN(num) (4+4*(num)) +#define MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_NUM(len) (((len)-4)/4) +/* Number of prefix-ids returned */ +#define MC_CMD_GET_RX_PREFIX_ID_OUT_NUM_RX_PREFIX_IDS_OFST 0 +#define MC_CMD_GET_RX_PREFIX_ID_OUT_NUM_RX_PREFIX_IDS_LEN 4 +/* Opaque prefix identifiers which can be passed into MC_CMD_INIT_RXQ_V5 or + * MC_CMD_QUERY_PREFIX_ID + */ +#define MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_OFST 4 +#define MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_LEN 4 +#define MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_MINNUM 1 +#define MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_MAXNUM 62 +#define 
MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_MAXNUM_MCDI2 254
+
+/* RX_PREFIX_FIELD_INFO structuredef: Information about a single RX prefix
+ * field
+ */
+#define RX_PREFIX_FIELD_INFO_LEN 4
+/* The offset of the field from the start of the prefix, in bits */
+#define RX_PREFIX_FIELD_INFO_OFFSET_BITS_OFST 0
+#define RX_PREFIX_FIELD_INFO_OFFSET_BITS_LEN 2
+#define RX_PREFIX_FIELD_INFO_OFFSET_BITS_LBN 0
+#define RX_PREFIX_FIELD_INFO_OFFSET_BITS_WIDTH 16
+/* The width of the field, in bits */
+#define RX_PREFIX_FIELD_INFO_WIDTH_BITS_OFST 2
+#define RX_PREFIX_FIELD_INFO_WIDTH_BITS_LEN 1
+#define RX_PREFIX_FIELD_INFO_WIDTH_BITS_LBN 16
+#define RX_PREFIX_FIELD_INFO_WIDTH_BITS_WIDTH 8
+/* The type of the field. These enum values are in the same order as the fields
+ * in the MC_CMD_GET_RX_PREFIX_ID_IN bitmask
+ */
+#define RX_PREFIX_FIELD_INFO_TYPE_OFST 3
+#define RX_PREFIX_FIELD_INFO_TYPE_LEN 1
+#define RX_PREFIX_FIELD_INFO_LENGTH 0x0 /* enum */
+#define RX_PREFIX_FIELD_INFO_RSS_HASH_VALID 0x1 /* enum */
+#define RX_PREFIX_FIELD_INFO_USER_FLAG 0x2 /* enum */
+#define RX_PREFIX_FIELD_INFO_CLASS 0x3 /* enum */
+#define RX_PREFIX_FIELD_INFO_PARTIAL_TSTAMP 0x4 /* enum */
+#define RX_PREFIX_FIELD_INFO_RSS_HASH 0x5 /* enum */
+#define RX_PREFIX_FIELD_INFO_USER_MARK 0x6 /* enum */
+#define RX_PREFIX_FIELD_INFO_INGRESS_MPORT 0x7 /* enum */
+#define RX_PREFIX_FIELD_INFO_INGRESS_VPORT 0x7 /* enum */
+#define RX_PREFIX_FIELD_INFO_CSUM_FRAME 0x8 /* enum */
+#define RX_PREFIX_FIELD_INFO_VLAN_STRIP_TCI 0x9 /* enum */
+#define RX_PREFIX_FIELD_INFO_VLAN_STRIPPED 0xa /* enum */
+#define RX_PREFIX_FIELD_INFO_VSWITCH_STATUS 0xb /* enum */
+#define RX_PREFIX_FIELD_INFO_TYPE_LBN 24
+#define RX_PREFIX_FIELD_INFO_TYPE_WIDTH 8
+
+/* RX_PREFIX_FIXED_RESPONSE structuredef: Information about an RX prefix in
+ * which every field has a fixed offset and width
+ */
+#define RX_PREFIX_FIXED_RESPONSE_LENMIN 4
+#define RX_PREFIX_FIXED_RESPONSE_LENMAX 252
+#define RX_PREFIX_FIXED_RESPONSE_LENMAX_MCDI2 1020
+#define RX_PREFIX_FIXED_RESPONSE_LEN(num) (4+4*(num))
+#define RX_PREFIX_FIXED_RESPONSE_FIELDS_NUM(len) (((len)-4)/4)
+/* Length of the RX prefix in bytes */
+#define RX_PREFIX_FIXED_RESPONSE_PREFIX_LENGTH_BYTES_OFST 0
+#define RX_PREFIX_FIXED_RESPONSE_PREFIX_LENGTH_BYTES_LEN 1
+#define RX_PREFIX_FIXED_RESPONSE_PREFIX_LENGTH_BYTES_LBN 0
+#define RX_PREFIX_FIXED_RESPONSE_PREFIX_LENGTH_BYTES_WIDTH 8
+/* Number of fields present in the prefix */
+#define RX_PREFIX_FIXED_RESPONSE_FIELD_COUNT_OFST 1
+#define RX_PREFIX_FIXED_RESPONSE_FIELD_COUNT_LEN 1
+#define RX_PREFIX_FIXED_RESPONSE_FIELD_COUNT_LBN 8
+#define RX_PREFIX_FIXED_RESPONSE_FIELD_COUNT_WIDTH 8
+#define RX_PREFIX_FIXED_RESPONSE_RESERVED_OFST 2
+#define RX_PREFIX_FIXED_RESPONSE_RESERVED_LEN 2
+#define RX_PREFIX_FIXED_RESPONSE_RESERVED_LBN 16
+#define RX_PREFIX_FIXED_RESPONSE_RESERVED_WIDTH 16
+/* Array of RX_PREFIX_FIELD_INFO structures, of length FIELD_COUNT */
+#define RX_PREFIX_FIXED_RESPONSE_FIELDS_OFST 4
+#define RX_PREFIX_FIXED_RESPONSE_FIELDS_LEN 4
+#define RX_PREFIX_FIXED_RESPONSE_FIELDS_MINNUM 0
+#define RX_PREFIX_FIXED_RESPONSE_FIELDS_MAXNUM 62
+#define RX_PREFIX_FIXED_RESPONSE_FIELDS_MAXNUM_MCDI2 254
+#define RX_PREFIX_FIXED_RESPONSE_FIELDS_LBN 32
+#define RX_PREFIX_FIXED_RESPONSE_FIELDS_WIDTH 32
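+
+/* Example (an illustrative editorial sketch, not part of the generated MCDI
+ * definitions): walking the RX_PREFIX_FIELD_INFO array of a
+ * RX_PREFIX_FIXED_RESPONSE, as returned by MC_CMD_QUERY_RX_PREFIX_ID (defined
+ * below). 'resp' is assumed to point at the RESPONSE payload (offset
+ * MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_OFST) and to have been length-checked
+ * by the caller.
+ *
+ *	const u8 *resp;
+ *	unsigned int i, nfields;
+ *
+ *	nfields = resp[RX_PREFIX_FIXED_RESPONSE_FIELD_COUNT_OFST];
+ *	for (i = 0; i < nfields; i++) {
+ *		const u8 *fi = resp + RX_PREFIX_FIXED_RESPONSE_FIELDS_OFST +
+ *			       i * RX_PREFIX_FIXED_RESPONSE_FIELDS_LEN;
+ *		u16 offset_bits = get_unaligned_le16(fi +
+ *				RX_PREFIX_FIELD_INFO_OFFSET_BITS_OFST);
+ *		u8 width_bits = fi[RX_PREFIX_FIELD_INFO_WIDTH_BITS_OFST];
+ *		u8 type = fi[RX_PREFIX_FIELD_INFO_TYPE_OFST];
+ *		// e.g. record that field 'type' lives at offset_bits with
+ *		// width_bits in the RX prefix
+ *	}
+ */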
+
+
+/***********************************/
+/* MC_CMD_QUERY_RX_PREFIX_ID
+ * This command takes an RX prefix id (obtained from MC_CMD_GET_RX_PREFIX_ID)
+ * and returns a description of the RX prefix of packets delivered to an RXQ
+ * created with that prefix id.
+ */
+#define MC_CMD_QUERY_RX_PREFIX_ID 0x13c
+#undef MC_CMD_0x13c_PRIVILEGE_CTG
+
+#define MC_CMD_0x13c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_QUERY_RX_PREFIX_ID_IN msgrequest */
+#define MC_CMD_QUERY_RX_PREFIX_ID_IN_LEN 4
+/* Prefix id to query */
+#define MC_CMD_QUERY_RX_PREFIX_ID_IN_RX_PREFIX_ID_OFST 0
+#define MC_CMD_QUERY_RX_PREFIX_ID_IN_RX_PREFIX_ID_LEN 4
+
+/* MC_CMD_QUERY_RX_PREFIX_ID_OUT msgresponse */
+#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_LENMIN 4
+#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_LENMAX 252
+#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_LEN(num) (4+1*(num))
+#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_NUM(len) (((len)-4)/1)
+/* An enum describing the structure of this response. */
+#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_TYPE_OFST 0
+#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_TYPE_LEN 1
+/* enum: The response is of format RX_PREFIX_FIXED_RESPONSE */
+#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_TYPE_FIXED 0x0
+#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESERVED_OFST 1
+#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESERVED_LEN 3
+/* The response. Its format is as defined by the RESPONSE_TYPE value */
+#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_OFST 4
+#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_LEN 1
+#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_MINNUM 0
+#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_MAXNUM 248
+#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_MAXNUM_MCDI2 1016
+
+
+/***********************************/
+/* MC_CMD_BUNDLE
+ * A command to perform various bundle-related operations on insecure cards.
+ */
+#define MC_CMD_BUNDLE 0x13d
+#undef MC_CMD_0x13d_PRIVILEGE_CTG
+
+#define MC_CMD_0x13d_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_BUNDLE_IN msgrequest */
+#define MC_CMD_BUNDLE_IN_LEN 4
+/* Sub-command code */
+#define MC_CMD_BUNDLE_IN_OP_OFST 0
+#define MC_CMD_BUNDLE_IN_OP_LEN 4
+/* enum: Get the current host access mode set on component partitions. */
+#define MC_CMD_BUNDLE_IN_OP_COMPONENT_ACCESS_GET 0x0
+/* enum: Set the host access mode on component partitions. */
+#define MC_CMD_BUNDLE_IN_OP_COMPONENT_ACCESS_SET 0x1
+
+/* MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_IN msgrequest: Retrieve the current
+ * access mode on component partitions such as MC_FIRMWARE, SUC_FIRMWARE and
+ * EXPANSION_UEFI. This command only works on engineering (insecure) cards. On
+ * secure adapters, this command returns MC_CMD_ERR_EPERM.
+ */
+#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_IN_LEN 4
+/* Sub-command code. Must be OP_COMPONENT_ACCESS_GET. */
+#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_IN_OP_OFST 0
+#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_IN_OP_LEN 4
+
+/* MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_OUT msgresponse: Returns the access
+ * control mode.
+ */
+#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_OUT_LEN 4
+/* Access mode of component partitions. */
+#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_OUT_ACCESS_MODE_OFST 0
+#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_OUT_ACCESS_MODE_LEN 4
+/* enum: Component partitions are read-only from the host. */
+#define MC_CMD_BUNDLE_COMPONENTS_READ_ONLY 0x0
+/* enum: Component partitions can be read from and written to by the host. */
+#define MC_CMD_BUNDLE_COMPONENTS_READ_WRITE 0x1
+
+/* MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN msgrequest: The component
+ * partitions such as MC_FIRMWARE, SUC_FIRMWARE, EXPANSION_UEFI are set as
+ * read-only on firmware built with bundle support. This command marks these
+ * partitions as read/writeable.
+ * The access status set by this command does not persist across MC reboots.
+ * This command only works on engineering (insecure) cards. On secure adapters,
+ * this command returns MC_CMD_ERR_EPERM.
+ */
+#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN_LEN 8
+/* Sub-command code. Must be OP_COMPONENT_ACCESS_SET. */
+#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN_OP_OFST 0
+#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN_OP_LEN 4
+/* Access mode of component partitions. */
+#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN_ACCESS_MODE_OFST 4
+#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN_ACCESS_MODE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_OUT/ACCESS_MODE */
+
+/* MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_OUT msgresponse */
+#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_VPD
+ * Read all VPD starting from a given address
+ */
+#define MC_CMD_GET_VPD 0x165
+#undef MC_CMD_0x165_PRIVILEGE_CTG
+
+#define MC_CMD_0x165_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_VPD_IN msgrequest */
+#define MC_CMD_GET_VPD_IN_LEN 4
+/* VPD address to start from. In case the VPD is longer than the MCDI buffer
+ * (unlikely), the user can make multiple calls with different starting
+ * addresses.
+ */
+#define MC_CMD_GET_VPD_IN_ADDR_OFST 0
+#define MC_CMD_GET_VPD_IN_ADDR_LEN 4
+
+/* MC_CMD_GET_VPD_OUT msgresponse */
+#define MC_CMD_GET_VPD_OUT_LENMIN 0
+#define MC_CMD_GET_VPD_OUT_LENMAX 252
+#define MC_CMD_GET_VPD_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_GET_VPD_OUT_LEN(num) (0+1*(num))
+#define MC_CMD_GET_VPD_OUT_DATA_NUM(len) (((len)-0)/1)
+/* VPD data returned. */
+#define MC_CMD_GET_VPD_OUT_DATA_OFST 0
+#define MC_CMD_GET_VPD_OUT_DATA_LEN 1
+#define MC_CMD_GET_VPD_OUT_DATA_MINNUM 0
+#define MC_CMD_GET_VPD_OUT_DATA_MAXNUM 252
+#define MC_CMD_GET_VPD_OUT_DATA_MAXNUM_MCDI2 1020
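+
+/* Example (an illustrative editorial sketch, not part of the generated MCDI
+ * definitions): reading the whole VPD area with repeated MC_CMD_GET_VPD calls,
+ * advancing ADDR until a short response indicates the end. It assumes the
+ * driver's usual MCDI helpers (MCDI_DECLARE_BUF, MCDI_SET_DWORD,
+ * efx_mcdi_rpc()); error handling is elided.
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_VPD_IN_LEN);
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_VPD_OUT_LENMAX);
+ *	size_t outlen;
+ *	u32 addr = 0;
+ *
+ *	do {
+ *		MCDI_SET_DWORD(inbuf, GET_VPD_IN_ADDR, addr);
+ *		efx_mcdi_rpc(efx, MC_CMD_GET_VPD, inbuf, sizeof(inbuf),
+ *			     outbuf, sizeof(outbuf), &outlen);
+ *		// outlen bytes of VPD start at MC_CMD_GET_VPD_OUT_DATA_OFST
+ *		addr += outlen;
+ *	} while (outlen == MC_CMD_GET_VPD_OUT_LENMAX);
+ */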
+
+
+/***********************************/
+/* MC_CMD_GET_NCSI_INFO
+ * Provide information about the NC-SI stack
+ */
+#define MC_CMD_GET_NCSI_INFO 0x167
+#undef MC_CMD_0x167_PRIVILEGE_CTG
+
+#define MC_CMD_0x167_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_NCSI_INFO_IN msgrequest */
+#define MC_CMD_GET_NCSI_INFO_IN_LEN 8
+/* Operation to be performed */
+#define MC_CMD_GET_NCSI_INFO_IN_OP_OFST 0
+#define MC_CMD_GET_NCSI_INFO_IN_OP_LEN 4
+/* enum: Information on the link settings. */
+#define MC_CMD_GET_NCSI_INFO_IN_OP_LINK 0x0
+/* enum: Statistics associated with the channel */
+#define MC_CMD_GET_NCSI_INFO_IN_OP_STATISTICS 0x1
+/* The NC-SI channel on which the operation is to be performed */
+#define MC_CMD_GET_NCSI_INFO_IN_CHANNEL_OFST 4
+#define MC_CMD_GET_NCSI_INFO_IN_CHANNEL_LEN 4
+
+/* MC_CMD_GET_NCSI_INFO_LINK_OUT msgresponse */
+#define MC_CMD_GET_NCSI_INFO_LINK_OUT_LEN 12
+/* Settings as received from BMC. */
+#define MC_CMD_GET_NCSI_INFO_LINK_OUT_SETTINGS_OFST 0
+#define MC_CMD_GET_NCSI_INFO_LINK_OUT_SETTINGS_LEN 4
+/* Advertised capabilities applied to channel. */
+#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ADV_CAP_OFST 4
+#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ADV_CAP_LEN 4
+/* General status */
+#define MC_CMD_GET_NCSI_INFO_LINK_OUT_STATUS_OFST 8
+#define MC_CMD_GET_NCSI_INFO_LINK_OUT_STATUS_LEN 4
+#define MC_CMD_GET_NCSI_INFO_LINK_OUT_STATE_OFST 8
+#define MC_CMD_GET_NCSI_INFO_LINK_OUT_STATE_LBN 0
+#define MC_CMD_GET_NCSI_INFO_LINK_OUT_STATE_WIDTH 2
+#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ENABLE_OFST 8
+#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ENABLE_LBN 2
+#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ENABLE_WIDTH 1
+#define MC_CMD_GET_NCSI_INFO_LINK_OUT_NETWORK_TX_OFST 8
+#define MC_CMD_GET_NCSI_INFO_LINK_OUT_NETWORK_TX_LBN 3
+#define MC_CMD_GET_NCSI_INFO_LINK_OUT_NETWORK_TX_WIDTH 1
+#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ATTACHED_OFST 8
+#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ATTACHED_LBN 4
+#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ATTACHED_WIDTH 1
+
+/* MC_CMD_GET_NCSI_INFO_STATISTICS_OUT msgresponse */
+#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_LEN 28
+/* The number of NC-SI commands received. */
+#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMDS_RX_OFST 0
+#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMDS_RX_LEN 4
+/* The number of NC-SI commands dropped. */
+#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_PKTS_DROPPED_OFST 4
+#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_PKTS_DROPPED_LEN 4
+/* The number of invalid NC-SI commands received. */
+#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMD_TYPE_ERRS_OFST 8
+#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMD_TYPE_ERRS_LEN 4
+/* The number of checksum errors seen. */
+#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMD_CSUM_ERRS_OFST 12
+#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMD_CSUM_ERRS_LEN 4
+/* The number of NC-SI requests received. */
+#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_RX_PKTS_OFST 16
+#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_RX_PKTS_LEN 4
+/* The number of NC-SI responses sent (includes AENs) */
+#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_TX_PKTS_OFST 20
+#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_TX_PKTS_LEN 4
+/* The number of NC-SI AENs sent */
+#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_AENS_SENT_OFST 24
+#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_AENS_SENT_LEN 4
+
+
+/***********************************/
+/* MC_CMD_FIRMWARE_SET_LOCKDOWN
+ * System lockdown; when enabled, firmware updates are blocked.
+ */
+#define MC_CMD_FIRMWARE_SET_LOCKDOWN 0x16f
+#undef MC_CMD_0x16f_PRIVILEGE_CTG
+
+#define MC_CMD_0x16f_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_FIRMWARE_SET_LOCKDOWN_IN msgrequest: This MCDI command can only
+ * enable lockdown, because lockdown can only be disabled by a PMCI command or
+ * a cold reset of the system.
+ */
+#define MC_CMD_FIRMWARE_SET_LOCKDOWN_IN_LEN 0
+
+/* MC_CMD_FIRMWARE_SET_LOCKDOWN_OUT msgresponse */
+#define MC_CMD_FIRMWARE_SET_LOCKDOWN_OUT_LEN 0
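+
+/* Example (an illustrative editorial sketch, not part of the generated MCDI
+ * definitions): engaging firmware lockdown. The command takes no payload and
+ * returns none, so the MCDI call collapses to a bare efx_mcdi_rpc(), assuming
+ * the driver's usual MCDI plumbing.
+ *
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_FIRMWARE_SET_LOCKDOWN, NULL, 0,
+ *			  NULL, 0, NULL);
+ *	// Irreversible from the host: only PMCI or a cold reset clears it.
+ */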
+
+
+/***********************************/
+/* MC_CMD_GET_TEST_FEATURES
+ * This command returns device details, knowledge of which may be required by
+ * test infrastructure. Although safe, it is not intended to be used by
+ * production drivers, and the structure returned intentionally has no public
+ * documentation.
+ */
+#define MC_CMD_GET_TEST_FEATURES 0x1ac
+#undef MC_CMD_0x1ac_PRIVILEGE_CTG
+
+#define MC_CMD_0x1ac_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_TEST_FEATURES_IN msgrequest: Request test features. */
+#define MC_CMD_GET_TEST_FEATURES_IN_LEN 0
+
+/* MC_CMD_GET_TEST_FEATURE_OUT msgresponse */
+#define MC_CMD_GET_TEST_FEATURE_OUT_LENMIN 4
+#define MC_CMD_GET_TEST_FEATURE_OUT_LENMAX 252
+#define MC_CMD_GET_TEST_FEATURE_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_GET_TEST_FEATURE_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_GET_TEST_FEATURE_OUT_TEST_FEATURES_NUM(len) (((len)-0)/4)
+/* Test-specific NIC information. Production drivers must treat this as opaque.
+ * The layout is defined in the private TEST_FEATURES_LAYOUT structure.
+ */
+#define MC_CMD_GET_TEST_FEATURE_OUT_TEST_FEATURES_OFST 0
+#define MC_CMD_GET_TEST_FEATURE_OUT_TEST_FEATURES_LEN 4
+#define MC_CMD_GET_TEST_FEATURE_OUT_TEST_FEATURES_MINNUM 1
+#define MC_CMD_GET_TEST_FEATURE_OUT_TEST_FEATURES_MAXNUM 63
+#define MC_CMD_GET_TEST_FEATURE_OUT_TEST_FEATURES_MAXNUM_MCDI2 255
+
+
+/***********************************/
+/* MC_CMD_FPGA
+ * A command to perform various FPGA-related operations on platforms that
+ * include FPGAs. Note that some platforms may only support a subset of these
+ * operations.
+ */
+#define MC_CMD_FPGA 0x1bf
+#undef MC_CMD_0x1bf_PRIVILEGE_CTG
+
+#define MC_CMD_0x1bf_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_FPGA_IN msgrequest */
+#define MC_CMD_FPGA_IN_LEN 4
+/* Sub-command code */
+#define MC_CMD_FPGA_IN_OP_OFST 0
+#define MC_CMD_FPGA_IN_OP_LEN 4
+/* enum: Get the FPGA version string. */
+#define MC_CMD_FPGA_IN_OP_GET_VERSION 0x0
+/* enum: Read bitmask of features supported in the FPGA image. */
+#define MC_CMD_FPGA_IN_OP_GET_CAPABILITIES 0x1
+/* enum: Perform an FPGA reset. */
+#define MC_CMD_FPGA_IN_OP_RESET 0x2
+/* enum: Set active flash device. */
+#define MC_CMD_FPGA_IN_OP_SELECT_FLASH 0x3
+/* enum: Get active flash device. */
+#define MC_CMD_FPGA_IN_OP_GET_ACTIVE_FLASH 0x4
+/* enum: Configure internal link i.e. the FPGA port facing the ASIC. */
+#define MC_CMD_FPGA_IN_OP_SET_INTERNAL_LINK 0x5
+/* enum: Read internal link configuration. */
+#define MC_CMD_FPGA_IN_OP_GET_INTERNAL_LINK 0x6
+/* enum: Get MAC statistics of FPGA external port. */
+#define MC_CMD_FPGA_IN_OP_GET_MAC_STATS 0x7
+/* enum: Set configuration on internal FPGA MAC. */
+#define MC_CMD_FPGA_IN_OP_SET_INTERNAL_MAC 0x8
+
+/* MC_CMD_FPGA_OP_GET_VERSION_IN msgrequest: Get the FPGA version string. A
+ * free-format string is returned in response to this command. Any checks on
+ * supported FPGA operations are based on the response to
+ * MC_CMD_FPGA_OP_GET_CAPABILITIES.
+ */
+#define MC_CMD_FPGA_OP_GET_VERSION_IN_LEN 4
+/* Sub-command code. Must be OP_GET_VERSION */
+#define MC_CMD_FPGA_OP_GET_VERSION_IN_OP_OFST 0
+#define MC_CMD_FPGA_OP_GET_VERSION_IN_OP_LEN 4
+
+/* MC_CMD_FPGA_OP_GET_VERSION_OUT msgresponse: Returns the version string. */
+#define MC_CMD_FPGA_OP_GET_VERSION_OUT_LENMIN 0
+#define MC_CMD_FPGA_OP_GET_VERSION_OUT_LENMAX 252
+#define MC_CMD_FPGA_OP_GET_VERSION_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_FPGA_OP_GET_VERSION_OUT_LEN(num) (0+1*(num))
+#define MC_CMD_FPGA_OP_GET_VERSION_OUT_VERSION_NUM(len) (((len)-0)/1)
+/* Null-terminated string containing version information. */
+#define MC_CMD_FPGA_OP_GET_VERSION_OUT_VERSION_OFST 0
+#define MC_CMD_FPGA_OP_GET_VERSION_OUT_VERSION_LEN 1
+#define MC_CMD_FPGA_OP_GET_VERSION_OUT_VERSION_MINNUM 0
+#define MC_CMD_FPGA_OP_GET_VERSION_OUT_VERSION_MAXNUM 252
+#define MC_CMD_FPGA_OP_GET_VERSION_OUT_VERSION_MAXNUM_MCDI2 1020
+
+/* MC_CMD_FPGA_OP_GET_CAPABILITIES_IN msgrequest: Read bitmask of features
+ * supported in the FPGA image.
+ */
+#define MC_CMD_FPGA_OP_GET_CAPABILITIES_IN_LEN 4
+/* Sub-command code. Must be OP_GET_CAPABILITIES */
+#define MC_CMD_FPGA_OP_GET_CAPABILITIES_IN_OP_OFST 0
+#define MC_CMD_FPGA_OP_GET_CAPABILITIES_IN_OP_LEN 4
+
+/* MC_CMD_FPGA_OP_GET_CAPABILITIES_OUT msgresponse: Returns the bitmask of
+ * supported features.
+ */
+#define MC_CMD_FPGA_OP_GET_CAPABILITIES_OUT_LEN 4
+/* Bit-mask of supported features. */
+#define MC_CMD_FPGA_OP_GET_CAPABILITIES_OUT_CAPABILITIES_OFST 0
+#define MC_CMD_FPGA_OP_GET_CAPABILITIES_OUT_CAPABILITIES_LEN 4
+#define MC_CMD_FPGA_OP_GET_CAPABILITIES_OUT_MAC_OFST 0
+#define MC_CMD_FPGA_OP_GET_CAPABILITIES_OUT_MAC_LBN 0
+#define MC_CMD_FPGA_OP_GET_CAPABILITIES_OUT_MAC_WIDTH 1
+#define MC_CMD_FPGA_OP_GET_CAPABILITIES_OUT_MAE_OFST 0
+#define MC_CMD_FPGA_OP_GET_CAPABILITIES_OUT_MAE_LBN 1
+#define MC_CMD_FPGA_OP_GET_CAPABILITIES_OUT_MAE_WIDTH 1
+
+/* MC_CMD_FPGA_OP_RESET_IN msgrequest: Perform an FPGA reset operation where
+ * supported.
+ */
+#define MC_CMD_FPGA_OP_RESET_IN_LEN 4
+/* Sub-command code. Must be OP_RESET */
+#define MC_CMD_FPGA_OP_RESET_IN_OP_OFST 0
+#define MC_CMD_FPGA_OP_RESET_IN_OP_LEN 4
+
+/* MC_CMD_FPGA_OP_RESET_OUT msgresponse */
+#define MC_CMD_FPGA_OP_RESET_OUT_LEN 0
+
+/* MC_CMD_FPGA_OP_SELECT_FLASH_IN msgrequest: Set active FPGA flash device.
+ * Returns EINVAL if the selected flash index does not exist on the platform
+ * under test.
+ */
+#define MC_CMD_FPGA_OP_SELECT_FLASH_IN_LEN 8
+/* Sub-command code. Must be OP_SELECT_FLASH */
+#define MC_CMD_FPGA_OP_SELECT_FLASH_IN_OP_OFST 0
+#define MC_CMD_FPGA_OP_SELECT_FLASH_IN_OP_LEN 4
+/* Flash device identifier. */
+#define MC_CMD_FPGA_OP_SELECT_FLASH_IN_FLASH_ID_OFST 4
+#define MC_CMD_FPGA_OP_SELECT_FLASH_IN_FLASH_ID_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_FPGA_FLASH_INDEX */
+
+/* MC_CMD_FPGA_OP_SELECT_FLASH_OUT msgresponse */
+#define MC_CMD_FPGA_OP_SELECT_FLASH_OUT_LEN 0
+
+/* MC_CMD_FPGA_OP_GET_ACTIVE_FLASH_IN msgrequest: Get active FPGA flash device.
+ */
+#define MC_CMD_FPGA_OP_GET_ACTIVE_FLASH_IN_LEN 4
+/* Sub-command code. Must be OP_GET_ACTIVE_FLASH */
+#define MC_CMD_FPGA_OP_GET_ACTIVE_FLASH_IN_OP_OFST 0
+#define MC_CMD_FPGA_OP_GET_ACTIVE_FLASH_IN_OP_LEN 4
+
+/* MC_CMD_FPGA_OP_GET_ACTIVE_FLASH_OUT msgresponse: Returns flash identifier
+ * for current active flash.
+ */
+#define MC_CMD_FPGA_OP_GET_ACTIVE_FLASH_OUT_LEN 4
+/* Flash device identifier. */
+#define MC_CMD_FPGA_OP_GET_ACTIVE_FLASH_OUT_FLASH_ID_OFST 0
+#define MC_CMD_FPGA_OP_GET_ACTIVE_FLASH_OUT_FLASH_ID_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_FPGA_FLASH_INDEX */
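+
+/* Example (an illustrative editorial sketch, not part of the generated MCDI
+ * definitions): checking whether the loaded FPGA image exposes an MAE before
+ * issuing MAE-related sub-commands. Assumes the driver's usual MCDI helpers;
+ * error handling elided.
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_FPGA_OP_GET_CAPABILITIES_IN_LEN);
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_FPGA_OP_GET_CAPABILITIES_OUT_LEN);
+ *	u32 caps;
+ *
+ *	MCDI_SET_DWORD(inbuf, FPGA_OP_GET_CAPABILITIES_IN_OP,
+ *		       MC_CMD_FPGA_IN_OP_GET_CAPABILITIES);
+ *	efx_mcdi_rpc(efx, MC_CMD_FPGA, inbuf, sizeof(inbuf),
+ *		     outbuf, sizeof(outbuf), NULL);
+ *	caps = MCDI_DWORD(outbuf, FPGA_OP_GET_CAPABILITIES_OUT_CAPABILITIES);
+ *	if (caps & (1 << MC_CMD_FPGA_OP_GET_CAPABILITIES_OUT_MAE_LBN))
+ *		;	// MAE present in this image
+ */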
+
+/* MC_CMD_FPGA_OP_SET_INTERNAL_LINK_IN msgrequest: Configure FPGA internal
+ * port, facing the ASIC
+ */
+#define MC_CMD_FPGA_OP_SET_INTERNAL_LINK_IN_LEN 12
+/* Sub-command code. Must be OP_SET_INTERNAL_LINK */
+#define MC_CMD_FPGA_OP_SET_INTERNAL_LINK_IN_OP_OFST 0
+#define MC_CMD_FPGA_OP_SET_INTERNAL_LINK_IN_OP_LEN 4
+/* Flags */
+#define MC_CMD_FPGA_OP_SET_INTERNAL_LINK_IN_FLAGS_OFST 4
+#define MC_CMD_FPGA_OP_SET_INTERNAL_LINK_IN_FLAGS_LEN 4
+#define MC_CMD_FPGA_OP_SET_INTERNAL_LINK_IN_LINK_STATE_OFST 4
+#define MC_CMD_FPGA_OP_SET_INTERNAL_LINK_IN_LINK_STATE_LBN 0
+#define MC_CMD_FPGA_OP_SET_INTERNAL_LINK_IN_LINK_STATE_WIDTH 2
+/* enum: Unmodified, same as last state set by firmware */
+#define MC_CMD_FPGA_OP_SET_INTERNAL_LINK_IN_AUTO 0x0
+/* enum: Configure link-up */
+#define MC_CMD_FPGA_OP_SET_INTERNAL_LINK_IN_UP 0x1
+/* enum: Configure link-down */
+#define MC_CMD_FPGA_OP_SET_INTERNAL_LINK_IN_DOWN 0x2
+#define MC_CMD_FPGA_OP_SET_INTERNAL_LINK_IN_FLUSH_OFST 4
+#define MC_CMD_FPGA_OP_SET_INTERNAL_LINK_IN_FLUSH_LBN 2
+#define MC_CMD_FPGA_OP_SET_INTERNAL_LINK_IN_FLUSH_WIDTH 1
+/* Link speed to be applied on FPGA internal port MAC. */
+#define MC_CMD_FPGA_OP_SET_INTERNAL_LINK_IN_SPEED_OFST 8
+#define MC_CMD_FPGA_OP_SET_INTERNAL_LINK_IN_SPEED_LEN 4
+
+/* MC_CMD_FPGA_OP_SET_INTERNAL_LINK_OUT msgresponse */
+#define MC_CMD_FPGA_OP_SET_INTERNAL_LINK_OUT_LEN 0
+
+/* MC_CMD_FPGA_OP_GET_INTERNAL_LINK_IN msgrequest: Read FPGA internal port
+ * configuration and status
+ */
+#define MC_CMD_FPGA_OP_GET_INTERNAL_LINK_IN_LEN 4
+/* Sub-command code. Must be OP_GET_INTERNAL_LINK */
+#define MC_CMD_FPGA_OP_GET_INTERNAL_LINK_IN_OP_OFST 0
+#define MC_CMD_FPGA_OP_GET_INTERNAL_LINK_IN_OP_LEN 4
+
+/* MC_CMD_FPGA_OP_GET_INTERNAL_LINK_OUT msgresponse: Response format for read
+ * FPGA internal port configuration and status
+ */
+#define MC_CMD_FPGA_OP_GET_INTERNAL_LINK_OUT_LEN 8
+/* Flags */
+#define MC_CMD_FPGA_OP_GET_INTERNAL_LINK_OUT_FLAGS_OFST 0
+#define MC_CMD_FPGA_OP_GET_INTERNAL_LINK_OUT_FLAGS_LEN 4
+#define MC_CMD_FPGA_OP_GET_INTERNAL_LINK_OUT_LINK_STATE_OFST 0
+#define MC_CMD_FPGA_OP_GET_INTERNAL_LINK_OUT_LINK_STATE_LBN 0
+#define MC_CMD_FPGA_OP_GET_INTERNAL_LINK_OUT_LINK_STATE_WIDTH 2
+/* Enum values, see field(s): */
+/* MC_CMD_FPGA_OP_SET_INTERNAL_LINK_IN/FLAGS */
+/* Link speed set on FPGA internal port MAC. */
+#define MC_CMD_FPGA_OP_GET_INTERNAL_LINK_OUT_SPEED_OFST 4
+#define MC_CMD_FPGA_OP_GET_INTERNAL_LINK_OUT_SPEED_LEN 4
+
+/* MC_CMD_FPGA_OP_GET_MAC_STATS_IN msgrequest: Get FPGA external port MAC
+ * statistics.
+ */
+#define MC_CMD_FPGA_OP_GET_MAC_STATS_IN_LEN 4
+/* Sub-command code. Must be OP_GET_MAC_STATS.
*/ +#define MC_CMD_FPGA_OP_GET_MAC_STATS_IN_OP_OFST 0 +#define MC_CMD_FPGA_OP_GET_MAC_STATS_IN_OP_LEN 4 + +/* MC_CMD_FPGA_OP_GET_MAC_STATS_OUT msgresponse */ +#define MC_CMD_FPGA_OP_GET_MAC_STATS_OUT_LENMIN 4 +#define MC_CMD_FPGA_OP_GET_MAC_STATS_OUT_LENMAX 252 +#define MC_CMD_FPGA_OP_GET_MAC_STATS_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_FPGA_OP_GET_MAC_STATS_OUT_LEN(num) (4+8*(num)) +#define MC_CMD_FPGA_OP_GET_MAC_STATS_OUT_STATISTICS_NUM(len) (((len)-4)/8) +#define MC_CMD_FPGA_OP_GET_MAC_STATS_OUT_NUM_STATS_OFST 0 +#define MC_CMD_FPGA_OP_GET_MAC_STATS_OUT_NUM_STATS_LEN 4 +#define MC_CMD_FPGA_OP_GET_MAC_STATS_OUT_STATISTICS_OFST 4 +#define MC_CMD_FPGA_OP_GET_MAC_STATS_OUT_STATISTICS_LEN 8 +#define MC_CMD_FPGA_OP_GET_MAC_STATS_OUT_STATISTICS_LO_OFST 4 +#define MC_CMD_FPGA_OP_GET_MAC_STATS_OUT_STATISTICS_LO_LEN 4 +#define MC_CMD_FPGA_OP_GET_MAC_STATS_OUT_STATISTICS_LO_LBN 32 +#define MC_CMD_FPGA_OP_GET_MAC_STATS_OUT_STATISTICS_LO_WIDTH 32 +#define MC_CMD_FPGA_OP_GET_MAC_STATS_OUT_STATISTICS_HI_OFST 8 +#define MC_CMD_FPGA_OP_GET_MAC_STATS_OUT_STATISTICS_HI_LEN 4 +#define MC_CMD_FPGA_OP_GET_MAC_STATS_OUT_STATISTICS_HI_LBN 64 +#define MC_CMD_FPGA_OP_GET_MAC_STATS_OUT_STATISTICS_HI_WIDTH 32 +#define MC_CMD_FPGA_OP_GET_MAC_STATS_OUT_STATISTICS_MINNUM 0 +#define MC_CMD_FPGA_OP_GET_MAC_STATS_OUT_STATISTICS_MAXNUM 31 +#define MC_CMD_FPGA_OP_GET_MAC_STATS_OUT_STATISTICS_MAXNUM_MCDI2 127 +#define MC_CMD_FPGA_MAC_TX_TOTAL_PACKETS 0x0 /* enum */ +#define MC_CMD_FPGA_MAC_TX_TOTAL_BYTES 0x1 /* enum */ +#define MC_CMD_FPGA_MAC_TX_TOTAL_GOOD_PACKETS 0x2 /* enum */ +#define MC_CMD_FPGA_MAC_TX_TOTAL_GOOD_BYTES 0x3 /* enum */ +#define MC_CMD_FPGA_MAC_TX_BAD_FCS 0x4 /* enum */ +#define MC_CMD_FPGA_MAC_TX_PAUSE 0x5 /* enum */ +#define MC_CMD_FPGA_MAC_TX_USER_PAUSE 0x6 /* enum */ +#define MC_CMD_FPGA_MAC_RX_TOTAL_PACKETS 0x7 /* enum */ +#define MC_CMD_FPGA_MAC_RX_TOTAL_BYTES 0x8 /* enum */ +#define MC_CMD_FPGA_MAC_RX_TOTAL_GOOD_PACKETS 0x9 /* enum */ +#define MC_CMD_FPGA_MAC_RX_TOTAL_GOOD_BYTES 0xa /* enum */ +#define MC_CMD_FPGA_MAC_RX_BAD_FCS 0xb /* enum */ +#define MC_CMD_FPGA_MAC_RX_PAUSE 0xc /* enum */ +#define MC_CMD_FPGA_MAC_RX_USER_PAUSE 0xd /* enum */ +#define MC_CMD_FPGA_MAC_RX_UNDERSIZE 0xe /* enum */ +#define MC_CMD_FPGA_MAC_RX_OVERSIZE 0xf /* enum */ +#define MC_CMD_FPGA_MAC_RX_FRAMING_ERR 0x10 /* enum */ +#define MC_CMD_FPGA_MAC_FEC_UNCORRECTED_ERRORS 0x11 /* enum */ +#define MC_CMD_FPGA_MAC_FEC_CORRECTED_ERRORS 0x12 /* enum */ + +/* MC_CMD_FPGA_OP_SET_INTERNAL_MAC_IN msgrequest: Configures the internal port + * MAC on the FPGA. + */ +#define MC_CMD_FPGA_OP_SET_INTERNAL_MAC_IN_LEN 20 +/* Sub-command code. Must be OP_SET_INTERNAL_MAC. */ +#define MC_CMD_FPGA_OP_SET_INTERNAL_MAC_IN_OP_OFST 0 +#define MC_CMD_FPGA_OP_SET_INTERNAL_MAC_IN_OP_LEN 4 +/* Select which parameters to configure. */ +#define MC_CMD_FPGA_OP_SET_INTERNAL_MAC_IN_CONTROL_OFST 4 +#define MC_CMD_FPGA_OP_SET_INTERNAL_MAC_IN_CONTROL_LEN 4 +#define MC_CMD_FPGA_OP_SET_INTERNAL_MAC_IN_CFG_MTU_OFST 4 +#define MC_CMD_FPGA_OP_SET_INTERNAL_MAC_IN_CFG_MTU_LBN 0 +#define MC_CMD_FPGA_OP_SET_INTERNAL_MAC_IN_CFG_MTU_WIDTH 1 +#define MC_CMD_FPGA_OP_SET_INTERNAL_MAC_IN_CFG_DRAIN_OFST 4 +#define MC_CMD_FPGA_OP_SET_INTERNAL_MAC_IN_CFG_DRAIN_LBN 1 +#define MC_CMD_FPGA_OP_SET_INTERNAL_MAC_IN_CFG_DRAIN_WIDTH 1 +#define MC_CMD_FPGA_OP_SET_INTERNAL_MAC_IN_CFG_FCNTL_OFST 4 +#define MC_CMD_FPGA_OP_SET_INTERNAL_MAC_IN_CFG_FCNTL_LBN 2 +#define MC_CMD_FPGA_OP_SET_INTERNAL_MAC_IN_CFG_FCNTL_WIDTH 1 +/* The MTU to be programmed into the MAC. 
*/ +#define MC_CMD_FPGA_OP_SET_INTERNAL_MAC_IN_MTU_OFST 8 +#define MC_CMD_FPGA_OP_SET_INTERNAL_MAC_IN_MTU_LEN 4 +/* Drain Tx FIFO */ +#define MC_CMD_FPGA_OP_SET_INTERNAL_MAC_IN_DRAIN_OFST 12 +#define MC_CMD_FPGA_OP_SET_INTERNAL_MAC_IN_DRAIN_LEN 4 +/* flow control configuration. See MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL. */ +#define MC_CMD_FPGA_OP_SET_INTERNAL_MAC_IN_FCNTL_OFST 16 +#define MC_CMD_FPGA_OP_SET_INTERNAL_MAC_IN_FCNTL_LEN 4 + +/* MC_CMD_FPGA_OP_SET_INTERNAL_MAC_OUT msgresponse */ +#define MC_CMD_FPGA_OP_SET_INTERNAL_MAC_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_EXTERNAL_MAE_GET_LINK_MODE + * This command is expected to be used on a U25 board with an MAE in the FPGA. + * It does not modify the operational state of the NIC. The modes are described + * in XN-200039-TC - U25 OVS packet formats. + */ +#define MC_CMD_EXTERNAL_MAE_GET_LINK_MODE 0x1c0 +#undef MC_CMD_0x1c0_PRIVILEGE_CTG + +#define MC_CMD_0x1c0_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_EXTERNAL_MAE_GET_LINK_MODE_IN msgrequest */ +#define MC_CMD_EXTERNAL_MAE_GET_LINK_MODE_IN_LEN 0 + +/* MC_CMD_EXTERNAL_MAE_GET_LINK_MODE_OUT msgresponse */ +#define MC_CMD_EXTERNAL_MAE_GET_LINK_MODE_OUT_LEN 4 +/* The current link mode */ +#define MC_CMD_EXTERNAL_MAE_GET_LINK_MODE_OUT_MODE_OFST 0 +#define MC_CMD_EXTERNAL_MAE_GET_LINK_MODE_OUT_MODE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_EXTERNAL_MAE_LINK_MODE */ + + +/***********************************/ +/* MC_CMD_EXTERNAL_MAE_SET_LINK_MODE + * This command is expected to be used on a U25 board with an MAE in the FPGA. + * The modes are described in XN-200039-TC - U25 OVS packet formats. This + * command will set the link between the FPGA and the X2 to the specified new + * mode. It will first enter bootstrap mode, make sure there are no packets in + * flight and then enter the requested mode. In order to make sure there are no + * packets in flight, it will flush the X2 TX path, the FPGA RX path from the + * X2, the FPGA TX path to the X2 and the X2 RX path. The driver is responsible + * for making sure there are no TX or RX descriptors posted on any TXQ or RXQ + * associated with the affected port before invoking this command. This command + * is run implicitly with MODE set to LEGACY when MC_CMD_DRV_ATTACH is + * executed. + */ +#define MC_CMD_EXTERNAL_MAE_SET_LINK_MODE 0x1c1 +#undef MC_CMD_0x1c1_PRIVILEGE_CTG + +#define MC_CMD_0x1c1_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_EXTERNAL_MAE_SET_LINK_MODE_IN msgrequest */ +#define MC_CMD_EXTERNAL_MAE_SET_LINK_MODE_IN_LEN 4 +/* The new link mode. */ +#define MC_CMD_EXTERNAL_MAE_SET_LINK_MODE_IN_MODE_OFST 0 +#define MC_CMD_EXTERNAL_MAE_SET_LINK_MODE_IN_MODE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_EXTERNAL_MAE_LINK_MODE */ + +/* MC_CMD_EXTERNAL_MAE_SET_LINK_MODE_OUT msgresponse */ +#define MC_CMD_EXTERNAL_MAE_SET_LINK_MODE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_BUFTBL_STATS + * Currently EF10 only. 
Read usage and limits for Buffer Table + */ +#define MC_CMD_GET_BUFTBL_STATS 0x6a +#undef MC_CMD_0x6a_PRIVILEGE_CTG + +#define MC_CMD_0x6a_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_BUFTBL_STATS_IN msgrequest */ +#define MC_CMD_GET_BUFTBL_STATS_IN_LEN 0 + +/* MC_CMD_GET_BUFTBL_STATS_OUT msgresponse */ +#define MC_CMD_GET_BUFTBL_STATS_OUT_LEN 40 +/* number of buffer table entries per set */ +#define MC_CMD_GET_BUFTBL_STATS_OUT_BUFTBL_ENTRIES_PER_SET_OFST 0 +#define MC_CMD_GET_BUFTBL_STATS_OUT_BUFTBL_ENTRIES_PER_SET_LEN 4 +/* number of buffer table entries per cluster */ +#define MC_CMD_GET_BUFTBL_STATS_OUT_BUFTBL_ENTRIES_PER_CLUSTER_OFST 4 +#define MC_CMD_GET_BUFTBL_STATS_OUT_BUFTBL_ENTRIES_PER_CLUSTER_LEN 4 +/* Maximum size buffer table can grow to, in clusters. On EF10, this can + * potentially vary depending on the size of the Descriptor Cache. + */ +#define MC_CMD_GET_BUFTBL_STATS_OUT_BUFTBL_MAX_CLUSTERS_OFST 8 +#define MC_CMD_GET_BUFTBL_STATS_OUT_BUFTBL_MAX_CLUSTERS_LEN 4 +/* High water mark for number of buffer table clusters which have been + * allocated. + */ +#define MC_CMD_GET_BUFTBL_STATS_OUT_BUFTBL_HIGH_WATER_CLUSTERS_OFST 12 +#define MC_CMD_GET_BUFTBL_STATS_OUT_BUFTBL_HIGH_WATER_CLUSTERS_LEN 4 +/* Number of free buffer table clusters on the free cluster list. */ +#define MC_CMD_GET_BUFTBL_STATS_OUT_BUFTBL_FREE_CLUSTERS_OFST 16 +#define MC_CMD_GET_BUFTBL_STATS_OUT_BUFTBL_FREE_CLUSTERS_LEN 4 +/* Number of free buffer table sets on the free set list. */ +#define MC_CMD_GET_BUFTBL_STATS_OUT_BUFTBL_FREE_SETS_OFST 20 +#define MC_CMD_GET_BUFTBL_STATS_OUT_BUFTBL_FREE_SETS_LEN 4 +/* Number of chunks of fully-used clusters allocated to the MC for EVQ, RXQ and + * TXQs. + */ +#define MC_CMD_GET_BUFTBL_STATS_OUT_BUFTBL_MC_FULL_CLUSTERS_OFST 24 +#define MC_CMD_GET_BUFTBL_STATS_OUT_BUFTBL_MC_FULL_CLUSTERS_LEN 4 +/* Number of chunks in partially-used clusters allocated to the MC for EVQ, RXQ + * and TXQs. + */ +#define MC_CMD_GET_BUFTBL_STATS_OUT_BUFTBL_MC_PART_CLUSTERS_OFST 28 +#define MC_CMD_GET_BUFTBL_STATS_OUT_BUFTBL_MC_PART_CLUSTERS_LEN 4 +/* Number of buffer table sets (chunks) allocated to the host via + * MC_CMD_ALLOC_BUFTBL_CHUNK. + */ +#define MC_CMD_GET_BUFTBL_STATS_OUT_BUFTBL_HOST_SETS_OFST 32 +#define MC_CMD_GET_BUFTBL_STATS_OUT_BUFTBL_HOST_SETS_LEN 4 +/* Maximum number of VIs per NIC. On EF10 this is the current value as used to + * size the Descriptor Cache in hardware. + */ +#define MC_CMD_GET_BUFTBL_STATS_OUT_VI_MAX_OFST 36 +#define MC_CMD_GET_BUFTBL_STATS_OUT_VI_MAX_LEN 4 + +/* CLIENT_HANDLE structuredef: A client is an abstract entity that can make + * requests of the device and that can own resources managed by the device. + * Examples of clients include PCIe functions and dynamic clients. A client + * handle is a 32b opaque value used to refer to a client. Further details can + * be found within XN-200418-TC. + */ +#define CLIENT_HANDLE_LEN 4 +#define CLIENT_HANDLE_OPAQUE_OFST 0 +#define CLIENT_HANDLE_OPAQUE_LEN 4 +/* enum: A client handle guaranteed never to refer to a real client. */ +#define CLIENT_HANDLE_NULL 0xffffffff +/* enum: Used to refer to the calling client. 
*/ +#define CLIENT_HANDLE_SELF 0xfffffffe +#define CLIENT_HANDLE_OPAQUE_LBN 0 +#define CLIENT_HANDLE_OPAQUE_WIDTH 32 + +/* CLOCK_INFO structuredef: Information about a single hardware clock */ +#define CLOCK_INFO_LEN 28 +/* Enumeration that uniquely identifies the clock */ +#define CLOCK_INFO_CLOCK_ID_OFST 0 +#define CLOCK_INFO_CLOCK_ID_LEN 2 +/* enum: The Riverhead CMC (card MC) */ +#define CLOCK_INFO_CLOCK_CMC 0x0 +/* enum: The Riverhead NMC (network MC) */ +#define CLOCK_INFO_CLOCK_NMC 0x1 +/* enum: The Riverhead SDNET slice main logic */ +#define CLOCK_INFO_CLOCK_SDNET 0x2 +/* enum: The Riverhead SDNET LUT */ +#define CLOCK_INFO_CLOCK_SDNET_LUT 0x3 +/* enum: The Riverhead SDNET control logic */ +#define CLOCK_INFO_CLOCK_SDNET_CTRL 0x4 +/* enum: The Riverhead Streaming SubSystem */ +#define CLOCK_INFO_CLOCK_SSS 0x5 +/* enum: The Riverhead network MAC and associated CSR registers */ +#define CLOCK_INFO_CLOCK_MAC 0x6 +#define CLOCK_INFO_CLOCK_ID_LBN 0 +#define CLOCK_INFO_CLOCK_ID_WIDTH 16 +/* Assorted flags */ +#define CLOCK_INFO_FLAGS_OFST 2 +#define CLOCK_INFO_FLAGS_LEN 2 +#define CLOCK_INFO_SETTABLE_OFST 2 +#define CLOCK_INFO_SETTABLE_LBN 0 +#define CLOCK_INFO_SETTABLE_WIDTH 1 +#define CLOCK_INFO_FLAGS_LBN 16 +#define CLOCK_INFO_FLAGS_WIDTH 16 +/* The frequency in HZ */ +#define CLOCK_INFO_FREQUENCY_OFST 4 +#define CLOCK_INFO_FREQUENCY_LEN 8 +#define CLOCK_INFO_FREQUENCY_LO_OFST 4 +#define CLOCK_INFO_FREQUENCY_LO_LEN 4 +#define CLOCK_INFO_FREQUENCY_LO_LBN 32 +#define CLOCK_INFO_FREQUENCY_LO_WIDTH 32 +#define CLOCK_INFO_FREQUENCY_HI_OFST 8 +#define CLOCK_INFO_FREQUENCY_HI_LEN 4 +#define CLOCK_INFO_FREQUENCY_HI_LBN 64 +#define CLOCK_INFO_FREQUENCY_HI_WIDTH 32 +#define CLOCK_INFO_FREQUENCY_LBN 32 +#define CLOCK_INFO_FREQUENCY_WIDTH 64 +/* Human-readable ASCII name for clock, with NUL termination */ +#define CLOCK_INFO_NAME_OFST 12 +#define CLOCK_INFO_NAME_LEN 1 +#define CLOCK_INFO_NAME_NUM 16 +#define CLOCK_INFO_NAME_LBN 96 +#define CLOCK_INFO_NAME_WIDTH 8 + +/* SCHED_CREDIT_CHECK_RESULT structuredef */ +#define SCHED_CREDIT_CHECK_RESULT_LEN 16 +/* The instance of the scheduler. Refer to XN-200389-AW (snic/hnic) and + * XN-200425-TC (cdx) for the location of these schedulers in the hardware. 
+ */ +#define SCHED_CREDIT_CHECK_RESULT_SCHED_INSTANCE_OFST 0 +#define SCHED_CREDIT_CHECK_RESULT_SCHED_INSTANCE_LEN 1 +#define SCHED_CREDIT_CHECK_RESULT_HUB_HOST_A 0x0 /* enum */ +#define SCHED_CREDIT_CHECK_RESULT_HUB_NET_A 0x1 /* enum */ +#define SCHED_CREDIT_CHECK_RESULT_HUB_B 0x2 /* enum */ +#define SCHED_CREDIT_CHECK_RESULT_HUB_HOST_C 0x3 /* enum */ +#define SCHED_CREDIT_CHECK_RESULT_HUB_NET_TX 0x4 /* enum */ +#define SCHED_CREDIT_CHECK_RESULT_HUB_HOST_D 0x5 /* enum */ +#define SCHED_CREDIT_CHECK_RESULT_HUB_REPLAY 0x6 /* enum */ +#define SCHED_CREDIT_CHECK_RESULT_DMAC_H2C 0x7 /* enum */ +#define SCHED_CREDIT_CHECK_RESULT_HUB_NET_B 0x8 /* enum */ +#define SCHED_CREDIT_CHECK_RESULT_HUB_NET_REPLAY 0x9 /* enum */ +#define SCHED_CREDIT_CHECK_RESULT_ADAPTER_C2H_C 0xa /* enum */ +#define SCHED_CREDIT_CHECK_RESULT_A2_H2C_C 0xb /* enum */ +#define SCHED_CREDIT_CHECK_RESULT_A3_SOFT_ADAPTOR_C 0xc /* enum */ +#define SCHED_CREDIT_CHECK_RESULT_A4_DPU_WRITE_C 0xd /* enum */ +#define SCHED_CREDIT_CHECK_RESULT_JRC_RRU 0xe /* enum */ +#define SCHED_CREDIT_CHECK_RESULT_CDM_SINK 0xf /* enum */ +#define SCHED_CREDIT_CHECK_RESULT_PCIE_SINK 0x10 /* enum */ +#define SCHED_CREDIT_CHECK_RESULT_UPORT_SINK 0x11 /* enum */ +#define SCHED_CREDIT_CHECK_RESULT_PSX_SINK 0x12 /* enum */ +#define SCHED_CREDIT_CHECK_RESULT_A5_DPU_READ_C 0x13 /* enum */ +#define SCHED_CREDIT_CHECK_RESULT_SCHED_INSTANCE_LBN 0 +#define SCHED_CREDIT_CHECK_RESULT_SCHED_INSTANCE_WIDTH 8 +/* The type of node that this result refers to. */ +#define SCHED_CREDIT_CHECK_RESULT_NODE_TYPE_OFST 1 +#define SCHED_CREDIT_CHECK_RESULT_NODE_TYPE_LEN 1 +/* enum: Destination node */ +#define SCHED_CREDIT_CHECK_RESULT_DEST 0x0 +/* enum: Source node */ +#define SCHED_CREDIT_CHECK_RESULT_SOURCE 0x1 +/* enum: Destination node credit type 1 (new to the Keystone schedulers, see + * SF-120268-TC) + */ +#define SCHED_CREDIT_CHECK_RESULT_DEST_CREDIT1 0x2 +#define SCHED_CREDIT_CHECK_RESULT_NODE_TYPE_LBN 8 +#define SCHED_CREDIT_CHECK_RESULT_NODE_TYPE_WIDTH 8 +/* Level of node in scheduler hierarchy (level 0 is the bottom of the + * hierarchy, increasing towards the root node). + */ +#define SCHED_CREDIT_CHECK_RESULT_NODE_LEVEL_OFST 2 +#define SCHED_CREDIT_CHECK_RESULT_NODE_LEVEL_LEN 2 +#define SCHED_CREDIT_CHECK_RESULT_NODE_LEVEL_LBN 16 +#define SCHED_CREDIT_CHECK_RESULT_NODE_LEVEL_WIDTH 16 +/* Node index */ +#define SCHED_CREDIT_CHECK_RESULT_NODE_INDEX_OFST 4 +#define SCHED_CREDIT_CHECK_RESULT_NODE_INDEX_LEN 4 +#define SCHED_CREDIT_CHECK_RESULT_NODE_INDEX_LBN 32 +#define SCHED_CREDIT_CHECK_RESULT_NODE_INDEX_WIDTH 32 +/* The number of credits the node is expected to have. */ +#define SCHED_CREDIT_CHECK_RESULT_EXPECTED_CREDITS_OFST 8 +#define SCHED_CREDIT_CHECK_RESULT_EXPECTED_CREDITS_LEN 4 +#define SCHED_CREDIT_CHECK_RESULT_EXPECTED_CREDITS_LBN 64 +#define SCHED_CREDIT_CHECK_RESULT_EXPECTED_CREDITS_WIDTH 32 +/* The number of credits the node actually had. 
+ */
+#define SCHED_CREDIT_CHECK_RESULT_ACTUAL_CREDITS_OFST 12
+#define SCHED_CREDIT_CHECK_RESULT_ACTUAL_CREDITS_LEN 4
+#define SCHED_CREDIT_CHECK_RESULT_ACTUAL_CREDITS_LBN 96
+#define SCHED_CREDIT_CHECK_RESULT_ACTUAL_CREDITS_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_GET_CLOCKS_INFO
+ * Get information about the device clocks
+ */
+#define MC_CMD_GET_CLOCKS_INFO 0x166
+#undef MC_CMD_0x166_PRIVILEGE_CTG
+
+#define MC_CMD_0x166_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_CLOCKS_INFO_IN msgrequest */
+#define MC_CMD_GET_CLOCKS_INFO_IN_LEN 0
+
+/* MC_CMD_GET_CLOCKS_INFO_OUT msgresponse */
+#define MC_CMD_GET_CLOCKS_INFO_OUT_LENMIN 0
+#define MC_CMD_GET_CLOCKS_INFO_OUT_LENMAX 252
+#define MC_CMD_GET_CLOCKS_INFO_OUT_LENMAX_MCDI2 1008
+#define MC_CMD_GET_CLOCKS_INFO_OUT_LEN(num) (0+28*(num))
+#define MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_NUM(len) (((len)-0)/28)
+/* An array of CLOCK_INFO structures. */
+#define MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_OFST 0
+#define MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_LEN 28
+#define MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_MINNUM 0
+#define MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_MAXNUM 9
+#define MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_MAXNUM_MCDI2 36
+
+
+/***********************************/
+/* MC_CMD_VNIC_ENCAP_RULE_ADD
+ * Add a rule for detecting encapsulations in the VNIC stage. Currently this
+ * only affects checksum validation in VNIC RX - on TX the send descriptor
+ * explicitly specifies encapsulation. These rules are per-VNIC, i.e. only
+ * apply to the current driver. If a rule matches, then the packet is
+ * considered to have the corresponding encapsulation type, and the inner
+ * packet is parsed. It is up to the driver to ensure that overlapping rules
+ * are not inserted. (If a packet would match multiple rules, a random one of
+ * them will be used.) A rule with the exact same match criteria may not be
+ * inserted twice (EALREADY). Only a limited number of MATCH_FLAGS values are
+ * supported; use MC_CMD_GET_PARSER_DISP_INFO with OP
+ * OP_GET_SUPPORTED_VNIC_ENCAP_RULE_MATCHES to get a list of supported
+ * combinations. Each driver may only have a limited set of active rules -
+ * returns ENOSPC if the caller's table is full.
+ */
+#define MC_CMD_VNIC_ENCAP_RULE_ADD 0x16d
+#undef MC_CMD_0x16d_PRIVILEGE_CTG
+
+#define MC_CMD_0x16d_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VNIC_ENCAP_RULE_ADD_IN msgrequest */
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_LEN 36
+/* Set to MAE_MPORT_SELECTOR_ASSIGNED. In the future this may be relaxed. */
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MPORT_SELECTOR_OFST 0
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MPORT_SELECTOR_LEN 4
+/* Any non-zero bits other than the ones named below or an unsupported
+ * combination will cause the NIC to return EOPNOTSUPP. In the future more
+ * flags may be added.
+ */
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_FLAGS_OFST 4
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_FLAGS_LEN 4
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_ETHER_TYPE_OFST 4
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_ETHER_TYPE_LBN 0
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_ETHER_TYPE_WIDTH 1
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_OUTER_VLAN_OFST 4
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_OUTER_VLAN_LBN 1
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_OUTER_VLAN_WIDTH 1
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_IP_OFST 4
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_IP_LBN 2
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_IP_WIDTH 1
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_IP_PROTO_OFST 4
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_IP_PROTO_LBN 3
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_IP_PROTO_WIDTH 1
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_PORT_OFST 4
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_PORT_LBN 4
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_PORT_WIDTH 1
+/* Only if MATCH_ETHER_TYPE is set. Ethertype value as bytes in network order.
+ * Currently only IPv4 (0x0800) and IPv6 (0x86DD) ethertypes may be used.
+ */
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ETHER_TYPE_OFST 8
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ETHER_TYPE_LEN 2
+/* Only if MATCH_OUTER_VLAN is set. VID value as bytes in network order.
+ * (Deprecated)
+ */
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_LBN 80
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_WIDTH 12
+/* Only if MATCH_OUTER_VLAN is set. Aligned wrapper for OUTER_VLAN_VID. */
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_WORD_OFST 10
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_WORD_LEN 2
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_VID_OFST 10
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_VID_LBN 0
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_VID_WIDTH 12
+/* Only if MATCH_DST_IP is set. IP address as bytes in network order. In the
+ * case of IPv4, the IP should be in the first 4 bytes and all other bytes
+ * should be zero.
+ */
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_IP_OFST 12
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_IP_LEN 16
+/* Only if MATCH_IP_PROTO is set. Currently only UDP proto (17) may be used. */
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_IP_PROTO_OFST 28
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_IP_PROTO_LEN 1
+/* Actions that should be applied to packets that match the rule. */
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ACTION_FLAGS_OFST 29
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ACTION_FLAGS_LEN 1
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STRIP_OUTER_VLAN_OFST 29
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STRIP_OUTER_VLAN_LBN 0
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STRIP_OUTER_VLAN_WIDTH 1
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_RSS_ON_OUTER_OFST 29
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_RSS_ON_OUTER_LBN 1
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_RSS_ON_OUTER_WIDTH 1
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STEER_ON_OUTER_OFST 29
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STEER_ON_OUTER_LBN 2
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STEER_ON_OUTER_WIDTH 1
+/* Only if MATCH_DST_PORT is set. Port number as bytes in network order. */
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_PORT_OFST 30
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_PORT_LEN 2
+/* Resulting encapsulation type, as per the MAE_MCDI_ENCAP_TYPE enumeration.
+ */
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ENCAP_TYPE_OFST 32
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ENCAP_TYPE_LEN 4
+
+/* MC_CMD_VNIC_ENCAP_RULE_ADD_OUT msgresponse */
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_OUT_LEN 4
+/* Handle to the inserted rule. Used for removing the rule. */
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_OUT_HANDLE_OFST 0
+#define MC_CMD_VNIC_ENCAP_RULE_ADD_OUT_HANDLE_LEN 4
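+
+/* Example (an illustrative editorial sketch, not part of the generated MCDI
+ * definitions): inserting a VXLAN-style rule matching UDP destination port
+ * 4789, using the request layout above. Assumes the driver's usual MCDI
+ * helpers; 'encap_type' stands in for the appropriate MAE_MCDI_ENCAP_TYPE
+ * value, which is defined elsewhere in this file.
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_VNIC_ENCAP_RULE_ADD_IN_LEN);
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_VNIC_ENCAP_RULE_ADD_OUT_LEN);
+ *	u8 *req = (u8 *)inbuf;
+ *	u32 handle;
+ *
+ *	MCDI_SET_DWORD(inbuf, VNIC_ENCAP_RULE_ADD_IN_MPORT_SELECTOR,
+ *		       MAE_MPORT_SELECTOR_ASSIGNED);
+ *	MCDI_POPULATE_DWORD_2(inbuf, VNIC_ENCAP_RULE_ADD_IN_MATCH_FLAGS,
+ *			      VNIC_ENCAP_RULE_ADD_IN_MATCH_IP_PROTO, 1,
+ *			      VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_PORT, 1);
+ *	// Single-byte and network-order fields are set via raw offsets.
+ *	req[MC_CMD_VNIC_ENCAP_RULE_ADD_IN_IP_PROTO_OFST] = IPPROTO_UDP;
+ *	put_unaligned(htons(4789), (__be16 *)(req +
+ *			MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_PORT_OFST));
+ *	MCDI_SET_DWORD(inbuf, VNIC_ENCAP_RULE_ADD_IN_ENCAP_TYPE, encap_type);
+ *	efx_mcdi_rpc(efx, MC_CMD_VNIC_ENCAP_RULE_ADD, inbuf, sizeof(inbuf),
+ *		     outbuf, sizeof(outbuf), NULL);
+ *	handle = MCDI_DWORD(outbuf, VNIC_ENCAP_RULE_ADD_OUT_HANDLE);
+ */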
+
+
+/***********************************/
+/* MC_CMD_VNIC_ENCAP_RULE_REMOVE
+ * Remove a VNIC encapsulation rule. Packets which would have previously
+ * matched the rule will then be considered as unencapsulated. Returns EALREADY
+ * if the input HANDLE doesn't correspond to an existing rule.
+ */
+#define MC_CMD_VNIC_ENCAP_RULE_REMOVE 0x16e
+#undef MC_CMD_0x16e_PRIVILEGE_CTG
+
+#define MC_CMD_0x16e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VNIC_ENCAP_RULE_REMOVE_IN msgrequest */
+#define MC_CMD_VNIC_ENCAP_RULE_REMOVE_IN_LEN 4
+/* Handle which was returned by MC_CMD_VNIC_ENCAP_RULE_ADD. */
+#define MC_CMD_VNIC_ENCAP_RULE_REMOVE_IN_HANDLE_OFST 0
+#define MC_CMD_VNIC_ENCAP_RULE_REMOVE_IN_HANDLE_LEN 4
+
+/* MC_CMD_VNIC_ENCAP_RULE_REMOVE_OUT msgresponse */
+#define MC_CMD_VNIC_ENCAP_RULE_REMOVE_OUT_LEN 0
+
+/* UUID structuredef: An RFC4122 standard UUID. The values here are stored in
+ * the endianness specified by the RFC; users should ignore the broken-out
+ * fields and instead do straight memory copies to ensure correct ordering.
+ */
+#define UUID_LEN 16
+#define UUID_TIME_LOW_OFST 0
+#define UUID_TIME_LOW_LEN 4
+#define UUID_TIME_LOW_LBN 0
+#define UUID_TIME_LOW_WIDTH 32
+#define UUID_TIME_MID_OFST 4
+#define UUID_TIME_MID_LEN 2
+#define UUID_TIME_MID_LBN 32
+#define UUID_TIME_MID_WIDTH 16
+#define UUID_TIME_HI_LBN 52
+#define UUID_TIME_HI_WIDTH 12
+#define UUID_VERSION_LBN 48
+#define UUID_VERSION_WIDTH 4
+#define UUID_RESERVED_LBN 64
+#define UUID_RESERVED_WIDTH 2
+#define UUID_CLK_SEQ_LBN 66
+#define UUID_CLK_SEQ_WIDTH 14
+#define UUID_NODE_OFST 10
+#define UUID_NODE_LEN 6
+#define UUID_NODE_LBN 80
+#define UUID_NODE_WIDTH 48
+
+
+/***********************************/
+/* MC_CMD_PLUGIN_ALLOC
+ * Create a handle to a datapath plugin's extension. This involves finding a
+ * currently-loaded plugin offering the given functionality (as identified by
+ * the UUID) and allocating a handle to track the usage of it. Plugin
+ * functionality is identified by 'extension' rather than any other identifier
+ * so that a single plugin bitfile may offer more than one piece of independent
+ * functionality. If two bitfiles are loaded which both offer the same
+ * extension, then the metadata is interrogated further to determine which is
+ * the newest and that is the one opened. See SF-123625-SW for architectural
+ * detail on datapath plugins.
+ */ +#define MC_CMD_PLUGIN_ALLOC 0x1ad +#undef MC_CMD_0x1ad_PRIVILEGE_CTG + +#define MC_CMD_0x1ad_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_PLUGIN_ALLOC_IN msgrequest */ +#define MC_CMD_PLUGIN_ALLOC_IN_LEN 24 +/* The functionality requested of the plugin, as a UUID structure */ +#define MC_CMD_PLUGIN_ALLOC_IN_UUID_OFST 0 +#define MC_CMD_PLUGIN_ALLOC_IN_UUID_LEN 16 +/* Additional options for opening the handle */ +#define MC_CMD_PLUGIN_ALLOC_IN_FLAGS_OFST 16 +#define MC_CMD_PLUGIN_ALLOC_IN_FLAGS_LEN 4 +#define MC_CMD_PLUGIN_ALLOC_IN_FLAG_INFO_ONLY_OFST 16 +#define MC_CMD_PLUGIN_ALLOC_IN_FLAG_INFO_ONLY_LBN 0 +#define MC_CMD_PLUGIN_ALLOC_IN_FLAG_INFO_ONLY_WIDTH 1 +#define MC_CMD_PLUGIN_ALLOC_IN_FLAG_ALLOW_DISABLED_OFST 16 +#define MC_CMD_PLUGIN_ALLOC_IN_FLAG_ALLOW_DISABLED_LBN 1 +#define MC_CMD_PLUGIN_ALLOC_IN_FLAG_ALLOW_DISABLED_WIDTH 1 +/* Load the extension only if it is in the specified administrative group. + * Specify ANY to load the extension wherever it is found (if there are + * multiple choices then the extension with the highest MINOR_VER/PATCH_VER + * will be loaded). See MC_CMD_PLUGIN_GET_META_GLOBAL for a description of + * administrative groups. + */ +#define MC_CMD_PLUGIN_ALLOC_IN_ADMIN_GROUP_OFST 20 +#define MC_CMD_PLUGIN_ALLOC_IN_ADMIN_GROUP_LEN 2 +/* enum: Load the extension from any ADMIN_GROUP. */ +#define MC_CMD_PLUGIN_ALLOC_IN_ANY 0xffff +/* Reserved */ +#define MC_CMD_PLUGIN_ALLOC_IN_RESERVED_OFST 22 +#define MC_CMD_PLUGIN_ALLOC_IN_RESERVED_LEN 2 + +/* MC_CMD_PLUGIN_ALLOC_OUT msgresponse */ +#define MC_CMD_PLUGIN_ALLOC_OUT_LEN 4 +/* Unique identifier of this usage */ +#define MC_CMD_PLUGIN_ALLOC_OUT_HANDLE_OFST 0 +#define MC_CMD_PLUGIN_ALLOC_OUT_HANDLE_LEN 4 + + +/***********************************/ +/* MC_CMD_PLUGIN_FREE + * Delete a handle to a plugin's extension. + */ +#define MC_CMD_PLUGIN_FREE 0x1ae +#undef MC_CMD_0x1ae_PRIVILEGE_CTG + +#define MC_CMD_0x1ae_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_PLUGIN_FREE_IN msgrequest */ +#define MC_CMD_PLUGIN_FREE_IN_LEN 4 +/* Handle returned by MC_CMD_PLUGIN_ALLOC_OUT */ +#define MC_CMD_PLUGIN_FREE_IN_HANDLE_OFST 0 +#define MC_CMD_PLUGIN_FREE_IN_HANDLE_LEN 4 + +/* MC_CMD_PLUGIN_FREE_OUT msgresponse */ +#define MC_CMD_PLUGIN_FREE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_PLUGIN_GET_META_GLOBAL + * Returns the global metadata applying to the whole plugin extension. See the + * other metadata calls for subtypes of data. + */ +#define MC_CMD_PLUGIN_GET_META_GLOBAL 0x1af +#undef MC_CMD_0x1af_PRIVILEGE_CTG + +#define MC_CMD_0x1af_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_PLUGIN_GET_META_GLOBAL_IN msgrequest */ +#define MC_CMD_PLUGIN_GET_META_GLOBAL_IN_LEN 4 +/* Handle returned by MC_CMD_PLUGIN_ALLOC_OUT */ +#define MC_CMD_PLUGIN_GET_META_GLOBAL_IN_HANDLE_OFST 0 +#define MC_CMD_PLUGIN_GET_META_GLOBAL_IN_HANDLE_LEN 4 + +/* MC_CMD_PLUGIN_GET_META_GLOBAL_OUT msgresponse */ +#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_LEN 36 +/* Unique identifier of this plugin extension. This is identical to the value + * which was requested when the handle was allocated. 
+ */
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_UUID_OFST 0
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_UUID_LEN 16
+/* semver sub-version of this plugin extension */
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MINOR_VER_OFST 16
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MINOR_VER_LEN 2
+/* semver micro-version of this plugin extension */
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_PATCH_VER_OFST 18
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_PATCH_VER_LEN 2
+/* Number of different messages which can be sent to this extension */
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_NUM_MSGS_OFST 20
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_NUM_MSGS_LEN 4
+/* Byte offset within the VI window of the plugin's mapped CSR window. */
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_OFFSET_OFST 24
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_OFFSET_LEN 2
+/* Number of bytes mapped through to the plugin's CSRs. 0 if that feature was
+ * not requested by the plugin (in which case MAPPED_CSR_OFFSET and
+ * MAPPED_CSR_FLAGS are ignored).
+ */
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_SIZE_OFST 26
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_SIZE_LEN 2
+/* Flags indicating how to perform the CSR window mapping. */
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAGS_OFST 28
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAGS_LEN 4
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAG_READ_OFST 28
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAG_READ_LBN 0
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAG_READ_WIDTH 1
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAG_WRITE_OFST 28
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAG_WRITE_LBN 1
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAG_WRITE_WIDTH 1
+/* Identifier of the set of extensions which all change state together.
+ * Extensions having the same ADMIN_GROUP will always load and unload at the
+ * same time. ADMIN_GROUP values themselves are arbitrary (but they contain a
+ * generation number as an implementation detail to ensure that they're not
+ * reused rapidly).
+ */
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_ADMIN_GROUP_OFST 32
+#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_ADMIN_GROUP_LEN 1
+/* Bitshift in MC_CMD_DEVEL_CLIENT_PRIVILEGE_MODIFY's MASK parameters
+ * corresponding to this extension, i.e. set the bit 1<<(this field's value) in
+ * the MASK to refer to this extension.
+ */
+/* The (0-indexed) page of results to request. A request with PAGE>0 will
+ * return data from the most recent snapshot. The GENERATION field in the
+ * response allows callers to verify that all responses correspond to the same
+ * snapshot.
+ */
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_IN_PAGE_OFST 4
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_IN_PAGE_LEN 4
+
+/* MC_CMD_CHECK_SCHEDULER_CREDITS_OUT msgresponse */
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_LENMIN 16
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_LENMAX 240
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_LENMAX_MCDI2 1008
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_LEN(num) (16+16*(num))
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_RESULTS_NUM(len) (((len)-16)/16)
+/* The total number of results (across all pages). */
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_TOTAL_RESULTS_OFST 0
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_TOTAL_RESULTS_LEN 4
+/* The number of pages that the response is split across. */
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_NUM_PAGES_OFST 4
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_NUM_PAGES_LEN 4
+/* The number of results in this response. */
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_RESULTS_THIS_PAGE_OFST 8
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_RESULTS_THIS_PAGE_LEN 4
+/* Result generation count. Incremented any time a request is made with PAGE=0.
+ */
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_GENERATION_OFST 12
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_GENERATION_LEN 4
+/* The results, as an array of SCHED_CREDIT_CHECK_RESULT structures. */
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_RESULTS_OFST 16
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_RESULTS_LEN 16
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_RESULTS_MINNUM 0
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_RESULTS_MAXNUM 14
+#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_RESULTS_MAXNUM_MCDI2 62
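+
+/* Example (an illustrative editorial sketch, not part of the generated MCDI
+ * definitions): retrieving every page of scheduler credit-check results and
+ * confirming they come from one snapshot via GENERATION. Assumes the driver's
+ * usual MCDI helpers and that MC_CMD_CHECK_SCHEDULER_CREDITS and its request
+ * macros are defined elsewhere in this file; error handling elided.
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_CHECK_SCHEDULER_CREDITS_IN_LEN);
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_LENMAX);
+ *	u32 page = 0, npages, gen0 = 0, gen;
+ *
+ *	do {
+ *		MCDI_SET_DWORD(inbuf, CHECK_SCHEDULER_CREDITS_IN_PAGE, page);
+ *		efx_mcdi_rpc(efx, MC_CMD_CHECK_SCHEDULER_CREDITS, inbuf,
+ *			     sizeof(inbuf), outbuf, sizeof(outbuf), NULL);
+ *		npages = MCDI_DWORD(outbuf, CHECK_SCHEDULER_CREDITS_OUT_NUM_PAGES);
+ *		gen = MCDI_DWORD(outbuf, CHECK_SCHEDULER_CREDITS_OUT_GENERATION);
+ *		if (page == 0)
+ *			gen0 = gen;
+ *		else if (gen != gen0)
+ *			break;	// snapshot changed; restart from page 0
+ *		// parse RESULTS_THIS_PAGE SCHED_CREDIT_CHECK_RESULT entries
+ *	} while (++page < npages);
+ */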
*/ +#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_RESULTS_THIS_PAGE_OFST 8 +#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_RESULTS_THIS_PAGE_LEN 4 +/* Result generation count. Incremented any time a request is made with PAGE=0. + */ +#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_GENERATION_OFST 12 +#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_GENERATION_LEN 4 +/* The results, as an array of SCHED_CREDIT_CHECK_RESULT structures. */ +#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_RESULTS_OFST 16 +#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_RESULTS_LEN 16 +#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_RESULTS_MINNUM 0 +#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_RESULTS_MAXNUM 14 +#define MC_CMD_CHECK_SCHEDULER_CREDITS_OUT_RESULTS_MAXNUM_MCDI2 62 + + +/***********************************/ +/* MC_CMD_TXQ_STATS + * Query per-TXQ statistics. + */ +#define MC_CMD_TXQ_STATS 0x1d5 +#undef MC_CMD_0x1d5_PRIVILEGE_CTG + +#define MC_CMD_0x1d5_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_TXQ_STATS_IN msgrequest */ +#define MC_CMD_TXQ_STATS_IN_LEN 8 +/* Instance of TXQ to retrieve statistics for */ +#define MC_CMD_TXQ_STATS_IN_INSTANCE_OFST 0 +#define MC_CMD_TXQ_STATS_IN_INSTANCE_LEN 4 +/* Flags for the request */ +#define MC_CMD_TXQ_STATS_IN_FLAGS_OFST 4 +#define MC_CMD_TXQ_STATS_IN_FLAGS_LEN 4 +#define MC_CMD_TXQ_STATS_IN_CLEAR_OFST 4 +#define MC_CMD_TXQ_STATS_IN_CLEAR_LBN 0 +#define MC_CMD_TXQ_STATS_IN_CLEAR_WIDTH 1 + +/* MC_CMD_TXQ_STATS_OUT msgresponse */ +#define MC_CMD_TXQ_STATS_OUT_LENMIN 0 +#define MC_CMD_TXQ_STATS_OUT_LENMAX 248 +#define MC_CMD_TXQ_STATS_OUT_LENMAX_MCDI2 1016 +#define MC_CMD_TXQ_STATS_OUT_LEN(num) (0+8*(num)) +#define MC_CMD_TXQ_STATS_OUT_STATISTICS_NUM(len) (((len)-0)/8) +#define MC_CMD_TXQ_STATS_OUT_STATISTICS_OFST 0 +#define MC_CMD_TXQ_STATS_OUT_STATISTICS_LEN 8 +#define MC_CMD_TXQ_STATS_OUT_STATISTICS_LO_OFST 0 +#define MC_CMD_TXQ_STATS_OUT_STATISTICS_LO_LEN 4 +#define MC_CMD_TXQ_STATS_OUT_STATISTICS_LO_LBN 0 +#define MC_CMD_TXQ_STATS_OUT_STATISTICS_LO_WIDTH 32 +#define MC_CMD_TXQ_STATS_OUT_STATISTICS_HI_OFST 4 +#define MC_CMD_TXQ_STATS_OUT_STATISTICS_HI_LEN 4 +#define MC_CMD_TXQ_STATS_OUT_STATISTICS_HI_LBN 32 +#define MC_CMD_TXQ_STATS_OUT_STATISTICS_HI_WIDTH 32 +#define MC_CMD_TXQ_STATS_OUT_STATISTICS_MINNUM 0 +#define MC_CMD_TXQ_STATS_OUT_STATISTICS_MAXNUM 31 +#define MC_CMD_TXQ_STATS_OUT_STATISTICS_MAXNUM_MCDI2 127 +#define MC_CMD_TXQ_STATS_CTPIO_MAX_FILL 0x0 /* enum */ + +/* FUNCTION_PERSONALITY structuredef: The meanings of the personalities are + * defined in SF-120734-TC with more information in SF-122717-TC. + */ +#define FUNCTION_PERSONALITY_LEN 4 +#define FUNCTION_PERSONALITY_ID_OFST 0 +#define FUNCTION_PERSONALITY_ID_LEN 4 +/* enum: Function has no assigned personality */ +#define FUNCTION_PERSONALITY_NULL 0x0 +/* enum: Function has an EF100-style function control window and VI windows + * with both EF100 and vDPA doorbells. + */ +#define FUNCTION_PERSONALITY_EF100 0x1 +/* enum: Function has virtio net device configuration registers and doorbells + * for virtio queue pairs. + */ +#define FUNCTION_PERSONALITY_VIRTIO_NET 0x2 +/* enum: Function has virtio block device configuration registers and a + * doorbell for a single virtqueue. 
+ */ +#define FUNCTION_PERSONALITY_VIRTIO_BLK 0x3 +/* enum: Function is a Xilinx acceleration device - management function */ +#define FUNCTION_PERSONALITY_ACCEL_MGMT 0x4 +/* enum: Function is a Xilinx acceleration device - user function */ +#define FUNCTION_PERSONALITY_ACCEL_USR 0x5 +#define FUNCTION_PERSONALITY_ID_LBN 0 +#define FUNCTION_PERSONALITY_ID_WIDTH 32 + + +/***********************************/ +/* MC_CMD_VIRTIO_GET_FEATURES + * Get a list of the virtio features supported by the device. + */ +#define MC_CMD_VIRTIO_GET_FEATURES 0x168 +#undef MC_CMD_0x168_PRIVILEGE_CTG + +#define MC_CMD_0x168_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_VIRTIO_GET_FEATURES_IN msgrequest */ +#define MC_CMD_VIRTIO_GET_FEATURES_IN_LEN 4 +/* Type of device to get features for. Matches the device id as defined by the + * virtio spec. + */ +#define MC_CMD_VIRTIO_GET_FEATURES_IN_DEVICE_ID_OFST 0 +#define MC_CMD_VIRTIO_GET_FEATURES_IN_DEVICE_ID_LEN 4 +/* enum: Reserved. Do not use. */ +#define MC_CMD_VIRTIO_GET_FEATURES_IN_RESERVED 0x0 +/* enum: Net device. */ +#define MC_CMD_VIRTIO_GET_FEATURES_IN_NET 0x1 +/* enum: Block device. */ +#define MC_CMD_VIRTIO_GET_FEATURES_IN_BLOCK 0x2 + +/* MC_CMD_VIRTIO_GET_FEATURES_OUT msgresponse */ +#define MC_CMD_VIRTIO_GET_FEATURES_OUT_LEN 8 +/* Features supported by the device. The result is a bitfield in the format of + * the feature bits of the specified device type as defined in the virtIO 1.1 + * specification ( https://docs.oasis- + * open.org/virtio/virtio/v1.1/csprd01/virtio-v1.1-csprd01.pdf ) + */ +#define MC_CMD_VIRTIO_GET_FEATURES_OUT_FEATURES_OFST 0 +#define MC_CMD_VIRTIO_GET_FEATURES_OUT_FEATURES_LEN 8 +#define MC_CMD_VIRTIO_GET_FEATURES_OUT_FEATURES_LO_OFST 0 +#define MC_CMD_VIRTIO_GET_FEATURES_OUT_FEATURES_LO_LEN 4 +#define MC_CMD_VIRTIO_GET_FEATURES_OUT_FEATURES_LO_LBN 0 +#define MC_CMD_VIRTIO_GET_FEATURES_OUT_FEATURES_LO_WIDTH 32 +#define MC_CMD_VIRTIO_GET_FEATURES_OUT_FEATURES_HI_OFST 4 +#define MC_CMD_VIRTIO_GET_FEATURES_OUT_FEATURES_HI_LEN 4 +#define MC_CMD_VIRTIO_GET_FEATURES_OUT_FEATURES_HI_LBN 32 +#define MC_CMD_VIRTIO_GET_FEATURES_OUT_FEATURES_HI_WIDTH 32 + + +/***********************************/ +/* MC_CMD_VIRTIO_TEST_FEATURES + * Query whether a given set of features is supported. Fails with ENOSUP if the + * driver requests a feature the device doesn't support. Fails with EINVAL if + * the driver fails to request a feature which the device requires. + */ +#define MC_CMD_VIRTIO_TEST_FEATURES 0x169 +#undef MC_CMD_0x169_PRIVILEGE_CTG + +#define MC_CMD_0x169_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_VIRTIO_TEST_FEATURES_IN msgrequest */ +#define MC_CMD_VIRTIO_TEST_FEATURES_IN_LEN 16 +/* Type of device to test features for. Matches the device id as defined by the + * virtio spec. + */ +#define MC_CMD_VIRTIO_TEST_FEATURES_IN_DEVICE_ID_OFST 0 +#define MC_CMD_VIRTIO_TEST_FEATURES_IN_DEVICE_ID_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_VIRTIO_GET_FEATURES/MC_CMD_VIRTIO_GET_FEATURES_IN/DEVICE_ID */ +#define MC_CMD_VIRTIO_TEST_FEATURES_IN_RESERVED_OFST 4 +#define MC_CMD_VIRTIO_TEST_FEATURES_IN_RESERVED_LEN 4 +/* Features requested. Same format as the returned value from + * MC_CMD_VIRTIO_GET_FEATURES. 
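+ *
+ * Typically a driver fetches the device's feature bits, masks them down to
+ * the subset it implements and echoes that subset here (a minimal sketch,
+ * assuming the sfc MCDI helpers from mcdi.h, a caller-supplied
+ * 'struct efx_nic *efx' and a 'driver_features' mask of the bits the driver
+ * supports; error handling elided):
+ *
+ *   MCDI_DECLARE_BUF(reqbuf, MC_CMD_VIRTIO_GET_FEATURES_IN_LEN);
+ *   MCDI_DECLARE_BUF(featbuf, MC_CMD_VIRTIO_GET_FEATURES_OUT_LEN);
+ *   MCDI_DECLARE_BUF(testbuf, MC_CMD_VIRTIO_TEST_FEATURES_IN_LEN);
+ *   size_t outlen;
+ *   u64 features;
+ *
+ *   // Ask the device which virtio-net feature bits it offers.
+ *   MCDI_SET_DWORD(reqbuf, VIRTIO_GET_FEATURES_IN_DEVICE_ID,
+ *                  MC_CMD_VIRTIO_GET_FEATURES_IN_NET);
+ *   efx_mcdi_rpc(efx, MC_CMD_VIRTIO_GET_FEATURES, reqbuf, sizeof(reqbuf),
+ *                featbuf, sizeof(featbuf), &outlen);
+ *   features = MCDI_QWORD(featbuf, VIRTIO_GET_FEATURES_OUT_FEATURES);
+ *
+ *   // Check that the negotiated subset is acceptable to the device.
+ *   MCDI_SET_DWORD(testbuf, VIRTIO_TEST_FEATURES_IN_DEVICE_ID,
+ *                  MC_CMD_VIRTIO_GET_FEATURES_IN_NET);
+ *   MCDI_SET_QWORD(testbuf, VIRTIO_TEST_FEATURES_IN_FEATURES,
+ *                  features & driver_features);
+ *   efx_mcdi_rpc(efx, MC_CMD_VIRTIO_TEST_FEATURES, testbuf, sizeof(testbuf),
+ *                NULL, 0, NULL);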
+ */ +#define MC_CMD_VIRTIO_TEST_FEATURES_IN_FEATURES_OFST 8 +#define MC_CMD_VIRTIO_TEST_FEATURES_IN_FEATURES_LEN 8 +#define MC_CMD_VIRTIO_TEST_FEATURES_IN_FEATURES_LO_OFST 8 +#define MC_CMD_VIRTIO_TEST_FEATURES_IN_FEATURES_LO_LEN 4 +#define MC_CMD_VIRTIO_TEST_FEATURES_IN_FEATURES_LO_LBN 64 +#define MC_CMD_VIRTIO_TEST_FEATURES_IN_FEATURES_LO_WIDTH 32 +#define MC_CMD_VIRTIO_TEST_FEATURES_IN_FEATURES_HI_OFST 12 +#define MC_CMD_VIRTIO_TEST_FEATURES_IN_FEATURES_HI_LEN 4 +#define MC_CMD_VIRTIO_TEST_FEATURES_IN_FEATURES_HI_LBN 96 +#define MC_CMD_VIRTIO_TEST_FEATURES_IN_FEATURES_HI_WIDTH 32 + +/* MC_CMD_VIRTIO_TEST_FEATURES_OUT msgresponse */ +#define MC_CMD_VIRTIO_TEST_FEATURES_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_VIRTIO_GET_CAPABILITIES + * Get virtio capabilities supported by the device. Returns general virtio + * capabilities and limitations of the hardware / firmware implementation + * (hardware device as a whole), rather than that of individual configured + * virtio devices. At present, only the absolute maximum number of queues + * allowed on multi-queue devices is returned. Response is expected to be + * extended as necessary in the future. + */ +#define MC_CMD_VIRTIO_GET_CAPABILITIES 0x1d3 +#undef MC_CMD_0x1d3_PRIVILEGE_CTG + +#define MC_CMD_0x1d3_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_VIRTIO_GET_CAPABILITIES_IN msgrequest */ +#define MC_CMD_VIRTIO_GET_CAPABILITIES_IN_LEN 4 +/* Type of device to get capabilities for. Matches the device id as defined by + * the virtio spec. + */ +#define MC_CMD_VIRTIO_GET_CAPABILITIES_IN_DEVICE_ID_OFST 0 +#define MC_CMD_VIRTIO_GET_CAPABILITIES_IN_DEVICE_ID_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_VIRTIO_GET_FEATURES/MC_CMD_VIRTIO_GET_FEATURES_IN/DEVICE_ID */ + +/* MC_CMD_VIRTIO_GET_CAPABILITIES_OUT msgresponse */ +#define MC_CMD_VIRTIO_GET_CAPABILITIES_OUT_LEN 4 +/* Maximum number of queues supported for a single device instance */ +#define MC_CMD_VIRTIO_GET_CAPABILITIES_OUT_MAX_QUEUES_OFST 0 +#define MC_CMD_VIRTIO_GET_CAPABILITIES_OUT_MAX_QUEUES_LEN 4 + + +/***********************************/ +/* MC_CMD_VIRTIO_INIT_QUEUE + * Create a virtio virtqueue. Fails with EALREADY if the queue already exists. + * Fails with ENOSUP if a feature is requested that isn't supported. Fails with + * EINVAL if a required feature isn't requested, or any other parameter is + * invalid. + */ +#define MC_CMD_VIRTIO_INIT_QUEUE 0x16a +#undef MC_CMD_0x16a_PRIVILEGE_CTG + +#define MC_CMD_0x16a_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_VIRTIO_INIT_QUEUE_REQ msgrequest */ +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_LEN 68 +/* Type of virtqueue to create. A network rxq and a txq can exist at the same + * time on a single VI. + */ +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_QUEUE_TYPE_OFST 0 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_QUEUE_TYPE_LEN 1 +/* enum: A network device receive queue */ +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_NET_RXQ 0x0 +/* enum: A network device transmit queue */ +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_NET_TXQ 0x1 +/* enum: A block device request queue */ +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_BLOCK 0x2 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_RESERVED_OFST 1 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_RESERVED_LEN 1 +/* If the calling function is a PF and this field is not VF_NULL, create the + * queue on the specified child VF instead of on the PF. + */ +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_TARGET_VF_OFST 2 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_TARGET_VF_LEN 2 +/* enum: No VF, create queue on the PF. 
*/ +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_VF_NULL 0xffff +/* Desired instance. This is the function-local index of the associated VI, not + * the virtqueue number as counted by the virtqueue spec. + */ +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_INSTANCE_OFST 4 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_INSTANCE_LEN 4 +/* Queue size, in entries. Must be a power of two. */ +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_SIZE_OFST 8 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_SIZE_LEN 4 +/* Flags */ +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_FLAGS_OFST 12 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_FLAGS_LEN 4 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_USE_PASID_OFST 12 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_USE_PASID_LBN 0 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_USE_PASID_WIDTH 1 +/* Address of the descriptor table in the virtqueue. */ +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_DESC_TBL_ADDR_OFST 16 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_DESC_TBL_ADDR_LEN 8 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_DESC_TBL_ADDR_LO_OFST 16 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_DESC_TBL_ADDR_LO_LEN 4 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_DESC_TBL_ADDR_LO_LBN 128 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_DESC_TBL_ADDR_LO_WIDTH 32 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_DESC_TBL_ADDR_HI_OFST 20 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_DESC_TBL_ADDR_HI_LEN 4 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_DESC_TBL_ADDR_HI_LBN 160 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_DESC_TBL_ADDR_HI_WIDTH 32 +/* Address of the available ring in the virtqueue. */ +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_AVAIL_RING_ADDR_OFST 24 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_AVAIL_RING_ADDR_LEN 8 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_AVAIL_RING_ADDR_LO_OFST 24 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_AVAIL_RING_ADDR_LO_LEN 4 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_AVAIL_RING_ADDR_LO_LBN 192 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_AVAIL_RING_ADDR_LO_WIDTH 32 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_AVAIL_RING_ADDR_HI_OFST 28 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_AVAIL_RING_ADDR_HI_LEN 4 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_AVAIL_RING_ADDR_HI_LBN 224 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_AVAIL_RING_ADDR_HI_WIDTH 32 +/* Address of the used ring in the virtqueue. */ +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_USED_RING_ADDR_OFST 32 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_USED_RING_ADDR_LEN 8 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_USED_RING_ADDR_LO_OFST 32 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_USED_RING_ADDR_LO_LEN 4 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_USED_RING_ADDR_LO_LBN 256 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_USED_RING_ADDR_LO_WIDTH 32 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_USED_RING_ADDR_HI_OFST 36 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_USED_RING_ADDR_HI_LEN 4 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_USED_RING_ADDR_HI_LBN 288 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_USED_RING_ADDR_HI_WIDTH 32 +/* PASID to use on PCIe transactions involving this queue. Ignored if the + * USE_PASID flag is not set. + */ +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_PASID_OFST 40 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_PASID_LEN 4 +/* Which MSIX vector to use for this virtqueue, or NO_VECTOR if MSIX should not + * be used. + */ +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_MSIX_VECTOR_OFST 44 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_MSIX_VECTOR_LEN 2 +/* enum: Do not enable interrupts for this virtqueue */ +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_NO_VECTOR 0xffff +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_RESERVED2_OFST 46 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_RESERVED2_LEN 2 +/* Virtio features to apply to this queue. 
Same format as in the virtio
+ * spec and in the return from MC_CMD_VIRTIO_GET_FEATURES. Must be a subset of
+ * the features returned from MC_CMD_VIRTIO_GET_FEATURES. Features are per-
+ * queue because with vDPA multiple queues on the same function can be passed
+ * through to different virtual hosts as independent devices.
+ */
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_FEATURES_OFST 48
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_FEATURES_LEN 8
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_FEATURES_LO_OFST 48
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_FEATURES_LO_LEN 4
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_FEATURES_LO_LBN 384
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_FEATURES_LO_WIDTH 32
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_FEATURES_HI_OFST 52
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_FEATURES_HI_LEN 4
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_FEATURES_HI_LBN 416
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_FEATURES_HI_WIDTH 32
+/* Enum values, see field(s): */
+/* MC_CMD_VIRTIO_GET_FEATURES/MC_CMD_VIRTIO_GET_FEATURES_OUT/FEATURES */
+/* The initial available index for this virtqueue. If this queue is being
+ * created to be migrated into, this should be the FINAL_AVAIL_IDX value
+ * returned by MC_CMD_VIRTIO_FINI_QUEUE of the queue being migrated from (or
+ * equivalent if the original queue was on a thirdparty device). Otherwise, it
+ * should be zero.
+ */
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_INITIAL_AVAIL_IDX_OFST 56
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_INITIAL_AVAIL_IDX_LEN 4
+/* Alias of INITIAL_AVAIL_IDX, kept for compatibility. */
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_INITIAL_PIDX_OFST 56
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_INITIAL_PIDX_LEN 4
+/* The initial used index for this virtqueue. If this queue is being created to
+ * be migrated into, this should be the FINAL_USED_IDX value returned by
+ * MC_CMD_VIRTIO_FINI_QUEUE of the queue being migrated from (or equivalent if
+ * the original queue was on a thirdparty device). Otherwise, it should be
+ * zero.
+ */
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_INITIAL_USED_IDX_OFST 60
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_INITIAL_USED_IDX_LEN 4
+/* Alias of INITIAL_USED_IDX, kept for compatibility. */
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_INITIAL_CIDX_OFST 60
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_INITIAL_CIDX_LEN 4
+/* A MAE_MPORT_SELECTOR defining which mport this queue should be associated
+ * with. Use MAE_MPORT_SELECTOR_ASSIGNED to request the default mport for the
+ * function this queue is being created on.
+ */
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_MPORT_SELECTOR_OFST 64
+#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_MPORT_SELECTOR_LEN 4
+
+/* MC_CMD_VIRTIO_INIT_QUEUE_RESP msgresponse */
+#define MC_CMD_VIRTIO_INIT_QUEUE_RESP_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VIRTIO_FINI_QUEUE
+ * Destroy a virtio virtqueue
+ */
+#define MC_CMD_VIRTIO_FINI_QUEUE 0x16b
+#undef MC_CMD_0x16b_PRIVILEGE_CTG
+
+#define MC_CMD_0x16b_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VIRTIO_FINI_QUEUE_REQ msgrequest */
+#define MC_CMD_VIRTIO_FINI_QUEUE_REQ_LEN 8
+/* Type of virtqueue to destroy. */
+#define MC_CMD_VIRTIO_FINI_QUEUE_REQ_QUEUE_TYPE_OFST 0
+#define MC_CMD_VIRTIO_FINI_QUEUE_REQ_QUEUE_TYPE_LEN 1
+/* Enum values, see field(s): */
+/* MC_CMD_VIRTIO_INIT_QUEUE/MC_CMD_VIRTIO_INIT_QUEUE_REQ/QUEUE_TYPE */
+#define MC_CMD_VIRTIO_FINI_QUEUE_REQ_RESERVED_OFST 1
+#define MC_CMD_VIRTIO_FINI_QUEUE_REQ_RESERVED_LEN 1
+/* If the calling function is a PF and this field is not VF_NULL, destroy the
+ * queue on the specified child VF instead of on the PF.
+ */ +#define MC_CMD_VIRTIO_FINI_QUEUE_REQ_TARGET_VF_OFST 2 +#define MC_CMD_VIRTIO_FINI_QUEUE_REQ_TARGET_VF_LEN 2 +/* enum: No VF, destroy the queue on the PF. */ +#define MC_CMD_VIRTIO_FINI_QUEUE_REQ_VF_NULL 0xffff +/* Instance to destroy */ +#define MC_CMD_VIRTIO_FINI_QUEUE_REQ_INSTANCE_OFST 4 +#define MC_CMD_VIRTIO_FINI_QUEUE_REQ_INSTANCE_LEN 4 + +/* MC_CMD_VIRTIO_FINI_QUEUE_RESP msgresponse */ +#define MC_CMD_VIRTIO_FINI_QUEUE_RESP_LEN 8 +/* The available index of the virtqueue when the queue was stopped. */ +#define MC_CMD_VIRTIO_FINI_QUEUE_RESP_FINAL_AVAIL_IDX_OFST 0 +#define MC_CMD_VIRTIO_FINI_QUEUE_RESP_FINAL_AVAIL_IDX_LEN 4 +/* Alias of FINAL_AVAIL_IDX, kept for compatibility. */ +#define MC_CMD_VIRTIO_FINI_QUEUE_RESP_FINAL_PIDX_OFST 0 +#define MC_CMD_VIRTIO_FINI_QUEUE_RESP_FINAL_PIDX_LEN 4 +/* The used index of the virtqueue when the queue was stopped. */ +#define MC_CMD_VIRTIO_FINI_QUEUE_RESP_FINAL_USED_IDX_OFST 4 +#define MC_CMD_VIRTIO_FINI_QUEUE_RESP_FINAL_USED_IDX_LEN 4 +/* Alias of FINAL_USED_IDX, kept for compatibility. */ +#define MC_CMD_VIRTIO_FINI_QUEUE_RESP_FINAL_CIDX_OFST 4 +#define MC_CMD_VIRTIO_FINI_QUEUE_RESP_FINAL_CIDX_LEN 4 + + +/***********************************/ +/* MC_CMD_VIRTIO_GET_DOORBELL_OFFSET + * Get the offset in the BAR of the doorbells for a VI. Doesn't require the + * queue(s) to be allocated. + */ +#define MC_CMD_VIRTIO_GET_DOORBELL_OFFSET 0x16c +#undef MC_CMD_0x16c_PRIVILEGE_CTG + +#define MC_CMD_0x16c_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_VIRTIO_GET_DOORBELL_OFFSET_REQ msgrequest */ +#define MC_CMD_VIRTIO_GET_DOORBELL_OFFSET_REQ_LEN 8 +/* Type of device to get information for. Matches the device id as defined by + * the virtio spec. + */ +#define MC_CMD_VIRTIO_GET_DOORBELL_OFFSET_REQ_DEVICE_ID_OFST 0 +#define MC_CMD_VIRTIO_GET_DOORBELL_OFFSET_REQ_DEVICE_ID_LEN 1 +/* Enum values, see field(s): */ +/* MC_CMD_VIRTIO_GET_FEATURES/MC_CMD_VIRTIO_GET_FEATURES_IN/DEVICE_ID */ +#define MC_CMD_VIRTIO_GET_DOORBELL_OFFSET_REQ_RESERVED_OFST 1 +#define MC_CMD_VIRTIO_GET_DOORBELL_OFFSET_REQ_RESERVED_LEN 1 +/* If the calling function is a PF and this field is not VF_NULL, query the VI + * on the specified child VF instead of on the PF. + */ +#define MC_CMD_VIRTIO_GET_DOORBELL_OFFSET_REQ_TARGET_VF_OFST 2 +#define MC_CMD_VIRTIO_GET_DOORBELL_OFFSET_REQ_TARGET_VF_LEN 2 +/* enum: No VF, query the PF. 
+ */
+#define MC_CMD_VIRTIO_GET_DOORBELL_OFFSET_REQ_VF_NULL 0xffff
+/* VI instance to query */
+#define MC_CMD_VIRTIO_GET_DOORBELL_OFFSET_REQ_INSTANCE_OFST 4
+#define MC_CMD_VIRTIO_GET_DOORBELL_OFFSET_REQ_INSTANCE_LEN 4
+
+/* MC_CMD_VIRTIO_GET_NET_DOORBELL_OFFSET_RESP msgresponse */
+#define MC_CMD_VIRTIO_GET_NET_DOORBELL_OFFSET_RESP_LEN 8
+/* Offset of RX doorbell in BAR */
+#define MC_CMD_VIRTIO_GET_NET_DOORBELL_OFFSET_RESP_RX_DBL_OFFSET_OFST 0
+#define MC_CMD_VIRTIO_GET_NET_DOORBELL_OFFSET_RESP_RX_DBL_OFFSET_LEN 4
+/* Offset of TX doorbell in BAR */
+#define MC_CMD_VIRTIO_GET_NET_DOORBELL_OFFSET_RESP_TX_DBL_OFFSET_OFST 4
+#define MC_CMD_VIRTIO_GET_NET_DOORBELL_OFFSET_RESP_TX_DBL_OFFSET_LEN 4
+
+/* MC_CMD_VIRTIO_GET_BLOCK_DOORBELL_OFFSET_RESP msgresponse */
+#define MC_CMD_VIRTIO_GET_BLOCK_DOORBELL_OFFSET_RESP_LEN 4
+/* Offset of request doorbell in BAR */
+#define MC_CMD_VIRTIO_GET_BLOCK_DOORBELL_OFFSET_RESP_DBL_OFFSET_OFST 0
+#define MC_CMD_VIRTIO_GET_BLOCK_DOORBELL_OFFSET_RESP_DBL_OFFSET_LEN 4
+
+/* PCIE_FUNCTION structuredef: Structure representing a PCIe function ID
+ * (interface/PF/VF tuple)
+ */
+#define PCIE_FUNCTION_LEN 8
+/* PCIe PF function number */
+#define PCIE_FUNCTION_PF_OFST 0
+#define PCIE_FUNCTION_PF_LEN 2
+/* enum: Wildcard value representing any available function (e.g. in resource
+ * allocation requests)
+ */
+#define PCIE_FUNCTION_PF_ANY 0xfffe
+/* enum: Value representing an invalid (null) function */
+#define PCIE_FUNCTION_PF_NULL 0xffff
+#define PCIE_FUNCTION_PF_LBN 0
+#define PCIE_FUNCTION_PF_WIDTH 16
+/* PCIe VF function number (PF relative) */
+#define PCIE_FUNCTION_VF_OFST 2
+#define PCIE_FUNCTION_VF_LEN 2
+/* enum: Wildcard value representing any available function (e.g. in resource
+ * allocation requests)
+ */
+#define PCIE_FUNCTION_VF_ANY 0xfffe
+/* enum: Function is a PF (when PF != PF_NULL) or an invalid function (when PF
+ * == PF_NULL)
+ */
+#define PCIE_FUNCTION_VF_NULL 0xffff
+#define PCIE_FUNCTION_VF_LBN 16
+#define PCIE_FUNCTION_VF_WIDTH 16
+/* PCIe interface of the function. Values should be taken from the
+ * PCIE_INTERFACE enum
+ */
+#define PCIE_FUNCTION_INTF_OFST 4
+#define PCIE_FUNCTION_INTF_LEN 4
+/* enum: Host PCIe interface. (Alias for HOST_PRIMARY, provided for backwards
+ * compatibility)
+ */
+#define PCIE_FUNCTION_INTF_HOST 0x0
+/* enum: Application Processor interface (alias for NIC_EMBEDDED, provided for
+ * backwards compatibility)
+ */
+#define PCIE_FUNCTION_INTF_AP 0x1
+#define PCIE_FUNCTION_INTF_LBN 32
+#define PCIE_FUNCTION_INTF_WIDTH 32
+
+/* QUEUE_ID structuredef: Structure representing an absolute queue identifier
+ * (absolute VI number + VI relative queue number). On Keystone, a VI can
+ * contain multiple queues (at present, up to 2), each with separate controls
+ * for direction. This structure is required to uniquely identify the absolute
+ * source queue for descriptor proxy functions.
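+ *
+ * For example, a raw 32-bit QUEUE_ID value 'qid' decodes as (a sketch; the
+ * field names refer to the defines below):
+ *
+ *   unsigned int abs_vi = qid & 0xffff;          // QUEUE_ID_ABS_VI
+ *   unsigned int rel_queue = (qid >> 16) & 0x1;  // QUEUE_ID_REL_QUEUE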
+ */
+#define QUEUE_ID_LEN 4
+/* Absolute VI number */
+#define QUEUE_ID_ABS_VI_OFST 0
+#define QUEUE_ID_ABS_VI_LEN 2
+#define QUEUE_ID_ABS_VI_LBN 0
+#define QUEUE_ID_ABS_VI_WIDTH 16
+/* Relative queue number within the VI */
+#define QUEUE_ID_REL_QUEUE_LBN 16
+#define QUEUE_ID_REL_QUEUE_WIDTH 1
+#define QUEUE_ID_RESERVED_LBN 17
+#define QUEUE_ID_RESERVED_WIDTH 15
+
+
+/***********************************/
+/* MC_CMD_DESC_PROXY_FUNC_CREATE
+ * Descriptor proxy functions are abstract devices that forward all requests
+ * submitted to the host PCIe function (descriptors submitted to Virtio or
+ * EF100 queues) to be handled on another function (most commonly on the
+ * embedded Application Processor), via EF100 descriptor proxy, memory-to-
+ * memory and descriptor-to-completion mechanisms. The primary user is the
+ * Virtio-blk subsystem; see SF-122927-TC. This function allocates a new
+ * descriptor proxy function on the host and assigns a user-defined label. The
+ * actual function configuration is not persisted until the caller configures
+ * it with MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN and commits with
+ * MC_CMD_DESC_PROXY_FUNC_COMMIT_IN.
+ */
+#define MC_CMD_DESC_PROXY_FUNC_CREATE 0x172
+#undef MC_CMD_0x172_PRIVILEGE_CTG
+
+#define MC_CMD_0x172_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DESC_PROXY_FUNC_CREATE_IN msgrequest */
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_LEN 52
+/* PCIe Function ID to allocate (as struct PCIE_FUNCTION). Set to
+ * {PF_ANY,VF_ANY,interface} for "any available function". Set to
+ * {PF_ANY,VF_NULL,interface} for "any available PF".
+ */
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_OFST 0
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_LEN 8
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_LO_OFST 0
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_LO_LEN 4
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_LO_LBN 0
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_LO_WIDTH 32
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_HI_OFST 4
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_HI_LEN 4
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_HI_LBN 32
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_HI_WIDTH 32
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_PF_OFST 0
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_PF_LEN 2
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_VF_OFST 2
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_VF_LEN 2
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_INTF_OFST 4
+#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_INTF_LEN 4
+/* The personality to set. The meanings of the personalities are defined in
+ * SF-120734-TC with more information in SF-122717-TC.
At present, we only + * support proxying for VIRTIO_BLK + */ +#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_PERSONALITY_OFST 8 +#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_PERSONALITY_LEN 4 +/* Enum values, see field(s): */ +/* FUNCTION_PERSONALITY/ID */ +/* User-defined label (zero-terminated ASCII string) to uniquely identify the + * function + */ +#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_LABEL_OFST 12 +#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_LABEL_LEN 40 + +/* MC_CMD_DESC_PROXY_FUNC_CREATE_OUT msgresponse */ +#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_LEN 12 +/* Handle to the descriptor proxy function */ +#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_HANDLE_OFST 0 +#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_HANDLE_LEN 4 +/* Allocated function ID (as struct PCIE_FUNCTION) */ +#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_OFST 4 +#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_LEN 8 +#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_LO_OFST 4 +#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_LO_LEN 4 +#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_LO_LBN 32 +#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_LO_WIDTH 32 +#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_HI_OFST 8 +#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_HI_LEN 4 +#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_HI_LBN 64 +#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_HI_WIDTH 32 +#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_PF_OFST 4 +#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_PF_LEN 2 +#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_VF_OFST 6 +#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_VF_LEN 2 +#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_INTF_OFST 8 +#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_INTF_LEN 4 + + +/***********************************/ +/* MC_CMD_DESC_PROXY_FUNC_DESTROY + * Remove an existing descriptor proxy function. Underlying function + * personality and configuration reverts back to factory default. Function + * configuration is committed immediately to specified store and any function + * ownership is released. + */ +#define MC_CMD_DESC_PROXY_FUNC_DESTROY 0x173 +#undef MC_CMD_0x173_PRIVILEGE_CTG + +#define MC_CMD_0x173_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_DESC_PROXY_FUNC_DESTROY_IN msgrequest */ +#define MC_CMD_DESC_PROXY_FUNC_DESTROY_IN_LEN 44 +/* User-defined label (zero-terminated ASCII string) to uniquely identify the + * function + */ +#define MC_CMD_DESC_PROXY_FUNC_DESTROY_IN_LABEL_OFST 0 +#define MC_CMD_DESC_PROXY_FUNC_DESTROY_IN_LABEL_LEN 40 +/* Store from which to remove function configuration */ +#define MC_CMD_DESC_PROXY_FUNC_DESTROY_IN_STORE_OFST 40 +#define MC_CMD_DESC_PROXY_FUNC_DESTROY_IN_STORE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_DESC_PROXY_FUNC_COMMIT/MC_CMD_DESC_PROXY_FUNC_COMMIT_IN/STORE */ + +/* MC_CMD_DESC_PROXY_FUNC_DESTROY_OUT msgresponse */ +#define MC_CMD_DESC_PROXY_FUNC_DESTROY_OUT_LEN 0 + +/* VIRTIO_BLK_CONFIG structuredef: Virtio block device configuration. See + * Virtio specification v1.1, Sections 5.2.3 and 6 for definition of feature + * bits. 
See Virtio specification v1.1, Section 5.2.4 (struct + * virtio_blk_config) for definition of remaining configuration fields + */ +#define VIRTIO_BLK_CONFIG_LEN 68 +/* Virtio block device features to advertise, per Virtio 1.1, 5.2.3 and 6 */ +#define VIRTIO_BLK_CONFIG_FEATURES_OFST 0 +#define VIRTIO_BLK_CONFIG_FEATURES_LEN 8 +#define VIRTIO_BLK_CONFIG_FEATURES_LO_OFST 0 +#define VIRTIO_BLK_CONFIG_FEATURES_LO_LEN 4 +#define VIRTIO_BLK_CONFIG_FEATURES_LO_LBN 0 +#define VIRTIO_BLK_CONFIG_FEATURES_LO_WIDTH 32 +#define VIRTIO_BLK_CONFIG_FEATURES_HI_OFST 4 +#define VIRTIO_BLK_CONFIG_FEATURES_HI_LEN 4 +#define VIRTIO_BLK_CONFIG_FEATURES_HI_LBN 32 +#define VIRTIO_BLK_CONFIG_FEATURES_HI_WIDTH 32 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BARRIER_OFST 0 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BARRIER_LBN 0 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BARRIER_WIDTH 1 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SIZE_MAX_OFST 0 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SIZE_MAX_LBN 1 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SIZE_MAX_WIDTH 1 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SEG_MAX_OFST 0 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SEG_MAX_LBN 2 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SEG_MAX_WIDTH 1 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_GEOMETRY_OFST 0 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_GEOMETRY_LBN 4 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_GEOMETRY_WIDTH 1 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_RO_OFST 0 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_RO_LBN 5 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_RO_WIDTH 1 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BLK_SIZE_OFST 0 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BLK_SIZE_LBN 6 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BLK_SIZE_WIDTH 1 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SCSI_OFST 0 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SCSI_LBN 7 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SCSI_WIDTH 1 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_FLUSH_OFST 0 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_FLUSH_LBN 9 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_FLUSH_WIDTH 1 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_TOPOLOGY_OFST 0 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_TOPOLOGY_LBN 10 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_TOPOLOGY_WIDTH 1 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_CONFIG_WCE_OFST 0 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_CONFIG_WCE_LBN 11 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_CONFIG_WCE_WIDTH 1 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_MQ_OFST 0 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_MQ_LBN 12 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_MQ_WIDTH 1 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_DISCARD_OFST 0 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_DISCARD_LBN 13 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_DISCARD_WIDTH 1 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_WRITE_ZEROES_OFST 0 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_WRITE_ZEROES_LBN 14 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_WRITE_ZEROES_WIDTH 1 +#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_INDIRECT_DESC_OFST 0 +#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_INDIRECT_DESC_LBN 28 +#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_INDIRECT_DESC_WIDTH 1 +#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_EVENT_IDX_OFST 0 +#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_EVENT_IDX_LBN 29 +#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_EVENT_IDX_WIDTH 1 +#define VIRTIO_BLK_CONFIG_VIRTIO_F_VERSION_1_OFST 0 +#define VIRTIO_BLK_CONFIG_VIRTIO_F_VERSION_1_LBN 32 +#define VIRTIO_BLK_CONFIG_VIRTIO_F_VERSION_1_WIDTH 1 +#define VIRTIO_BLK_CONFIG_VIRTIO_F_ACCESS_PLATFORM_OFST 0 +#define VIRTIO_BLK_CONFIG_VIRTIO_F_ACCESS_PLATFORM_LBN 33 +#define VIRTIO_BLK_CONFIG_VIRTIO_F_ACCESS_PLATFORM_WIDTH 1 +#define 
VIRTIO_BLK_CONFIG_VIRTIO_F_RING_PACKED_OFST 0 +#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_PACKED_LBN 34 +#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_PACKED_WIDTH 1 +#define VIRTIO_BLK_CONFIG_VIRTIO_F_IN_ORDER_OFST 0 +#define VIRTIO_BLK_CONFIG_VIRTIO_F_IN_ORDER_LBN 35 +#define VIRTIO_BLK_CONFIG_VIRTIO_F_IN_ORDER_WIDTH 1 +#define VIRTIO_BLK_CONFIG_VIRTIO_F_ORDER_PLATFORM_OFST 0 +#define VIRTIO_BLK_CONFIG_VIRTIO_F_ORDER_PLATFORM_LBN 36 +#define VIRTIO_BLK_CONFIG_VIRTIO_F_ORDER_PLATFORM_WIDTH 1 +#define VIRTIO_BLK_CONFIG_VIRTIO_F_SR_IOV_OFST 0 +#define VIRTIO_BLK_CONFIG_VIRTIO_F_SR_IOV_LBN 37 +#define VIRTIO_BLK_CONFIG_VIRTIO_F_SR_IOV_WIDTH 1 +#define VIRTIO_BLK_CONFIG_VIRTIO_F_NOTIFICATION_DATA_OFST 0 +#define VIRTIO_BLK_CONFIG_VIRTIO_F_NOTIFICATION_DATA_LBN 38 +#define VIRTIO_BLK_CONFIG_VIRTIO_F_NOTIFICATION_DATA_WIDTH 1 +#define VIRTIO_BLK_CONFIG_FEATURES_LBN 0 +#define VIRTIO_BLK_CONFIG_FEATURES_WIDTH 64 +/* The capacity of the device (expressed in 512-byte sectors) */ +#define VIRTIO_BLK_CONFIG_CAPACITY_OFST 8 +#define VIRTIO_BLK_CONFIG_CAPACITY_LEN 8 +#define VIRTIO_BLK_CONFIG_CAPACITY_LO_OFST 8 +#define VIRTIO_BLK_CONFIG_CAPACITY_LO_LEN 4 +#define VIRTIO_BLK_CONFIG_CAPACITY_LO_LBN 64 +#define VIRTIO_BLK_CONFIG_CAPACITY_LO_WIDTH 32 +#define VIRTIO_BLK_CONFIG_CAPACITY_HI_OFST 12 +#define VIRTIO_BLK_CONFIG_CAPACITY_HI_LEN 4 +#define VIRTIO_BLK_CONFIG_CAPACITY_HI_LBN 96 +#define VIRTIO_BLK_CONFIG_CAPACITY_HI_WIDTH 32 +#define VIRTIO_BLK_CONFIG_CAPACITY_LBN 64 +#define VIRTIO_BLK_CONFIG_CAPACITY_WIDTH 64 +/* Maximum size of any single segment. Only valid when VIRTIO_BLK_F_SIZE_MAX is + * set. + */ +#define VIRTIO_BLK_CONFIG_SIZE_MAX_OFST 16 +#define VIRTIO_BLK_CONFIG_SIZE_MAX_LEN 4 +#define VIRTIO_BLK_CONFIG_SIZE_MAX_LBN 128 +#define VIRTIO_BLK_CONFIG_SIZE_MAX_WIDTH 32 +/* Maximum number of segments in a request. Only valid when + * VIRTIO_BLK_F_SEG_MAX is set. + */ +#define VIRTIO_BLK_CONFIG_SEG_MAX_OFST 20 +#define VIRTIO_BLK_CONFIG_SEG_MAX_LEN 4 +#define VIRTIO_BLK_CONFIG_SEG_MAX_LBN 160 +#define VIRTIO_BLK_CONFIG_SEG_MAX_WIDTH 32 +/* Disk-style geometry - cylinders. Only valid when VIRTIO_BLK_F_GEOMETRY is + * set. + */ +#define VIRTIO_BLK_CONFIG_CYLINDERS_OFST 24 +#define VIRTIO_BLK_CONFIG_CYLINDERS_LEN 2 +#define VIRTIO_BLK_CONFIG_CYLINDERS_LBN 192 +#define VIRTIO_BLK_CONFIG_CYLINDERS_WIDTH 16 +/* Disk-style geometry - heads. Only valid when VIRTIO_BLK_F_GEOMETRY is set. + */ +#define VIRTIO_BLK_CONFIG_HEADS_OFST 26 +#define VIRTIO_BLK_CONFIG_HEADS_LEN 1 +#define VIRTIO_BLK_CONFIG_HEADS_LBN 208 +#define VIRTIO_BLK_CONFIG_HEADS_WIDTH 8 +/* Disk-style geometry - sectors. Only valid when VIRTIO_BLK_F_GEOMETRY is set. + */ +#define VIRTIO_BLK_CONFIG_SECTORS_OFST 27 +#define VIRTIO_BLK_CONFIG_SECTORS_LEN 1 +#define VIRTIO_BLK_CONFIG_SECTORS_LBN 216 +#define VIRTIO_BLK_CONFIG_SECTORS_WIDTH 8 +/* Block size of disk. Only valid when VIRTIO_BLK_F_BLK_SIZE is set. */ +#define VIRTIO_BLK_CONFIG_BLK_SIZE_OFST 28 +#define VIRTIO_BLK_CONFIG_BLK_SIZE_LEN 4 +#define VIRTIO_BLK_CONFIG_BLK_SIZE_LBN 224 +#define VIRTIO_BLK_CONFIG_BLK_SIZE_WIDTH 32 +/* Block topology - number of logical blocks per physical block (log2). Only + * valid when VIRTIO_BLK_F_TOPOLOGY is set. + */ +#define VIRTIO_BLK_CONFIG_PHYSICAL_BLOCK_EXP_OFST 32 +#define VIRTIO_BLK_CONFIG_PHYSICAL_BLOCK_EXP_LEN 1 +#define VIRTIO_BLK_CONFIG_PHYSICAL_BLOCK_EXP_LBN 256 +#define VIRTIO_BLK_CONFIG_PHYSICAL_BLOCK_EXP_WIDTH 8 +/* Block topology - offset of first aligned logical block. Only valid when + * VIRTIO_BLK_F_TOPOLOGY is set. 
+ */ +#define VIRTIO_BLK_CONFIG_ALIGNMENT_OFFSET_OFST 33 +#define VIRTIO_BLK_CONFIG_ALIGNMENT_OFFSET_LEN 1 +#define VIRTIO_BLK_CONFIG_ALIGNMENT_OFFSET_LBN 264 +#define VIRTIO_BLK_CONFIG_ALIGNMENT_OFFSET_WIDTH 8 +/* Block topology - suggested minimum I/O size in blocks. Only valid when + * VIRTIO_BLK_F_TOPOLOGY is set. + */ +#define VIRTIO_BLK_CONFIG_MIN_IO_SIZE_OFST 34 +#define VIRTIO_BLK_CONFIG_MIN_IO_SIZE_LEN 2 +#define VIRTIO_BLK_CONFIG_MIN_IO_SIZE_LBN 272 +#define VIRTIO_BLK_CONFIG_MIN_IO_SIZE_WIDTH 16 +/* Block topology - optimal (suggested maximum) I/O size in blocks. Only valid + * when VIRTIO_BLK_F_TOPOLOGY is set. + */ +#define VIRTIO_BLK_CONFIG_OPT_IO_SIZE_OFST 36 +#define VIRTIO_BLK_CONFIG_OPT_IO_SIZE_LEN 4 +#define VIRTIO_BLK_CONFIG_OPT_IO_SIZE_LBN 288 +#define VIRTIO_BLK_CONFIG_OPT_IO_SIZE_WIDTH 32 +/* Unused, set to zero. Note that virtio_blk_config.writeback is volatile and + * not carried in config data. + */ +#define VIRTIO_BLK_CONFIG_UNUSED0_OFST 40 +#define VIRTIO_BLK_CONFIG_UNUSED0_LEN 2 +#define VIRTIO_BLK_CONFIG_UNUSED0_LBN 320 +#define VIRTIO_BLK_CONFIG_UNUSED0_WIDTH 16 +/* Number of queues. Only valid if the VIRTIO_BLK_F_MQ feature is negotiated. + */ +#define VIRTIO_BLK_CONFIG_NUM_QUEUES_OFST 42 +#define VIRTIO_BLK_CONFIG_NUM_QUEUES_LEN 2 +#define VIRTIO_BLK_CONFIG_NUM_QUEUES_LBN 336 +#define VIRTIO_BLK_CONFIG_NUM_QUEUES_WIDTH 16 +/* Maximum discard sectors size, in 512-byte units. Only valid if + * VIRTIO_BLK_F_DISCARD is set. + */ +#define VIRTIO_BLK_CONFIG_MAX_DISCARD_SECTORS_OFST 44 +#define VIRTIO_BLK_CONFIG_MAX_DISCARD_SECTORS_LEN 4 +#define VIRTIO_BLK_CONFIG_MAX_DISCARD_SECTORS_LBN 352 +#define VIRTIO_BLK_CONFIG_MAX_DISCARD_SECTORS_WIDTH 32 +/* Maximum discard segment number. Only valid if VIRTIO_BLK_F_DISCARD is set. + */ +#define VIRTIO_BLK_CONFIG_MAX_DISCARD_SEG_OFST 48 +#define VIRTIO_BLK_CONFIG_MAX_DISCARD_SEG_LEN 4 +#define VIRTIO_BLK_CONFIG_MAX_DISCARD_SEG_LBN 384 +#define VIRTIO_BLK_CONFIG_MAX_DISCARD_SEG_WIDTH 32 +/* Discard sector alignment, in 512-byte units. Only valid if + * VIRTIO_BLK_F_DISCARD is set. + */ +#define VIRTIO_BLK_CONFIG_DISCARD_SECTOR_ALIGNMENT_OFST 52 +#define VIRTIO_BLK_CONFIG_DISCARD_SECTOR_ALIGNMENT_LEN 4 +#define VIRTIO_BLK_CONFIG_DISCARD_SECTOR_ALIGNMENT_LBN 416 +#define VIRTIO_BLK_CONFIG_DISCARD_SECTOR_ALIGNMENT_WIDTH 32 +/* Maximum write zeroes sectors size, in 512-byte units. Only valid if + * VIRTIO_BLK_F_WRITE_ZEROES is set. + */ +#define VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SECTORS_OFST 56 +#define VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SECTORS_LEN 4 +#define VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SECTORS_LBN 448 +#define VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SECTORS_WIDTH 32 +/* Maximum write zeroes segment number. Only valid if VIRTIO_BLK_F_WRITE_ZEROES + * is set. + */ +#define VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SEG_OFST 60 +#define VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SEG_LEN 4 +#define VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SEG_LBN 480 +#define VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SEG_WIDTH 32 +/* Write zeroes request can result in deallocating one or more sectors. Only + * valid if VIRTIO_BLK_F_WRITE_ZEROES is set. + */ +#define VIRTIO_BLK_CONFIG_WRITE_ZEROES_MAY_UNMAP_OFST 64 +#define VIRTIO_BLK_CONFIG_WRITE_ZEROES_MAY_UNMAP_LEN 1 +#define VIRTIO_BLK_CONFIG_WRITE_ZEROES_MAY_UNMAP_LBN 512 +#define VIRTIO_BLK_CONFIG_WRITE_ZEROES_MAY_UNMAP_WIDTH 8 +/* Unused, set to zero. 
+ */
+#define VIRTIO_BLK_CONFIG_UNUSED1_OFST 65
+#define VIRTIO_BLK_CONFIG_UNUSED1_LEN 3
+#define VIRTIO_BLK_CONFIG_UNUSED1_LBN 520
+#define VIRTIO_BLK_CONFIG_UNUSED1_WIDTH 24
+
+
+/***********************************/
+/* MC_CMD_DESC_PROXY_FUNC_CONFIG_SET
+ * Set configuration for an existing descriptor proxy function. Configuration
+ * data must match the function personality. The actual function configuration
+ * is not persisted until the caller commits with
+ * MC_CMD_DESC_PROXY_FUNC_COMMIT_IN.
+ */
+#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET 0x174
+#undef MC_CMD_0x174_PRIVILEGE_CTG
+
+#define MC_CMD_0x174_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN msgrequest */
+#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_LENMIN 20
+#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_LENMAX 252
+#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_LENMAX_MCDI2 1020
+#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_LEN(num) (20+1*(num))
+#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_CONFIG_NUM(len) (((len)-20)/1)
+/* Handle to descriptor proxy function (as returned by
+ * MC_CMD_DESC_PROXY_FUNC_OPEN)
+ */
+#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_HANDLE_OFST 0
+#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_HANDLE_LEN 4
+/* Reserved for future extension, set to zero. */
+#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_RESERVED_OFST 4
+#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_RESERVED_LEN 16
+/* Configuration data. The format of the configuration data is determined
+ * implicitly from the function personality referred to by HANDLE. Currently,
+ * the only supported format is VIRTIO_BLK_CONFIG.
+ */
+#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_CONFIG_OFST 20
+#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_CONFIG_LEN 1
+#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_CONFIG_MINNUM 0
+#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_CONFIG_MAXNUM 232
+#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_CONFIG_MAXNUM_MCDI2 1000
+
+/* MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_OUT msgresponse */
+#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_DESC_PROXY_FUNC_COMMIT
+ * Commit function configuration to a non-volatile or volatile store. Once the
+ * configuration is applied to hardware (which may happen immediately or on
+ * the next function/device reset), a DESC_PROXY_FUNC_CONFIG_SET MCDI event
+ * will be delivered to the caller's MCDI event queue.
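+ *
+ * A typical flow pairs MC_CMD_DESC_PROXY_FUNC_CONFIG_SET with this command
+ * (a minimal sketch, assuming the sfc MCDI helpers from mcdi.h, a
+ * caller-supplied 'struct efx_nic *efx', a 'handle' from
+ * MC_CMD_DESC_PROXY_FUNC_OPEN and a caller-built 'blk_config' byte array in
+ * VIRTIO_BLK_CONFIG layout; error handling elided):
+ *
+ *   MCDI_DECLARE_BUF(setbuf,
+ *       MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_LEN(VIRTIO_BLK_CONFIG_LEN));
+ *   MCDI_DECLARE_BUF(commitbuf, MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_LEN);
+ *   MCDI_DECLARE_BUF(outbuf, MC_CMD_DESC_PROXY_FUNC_COMMIT_OUT_LEN);
+ *   size_t outlen;
+ *
+ *   // Stage the configuration blob against the function handle.
+ *   MCDI_SET_DWORD(setbuf, DESC_PROXY_FUNC_CONFIG_SET_IN_HANDLE, handle);
+ *   memcpy(MCDI_PTR(setbuf, DESC_PROXY_FUNC_CONFIG_SET_IN_CONFIG),
+ *          blk_config, VIRTIO_BLK_CONFIG_LEN);
+ *   efx_mcdi_rpc(efx, MC_CMD_DESC_PROXY_FUNC_CONFIG_SET, setbuf,
+ *                sizeof(setbuf), NULL, 0, NULL);
+ *
+ *   // Commit to the volatile store; completion arrives as an MCDI event.
+ *   MCDI_SET_DWORD(commitbuf, DESC_PROXY_FUNC_COMMIT_IN_HANDLE, handle);
+ *   MCDI_SET_DWORD(commitbuf, DESC_PROXY_FUNC_COMMIT_IN_STORE,
+ *                  MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_VOLATILE);
+ *   efx_mcdi_rpc(efx, MC_CMD_DESC_PROXY_FUNC_COMMIT, commitbuf,
+ *                sizeof(commitbuf), outbuf, sizeof(outbuf), &outlen);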
+ */ +#define MC_CMD_DESC_PROXY_FUNC_COMMIT 0x175 +#undef MC_CMD_0x175_PRIVILEGE_CTG + +#define MC_CMD_0x175_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_DESC_PROXY_FUNC_COMMIT_IN msgrequest */ +#define MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_LEN 8 +/* Handle to descriptor proxy function (as returned by + * MC_CMD_DESC_PROXY_FUNC_OPEN) + */ +#define MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_HANDLE_OFST 0 +#define MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_HANDLE_LEN 4 +#define MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_STORE_OFST 4 +#define MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_STORE_LEN 4 +/* enum: Store into non-volatile (dynamic) config */ +#define MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_NON_VOLATILE 0x0 +/* enum: Store into volatile (ephemeral) config */ +#define MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_VOLATILE 0x1 + +/* MC_CMD_DESC_PROXY_FUNC_COMMIT_OUT msgresponse */ +#define MC_CMD_DESC_PROXY_FUNC_COMMIT_OUT_LEN 4 +/* Generation count to be delivered in an event once configuration becomes live + */ +#define MC_CMD_DESC_PROXY_FUNC_COMMIT_OUT_CONFIG_GENERATION_OFST 0 +#define MC_CMD_DESC_PROXY_FUNC_COMMIT_OUT_CONFIG_GENERATION_LEN 4 + + +/***********************************/ +/* MC_CMD_DESC_PROXY_FUNC_OPEN + * Retrieve a handle for an existing descriptor proxy function. Returns an + * integer handle, valid until function is deallocated, MC rebooted or power- + * cycle. Returns ENODEV if no function with given label exists. + */ +#define MC_CMD_DESC_PROXY_FUNC_OPEN 0x176 +#undef MC_CMD_0x176_PRIVILEGE_CTG + +#define MC_CMD_0x176_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_DESC_PROXY_FUNC_OPEN_IN msgrequest */ +#define MC_CMD_DESC_PROXY_FUNC_OPEN_IN_LEN 40 +/* User-defined label (zero-terminated ASCII string) to uniquely identify the + * function + */ +#define MC_CMD_DESC_PROXY_FUNC_OPEN_IN_LABEL_OFST 0 +#define MC_CMD_DESC_PROXY_FUNC_OPEN_IN_LABEL_LEN 40 + +/* MC_CMD_DESC_PROXY_FUNC_OPEN_OUT msgresponse */ +#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_LENMIN 40 +#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_LENMAX 252 +#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_LEN(num) (40+1*(num)) +#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_NUM(len) (((len)-40)/1) +/* Handle to the descriptor proxy function */ +#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_HANDLE_OFST 0 +#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_HANDLE_LEN 4 +/* PCIe Function ID (as struct PCIE_FUNCTION) */ +#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_OFST 4 +#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_LEN 8 +#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_LO_OFST 4 +#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_LO_LEN 4 +#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_LO_LBN 32 +#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_LO_WIDTH 32 +#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_HI_OFST 8 +#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_HI_LEN 4 +#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_HI_LBN 64 +#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_HI_WIDTH 32 +#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_PF_OFST 4 +#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_PF_LEN 2 +#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_VF_OFST 6 +#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_VF_LEN 2 +#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_INTF_OFST 8 +#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_INTF_LEN 4 +/* Function personality */ +#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_PERSONALITY_OFST 12 +#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_PERSONALITY_LEN 4 +/* Enum values, see field(s): */ +/* FUNCTION_PERSONALITY/ID */ +/* Function configuration state */ +#define 
MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_STATUS_OFST 16 +#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_STATUS_LEN 4 +/* enum: Function configuration is visible to the host (live) */ +#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_LIVE 0x0 +/* enum: Function configuration is pending reset */ +#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_PENDING 0x1 +/* enum: Function configuration is missing (created, but no configuration + * committed) + */ +#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_UNCONFIGURED 0x2 +/* Generation count to be delivered in an event once the configuration becomes + * live (if status is "pending") + */ +#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_GENERATION_OFST 20 +#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_GENERATION_LEN 4 +/* Reserved for future extension, set to zero. */ +#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_RESERVED_OFST 24 +#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_RESERVED_LEN 16 +/* Configuration data corresponding to function personality. Currently, only + * supported format is VIRTIO_BLK_CONFIG. Not valid if status is UNCONFIGURED. + */ +#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_OFST 40 +#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_LEN 1 +#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_MINNUM 0 +#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_MAXNUM 212 +#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_MAXNUM_MCDI2 980 + + +/***********************************/ +/* MC_CMD_DESC_PROXY_FUNC_CLOSE + * Releases a handle for an open descriptor proxy function. If proxying was + * enabled on the device, the caller is expected to gracefully stop it using + * MC_CMD_DESC_PROXY_FUNC_DISABLE prior to calling this function. Closing an + * active device without disabling proxying will result in forced close, which + * will put the device into a failed state and signal the host driver of the + * error (for virtio, DEVICE_NEEDS_RESET flag would be set on the host side) + */ +#define MC_CMD_DESC_PROXY_FUNC_CLOSE 0x1a1 +#undef MC_CMD_0x1a1_PRIVILEGE_CTG + +#define MC_CMD_0x1a1_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_DESC_PROXY_FUNC_CLOSE_IN msgrequest */ +#define MC_CMD_DESC_PROXY_FUNC_CLOSE_IN_LEN 4 +/* Handle to the descriptor proxy function */ +#define MC_CMD_DESC_PROXY_FUNC_CLOSE_IN_HANDLE_OFST 0 +#define MC_CMD_DESC_PROXY_FUNC_CLOSE_IN_HANDLE_LEN 4 + +/* MC_CMD_DESC_PROXY_FUNC_CLOSE_OUT msgresponse */ +#define MC_CMD_DESC_PROXY_FUNC_CLOSE_OUT_LEN 0 + +/* DESC_PROXY_FUNC_MAP structuredef */ +#define DESC_PROXY_FUNC_MAP_LEN 52 +/* PCIe function ID (as struct PCIE_FUNCTION) */ +#define DESC_PROXY_FUNC_MAP_FUNC_OFST 0 +#define DESC_PROXY_FUNC_MAP_FUNC_LEN 8 +#define DESC_PROXY_FUNC_MAP_FUNC_LO_OFST 0 +#define DESC_PROXY_FUNC_MAP_FUNC_LO_LEN 4 +#define DESC_PROXY_FUNC_MAP_FUNC_LO_LBN 0 +#define DESC_PROXY_FUNC_MAP_FUNC_LO_WIDTH 32 +#define DESC_PROXY_FUNC_MAP_FUNC_HI_OFST 4 +#define DESC_PROXY_FUNC_MAP_FUNC_HI_LEN 4 +#define DESC_PROXY_FUNC_MAP_FUNC_HI_LBN 32 +#define DESC_PROXY_FUNC_MAP_FUNC_HI_WIDTH 32 +#define DESC_PROXY_FUNC_MAP_FUNC_LBN 0 +#define DESC_PROXY_FUNC_MAP_FUNC_WIDTH 64 +#define DESC_PROXY_FUNC_MAP_FUNC_PF_OFST 0 +#define DESC_PROXY_FUNC_MAP_FUNC_PF_LEN 2 +#define DESC_PROXY_FUNC_MAP_FUNC_PF_LBN 0 +#define DESC_PROXY_FUNC_MAP_FUNC_PF_WIDTH 16 +#define DESC_PROXY_FUNC_MAP_FUNC_VF_OFST 2 +#define DESC_PROXY_FUNC_MAP_FUNC_VF_LEN 2 +#define DESC_PROXY_FUNC_MAP_FUNC_VF_LBN 16 +#define DESC_PROXY_FUNC_MAP_FUNC_VF_WIDTH 16 +#define DESC_PROXY_FUNC_MAP_FUNC_INTF_OFST 4 +#define DESC_PROXY_FUNC_MAP_FUNC_INTF_LEN 4 +#define DESC_PROXY_FUNC_MAP_FUNC_INTF_LBN 32 +#define 
DESC_PROXY_FUNC_MAP_FUNC_INTF_WIDTH 32 +/* Function personality */ +#define DESC_PROXY_FUNC_MAP_PERSONALITY_OFST 8 +#define DESC_PROXY_FUNC_MAP_PERSONALITY_LEN 4 +/* Enum values, see field(s): */ +/* FUNCTION_PERSONALITY/ID */ +#define DESC_PROXY_FUNC_MAP_PERSONALITY_LBN 64 +#define DESC_PROXY_FUNC_MAP_PERSONALITY_WIDTH 32 +/* User-defined label (zero-terminated ASCII string) to uniquely identify the + * function + */ +#define DESC_PROXY_FUNC_MAP_LABEL_OFST 12 +#define DESC_PROXY_FUNC_MAP_LABEL_LEN 40 +#define DESC_PROXY_FUNC_MAP_LABEL_LBN 96 +#define DESC_PROXY_FUNC_MAP_LABEL_WIDTH 320 + + +/***********************************/ +/* MC_CMD_DESC_PROXY_FUNC_ENUM + * Enumerate existing descriptor proxy functions + */ +#define MC_CMD_DESC_PROXY_FUNC_ENUM 0x177 +#undef MC_CMD_0x177_PRIVILEGE_CTG + +#define MC_CMD_0x177_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_DESC_PROXY_FUNC_ENUM_IN msgrequest */ +#define MC_CMD_DESC_PROXY_FUNC_ENUM_IN_LEN 4 +/* Starting index, set to 0 on first request. See + * MC_CMD_DESC_PROXY_FUNC_ENUM_OUT/FLAGS. + */ +#define MC_CMD_DESC_PROXY_FUNC_ENUM_IN_START_IDX_OFST 0 +#define MC_CMD_DESC_PROXY_FUNC_ENUM_IN_START_IDX_LEN 4 + +/* MC_CMD_DESC_PROXY_FUNC_ENUM_OUT msgresponse */ +#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_LENMIN 4 +#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_LENMAX 212 +#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_LENMAX_MCDI2 992 +#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_LEN(num) (4+52*(num)) +#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_NUM(len) (((len)-4)/52) +#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FLAGS_OFST 0 +#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FLAGS_LEN 4 +#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_MORE_DATA_OFST 0 +#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_MORE_DATA_LBN 0 +#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_MORE_DATA_WIDTH 1 +/* Function map, as array of DESC_PROXY_FUNC_MAP */ +#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_OFST 4 +#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_LEN 52 +#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_MINNUM 0 +#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_MAXNUM 4 +#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_MAXNUM_MCDI2 19 +#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_FUNC_OFST 4 +#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_FUNC_LEN 8 +#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_FUNC_LO_OFST 4 +#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_FUNC_LO_LEN 4 +#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_FUNC_LO_LBN 32 +#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_FUNC_LO_WIDTH 32 +#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_FUNC_HI_OFST 8 +#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_FUNC_HI_LEN 4 +#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_FUNC_HI_LBN 64 +#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_FUNC_HI_WIDTH 32 +#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_FUNC_PF_OFST 4 +#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_FUNC_PF_LEN 2 +#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_FUNC_VF_OFST 6 +#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_FUNC_VF_LEN 2 +#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_FUNC_INTF_OFST 8 +#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_FUNC_INTF_LEN 4 +#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_PERSONALITY_OFST 12 +#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_PERSONALITY_LEN 4 +#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_LABEL_OFST 16 +#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_LABEL_LEN 40 + + +/***********************************/ +/* MC_CMD_DESC_PROXY_FUNC_ENABLE + * Enable descriptor 
proxying for function into target event queue. Returns VI + * allocation info for the proxy source function, so that the caller can map + * absolute VI IDs from descriptor proxy events back to the originating + * function. This is a legacy function that only supports single queue proxy + * devices. It is also limited in that it can only be called after host driver + * attach (once VI allocation is known) and will return MC_CMD_ERR_ENOTCONN + * otherwise. For new code, see MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE which + * supports multi-queue devices and has no dependency on host driver attach. + */ +#define MC_CMD_DESC_PROXY_FUNC_ENABLE 0x178 +#undef MC_CMD_0x178_PRIVILEGE_CTG + +#define MC_CMD_0x178_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_DESC_PROXY_FUNC_ENABLE_IN msgrequest */ +#define MC_CMD_DESC_PROXY_FUNC_ENABLE_IN_LEN 8 +/* Handle to descriptor proxy function (as returned by + * MC_CMD_DESC_PROXY_FUNC_OPEN) + */ +#define MC_CMD_DESC_PROXY_FUNC_ENABLE_IN_HANDLE_OFST 0 +#define MC_CMD_DESC_PROXY_FUNC_ENABLE_IN_HANDLE_LEN 4 +/* Descriptor proxy sink queue (caller function relative). Must be extended + * width event queue + */ +#define MC_CMD_DESC_PROXY_FUNC_ENABLE_IN_TARGET_EVQ_OFST 4 +#define MC_CMD_DESC_PROXY_FUNC_ENABLE_IN_TARGET_EVQ_LEN 4 + +/* MC_CMD_DESC_PROXY_FUNC_ENABLE_OUT msgresponse */ +#define MC_CMD_DESC_PROXY_FUNC_ENABLE_OUT_LEN 8 +/* The number of VIs allocated on the function */ +#define MC_CMD_DESC_PROXY_FUNC_ENABLE_OUT_VI_COUNT_OFST 0 +#define MC_CMD_DESC_PROXY_FUNC_ENABLE_OUT_VI_COUNT_LEN 4 +/* The base absolute VI number allocated to the function. */ +#define MC_CMD_DESC_PROXY_FUNC_ENABLE_OUT_VI_BASE_OFST 4 +#define MC_CMD_DESC_PROXY_FUNC_ENABLE_OUT_VI_BASE_LEN 4 + + +/***********************************/ +/* MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE + * Enable descriptor proxying for a source queue on a host function into target + * event queue. Source queue number is a relative virtqueue number on the + * source function (0 to max_virtqueues-1). For a multi-queue device, the + * caller must enable all source queues individually. To retrieve absolute VI + * information for the source function (so that VI IDs from descriptor proxy + * events can be mapped back to source function / queue) see + * MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO + */ +#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE 0x1d0 +#undef MC_CMD_0x1d0_PRIVILEGE_CTG + +#define MC_CMD_0x1d0_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN msgrequest */ +#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_LEN 12 +/* Handle to descriptor proxy function (as returned by + * MC_CMD_DESC_PROXY_FUNC_OPEN) + */ +#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_HANDLE_OFST 0 +#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_HANDLE_LEN 4 +/* Source relative queue number to enable proxying on */ +#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_SOURCE_QUEUE_OFST 4 +#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_SOURCE_QUEUE_LEN 4 +/* Descriptor proxy sink queue (caller function relative). Must be extended + * width event queue + */ +#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_TARGET_EVQ_OFST 8 +#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_TARGET_EVQ_LEN 4 + +/* MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_OUT msgresponse */ +#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_DESC_PROXY_FUNC_DISABLE + * Disable descriptor proxying for function. For multi-queue functions, + * disables all queues. 
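+ *
+ * The per-queue alternative for multi-queue devices is
+ * MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE (defined below), e.g. (a minimal
+ * sketch, assuming the sfc MCDI helpers from mcdi.h, a caller-supplied
+ * 'struct efx_nic *efx', a 'handle' from MC_CMD_DESC_PROXY_FUNC_OPEN and
+ * the device's queue count in 'num_queues'; error handling elided):
+ *
+ *   MCDI_DECLARE_BUF(inbuf, MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_IN_LEN);
+ *   unsigned int q;
+ *
+ *   for (q = 0; q < num_queues; q++) {
+ *           MCDI_SET_DWORD(inbuf, DESC_PROXY_FUNC_DISABLE_QUEUE_IN_HANDLE,
+ *                          handle);
+ *           MCDI_SET_DWORD(inbuf,
+ *                          DESC_PROXY_FUNC_DISABLE_QUEUE_IN_SOURCE_QUEUE, q);
+ *           efx_mcdi_rpc(efx, MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE, inbuf,
+ *                        sizeof(inbuf), NULL, 0, NULL);
+ *   }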
+ */ +#define MC_CMD_DESC_PROXY_FUNC_DISABLE 0x179 +#undef MC_CMD_0x179_PRIVILEGE_CTG + +#define MC_CMD_0x179_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_DESC_PROXY_FUNC_DISABLE_IN msgrequest */ +#define MC_CMD_DESC_PROXY_FUNC_DISABLE_IN_LEN 4 +/* Handle to descriptor proxy function (as returned by + * MC_CMD_DESC_PROXY_FUNC_OPEN) + */ +#define MC_CMD_DESC_PROXY_FUNC_DISABLE_IN_HANDLE_OFST 0 +#define MC_CMD_DESC_PROXY_FUNC_DISABLE_IN_HANDLE_LEN 4 + +/* MC_CMD_DESC_PROXY_FUNC_DISABLE_OUT msgresponse */ +#define MC_CMD_DESC_PROXY_FUNC_DISABLE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE + * Disable descriptor proxying for a specific source queue on a function. + */ +#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE 0x1d1 +#undef MC_CMD_0x1d1_PRIVILEGE_CTG + +#define MC_CMD_0x1d1_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_IN msgrequest */ +#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_IN_LEN 8 +/* Handle to descriptor proxy function (as returned by + * MC_CMD_DESC_PROXY_FUNC_OPEN) + */ +#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_IN_HANDLE_OFST 0 +#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_IN_HANDLE_LEN 4 +/* Source relative queue number to disable proxying on */ +#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_IN_SOURCE_QUEUE_OFST 4 +#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_IN_SOURCE_QUEUE_LEN 4 + +/* MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_OUT msgresponse */ +#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_DESC_PROXY_GET_VI_INFO + * Returns absolute VI allocation information for the descriptor proxy source + * function referenced by HANDLE, so that the caller can map absolute VI IDs + * from descriptor proxy events back to the originating function and queue. The + * call is only valid after the host driver for the source function has + * attached (after receiving a driver attach event for the descriptor proxy + * function) and will fail with ENOTCONN otherwise. + */ +#define MC_CMD_DESC_PROXY_GET_VI_INFO 0x1d2 +#undef MC_CMD_0x1d2_PRIVILEGE_CTG + +#define MC_CMD_0x1d2_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_DESC_PROXY_GET_VI_INFO_IN msgrequest */ +#define MC_CMD_DESC_PROXY_GET_VI_INFO_IN_LEN 4 +/* Handle to descriptor proxy function (as returned by + * MC_CMD_DESC_PROXY_FUNC_OPEN) + */ +#define MC_CMD_DESC_PROXY_GET_VI_INFO_IN_HANDLE_OFST 0 +#define MC_CMD_DESC_PROXY_GET_VI_INFO_IN_HANDLE_LEN 4 + +/* MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT msgresponse */ +#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_LENMIN 0 +#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_LENMAX 252 +#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_LEN(num) (0+4*(num)) +#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_NUM(len) (((len)-0)/4) +/* VI information (VI ID + VI relative queue number) for each of the source + * queues (in order from 0 to max_virtqueues-1), as array of QUEUE_ID + * structures. 
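+ *
+ * The number of valid entries follows from the response length, e.g. (a
+ * minimal sketch, assuming the sfc MCDI helpers from mcdi.h, with 'outbuf'
+ * and 'outlen' being the response buffer and length returned by
+ * efx_mcdi_rpc(); error handling elided):
+ *
+ *   unsigned int q, num_queues;
+ *
+ *   num_queues = MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_NUM(outlen);
+ *   for (q = 0; q < num_queues; q++) {
+ *           u32 qid = MCDI_ARRAY_DWORD(outbuf,
+ *                           DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP, q);
+ *           unsigned int abs_vi = qid & 0xffff;      // QUEUE_ID_ABS_VI
+ *           unsigned int rel_q = (qid >> 16) & 0x1;  // QUEUE_ID_REL_QUEUE
+ *   }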
+ */ +#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_OFST 0 +#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_LEN 4 +#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_MINNUM 0 +#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_MAXNUM 63 +#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_MAXNUM_MCDI2 255 +#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_ABS_VI_OFST 0 +#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_ABS_VI_LEN 2 +#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_REL_QUEUE_LBN 16 +#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_REL_QUEUE_WIDTH 1 +#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_RESERVED_LBN 17 +#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_RESERVED_WIDTH 15 + + +/***********************************/ +/* MC_CMD_GET_ADDR_SPC_ID + * Get Address space identifier for use in mem2mem descriptors for a given + * target. See SF-120734-TC for details on ADDR_SPC_IDs and mem2mem + * descriptors. + */ +#define MC_CMD_GET_ADDR_SPC_ID 0x1a0 +#undef MC_CMD_0x1a0_PRIVILEGE_CTG + +#define MC_CMD_0x1a0_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_GET_ADDR_SPC_ID_IN msgrequest */ +#define MC_CMD_GET_ADDR_SPC_ID_IN_LEN 16 +/* Resource type to get ADDR_SPC_ID for */ +#define MC_CMD_GET_ADDR_SPC_ID_IN_TYPE_OFST 0 +#define MC_CMD_GET_ADDR_SPC_ID_IN_TYPE_LEN 4 +/* enum: Address space ID for host/AP memory DMA over the same interface this + * MCDI was called on + */ +#define MC_CMD_GET_ADDR_SPC_ID_IN_SELF 0x0 +/* enum: Address space ID for host/AP memory DMA via PCI interface and function + * specified by FUNC + */ +#define MC_CMD_GET_ADDR_SPC_ID_IN_PCI_FUNC 0x1 +/* enum: Address space ID for host/AP memory DMA via PCI interface and function + * specified by FUNC with PASID value specified by PASID + */ +#define MC_CMD_GET_ADDR_SPC_ID_IN_PCI_FUNC_PASID 0x2 +/* enum: Address space ID for host/AP memory DMA via PCI interface and function + * specified by FUNC with PASID value of relative VI specified by VI + */ +#define MC_CMD_GET_ADDR_SPC_ID_IN_REL_VI 0x3 +/* enum: Address space ID for host/AP memory DMA via PCI interface, function + * and PASID value of absolute VI specified by VI + */ +#define MC_CMD_GET_ADDR_SPC_ID_IN_ABS_VI 0x4 +/* enum: Address space ID for host memory DMA via PCI interface and function of + * descriptor proxy function specified by HANDLE + */ +#define MC_CMD_GET_ADDR_SPC_ID_IN_DESC_PROXY_HANDLE 0x5 +/* enum: Address space ID for DMA to/from MC memory */ +#define MC_CMD_GET_ADDR_SPC_ID_IN_MC_MEM 0x6 +/* enum: Address space ID for DMA to/from other SmartNIC memory (on-chip, DDR) + */ +#define MC_CMD_GET_ADDR_SPC_ID_IN_NIC_MEM 0x7 +/* PCIe Function ID (as struct PCIE_FUNCTION). Only valid if TYPE is PCI_FUNC, + * PCI_FUNC_PASID or REL_VI. 
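The TYPE enums above select how the DMA target is named; which of the remaining IN fields matter depends on TYPE. As a minimal, illustrative sketch (not part of the patch), requesting the address space ID for the caller's own host memory needs only TYPE=SELF, with the other IN fields left zero:

static int get_self_addr_spc_id(struct efx_nic *efx, u64 *addr_spc_id)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_ADDR_SPC_ID_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_ADDR_SPC_ID_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, GET_ADDR_SPC_ID_IN_TYPE,
		       MC_CMD_GET_ADDR_SPC_ID_IN_SELF);
	rc = efx_mcdi_rpc(efx, MC_CMD_GET_ADDR_SPC_ID, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	/* 64-bit field; only the low 36 bits are currently meaningful,
	 * as noted on the OUT definition below.
	 */
	*addr_spc_id = MCDI_QWORD(outbuf, GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID);
	return 0;
}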
+ */ +#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_OFST 4 +#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_LEN 8 +#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_LO_OFST 4 +#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_LO_LEN 4 +#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_LO_LBN 32 +#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_LO_WIDTH 32 +#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_HI_OFST 8 +#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_HI_LEN 4 +#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_HI_LBN 64 +#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_HI_WIDTH 32 +#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_PF_OFST 4 +#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_PF_LEN 2 +#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_VF_OFST 6 +#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_VF_LEN 2 +#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_INTF_OFST 8 +#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_INTF_LEN 4 +/* PASID value. Only valid if TYPE is PCI_FUNC_PASID. */ +#define MC_CMD_GET_ADDR_SPC_ID_IN_PASID_OFST 12 +#define MC_CMD_GET_ADDR_SPC_ID_IN_PASID_LEN 4 +/* Relative or absolute VI number. Only valid if TYPE is REL_VI or ABS_VI */ +#define MC_CMD_GET_ADDR_SPC_ID_IN_VI_OFST 12 +#define MC_CMD_GET_ADDR_SPC_ID_IN_VI_LEN 4 +/* Descriptor proxy function handle. Only valid if TYPE is DESC_PROXY_HANDLE. + */ +#define MC_CMD_GET_ADDR_SPC_ID_IN_HANDLE_OFST 4 +#define MC_CMD_GET_ADDR_SPC_ID_IN_HANDLE_LEN 4 + +/* MC_CMD_GET_ADDR_SPC_ID_OUT msgresponse */ +#define MC_CMD_GET_ADDR_SPC_ID_OUT_LEN 8 +/* Address Space ID for the requested target. Only the lower 36 bits are valid + * in the current SmartNIC implementation. + */ +#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_OFST 0 +#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_LEN 8 +#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_LO_OFST 0 +#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_LO_LEN 4 +#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_LO_LBN 0 +#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_LO_WIDTH 32 +#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_HI_OFST 4 +#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_HI_LEN 4 +#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_HI_LBN 32 +#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_HI_WIDTH 32 + + +/***********************************/ +/* MC_CMD_GET_CLIENT_HANDLE + * Obtain a handle for a client given a description of that client. N.B. this + * command is subject to change given the open discussion about how PCIe + * functions should be referenced on an iEP (integrated endpoint: functions + * span multiple buses) and multihost (multiple PCIe interfaces) system. + */ +#define MC_CMD_GET_CLIENT_HANDLE 0x1c3 +#undef MC_CMD_0x1c3_PRIVILEGE_CTG + +#define MC_CMD_0x1c3_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_CLIENT_HANDLE_IN msgrequest */ +#define MC_CMD_GET_CLIENT_HANDLE_IN_LEN 12 +/* Type of client to get a client handle for */ +#define MC_CMD_GET_CLIENT_HANDLE_IN_TYPE_OFST 0 +#define MC_CMD_GET_CLIENT_HANDLE_IN_TYPE_LEN 4 +/* enum: Obtain a client handle for a PCIe function-type client. */ +#define MC_CMD_GET_CLIENT_HANDLE_IN_TYPE_FUNC 0x0 +/* PCIe Function ID (as struct PCIE_FUNCTION). Valid when TYPE==FUNC. Use: - + * INTF=CALLER, PF=PF_NULL, VF=VF_NULL to refer to the calling function - + * INTF=CALLER, PF=PF_NULL, VF=... to refer to a VF child of the calling PF or + * a sibling VF of the calling VF. - INTF=CALLER, PF=..., VF=VF_NULL to refer + * to a PF on the calling interface - INTF=CALLER, PF=..., VF=... to refer to a + * VF on the calling interface - INTF=..., PF=..., VF=VF_NULL to refer to a PF + * on a named interface - INTF=..., PF=..., VF=... to refer to a VF on a named + * interface where ... 
refers to a small integer for the VF/PF fields, and to + * values from the PCIE_INTERFACE enum for the INTF field. It's only + * meaningful to use INTF=CALLER within a structure that's an argument to + * MC_CMD_DEVEL_GET_CLIENT_HANDLE. + */ +#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_OFST 4 +#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_LEN 8 +#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_LO_OFST 4 +#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_LO_LEN 4 +#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_LO_LBN 32 +#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_LO_WIDTH 32 +#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_HI_OFST 8 +#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_HI_LEN 4 +#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_HI_LBN 64 +#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_HI_WIDTH 32 +/* enum: NULL value for the INTF field of struct PCIE_FUNCTION. Provided for + * backwards compatibility only, callers should use PCIE_INTERFACE_CALLER. + */ +#define MC_CMD_GET_CLIENT_HANDLE_IN_PCIE_FUNCTION_INTF_NULL 0xffffffff +#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_PF_OFST 4 +#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_PF_LEN 2 +#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_VF_OFST 6 +#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_VF_LEN 2 +#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_INTF_OFST 8 +#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_INTF_LEN 4 + +/* MC_CMD_GET_CLIENT_HANDLE_OUT msgresponse */ +#define MC_CMD_GET_CLIENT_HANDLE_OUT_LEN 4 +#define MC_CMD_GET_CLIENT_HANDLE_OUT_HANDLE_OFST 0 +#define MC_CMD_GET_CLIENT_HANDLE_OUT_HANDLE_LEN 4 + +/* MAE_FIELD_FLAGS structuredef */ +#define MAE_FIELD_FLAGS_LEN 4 +#define MAE_FIELD_FLAGS_FLAT_OFST 0 +#define MAE_FIELD_FLAGS_FLAT_LEN 4 +#define MAE_FIELD_FLAGS_SUPPORT_STATUS_OFST 0 +#define MAE_FIELD_FLAGS_SUPPORT_STATUS_LBN 0 +#define MAE_FIELD_FLAGS_SUPPORT_STATUS_WIDTH 6 +#define MAE_FIELD_FLAGS_MASK_AFFECTS_CLASS_OFST 0 +#define MAE_FIELD_FLAGS_MASK_AFFECTS_CLASS_LBN 6 +#define MAE_FIELD_FLAGS_MASK_AFFECTS_CLASS_WIDTH 1 +#define MAE_FIELD_FLAGS_MATCH_AFFECTS_CLASS_OFST 0 +#define MAE_FIELD_FLAGS_MATCH_AFFECTS_CLASS_LBN 7 +#define MAE_FIELD_FLAGS_MATCH_AFFECTS_CLASS_WIDTH 1 +#define MAE_FIELD_FLAGS_FLAT_LBN 0 +#define MAE_FIELD_FLAGS_FLAT_WIDTH 32 + +/* MAE_ENC_FIELD_PAIRS structuredef: Mask and value pairs for all fields that + * it makes sense to use to determine the encapsulation type of a packet. Its + * intended use is to keep a common packing of fields across multiple MCDI + * commands, keeping things inherently synchronised and allowing code to be + * shared. To use in an MCDI command, the command should end with a variable + * length byte array populated with this structure. Do not extend this + * structure. Instead, create _Vx versions with the necessary fields appended. + * That way, the existing semantics for extending MCDI commands are preserved.
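The packing convention this comment describes is value-then-mask for every field, with an all-zero mask meaning "don't care". A hypothetical helper, sketched against the ENC_ETHER_TYPE_BE offsets defined below, showing how an exact match on the outer EtherType would be encoded into a caller-provided buffer:

static void enc_pairs_set_ether_type(u8 *buf, __be16 ether_type)
{
	/* Illustrative only: _BE fields are stored big-endian, and the
	 * mask immediately follows the value at its own OFST.
	 */
	memcpy(buf + MAE_ENC_FIELD_PAIRS_ENC_ETHER_TYPE_BE_OFST, &ether_type,
	       MAE_ENC_FIELD_PAIRS_ENC_ETHER_TYPE_BE_LEN);
	memset(buf + MAE_ENC_FIELD_PAIRS_ENC_ETHER_TYPE_BE_MASK_OFST, 0xff,
	       MAE_ENC_FIELD_PAIRS_ENC_ETHER_TYPE_BE_MASK_LEN);
}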
+ */ +#define MAE_ENC_FIELD_PAIRS_LEN 156 +#define MAE_ENC_FIELD_PAIRS_INGRESS_MPORT_SELECTOR_OFST 0 +#define MAE_ENC_FIELD_PAIRS_INGRESS_MPORT_SELECTOR_LEN 4 +#define MAE_ENC_FIELD_PAIRS_INGRESS_MPORT_SELECTOR_LBN 0 +#define MAE_ENC_FIELD_PAIRS_INGRESS_MPORT_SELECTOR_WIDTH 32 +#define MAE_ENC_FIELD_PAIRS_INGRESS_MPORT_SELECTOR_MASK_OFST 4 +#define MAE_ENC_FIELD_PAIRS_INGRESS_MPORT_SELECTOR_MASK_LEN 4 +#define MAE_ENC_FIELD_PAIRS_INGRESS_MPORT_SELECTOR_MASK_LBN 32 +#define MAE_ENC_FIELD_PAIRS_INGRESS_MPORT_SELECTOR_MASK_WIDTH 32 +#define MAE_ENC_FIELD_PAIRS_ENC_ETHER_TYPE_BE_OFST 8 +#define MAE_ENC_FIELD_PAIRS_ENC_ETHER_TYPE_BE_LEN 2 +#define MAE_ENC_FIELD_PAIRS_ENC_ETHER_TYPE_BE_LBN 64 +#define MAE_ENC_FIELD_PAIRS_ENC_ETHER_TYPE_BE_WIDTH 16 +#define MAE_ENC_FIELD_PAIRS_ENC_ETHER_TYPE_BE_MASK_OFST 10 +#define MAE_ENC_FIELD_PAIRS_ENC_ETHER_TYPE_BE_MASK_LEN 2 +#define MAE_ENC_FIELD_PAIRS_ENC_ETHER_TYPE_BE_MASK_LBN 80 +#define MAE_ENC_FIELD_PAIRS_ENC_ETHER_TYPE_BE_MASK_WIDTH 16 +#define MAE_ENC_FIELD_PAIRS_ENC_VLAN0_TCI_BE_OFST 12 +#define MAE_ENC_FIELD_PAIRS_ENC_VLAN0_TCI_BE_LEN 2 +#define MAE_ENC_FIELD_PAIRS_ENC_VLAN0_TCI_BE_LBN 96 +#define MAE_ENC_FIELD_PAIRS_ENC_VLAN0_TCI_BE_WIDTH 16 +#define MAE_ENC_FIELD_PAIRS_ENC_VLAN0_TCI_BE_MASK_OFST 14 +#define MAE_ENC_FIELD_PAIRS_ENC_VLAN0_TCI_BE_MASK_LEN 2 +#define MAE_ENC_FIELD_PAIRS_ENC_VLAN0_TCI_BE_MASK_LBN 112 +#define MAE_ENC_FIELD_PAIRS_ENC_VLAN0_TCI_BE_MASK_WIDTH 16 +#define MAE_ENC_FIELD_PAIRS_ENC_VLAN0_PROTO_BE_OFST 16 +#define MAE_ENC_FIELD_PAIRS_ENC_VLAN0_PROTO_BE_LEN 2 +#define MAE_ENC_FIELD_PAIRS_ENC_VLAN0_PROTO_BE_LBN 128 +#define MAE_ENC_FIELD_PAIRS_ENC_VLAN0_PROTO_BE_WIDTH 16 +#define MAE_ENC_FIELD_PAIRS_ENC_VLAN0_PROTO_BE_MASK_OFST 18 +#define MAE_ENC_FIELD_PAIRS_ENC_VLAN0_PROTO_BE_MASK_LEN 2 +#define MAE_ENC_FIELD_PAIRS_ENC_VLAN0_PROTO_BE_MASK_LBN 144 +#define MAE_ENC_FIELD_PAIRS_ENC_VLAN0_PROTO_BE_MASK_WIDTH 16 +#define MAE_ENC_FIELD_PAIRS_ENC_VLAN1_TCI_BE_OFST 20 +#define MAE_ENC_FIELD_PAIRS_ENC_VLAN1_TCI_BE_LEN 2 +#define MAE_ENC_FIELD_PAIRS_ENC_VLAN1_TCI_BE_LBN 160 +#define MAE_ENC_FIELD_PAIRS_ENC_VLAN1_TCI_BE_WIDTH 16 +#define MAE_ENC_FIELD_PAIRS_ENC_VLAN1_TCI_BE_MASK_OFST 22 +#define MAE_ENC_FIELD_PAIRS_ENC_VLAN1_TCI_BE_MASK_LEN 2 +#define MAE_ENC_FIELD_PAIRS_ENC_VLAN1_TCI_BE_MASK_LBN 176 +#define MAE_ENC_FIELD_PAIRS_ENC_VLAN1_TCI_BE_MASK_WIDTH 16 +#define MAE_ENC_FIELD_PAIRS_ENC_VLAN1_PROTO_BE_OFST 24 +#define MAE_ENC_FIELD_PAIRS_ENC_VLAN1_PROTO_BE_LEN 2 +#define MAE_ENC_FIELD_PAIRS_ENC_VLAN1_PROTO_BE_LBN 192 +#define MAE_ENC_FIELD_PAIRS_ENC_VLAN1_PROTO_BE_WIDTH 16 +#define MAE_ENC_FIELD_PAIRS_ENC_VLAN1_PROTO_BE_MASK_OFST 26 +#define MAE_ENC_FIELD_PAIRS_ENC_VLAN1_PROTO_BE_MASK_LEN 2 +#define MAE_ENC_FIELD_PAIRS_ENC_VLAN1_PROTO_BE_MASK_LBN 208 +#define MAE_ENC_FIELD_PAIRS_ENC_VLAN1_PROTO_BE_MASK_WIDTH 16 +#define MAE_ENC_FIELD_PAIRS_ENC_ETH_SADDR_BE_OFST 28 +#define MAE_ENC_FIELD_PAIRS_ENC_ETH_SADDR_BE_LEN 6 +#define MAE_ENC_FIELD_PAIRS_ENC_ETH_SADDR_BE_LBN 224 +#define MAE_ENC_FIELD_PAIRS_ENC_ETH_SADDR_BE_WIDTH 48 +#define MAE_ENC_FIELD_PAIRS_ENC_ETH_SADDR_BE_MASK_OFST 34 +#define MAE_ENC_FIELD_PAIRS_ENC_ETH_SADDR_BE_MASK_LEN 6 +#define MAE_ENC_FIELD_PAIRS_ENC_ETH_SADDR_BE_MASK_LBN 272 +#define MAE_ENC_FIELD_PAIRS_ENC_ETH_SADDR_BE_MASK_WIDTH 48 +#define MAE_ENC_FIELD_PAIRS_ENC_ETH_DADDR_BE_OFST 40 +#define MAE_ENC_FIELD_PAIRS_ENC_ETH_DADDR_BE_LEN 6 +#define MAE_ENC_FIELD_PAIRS_ENC_ETH_DADDR_BE_LBN 320 +#define MAE_ENC_FIELD_PAIRS_ENC_ETH_DADDR_BE_WIDTH 48 +#define MAE_ENC_FIELD_PAIRS_ENC_ETH_DADDR_BE_MASK_OFST 46 +#define 
MAE_ENC_FIELD_PAIRS_ENC_ETH_DADDR_BE_MASK_LEN 6 +#define MAE_ENC_FIELD_PAIRS_ENC_ETH_DADDR_BE_MASK_LBN 368 +#define MAE_ENC_FIELD_PAIRS_ENC_ETH_DADDR_BE_MASK_WIDTH 48 +#define MAE_ENC_FIELD_PAIRS_ENC_SRC_IP4_BE_OFST 52 +#define MAE_ENC_FIELD_PAIRS_ENC_SRC_IP4_BE_LEN 4 +#define MAE_ENC_FIELD_PAIRS_ENC_SRC_IP4_BE_LBN 416 +#define MAE_ENC_FIELD_PAIRS_ENC_SRC_IP4_BE_WIDTH 32 +#define MAE_ENC_FIELD_PAIRS_ENC_SRC_IP4_BE_MASK_OFST 56 +#define MAE_ENC_FIELD_PAIRS_ENC_SRC_IP4_BE_MASK_LEN 4 +#define MAE_ENC_FIELD_PAIRS_ENC_SRC_IP4_BE_MASK_LBN 448 +#define MAE_ENC_FIELD_PAIRS_ENC_SRC_IP4_BE_MASK_WIDTH 32 +#define MAE_ENC_FIELD_PAIRS_ENC_SRC_IP6_BE_OFST 60 +#define MAE_ENC_FIELD_PAIRS_ENC_SRC_IP6_BE_LEN 16 +#define MAE_ENC_FIELD_PAIRS_ENC_SRC_IP6_BE_LBN 480 +#define MAE_ENC_FIELD_PAIRS_ENC_SRC_IP6_BE_WIDTH 128 +#define MAE_ENC_FIELD_PAIRS_ENC_SRC_IP6_BE_MASK_OFST 76 +#define MAE_ENC_FIELD_PAIRS_ENC_SRC_IP6_BE_MASK_LEN 16 +#define MAE_ENC_FIELD_PAIRS_ENC_SRC_IP6_BE_MASK_LBN 608 +#define MAE_ENC_FIELD_PAIRS_ENC_SRC_IP6_BE_MASK_WIDTH 128 +#define MAE_ENC_FIELD_PAIRS_ENC_DST_IP4_BE_OFST 92 +#define MAE_ENC_FIELD_PAIRS_ENC_DST_IP4_BE_LEN 4 +#define MAE_ENC_FIELD_PAIRS_ENC_DST_IP4_BE_LBN 736 +#define MAE_ENC_FIELD_PAIRS_ENC_DST_IP4_BE_WIDTH 32 +#define MAE_ENC_FIELD_PAIRS_ENC_DST_IP4_BE_MASK_OFST 96 +#define MAE_ENC_FIELD_PAIRS_ENC_DST_IP4_BE_MASK_LEN 4 +#define MAE_ENC_FIELD_PAIRS_ENC_DST_IP4_BE_MASK_LBN 768 +#define MAE_ENC_FIELD_PAIRS_ENC_DST_IP4_BE_MASK_WIDTH 32 +#define MAE_ENC_FIELD_PAIRS_ENC_DST_IP6_BE_OFST 100 +#define MAE_ENC_FIELD_PAIRS_ENC_DST_IP6_BE_LEN 16 +#define MAE_ENC_FIELD_PAIRS_ENC_DST_IP6_BE_LBN 800 +#define MAE_ENC_FIELD_PAIRS_ENC_DST_IP6_BE_WIDTH 128 +#define MAE_ENC_FIELD_PAIRS_ENC_DST_IP6_BE_MASK_OFST 116 +#define MAE_ENC_FIELD_PAIRS_ENC_DST_IP6_BE_MASK_LEN 16 +#define MAE_ENC_FIELD_PAIRS_ENC_DST_IP6_BE_MASK_LBN 928 +#define MAE_ENC_FIELD_PAIRS_ENC_DST_IP6_BE_MASK_WIDTH 128 +#define MAE_ENC_FIELD_PAIRS_ENC_IP_PROTO_OFST 132 +#define MAE_ENC_FIELD_PAIRS_ENC_IP_PROTO_LEN 1 +#define MAE_ENC_FIELD_PAIRS_ENC_IP_PROTO_LBN 1056 +#define MAE_ENC_FIELD_PAIRS_ENC_IP_PROTO_WIDTH 8 +#define MAE_ENC_FIELD_PAIRS_ENC_IP_PROTO_MASK_OFST 133 +#define MAE_ENC_FIELD_PAIRS_ENC_IP_PROTO_MASK_LEN 1 +#define MAE_ENC_FIELD_PAIRS_ENC_IP_PROTO_MASK_LBN 1064 +#define MAE_ENC_FIELD_PAIRS_ENC_IP_PROTO_MASK_WIDTH 8 +#define MAE_ENC_FIELD_PAIRS_ENC_IP_TOS_OFST 134 +#define MAE_ENC_FIELD_PAIRS_ENC_IP_TOS_LEN 1 +#define MAE_ENC_FIELD_PAIRS_ENC_IP_TOS_LBN 1072 +#define MAE_ENC_FIELD_PAIRS_ENC_IP_TOS_WIDTH 8 +#define MAE_ENC_FIELD_PAIRS_ENC_IP_TOS_MASK_OFST 135 +#define MAE_ENC_FIELD_PAIRS_ENC_IP_TOS_MASK_LEN 1 +#define MAE_ENC_FIELD_PAIRS_ENC_IP_TOS_MASK_LBN 1080 +#define MAE_ENC_FIELD_PAIRS_ENC_IP_TOS_MASK_WIDTH 8 +#define MAE_ENC_FIELD_PAIRS_ENC_IP_TTL_OFST 136 +#define MAE_ENC_FIELD_PAIRS_ENC_IP_TTL_LEN 1 +#define MAE_ENC_FIELD_PAIRS_ENC_IP_TTL_LBN 1088 +#define MAE_ENC_FIELD_PAIRS_ENC_IP_TTL_WIDTH 8 +#define MAE_ENC_FIELD_PAIRS_ENC_IP_TTL_MASK_OFST 137 +#define MAE_ENC_FIELD_PAIRS_ENC_IP_TTL_MASK_LEN 1 +#define MAE_ENC_FIELD_PAIRS_ENC_IP_TTL_MASK_LBN 1096 +#define MAE_ENC_FIELD_PAIRS_ENC_IP_TTL_MASK_WIDTH 8 +/* Deprecated in favour of ENC_FLAGS alias. 
*/ +#define MAE_ENC_FIELD_PAIRS_ENC_VLAN_FLAGS_OFST 138 +#define MAE_ENC_FIELD_PAIRS_ENC_VLAN_FLAGS_LEN 1 +#define MAE_ENC_FIELD_PAIRS_ENC_HAS_OVLAN_OFST 138 +#define MAE_ENC_FIELD_PAIRS_ENC_HAS_OVLAN_LBN 0 +#define MAE_ENC_FIELD_PAIRS_ENC_HAS_OVLAN_WIDTH 1 +#define MAE_ENC_FIELD_PAIRS_ENC_HAS_IVLAN_OFST 138 +#define MAE_ENC_FIELD_PAIRS_ENC_HAS_IVLAN_LBN 1 +#define MAE_ENC_FIELD_PAIRS_ENC_HAS_IVLAN_WIDTH 1 +#define MAE_ENC_FIELD_PAIRS_ENC_IP_FRAG_OFST 138 +#define MAE_ENC_FIELD_PAIRS_ENC_IP_FRAG_LBN 2 +#define MAE_ENC_FIELD_PAIRS_ENC_IP_FRAG_WIDTH 1 +#define MAE_ENC_FIELD_PAIRS_ENC_VLAN_FLAGS_LBN 1104 +#define MAE_ENC_FIELD_PAIRS_ENC_VLAN_FLAGS_WIDTH 8 +/* More generic alias for ENC_VLAN_FLAGS. */ +#define MAE_ENC_FIELD_PAIRS_ENC_FLAGS_OFST 138 +#define MAE_ENC_FIELD_PAIRS_ENC_FLAGS_LEN 1 +#define MAE_ENC_FIELD_PAIRS_ENC_FLAGS_LBN 1104 +#define MAE_ENC_FIELD_PAIRS_ENC_FLAGS_WIDTH 8 +/* Deprecated in favour of ENC_FLAGS_MASK alias. */ +#define MAE_ENC_FIELD_PAIRS_ENC_VLAN_FLAGS_MASK_OFST 139 +#define MAE_ENC_FIELD_PAIRS_ENC_VLAN_FLAGS_MASK_LEN 1 +#define MAE_ENC_FIELD_PAIRS_ENC_HAS_OVLAN_MASK_OFST 139 +#define MAE_ENC_FIELD_PAIRS_ENC_HAS_OVLAN_MASK_LBN 0 +#define MAE_ENC_FIELD_PAIRS_ENC_HAS_OVLAN_MASK_WIDTH 1 +#define MAE_ENC_FIELD_PAIRS_ENC_HAS_IVLAN_MASK_OFST 139 +#define MAE_ENC_FIELD_PAIRS_ENC_HAS_IVLAN_MASK_LBN 1 +#define MAE_ENC_FIELD_PAIRS_ENC_HAS_IVLAN_MASK_WIDTH 1 +#define MAE_ENC_FIELD_PAIRS_ENC_IP_FRAG_MASK_OFST 139 +#define MAE_ENC_FIELD_PAIRS_ENC_IP_FRAG_MASK_LBN 2 +#define MAE_ENC_FIELD_PAIRS_ENC_IP_FRAG_MASK_WIDTH 1 +#define MAE_ENC_FIELD_PAIRS_ENC_VLAN_FLAGS_MASK_LBN 1112 +#define MAE_ENC_FIELD_PAIRS_ENC_VLAN_FLAGS_MASK_WIDTH 8 +/* More generic alias for ENC_FLAGS_MASK. */ +#define MAE_ENC_FIELD_PAIRS_ENC_FLAGS_MASK_OFST 139 +#define MAE_ENC_FIELD_PAIRS_ENC_FLAGS_MASK_LEN 1 +#define MAE_ENC_FIELD_PAIRS_ENC_FLAGS_MASK_LBN 1112 +#define MAE_ENC_FIELD_PAIRS_ENC_FLAGS_MASK_WIDTH 8 +#define MAE_ENC_FIELD_PAIRS_ENC_IP_FLAGS_BE_OFST 140 +#define MAE_ENC_FIELD_PAIRS_ENC_IP_FLAGS_BE_LEN 4 +#define MAE_ENC_FIELD_PAIRS_ENC_IP_FLAGS_BE_LBN 1120 +#define MAE_ENC_FIELD_PAIRS_ENC_IP_FLAGS_BE_WIDTH 32 +#define MAE_ENC_FIELD_PAIRS_ENC_IP_FLAGS_BE_MASK_OFST 144 +#define MAE_ENC_FIELD_PAIRS_ENC_IP_FLAGS_BE_MASK_LEN 4 +#define MAE_ENC_FIELD_PAIRS_ENC_IP_FLAGS_BE_MASK_LBN 1152 +#define MAE_ENC_FIELD_PAIRS_ENC_IP_FLAGS_BE_MASK_WIDTH 32 +#define MAE_ENC_FIELD_PAIRS_ENC_L4_SPORT_BE_OFST 148 +#define MAE_ENC_FIELD_PAIRS_ENC_L4_SPORT_BE_LEN 2 +#define MAE_ENC_FIELD_PAIRS_ENC_L4_SPORT_BE_LBN 1184 +#define MAE_ENC_FIELD_PAIRS_ENC_L4_SPORT_BE_WIDTH 16 +#define MAE_ENC_FIELD_PAIRS_ENC_L4_SPORT_BE_MASK_OFST 150 +#define MAE_ENC_FIELD_PAIRS_ENC_L4_SPORT_BE_MASK_LEN 2 +#define MAE_ENC_FIELD_PAIRS_ENC_L4_SPORT_BE_MASK_LBN 1200 +#define MAE_ENC_FIELD_PAIRS_ENC_L4_SPORT_BE_MASK_WIDTH 16 +#define MAE_ENC_FIELD_PAIRS_ENC_L4_DPORT_BE_OFST 152 +#define MAE_ENC_FIELD_PAIRS_ENC_L4_DPORT_BE_LEN 2 +#define MAE_ENC_FIELD_PAIRS_ENC_L4_DPORT_BE_LBN 1216 +#define MAE_ENC_FIELD_PAIRS_ENC_L4_DPORT_BE_WIDTH 16 +#define MAE_ENC_FIELD_PAIRS_ENC_L4_DPORT_BE_MASK_OFST 154 +#define MAE_ENC_FIELD_PAIRS_ENC_L4_DPORT_BE_MASK_LEN 2 +#define MAE_ENC_FIELD_PAIRS_ENC_L4_DPORT_BE_MASK_LBN 1232 +#define MAE_ENC_FIELD_PAIRS_ENC_L4_DPORT_BE_MASK_WIDTH 16 + +/* MAE_FIELD_MASK_VALUE_PAIRS structuredef: Mask and value pairs for all fields + * currently defined. Same semantics as MAE_ENC_FIELD_PAIRS. 
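Every field in these structuredefs carries both a byte offset (OFST) and a bit offset (LBN); for byte-aligned fields LBN is simply OFST * 8, so for instance ETHER_TYPE_BE below sits at byte 16, bit 128. A compile-time check of that invariant (illustrative, assuming linux/build_bug.h for static_assert):

/* LBN (bit offset) and OFST (byte offset) describe the same location. */
static_assert(MAE_FIELD_MASK_VALUE_PAIRS_ETHER_TYPE_BE_LBN ==
	      MAE_FIELD_MASK_VALUE_PAIRS_ETHER_TYPE_BE_OFST * 8);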
+ */ +#define MAE_FIELD_MASK_VALUE_PAIRS_LEN 344 +#define MAE_FIELD_MASK_VALUE_PAIRS_INGRESS_MPORT_SELECTOR_OFST 0 +#define MAE_FIELD_MASK_VALUE_PAIRS_INGRESS_MPORT_SELECTOR_LEN 4 +#define MAE_FIELD_MASK_VALUE_PAIRS_INGRESS_MPORT_SELECTOR_LBN 0 +#define MAE_FIELD_MASK_VALUE_PAIRS_INGRESS_MPORT_SELECTOR_WIDTH 32 +#define MAE_FIELD_MASK_VALUE_PAIRS_INGRESS_MPORT_SELECTOR_MASK_OFST 4 +#define MAE_FIELD_MASK_VALUE_PAIRS_INGRESS_MPORT_SELECTOR_MASK_LEN 4 +#define MAE_FIELD_MASK_VALUE_PAIRS_INGRESS_MPORT_SELECTOR_MASK_LBN 32 +#define MAE_FIELD_MASK_VALUE_PAIRS_INGRESS_MPORT_SELECTOR_MASK_WIDTH 32 +#define MAE_FIELD_MASK_VALUE_PAIRS_MARK_OFST 8 +#define MAE_FIELD_MASK_VALUE_PAIRS_MARK_LEN 4 +#define MAE_FIELD_MASK_VALUE_PAIRS_MARK_LBN 64 +#define MAE_FIELD_MASK_VALUE_PAIRS_MARK_WIDTH 32 +#define MAE_FIELD_MASK_VALUE_PAIRS_MARK_MASK_OFST 12 +#define MAE_FIELD_MASK_VALUE_PAIRS_MARK_MASK_LEN 4 +#define MAE_FIELD_MASK_VALUE_PAIRS_MARK_MASK_LBN 96 +#define MAE_FIELD_MASK_VALUE_PAIRS_MARK_MASK_WIDTH 32 +#define MAE_FIELD_MASK_VALUE_PAIRS_ETHER_TYPE_BE_OFST 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_ETHER_TYPE_BE_LEN 2 +#define MAE_FIELD_MASK_VALUE_PAIRS_ETHER_TYPE_BE_LBN 128 +#define MAE_FIELD_MASK_VALUE_PAIRS_ETHER_TYPE_BE_WIDTH 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_ETHER_TYPE_BE_MASK_OFST 18 +#define MAE_FIELD_MASK_VALUE_PAIRS_ETHER_TYPE_BE_MASK_LEN 2 +#define MAE_FIELD_MASK_VALUE_PAIRS_ETHER_TYPE_BE_MASK_LBN 144 +#define MAE_FIELD_MASK_VALUE_PAIRS_ETHER_TYPE_BE_MASK_WIDTH 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN0_TCI_BE_OFST 20 +#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN0_TCI_BE_LEN 2 +#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN0_TCI_BE_LBN 160 +#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN0_TCI_BE_WIDTH 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN0_TCI_BE_MASK_OFST 22 +#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN0_TCI_BE_MASK_LEN 2 +#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN0_TCI_BE_MASK_LBN 176 +#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN0_TCI_BE_MASK_WIDTH 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN0_PROTO_BE_OFST 24 +#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN0_PROTO_BE_LEN 2 +#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN0_PROTO_BE_LBN 192 +#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN0_PROTO_BE_WIDTH 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN0_PROTO_BE_MASK_OFST 26 +#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN0_PROTO_BE_MASK_LEN 2 +#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN0_PROTO_BE_MASK_LBN 208 +#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN0_PROTO_BE_MASK_WIDTH 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN1_TCI_BE_OFST 28 +#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN1_TCI_BE_LEN 2 +#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN1_TCI_BE_LBN 224 +#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN1_TCI_BE_WIDTH 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN1_TCI_BE_MASK_OFST 30 +#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN1_TCI_BE_MASK_LEN 2 +#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN1_TCI_BE_MASK_LBN 240 +#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN1_TCI_BE_MASK_WIDTH 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN1_PROTO_BE_OFST 32 +#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN1_PROTO_BE_LEN 2 +#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN1_PROTO_BE_LBN 256 +#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN1_PROTO_BE_WIDTH 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN1_PROTO_BE_MASK_OFST 34 +#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN1_PROTO_BE_MASK_LEN 2 +#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN1_PROTO_BE_MASK_LBN 272 +#define MAE_FIELD_MASK_VALUE_PAIRS_VLAN1_PROTO_BE_MASK_WIDTH 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_ETH_SADDR_BE_OFST 36 +#define MAE_FIELD_MASK_VALUE_PAIRS_ETH_SADDR_BE_LEN 6 +#define 
MAE_FIELD_MASK_VALUE_PAIRS_ETH_SADDR_BE_LBN 288 +#define MAE_FIELD_MASK_VALUE_PAIRS_ETH_SADDR_BE_WIDTH 48 +#define MAE_FIELD_MASK_VALUE_PAIRS_ETH_SADDR_BE_MASK_OFST 42 +#define MAE_FIELD_MASK_VALUE_PAIRS_ETH_SADDR_BE_MASK_LEN 6 +#define MAE_FIELD_MASK_VALUE_PAIRS_ETH_SADDR_BE_MASK_LBN 336 +#define MAE_FIELD_MASK_VALUE_PAIRS_ETH_SADDR_BE_MASK_WIDTH 48 +#define MAE_FIELD_MASK_VALUE_PAIRS_ETH_DADDR_BE_OFST 48 +#define MAE_FIELD_MASK_VALUE_PAIRS_ETH_DADDR_BE_LEN 6 +#define MAE_FIELD_MASK_VALUE_PAIRS_ETH_DADDR_BE_LBN 384 +#define MAE_FIELD_MASK_VALUE_PAIRS_ETH_DADDR_BE_WIDTH 48 +#define MAE_FIELD_MASK_VALUE_PAIRS_ETH_DADDR_BE_MASK_OFST 54 +#define MAE_FIELD_MASK_VALUE_PAIRS_ETH_DADDR_BE_MASK_LEN 6 +#define MAE_FIELD_MASK_VALUE_PAIRS_ETH_DADDR_BE_MASK_LBN 432 +#define MAE_FIELD_MASK_VALUE_PAIRS_ETH_DADDR_BE_MASK_WIDTH 48 +#define MAE_FIELD_MASK_VALUE_PAIRS_SRC_IP4_BE_OFST 60 +#define MAE_FIELD_MASK_VALUE_PAIRS_SRC_IP4_BE_LEN 4 +#define MAE_FIELD_MASK_VALUE_PAIRS_SRC_IP4_BE_LBN 480 +#define MAE_FIELD_MASK_VALUE_PAIRS_SRC_IP4_BE_WIDTH 32 +#define MAE_FIELD_MASK_VALUE_PAIRS_SRC_IP4_BE_MASK_OFST 64 +#define MAE_FIELD_MASK_VALUE_PAIRS_SRC_IP4_BE_MASK_LEN 4 +#define MAE_FIELD_MASK_VALUE_PAIRS_SRC_IP4_BE_MASK_LBN 512 +#define MAE_FIELD_MASK_VALUE_PAIRS_SRC_IP4_BE_MASK_WIDTH 32 +#define MAE_FIELD_MASK_VALUE_PAIRS_SRC_IP6_BE_OFST 68 +#define MAE_FIELD_MASK_VALUE_PAIRS_SRC_IP6_BE_LEN 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_SRC_IP6_BE_LBN 544 +#define MAE_FIELD_MASK_VALUE_PAIRS_SRC_IP6_BE_WIDTH 128 +#define MAE_FIELD_MASK_VALUE_PAIRS_SRC_IP6_BE_MASK_OFST 84 +#define MAE_FIELD_MASK_VALUE_PAIRS_SRC_IP6_BE_MASK_LEN 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_SRC_IP6_BE_MASK_LBN 672 +#define MAE_FIELD_MASK_VALUE_PAIRS_SRC_IP6_BE_MASK_WIDTH 128 +#define MAE_FIELD_MASK_VALUE_PAIRS_DST_IP4_BE_OFST 100 +#define MAE_FIELD_MASK_VALUE_PAIRS_DST_IP4_BE_LEN 4 +#define MAE_FIELD_MASK_VALUE_PAIRS_DST_IP4_BE_LBN 800 +#define MAE_FIELD_MASK_VALUE_PAIRS_DST_IP4_BE_WIDTH 32 +#define MAE_FIELD_MASK_VALUE_PAIRS_DST_IP4_BE_MASK_OFST 104 +#define MAE_FIELD_MASK_VALUE_PAIRS_DST_IP4_BE_MASK_LEN 4 +#define MAE_FIELD_MASK_VALUE_PAIRS_DST_IP4_BE_MASK_LBN 832 +#define MAE_FIELD_MASK_VALUE_PAIRS_DST_IP4_BE_MASK_WIDTH 32 +#define MAE_FIELD_MASK_VALUE_PAIRS_DST_IP6_BE_OFST 108 +#define MAE_FIELD_MASK_VALUE_PAIRS_DST_IP6_BE_LEN 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_DST_IP6_BE_LBN 864 +#define MAE_FIELD_MASK_VALUE_PAIRS_DST_IP6_BE_WIDTH 128 +#define MAE_FIELD_MASK_VALUE_PAIRS_DST_IP6_BE_MASK_OFST 124 +#define MAE_FIELD_MASK_VALUE_PAIRS_DST_IP6_BE_MASK_LEN 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_DST_IP6_BE_MASK_LBN 992 +#define MAE_FIELD_MASK_VALUE_PAIRS_DST_IP6_BE_MASK_WIDTH 128 +#define MAE_FIELD_MASK_VALUE_PAIRS_IP_PROTO_OFST 140 +#define MAE_FIELD_MASK_VALUE_PAIRS_IP_PROTO_LEN 1 +#define MAE_FIELD_MASK_VALUE_PAIRS_IP_PROTO_LBN 1120 +#define MAE_FIELD_MASK_VALUE_PAIRS_IP_PROTO_WIDTH 8 +#define MAE_FIELD_MASK_VALUE_PAIRS_IP_PROTO_MASK_OFST 141 +#define MAE_FIELD_MASK_VALUE_PAIRS_IP_PROTO_MASK_LEN 1 +#define MAE_FIELD_MASK_VALUE_PAIRS_IP_PROTO_MASK_LBN 1128 +#define MAE_FIELD_MASK_VALUE_PAIRS_IP_PROTO_MASK_WIDTH 8 +#define MAE_FIELD_MASK_VALUE_PAIRS_IP_TOS_OFST 142 +#define MAE_FIELD_MASK_VALUE_PAIRS_IP_TOS_LEN 1 +#define MAE_FIELD_MASK_VALUE_PAIRS_IP_TOS_LBN 1136 +#define MAE_FIELD_MASK_VALUE_PAIRS_IP_TOS_WIDTH 8 +#define MAE_FIELD_MASK_VALUE_PAIRS_IP_TOS_MASK_OFST 143 +#define MAE_FIELD_MASK_VALUE_PAIRS_IP_TOS_MASK_LEN 1 +#define MAE_FIELD_MASK_VALUE_PAIRS_IP_TOS_MASK_LBN 1144 +#define MAE_FIELD_MASK_VALUE_PAIRS_IP_TOS_MASK_WIDTH 8 +/* Due to 
hardware limitations, firmware may return + * MC_CMD_ERR_EINVAL(BAD_IP_TTL) when attempting to match on an IP_TTL value + * other than 1. + */ +#define MAE_FIELD_MASK_VALUE_PAIRS_IP_TTL_OFST 144 +#define MAE_FIELD_MASK_VALUE_PAIRS_IP_TTL_LEN 1 +#define MAE_FIELD_MASK_VALUE_PAIRS_IP_TTL_LBN 1152 +#define MAE_FIELD_MASK_VALUE_PAIRS_IP_TTL_WIDTH 8 +#define MAE_FIELD_MASK_VALUE_PAIRS_IP_TTL_MASK_OFST 145 +#define MAE_FIELD_MASK_VALUE_PAIRS_IP_TTL_MASK_LEN 1 +#define MAE_FIELD_MASK_VALUE_PAIRS_IP_TTL_MASK_LBN 1160 +#define MAE_FIELD_MASK_VALUE_PAIRS_IP_TTL_MASK_WIDTH 8 +#define MAE_FIELD_MASK_VALUE_PAIRS_IP_FLAGS_BE_OFST 148 +#define MAE_FIELD_MASK_VALUE_PAIRS_IP_FLAGS_BE_LEN 4 +#define MAE_FIELD_MASK_VALUE_PAIRS_IP_FLAGS_BE_LBN 1184 +#define MAE_FIELD_MASK_VALUE_PAIRS_IP_FLAGS_BE_WIDTH 32 +#define MAE_FIELD_MASK_VALUE_PAIRS_IP_FLAGS_BE_MASK_OFST 152 +#define MAE_FIELD_MASK_VALUE_PAIRS_IP_FLAGS_BE_MASK_LEN 4 +#define MAE_FIELD_MASK_VALUE_PAIRS_IP_FLAGS_BE_MASK_LBN 1216 +#define MAE_FIELD_MASK_VALUE_PAIRS_IP_FLAGS_BE_MASK_WIDTH 32 +#define MAE_FIELD_MASK_VALUE_PAIRS_L4_SPORT_BE_OFST 156 +#define MAE_FIELD_MASK_VALUE_PAIRS_L4_SPORT_BE_LEN 2 +#define MAE_FIELD_MASK_VALUE_PAIRS_L4_SPORT_BE_LBN 1248 +#define MAE_FIELD_MASK_VALUE_PAIRS_L4_SPORT_BE_WIDTH 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_L4_SPORT_BE_MASK_OFST 158 +#define MAE_FIELD_MASK_VALUE_PAIRS_L4_SPORT_BE_MASK_LEN 2 +#define MAE_FIELD_MASK_VALUE_PAIRS_L4_SPORT_BE_MASK_LBN 1264 +#define MAE_FIELD_MASK_VALUE_PAIRS_L4_SPORT_BE_MASK_WIDTH 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_L4_DPORT_BE_OFST 160 +#define MAE_FIELD_MASK_VALUE_PAIRS_L4_DPORT_BE_LEN 2 +#define MAE_FIELD_MASK_VALUE_PAIRS_L4_DPORT_BE_LBN 1280 +#define MAE_FIELD_MASK_VALUE_PAIRS_L4_DPORT_BE_WIDTH 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_L4_DPORT_BE_MASK_OFST 162 +#define MAE_FIELD_MASK_VALUE_PAIRS_L4_DPORT_BE_MASK_LEN 2 +#define MAE_FIELD_MASK_VALUE_PAIRS_L4_DPORT_BE_MASK_LBN 1296 +#define MAE_FIELD_MASK_VALUE_PAIRS_L4_DPORT_BE_MASK_WIDTH 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_TCP_FLAGS_BE_OFST 164 +#define MAE_FIELD_MASK_VALUE_PAIRS_TCP_FLAGS_BE_LEN 2 +#define MAE_FIELD_MASK_VALUE_PAIRS_TCP_FLAGS_BE_LBN 1312 +#define MAE_FIELD_MASK_VALUE_PAIRS_TCP_FLAGS_BE_WIDTH 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_TCP_FLAGS_BE_MASK_OFST 166 +#define MAE_FIELD_MASK_VALUE_PAIRS_TCP_FLAGS_BE_MASK_LEN 2 +#define MAE_FIELD_MASK_VALUE_PAIRS_TCP_FLAGS_BE_MASK_LBN 1328 +#define MAE_FIELD_MASK_VALUE_PAIRS_TCP_FLAGS_BE_MASK_WIDTH 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENCAP_TYPE_OFST 168 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENCAP_TYPE_LEN 4 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENCAP_TYPE_LBN 1344 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENCAP_TYPE_WIDTH 32 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENCAP_TYPE_MASK_OFST 172 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENCAP_TYPE_MASK_LEN 4 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENCAP_TYPE_MASK_LBN 1376 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENCAP_TYPE_MASK_WIDTH 32 +#define MAE_FIELD_MASK_VALUE_PAIRS_OUTER_RULE_ID_OFST 176 +#define MAE_FIELD_MASK_VALUE_PAIRS_OUTER_RULE_ID_LEN 4 +#define MAE_FIELD_MASK_VALUE_PAIRS_OUTER_RULE_ID_LBN 1408 +#define MAE_FIELD_MASK_VALUE_PAIRS_OUTER_RULE_ID_WIDTH 32 +#define MAE_FIELD_MASK_VALUE_PAIRS_OUTER_RULE_ID_MASK_OFST 180 +#define MAE_FIELD_MASK_VALUE_PAIRS_OUTER_RULE_ID_MASK_LEN 4 +#define MAE_FIELD_MASK_VALUE_PAIRS_OUTER_RULE_ID_MASK_LBN 1440 +#define MAE_FIELD_MASK_VALUE_PAIRS_OUTER_RULE_ID_MASK_WIDTH 32 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETHER_TYPE_BE_OFST 184 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETHER_TYPE_BE_LEN 2 +#define 
MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETHER_TYPE_BE_LBN 1472 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETHER_TYPE_BE_WIDTH 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETHER_TYPE_BE_MASK_OFST 188 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETHER_TYPE_BE_MASK_LEN 2 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETHER_TYPE_BE_MASK_LBN 1504 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETHER_TYPE_BE_MASK_WIDTH 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN0_TCI_BE_OFST 192 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN0_TCI_BE_LEN 2 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN0_TCI_BE_LBN 1536 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN0_TCI_BE_WIDTH 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN0_TCI_BE_MASK_OFST 194 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN0_TCI_BE_MASK_LEN 2 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN0_TCI_BE_MASK_LBN 1552 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN0_TCI_BE_MASK_WIDTH 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN0_PROTO_BE_OFST 196 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN0_PROTO_BE_LEN 2 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN0_PROTO_BE_LBN 1568 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN0_PROTO_BE_WIDTH 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN0_PROTO_BE_MASK_OFST 198 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN0_PROTO_BE_MASK_LEN 2 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN0_PROTO_BE_MASK_LBN 1584 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN0_PROTO_BE_MASK_WIDTH 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN1_TCI_BE_OFST 200 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN1_TCI_BE_LEN 2 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN1_TCI_BE_LBN 1600 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN1_TCI_BE_WIDTH 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN1_TCI_BE_MASK_OFST 202 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN1_TCI_BE_MASK_LEN 2 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN1_TCI_BE_MASK_LBN 1616 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN1_TCI_BE_MASK_WIDTH 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN1_PROTO_BE_OFST 204 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN1_PROTO_BE_LEN 2 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN1_PROTO_BE_LBN 1632 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN1_PROTO_BE_WIDTH 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN1_PROTO_BE_MASK_OFST 206 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN1_PROTO_BE_MASK_LEN 2 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN1_PROTO_BE_MASK_LBN 1648 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VLAN1_PROTO_BE_MASK_WIDTH 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETH_SADDR_BE_OFST 208 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETH_SADDR_BE_LEN 6 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETH_SADDR_BE_LBN 1664 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETH_SADDR_BE_WIDTH 48 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETH_SADDR_BE_MASK_OFST 214 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETH_SADDR_BE_MASK_LEN 6 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETH_SADDR_BE_MASK_LBN 1712 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETH_SADDR_BE_MASK_WIDTH 48 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETH_DADDR_BE_OFST 220 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETH_DADDR_BE_LEN 6 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETH_DADDR_BE_LBN 1760 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETH_DADDR_BE_WIDTH 48 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETH_DADDR_BE_MASK_OFST 226 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETH_DADDR_BE_MASK_LEN 6 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETH_DADDR_BE_MASK_LBN 1808 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_ETH_DADDR_BE_MASK_WIDTH 48 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_SRC_IP4_BE_OFST 232 +#define 
MAE_FIELD_MASK_VALUE_PAIRS_ENC_SRC_IP4_BE_LEN 4 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_SRC_IP4_BE_LBN 1856 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_SRC_IP4_BE_WIDTH 32 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_SRC_IP4_BE_MASK_OFST 236 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_SRC_IP4_BE_MASK_LEN 4 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_SRC_IP4_BE_MASK_LBN 1888 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_SRC_IP4_BE_MASK_WIDTH 32 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_SRC_IP6_BE_OFST 240 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_SRC_IP6_BE_LEN 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_SRC_IP6_BE_LBN 1920 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_SRC_IP6_BE_WIDTH 128 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_SRC_IP6_BE_MASK_OFST 256 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_SRC_IP6_BE_MASK_LEN 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_SRC_IP6_BE_MASK_LBN 2048 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_SRC_IP6_BE_MASK_WIDTH 128 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_DST_IP4_BE_OFST 272 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_DST_IP4_BE_LEN 4 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_DST_IP4_BE_LBN 2176 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_DST_IP4_BE_WIDTH 32 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_DST_IP4_BE_MASK_OFST 276 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_DST_IP4_BE_MASK_LEN 4 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_DST_IP4_BE_MASK_LBN 2208 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_DST_IP4_BE_MASK_WIDTH 32 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_DST_IP6_BE_OFST 280 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_DST_IP6_BE_LEN 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_DST_IP6_BE_LBN 2240 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_DST_IP6_BE_WIDTH 128 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_DST_IP6_BE_MASK_OFST 296 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_DST_IP6_BE_MASK_LEN 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_DST_IP6_BE_MASK_LBN 2368 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_DST_IP6_BE_MASK_WIDTH 128 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_PROTO_OFST 312 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_PROTO_LEN 1 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_PROTO_LBN 2496 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_PROTO_WIDTH 8 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_PROTO_MASK_OFST 313 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_PROTO_MASK_LEN 1 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_PROTO_MASK_LBN 2504 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_PROTO_MASK_WIDTH 8 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_TOS_OFST 314 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_TOS_LEN 1 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_TOS_LBN 2512 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_TOS_WIDTH 8 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_TOS_MASK_OFST 315 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_TOS_MASK_LEN 1 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_TOS_MASK_LBN 2520 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_TOS_MASK_WIDTH 8 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_TTL_OFST 316 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_TTL_LEN 1 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_TTL_LBN 2528 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_TTL_WIDTH 8 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_TTL_MASK_OFST 317 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_TTL_MASK_LEN 1 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_TTL_MASK_LBN 2536 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_TTL_MASK_WIDTH 8 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_FLAGS_BE_OFST 320 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_FLAGS_BE_LEN 4 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_FLAGS_BE_LBN 2560 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_FLAGS_BE_WIDTH 32 
+#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_FLAGS_BE_MASK_OFST 324 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_FLAGS_BE_MASK_LEN 4 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_FLAGS_BE_MASK_LBN 2592 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_IP_FLAGS_BE_MASK_WIDTH 32 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_L4_SPORT_BE_OFST 328 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_L4_SPORT_BE_LEN 2 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_L4_SPORT_BE_LBN 2624 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_L4_SPORT_BE_WIDTH 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_L4_SPORT_BE_MASK_OFST 330 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_L4_SPORT_BE_MASK_LEN 2 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_L4_SPORT_BE_MASK_LBN 2640 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_L4_SPORT_BE_MASK_WIDTH 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_L4_DPORT_BE_OFST 332 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_L4_DPORT_BE_LEN 2 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_L4_DPORT_BE_LBN 2656 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_L4_DPORT_BE_WIDTH 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_L4_DPORT_BE_MASK_OFST 334 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_L4_DPORT_BE_MASK_LEN 2 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_L4_DPORT_BE_MASK_LBN 2672 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_L4_DPORT_BE_MASK_WIDTH 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VNET_ID_BE_OFST 336 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VNET_ID_BE_LEN 4 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VNET_ID_BE_LBN 2688 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VNET_ID_BE_WIDTH 32 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VNET_ID_BE_MASK_OFST 340 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VNET_ID_BE_MASK_LEN 4 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VNET_ID_BE_MASK_LBN 2720 +#define MAE_FIELD_MASK_VALUE_PAIRS_ENC_VNET_ID_BE_MASK_WIDTH 32 + +/* MAE_FIELD_MASK_VALUE_PAIRS_V2 structuredef */ +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_LEN 372 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_INGRESS_MPORT_SELECTOR_OFST 0 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_INGRESS_MPORT_SELECTOR_LEN 4 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_INGRESS_MPORT_SELECTOR_LBN 0 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_INGRESS_MPORT_SELECTOR_WIDTH 32 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_INGRESS_MPORT_SELECTOR_MASK_OFST 4 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_INGRESS_MPORT_SELECTOR_MASK_LEN 4 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_INGRESS_MPORT_SELECTOR_MASK_LBN 32 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_INGRESS_MPORT_SELECTOR_MASK_WIDTH 32 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_MARK_OFST 8 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_MARK_LEN 4 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_MARK_LBN 64 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_MARK_WIDTH 32 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_MARK_MASK_OFST 12 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_MARK_MASK_LEN 4 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_MARK_MASK_LBN 96 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_MARK_MASK_WIDTH 32 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETHER_TYPE_BE_OFST 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETHER_TYPE_BE_LEN 2 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETHER_TYPE_BE_LBN 128 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETHER_TYPE_BE_WIDTH 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETHER_TYPE_BE_MASK_OFST 18 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETHER_TYPE_BE_MASK_LEN 2 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETHER_TYPE_BE_MASK_LBN 144 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETHER_TYPE_BE_MASK_WIDTH 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_TCI_BE_OFST 20 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_TCI_BE_LEN 2 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_TCI_BE_LBN 160 +#define 
MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_TCI_BE_WIDTH 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_TCI_BE_MASK_OFST 22 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_TCI_BE_MASK_LEN 2 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_TCI_BE_MASK_LBN 176 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_TCI_BE_MASK_WIDTH 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_PROTO_BE_OFST 24 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_PROTO_BE_LEN 2 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_PROTO_BE_LBN 192 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_PROTO_BE_WIDTH 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_PROTO_BE_MASK_OFST 26 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_PROTO_BE_MASK_LEN 2 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_PROTO_BE_MASK_LBN 208 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN0_PROTO_BE_MASK_WIDTH 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_TCI_BE_OFST 28 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_TCI_BE_LEN 2 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_TCI_BE_LBN 224 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_TCI_BE_WIDTH 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_TCI_BE_MASK_OFST 30 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_TCI_BE_MASK_LEN 2 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_TCI_BE_MASK_LBN 240 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_TCI_BE_MASK_WIDTH 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_PROTO_BE_OFST 32 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_PROTO_BE_LEN 2 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_PROTO_BE_LBN 256 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_PROTO_BE_WIDTH 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_PROTO_BE_MASK_OFST 34 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_PROTO_BE_MASK_LEN 2 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_PROTO_BE_MASK_LBN 272 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_VLAN1_PROTO_BE_MASK_WIDTH 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_SADDR_BE_OFST 36 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_SADDR_BE_LEN 6 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_SADDR_BE_LBN 288 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_SADDR_BE_WIDTH 48 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_SADDR_BE_MASK_OFST 42 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_SADDR_BE_MASK_LEN 6 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_SADDR_BE_MASK_LBN 336 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_SADDR_BE_MASK_WIDTH 48 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_DADDR_BE_OFST 48 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_DADDR_BE_LEN 6 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_DADDR_BE_LBN 384 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_DADDR_BE_WIDTH 48 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_DADDR_BE_MASK_OFST 54 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_DADDR_BE_MASK_LEN 6 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_DADDR_BE_MASK_LBN 432 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ETH_DADDR_BE_MASK_WIDTH 48 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP4_BE_OFST 60 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP4_BE_LEN 4 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP4_BE_LBN 480 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP4_BE_WIDTH 32 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP4_BE_MASK_OFST 64 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP4_BE_MASK_LEN 4 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP4_BE_MASK_LBN 512 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP4_BE_MASK_WIDTH 32 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP6_BE_OFST 68 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP6_BE_LEN 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP6_BE_LBN 544 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP6_BE_WIDTH 128 
+#define MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP6_BE_MASK_OFST 84 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP6_BE_MASK_LEN 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP6_BE_MASK_LBN 672 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_SRC_IP6_BE_MASK_WIDTH 128 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP4_BE_OFST 100 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP4_BE_LEN 4 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP4_BE_LBN 800 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP4_BE_WIDTH 32 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP4_BE_MASK_OFST 104 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP4_BE_MASK_LEN 4 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP4_BE_MASK_LBN 832 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP4_BE_MASK_WIDTH 32 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP6_BE_OFST 108 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP6_BE_LEN 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP6_BE_LBN 864 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP6_BE_WIDTH 128 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP6_BE_MASK_OFST 124 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP6_BE_MASK_LEN 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP6_BE_MASK_LBN 992 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DST_IP6_BE_MASK_WIDTH 128 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_PROTO_OFST 140 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_PROTO_LEN 1 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_PROTO_LBN 1120 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_PROTO_WIDTH 8 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_PROTO_MASK_OFST 141 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_PROTO_MASK_LEN 1 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_PROTO_MASK_LBN 1128 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_PROTO_MASK_WIDTH 8 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TOS_OFST 142 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TOS_LEN 1 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TOS_LBN 1136 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TOS_WIDTH 8 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TOS_MASK_OFST 143 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TOS_MASK_LEN 1 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TOS_MASK_LBN 1144 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TOS_MASK_WIDTH 8 +/* Due to hardware limitations, firmware may return + * MC_CMD_ERR_EINVAL(BAD_IP_TTL) when attempting to match on an IP_TTL value + * other than 1. 
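Given the firmware limitation just described, the only exact IP_TTL match that is portable across current hardware is the value 1 with a full mask. A hypothetical helper showing that encoding against the V2 offsets defined below:

static void v2_pairs_match_ttl_one(u8 *buf)
{
	/* Illustrative only: any other TTL value with a non-zero mask
	 * may be rejected with MC_CMD_ERR_EINVAL(BAD_IP_TTL).
	 */
	buf[MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TTL_OFST] = 1;	     /* value */
	buf[MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TTL_MASK_OFST] = 0xff; /* full mask */
}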
+ */ +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TTL_OFST 144 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TTL_LEN 1 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TTL_LBN 1152 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TTL_WIDTH 8 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TTL_MASK_OFST 145 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TTL_MASK_LEN 1 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TTL_MASK_LBN 1160 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_TTL_MASK_WIDTH 8 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_FLAGS_BE_OFST 148 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_FLAGS_BE_LEN 4 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_FLAGS_BE_LBN 1184 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_FLAGS_BE_WIDTH 32 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_FLAGS_BE_MASK_OFST 152 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_FLAGS_BE_MASK_LEN 4 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_FLAGS_BE_MASK_LBN 1216 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_FLAGS_BE_MASK_WIDTH 32 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_SPORT_BE_OFST 156 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_SPORT_BE_LEN 2 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_SPORT_BE_LBN 1248 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_SPORT_BE_WIDTH 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_SPORT_BE_MASK_OFST 158 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_SPORT_BE_MASK_LEN 2 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_SPORT_BE_MASK_LBN 1264 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_SPORT_BE_MASK_WIDTH 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_DPORT_BE_OFST 160 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_DPORT_BE_LEN 2 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_DPORT_BE_LBN 1280 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_DPORT_BE_WIDTH 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_DPORT_BE_MASK_OFST 162 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_DPORT_BE_MASK_LEN 2 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_DPORT_BE_MASK_LBN 1296 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_L4_DPORT_BE_MASK_WIDTH 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_TCP_FLAGS_BE_OFST 164 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_TCP_FLAGS_BE_LEN 2 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_TCP_FLAGS_BE_LBN 1312 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_TCP_FLAGS_BE_WIDTH 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_TCP_FLAGS_BE_MASK_OFST 166 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_TCP_FLAGS_BE_MASK_LEN 2 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_TCP_FLAGS_BE_MASK_LBN 1328 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_TCP_FLAGS_BE_MASK_WIDTH 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENCAP_TYPE_OFST 168 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENCAP_TYPE_LEN 4 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENCAP_TYPE_LBN 1344 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENCAP_TYPE_WIDTH 32 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENCAP_TYPE_MASK_OFST 172 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENCAP_TYPE_MASK_LEN 4 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENCAP_TYPE_MASK_LBN 1376 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENCAP_TYPE_MASK_WIDTH 32 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_OUTER_RULE_ID_OFST 176 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_OUTER_RULE_ID_LEN 4 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_OUTER_RULE_ID_LBN 1408 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_OUTER_RULE_ID_WIDTH 32 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_OUTER_RULE_ID_MASK_OFST 180 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_OUTER_RULE_ID_MASK_LEN 4 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_OUTER_RULE_ID_MASK_LBN 1440 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_OUTER_RULE_ID_MASK_WIDTH 32 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETHER_TYPE_BE_OFST 184 +#define 
MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETHER_TYPE_BE_LEN 2 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETHER_TYPE_BE_LBN 1472 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETHER_TYPE_BE_WIDTH 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETHER_TYPE_BE_MASK_OFST 188 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETHER_TYPE_BE_MASK_LEN 2 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETHER_TYPE_BE_MASK_LBN 1504 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETHER_TYPE_BE_MASK_WIDTH 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN0_TCI_BE_OFST 192 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN0_TCI_BE_LEN 2 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN0_TCI_BE_LBN 1536 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN0_TCI_BE_WIDTH 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN0_TCI_BE_MASK_OFST 194 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN0_TCI_BE_MASK_LEN 2 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN0_TCI_BE_MASK_LBN 1552 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN0_TCI_BE_MASK_WIDTH 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN0_PROTO_BE_OFST 196 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN0_PROTO_BE_LEN 2 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN0_PROTO_BE_LBN 1568 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN0_PROTO_BE_WIDTH 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN0_PROTO_BE_MASK_OFST 198 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN0_PROTO_BE_MASK_LEN 2 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN0_PROTO_BE_MASK_LBN 1584 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN0_PROTO_BE_MASK_WIDTH 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN1_TCI_BE_OFST 200 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN1_TCI_BE_LEN 2 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN1_TCI_BE_LBN 1600 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN1_TCI_BE_WIDTH 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN1_TCI_BE_MASK_OFST 202 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN1_TCI_BE_MASK_LEN 2 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN1_TCI_BE_MASK_LBN 1616 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN1_TCI_BE_MASK_WIDTH 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN1_PROTO_BE_OFST 204 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN1_PROTO_BE_LEN 2 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN1_PROTO_BE_LBN 1632 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN1_PROTO_BE_WIDTH 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN1_PROTO_BE_MASK_OFST 206 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN1_PROTO_BE_MASK_LEN 2 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN1_PROTO_BE_MASK_LBN 1648 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VLAN1_PROTO_BE_MASK_WIDTH 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETH_SADDR_BE_OFST 208 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETH_SADDR_BE_LEN 6 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETH_SADDR_BE_LBN 1664 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETH_SADDR_BE_WIDTH 48 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETH_SADDR_BE_MASK_OFST 214 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETH_SADDR_BE_MASK_LEN 6 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETH_SADDR_BE_MASK_LBN 1712 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETH_SADDR_BE_MASK_WIDTH 48 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETH_DADDR_BE_OFST 220 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETH_DADDR_BE_LEN 6 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETH_DADDR_BE_LBN 1760 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETH_DADDR_BE_WIDTH 48 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETH_DADDR_BE_MASK_OFST 226 +#define 
MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETH_DADDR_BE_MASK_LEN 6 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETH_DADDR_BE_MASK_LBN 1808 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_ETH_DADDR_BE_MASK_WIDTH 48 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_SRC_IP4_BE_OFST 232 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_SRC_IP4_BE_LEN 4 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_SRC_IP4_BE_LBN 1856 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_SRC_IP4_BE_WIDTH 32 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_SRC_IP4_BE_MASK_OFST 236 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_SRC_IP4_BE_MASK_LEN 4 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_SRC_IP4_BE_MASK_LBN 1888 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_SRC_IP4_BE_MASK_WIDTH 32 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_SRC_IP6_BE_OFST 240 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_SRC_IP6_BE_LEN 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_SRC_IP6_BE_LBN 1920 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_SRC_IP6_BE_WIDTH 128 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_SRC_IP6_BE_MASK_OFST 256 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_SRC_IP6_BE_MASK_LEN 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_SRC_IP6_BE_MASK_LBN 2048 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_SRC_IP6_BE_MASK_WIDTH 128 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_DST_IP4_BE_OFST 272 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_DST_IP4_BE_LEN 4 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_DST_IP4_BE_LBN 2176 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_DST_IP4_BE_WIDTH 32 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_DST_IP4_BE_MASK_OFST 276 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_DST_IP4_BE_MASK_LEN 4 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_DST_IP4_BE_MASK_LBN 2208 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_DST_IP4_BE_MASK_WIDTH 32 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_DST_IP6_BE_OFST 280 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_DST_IP6_BE_LEN 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_DST_IP6_BE_LBN 2240 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_DST_IP6_BE_WIDTH 128 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_DST_IP6_BE_MASK_OFST 296 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_DST_IP6_BE_MASK_LEN 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_DST_IP6_BE_MASK_LBN 2368 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_DST_IP6_BE_MASK_WIDTH 128 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_PROTO_OFST 312 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_PROTO_LEN 1 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_PROTO_LBN 2496 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_PROTO_WIDTH 8 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_PROTO_MASK_OFST 313 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_PROTO_MASK_LEN 1 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_PROTO_MASK_LBN 2504 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_PROTO_MASK_WIDTH 8 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_TOS_OFST 314 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_TOS_LEN 1 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_TOS_LBN 2512 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_TOS_WIDTH 8 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_TOS_MASK_OFST 315 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_TOS_MASK_LEN 1 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_TOS_MASK_LBN 2520 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_TOS_MASK_WIDTH 8 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_TTL_OFST 316 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_TTL_LEN 1 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_TTL_LBN 2528 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_TTL_WIDTH 8 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_TTL_MASK_OFST 
317 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_TTL_MASK_LEN 1 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_TTL_MASK_LBN 2536 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_TTL_MASK_WIDTH 8 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_FLAGS_BE_OFST 320 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_FLAGS_BE_LEN 4 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_FLAGS_BE_LBN 2560 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_FLAGS_BE_WIDTH 32 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_FLAGS_BE_MASK_OFST 324 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_FLAGS_BE_MASK_LEN 4 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_FLAGS_BE_MASK_LBN 2592 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_IP_FLAGS_BE_MASK_WIDTH 32 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_L4_SPORT_BE_OFST 328 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_L4_SPORT_BE_LEN 2 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_L4_SPORT_BE_LBN 2624 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_L4_SPORT_BE_WIDTH 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_L4_SPORT_BE_MASK_OFST 330 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_L4_SPORT_BE_MASK_LEN 2 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_L4_SPORT_BE_MASK_LBN 2640 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_L4_SPORT_BE_MASK_WIDTH 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_L4_DPORT_BE_OFST 332 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_L4_DPORT_BE_LEN 2 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_L4_DPORT_BE_LBN 2656 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_L4_DPORT_BE_WIDTH 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_L4_DPORT_BE_MASK_OFST 334 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_L4_DPORT_BE_MASK_LEN 2 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_L4_DPORT_BE_MASK_LBN 2672 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_L4_DPORT_BE_MASK_WIDTH 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VNET_ID_BE_OFST 336 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VNET_ID_BE_LEN 4 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VNET_ID_BE_LBN 2688 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VNET_ID_BE_WIDTH 32 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VNET_ID_BE_MASK_OFST 340 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VNET_ID_BE_MASK_LEN 4 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VNET_ID_BE_MASK_LBN 2720 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_VNET_ID_BE_MASK_WIDTH 32 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_FLAGS_OFST 344 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_FLAGS_LEN 4 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IS_IP_FRAG_OFST 344 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IS_IP_FRAG_LBN 0 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IS_IP_FRAG_WIDTH 1 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DO_CT_OFST 344 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DO_CT_LBN 1 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_DO_CT_WIDTH 1 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_HIT_OFST 344 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_HIT_LBN 2 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_HIT_WIDTH 1 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IS_FROM_NETWORK_OFST 344 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IS_FROM_NETWORK_LBN 3 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IS_FROM_NETWORK_WIDTH 1 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_HAS_OVLAN_OFST 344 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_HAS_OVLAN_LBN 4 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_HAS_OVLAN_WIDTH 1 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_HAS_IVLAN_OFST 344 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_HAS_IVLAN_LBN 5 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_HAS_IVLAN_WIDTH 1 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_HAS_OVLAN_OFST 344 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_HAS_OVLAN_LBN 6 +#define 
MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_HAS_OVLAN_WIDTH 1 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_HAS_IVLAN_OFST 344 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_HAS_IVLAN_LBN 7 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_ENC_HAS_IVLAN_WIDTH 1 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_TCP_SYN_FIN_RST_OFST 344 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_TCP_SYN_FIN_RST_LBN 8 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_TCP_SYN_FIN_RST_WIDTH 1 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_FIRST_FRAG_OFST 344 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_FIRST_FRAG_LBN 9 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_FIRST_FRAG_WIDTH 1 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_FLAGS_LBN 2752 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_FLAGS_WIDTH 32 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_FLAGS_MASK_OFST 348 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_FLAGS_MASK_LEN 4 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_FLAGS_MASK_LBN 2784 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_FLAGS_MASK_WIDTH 32 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_DOMAIN_OFST 352 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_DOMAIN_LEN 2 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_DOMAIN_LBN 2816 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_DOMAIN_WIDTH 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_DOMAIN_MASK_OFST 354 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_DOMAIN_MASK_LEN 2 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_DOMAIN_MASK_LBN 2832 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_DOMAIN_MASK_WIDTH 16 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_MARK_OFST 356 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_MARK_LEN 4 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_MARK_LBN 2848 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_MARK_WIDTH 32 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_MARK_MASK_OFST 360 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_MARK_MASK_LEN 4 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_MARK_MASK_LBN 2880 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_MARK_MASK_WIDTH 32 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_PRIVATE_FLAGS_OFST 364 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_PRIVATE_FLAGS_LEN 1 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_PRIVATE_FLAGS_LBN 2912 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_PRIVATE_FLAGS_WIDTH 8 +/* Set to zero. */ +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RSVD2_OFST 365 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RSVD2_LEN 1 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RSVD2_LBN 2920 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RSVD2_WIDTH 8 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_PRIVATE_FLAGS_MASK_OFST 366 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_PRIVATE_FLAGS_MASK_LEN 1 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_PRIVATE_FLAGS_MASK_LBN 2928 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_PRIVATE_FLAGS_MASK_WIDTH 8 +/* Set to zero. 
*/ +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RSVD3_OFST 367 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RSVD3_LEN 1 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RSVD3_LBN 2936 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RSVD3_WIDTH 8 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RECIRC_ID_OFST 368 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RECIRC_ID_LEN 1 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RECIRC_ID_LBN 2944 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RECIRC_ID_WIDTH 8 +/* Set to zero */ +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RSVD4_OFST 369 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RSVD4_LEN 1 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RSVD4_LBN 2952 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RSVD4_WIDTH 8 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RECIRC_ID_MASK_OFST 370 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RECIRC_ID_MASK_LEN 1 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RECIRC_ID_MASK_LBN 2960 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RECIRC_ID_MASK_WIDTH 8 +/* Set to zero */ +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RSVD5_OFST 371 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RSVD5_LEN 1 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RSVD5_LBN 2968 +#define MAE_FIELD_MASK_VALUE_PAIRS_V2_RSVD5_WIDTH 8 + +/* MAE_MPORT_SELECTOR structuredef: MPORTS are identified by an opaque unsigned + * integer value (mport_id) that is guaranteed to be representable within + * 32-bits or within any NIC interface field that needs to store the value + * (whichever is narrower). This selector structure provides a stable way to + * refer to m-ports. + */ +#define MAE_MPORT_SELECTOR_LEN 4 +/* Used to force the tools to output bitfield-style defines for this structure. + */ +#define MAE_MPORT_SELECTOR_FLAT_OFST 0 +#define MAE_MPORT_SELECTOR_FLAT_LEN 4 +/* enum: An m-port selector value that is guaranteed never to represent a real + * mport + */ +#define MAE_MPORT_SELECTOR_NULL 0x0 +/* enum: The m-port assigned to the calling client. */ +#define MAE_MPORT_SELECTOR_ASSIGNED 0x1000000 +#define MAE_MPORT_SELECTOR_TYPE_OFST 0 +#define MAE_MPORT_SELECTOR_TYPE_LBN 24 +#define MAE_MPORT_SELECTOR_TYPE_WIDTH 8 +/* enum: The MPORT connected to a given physical port */ +#define MAE_MPORT_SELECTOR_TYPE_PPORT 0x2 +/* enum: The MPORT assigned to a given PCIe function. Deprecated in favour of + * MH_FUNC. + */ +#define MAE_MPORT_SELECTOR_TYPE_FUNC 0x3 +/* enum: An mport_id */ +#define MAE_MPORT_SELECTOR_TYPE_MPORT_ID 0x4 +/* enum: The MPORT assigned to a given PCIe function (see also FWRIVERHD-1108) + */ +#define MAE_MPORT_SELECTOR_TYPE_MH_FUNC 0x5 +/* enum: This is guaranteed never to be a valid selector type */ +#define MAE_MPORT_SELECTOR_TYPE_INVALID 0xff +#define MAE_MPORT_SELECTOR_MPORT_ID_OFST 0 +#define MAE_MPORT_SELECTOR_MPORT_ID_LBN 0 +#define MAE_MPORT_SELECTOR_MPORT_ID_WIDTH 24 +#define MAE_MPORT_SELECTOR_PPORT_ID_OFST 0 +#define MAE_MPORT_SELECTOR_PPORT_ID_LBN 0 +#define MAE_MPORT_SELECTOR_PPORT_ID_WIDTH 4 +#define MAE_MPORT_SELECTOR_FUNC_INTF_ID_OFST 0 +#define MAE_MPORT_SELECTOR_FUNC_INTF_ID_LBN 20 +#define MAE_MPORT_SELECTOR_FUNC_INTF_ID_WIDTH 4 +#define MAE_MPORT_SELECTOR_HOST_PRIMARY 0x1 /* enum */ +#define MAE_MPORT_SELECTOR_NIC_EMBEDDED 0x2 /* enum */ +/* enum: Deprecated, use CALLER_INTF instead.
*/ +#define MAE_MPORT_SELECTOR_CALLER 0xf +#define MAE_MPORT_SELECTOR_CALLER_INTF 0xf /* enum */ +#define MAE_MPORT_SELECTOR_FUNC_MH_PF_ID_OFST 0 +#define MAE_MPORT_SELECTOR_FUNC_MH_PF_ID_LBN 16 +#define MAE_MPORT_SELECTOR_FUNC_MH_PF_ID_WIDTH 4 +#define MAE_MPORT_SELECTOR_FUNC_PF_ID_OFST 0 +#define MAE_MPORT_SELECTOR_FUNC_PF_ID_LBN 16 +#define MAE_MPORT_SELECTOR_FUNC_PF_ID_WIDTH 8 +#define MAE_MPORT_SELECTOR_FUNC_VF_ID_OFST 0 +#define MAE_MPORT_SELECTOR_FUNC_VF_ID_LBN 0 +#define MAE_MPORT_SELECTOR_FUNC_VF_ID_WIDTH 16 +/* enum: Used for VF_ID to indicate a physical function. */ +#define MAE_MPORT_SELECTOR_FUNC_VF_ID_NULL 0xffff +/* enum: Used for PF_ID to indicate the physical function of the calling + * client. - When used by a PF with VF_ID == VF_ID_NULL, the mport selector + * relates to the calling function. (For clarity, it is recommended that + * clients use ASSIGNED to achieve this behaviour). - When used by a PF with + * VF_ID != VF_ID_NULL, the mport selector relates to a VF child of the calling + * function. - When used by a VF with VF_ID == VF_ID_NULL, the mport selector + * relates to the PF owning the calling function. - When used by a VF with + * VF_ID != VF_ID_NULL, the mport selector relates to a sibling VF of the + * calling function. - Not meaningful when used by a client that is not a PCIe + * function. + */ +#define MAE_MPORT_SELECTOR_FUNC_PF_ID_CALLER 0xff +/* enum: Same as PF_ID_CALLER, but for use in the smaller MH_PF_ID field. Only + * valid if FUNC_INTF_ID is CALLER. + */ +#define MAE_MPORT_SELECTOR_FUNC_MH_PF_ID_CALLER 0xf +#define MAE_MPORT_SELECTOR_FLAT_LBN 0 +#define MAE_MPORT_SELECTOR_FLAT_WIDTH 32 + +/* MAE_LINK_ENDPOINT_SELECTOR structuredef: Structure that identifies a real or + * virtual network port by MAE port and link end. Intended to be used by + * network port MCDI commands. Setting FLAT to MAE_LINK_ENDPOINT_COMPAT is + * equivalent to using the previous version of the command. Not all possible + * combinations of MPORT_END and MPORT_SELECTOR in MAE_LINK_ENDPOINT_SELECTOR + * will work in all circumstances. 1. Some will always work (e.g. a VF can + * always address its logical MAC using MPORT_SELECTOR=ASSIGNED,LINK_END=VNIC), + * 2. Some are not meaningful and will always fail with EINVAL (e.g. attempting + * to address the VNIC end of a link to a physical port), 3. Some are + * meaningful but require the MCDI client to have the required permission and + * fail with EPERM otherwise (e.g. trying to set the MAC on a VF the caller + * cannot administer), and 4. Some could be implementation-specific and fail + * with ENOTSUP if not available (no examples exist right now). See + * SF-123581-TC section 4.3 for more details.
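The LBN/WIDTH defines above fully describe the selector layout, so composing a selector dword is a bitfield-packing exercise. A minimal sketch, assuming the EFX_POPULATE_DWORD_*() and EFX_DWORD_VAL() helpers from bitfield.h elsewhere in this patch behave as in the in-tree sfc driver; function names are illustrative:

/* Compose MAE_MPORT_SELECTOR values from the LBN/WIDTH defines above. */
static u32 example_mport_selector_pport(u32 pport_id)
{
	efx_dword_t mport;

	/* TYPE=PPORT in bits 31:24, physical port number in PPORT_ID */
	EFX_POPULATE_DWORD_2(mport,
			     MAE_MPORT_SELECTOR_TYPE, MAE_MPORT_SELECTOR_TYPE_PPORT,
			     MAE_MPORT_SELECTOR_PPORT_ID, pport_id);
	return EFX_DWORD_VAL(mport);
}

static u32 example_mport_selector_vf(u32 vf_id)
{
	efx_dword_t mport;

	/* PF_ID_CALLER selects the calling PF; vf_id then names one of its
	 * VF children (VF_ID_NULL would instead mean the PF itself).
	 */
	EFX_POPULATE_DWORD_3(mport,
			     MAE_MPORT_SELECTOR_TYPE, MAE_MPORT_SELECTOR_TYPE_FUNC,
			     MAE_MPORT_SELECTOR_FUNC_PF_ID,
			     MAE_MPORT_SELECTOR_FUNC_PF_ID_CALLER,
			     MAE_MPORT_SELECTOR_FUNC_VF_ID, vf_id);
	return EFX_DWORD_VAL(mport);
}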
+ */ +#define MAE_LINK_ENDPOINT_SELECTOR_LEN 8 +/* Identifier for the MAE MPORT of interest */ +#define MAE_LINK_ENDPOINT_SELECTOR_MPORT_SELECTOR_OFST 0 +#define MAE_LINK_ENDPOINT_SELECTOR_MPORT_SELECTOR_LEN 4 +#define MAE_LINK_ENDPOINT_SELECTOR_MPORT_SELECTOR_LBN 0 +#define MAE_LINK_ENDPOINT_SELECTOR_MPORT_SELECTOR_WIDTH 32 +/* Which end of the link identified by MPORT to consider */ +#define MAE_LINK_ENDPOINT_SELECTOR_LINK_END_OFST 4 +#define MAE_LINK_ENDPOINT_SELECTOR_LINK_END_LEN 4 +/* Enum values, see field(s): */ +/* MAE_MPORT_END */ +#define MAE_LINK_ENDPOINT_SELECTOR_LINK_END_LBN 32 +#define MAE_LINK_ENDPOINT_SELECTOR_LINK_END_WIDTH 32 +/* A field for accessing the endpoint selector as a collection of bits */ +#define MAE_LINK_ENDPOINT_SELECTOR_FLAT_OFST 0 +#define MAE_LINK_ENDPOINT_SELECTOR_FLAT_LEN 8 +#define MAE_LINK_ENDPOINT_SELECTOR_FLAT_LO_OFST 0 +#define MAE_LINK_ENDPOINT_SELECTOR_FLAT_LO_LEN 4 +#define MAE_LINK_ENDPOINT_SELECTOR_FLAT_LO_LBN 0 +#define MAE_LINK_ENDPOINT_SELECTOR_FLAT_LO_WIDTH 32 +#define MAE_LINK_ENDPOINT_SELECTOR_FLAT_HI_OFST 4 +#define MAE_LINK_ENDPOINT_SELECTOR_FLAT_HI_LEN 4 +#define MAE_LINK_ENDPOINT_SELECTOR_FLAT_HI_LBN 32 +#define MAE_LINK_ENDPOINT_SELECTOR_FLAT_HI_WIDTH 32 +/* enum: Set FLAT to this value to obtain backward-compatible behaviour in + * commands that have been extended to take a MAE_LINK_ENDPOINT_SELECTOR + * argument. New commands that are designed to take such an argument from the + * start will not support this. + */ +#define MAE_LINK_ENDPOINT_SELECTOR_MAE_LINK_ENDPOINT_COMPAT 0x0 +#define MAE_LINK_ENDPOINT_SELECTOR_FLAT_LBN 0 +#define MAE_LINK_ENDPOINT_SELECTOR_FLAT_WIDTH 64 + + +/***********************************/ +/* MC_CMD_MAE_GET_CAPS + * Describes capabilities of the MAE (Match-Action Engine) + */ +#define MC_CMD_MAE_GET_CAPS 0x140 +#undef MC_CMD_0x140_PRIVILEGE_CTG + +#define MC_CMD_0x140_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_MAE_GET_CAPS_IN msgrequest */ +#define MC_CMD_MAE_GET_CAPS_IN_LEN 0 + +/* MC_CMD_MAE_GET_CAPS_OUT msgresponse */ +#define MC_CMD_MAE_GET_CAPS_OUT_LEN 52 +/* The number of field IDs that the NIC supports. Any field with an ID greater + * than or equal to the value returned in this field must be treated as having + * a support level of MAE_FIELD_UNSUPPORTED in all requests. + */ +#define MC_CMD_MAE_GET_CAPS_OUT_MATCH_FIELD_COUNT_OFST 0 +#define MC_CMD_MAE_GET_CAPS_OUT_MATCH_FIELD_COUNT_LEN 4 +#define MC_CMD_MAE_GET_CAPS_OUT_ENCAP_TYPES_SUPPORTED_OFST 4 +#define MC_CMD_MAE_GET_CAPS_OUT_ENCAP_TYPES_SUPPORTED_LEN 4 +#define MC_CMD_MAE_GET_CAPS_OUT_ENCAP_TYPE_VXLAN_OFST 4 +#define MC_CMD_MAE_GET_CAPS_OUT_ENCAP_TYPE_VXLAN_LBN 0 +#define MC_CMD_MAE_GET_CAPS_OUT_ENCAP_TYPE_VXLAN_WIDTH 1 +#define MC_CMD_MAE_GET_CAPS_OUT_ENCAP_TYPE_NVGRE_OFST 4 +#define MC_CMD_MAE_GET_CAPS_OUT_ENCAP_TYPE_NVGRE_LBN 1 +#define MC_CMD_MAE_GET_CAPS_OUT_ENCAP_TYPE_NVGRE_WIDTH 1 +#define MC_CMD_MAE_GET_CAPS_OUT_ENCAP_TYPE_GENEVE_OFST 4 +#define MC_CMD_MAE_GET_CAPS_OUT_ENCAP_TYPE_GENEVE_LBN 2 +#define MC_CMD_MAE_GET_CAPS_OUT_ENCAP_TYPE_GENEVE_WIDTH 1 +#define MC_CMD_MAE_GET_CAPS_OUT_ENCAP_TYPE_L2GRE_OFST 4 +#define MC_CMD_MAE_GET_CAPS_OUT_ENCAP_TYPE_L2GRE_LBN 3 +#define MC_CMD_MAE_GET_CAPS_OUT_ENCAP_TYPE_L2GRE_WIDTH 1 +/* Deprecated alias for AR_COUNTERS. */ +#define MC_CMD_MAE_GET_CAPS_OUT_COUNTERS_OFST 8 +#define MC_CMD_MAE_GET_CAPS_OUT_COUNTERS_LEN 4 +/* The total number of AR counters available to allocate.
*/ +#define MC_CMD_MAE_GET_CAPS_OUT_AR_COUNTERS_OFST 8 +#define MC_CMD_MAE_GET_CAPS_OUT_AR_COUNTERS_LEN 4 +/* The total number of counter lists available to allocate. A value of zero + * indicates that counter lists are not supported by the NIC. (But single + * counters may still be.) + */ +#define MC_CMD_MAE_GET_CAPS_OUT_COUNTER_LISTS_OFST 12 +#define MC_CMD_MAE_GET_CAPS_OUT_COUNTER_LISTS_LEN 4 +/* The total number of encap header structures available to allocate. */ +#define MC_CMD_MAE_GET_CAPS_OUT_ENCAP_HEADER_LIMIT_OFST 16 +#define MC_CMD_MAE_GET_CAPS_OUT_ENCAP_HEADER_LIMIT_LEN 4 +/* Reserved. Should be zero. */ +#define MC_CMD_MAE_GET_CAPS_OUT_RSVD_OFST 20 +#define MC_CMD_MAE_GET_CAPS_OUT_RSVD_LEN 4 +/* The total number of action sets available to allocate. */ +#define MC_CMD_MAE_GET_CAPS_OUT_ACTION_SETS_OFST 24 +#define MC_CMD_MAE_GET_CAPS_OUT_ACTION_SETS_LEN 4 +/* The total number of action set lists available to allocate. */ +#define MC_CMD_MAE_GET_CAPS_OUT_ACTION_SET_LISTS_OFST 28 +#define MC_CMD_MAE_GET_CAPS_OUT_ACTION_SET_LISTS_LEN 4 +/* The total number of outer rules available to allocate. */ +#define MC_CMD_MAE_GET_CAPS_OUT_OUTER_RULES_OFST 32 +#define MC_CMD_MAE_GET_CAPS_OUT_OUTER_RULES_LEN 4 +/* The total number of action rules available to allocate. */ +#define MC_CMD_MAE_GET_CAPS_OUT_ACTION_RULES_OFST 36 +#define MC_CMD_MAE_GET_CAPS_OUT_ACTION_RULES_LEN 4 +/* The number of priorities available for ACTION_RULE filters. It is invalid to + * install a MATCH_ACTION filter with a priority number >= ACTION_PRIOS. + */ +#define MC_CMD_MAE_GET_CAPS_OUT_ACTION_PRIOS_OFST 40 +#define MC_CMD_MAE_GET_CAPS_OUT_ACTION_PRIOS_LEN 4 +/* The number of priorities available for OUTER_RULE filters. It is invalid to + * install an OUTER_RULE filter with a priority number >= OUTER_PRIOS. + */ +#define MC_CMD_MAE_GET_CAPS_OUT_OUTER_PRIOS_OFST 44 +#define MC_CMD_MAE_GET_CAPS_OUT_OUTER_PRIOS_LEN 4 +/* MAE API major version. Currently 1. If this field is not present in the + * response (i.e. response shorter than 384 bits), then its value is zero. If + * the value does not match the client's expectations, the client should raise + * a fatal error. + */ +#define MC_CMD_MAE_GET_CAPS_OUT_API_VER_OFST 48 +#define MC_CMD_MAE_GET_CAPS_OUT_API_VER_LEN 4 + +/* MC_CMD_MAE_GET_CAPS_V2_OUT msgresponse */ +#define MC_CMD_MAE_GET_CAPS_V2_OUT_LEN 60 +/* The number of field IDs that the NIC supports. Any field with an ID greater + * than or equal to the value returned in this field must be treated as having + * a support level of MAE_FIELD_UNSUPPORTED in all requests.
+ */ +#define MC_CMD_MAE_GET_CAPS_V2_OUT_MATCH_FIELD_COUNT_OFST 0 +#define MC_CMD_MAE_GET_CAPS_V2_OUT_MATCH_FIELD_COUNT_LEN 4 +#define MC_CMD_MAE_GET_CAPS_V2_OUT_ENCAP_TYPES_SUPPORTED_OFST 4 +#define MC_CMD_MAE_GET_CAPS_V2_OUT_ENCAP_TYPES_SUPPORTED_LEN 4 +#define MC_CMD_MAE_GET_CAPS_V2_OUT_ENCAP_TYPE_VXLAN_OFST 4 +#define MC_CMD_MAE_GET_CAPS_V2_OUT_ENCAP_TYPE_VXLAN_LBN 0 +#define MC_CMD_MAE_GET_CAPS_V2_OUT_ENCAP_TYPE_VXLAN_WIDTH 1 +#define MC_CMD_MAE_GET_CAPS_V2_OUT_ENCAP_TYPE_NVGRE_OFST 4 +#define MC_CMD_MAE_GET_CAPS_V2_OUT_ENCAP_TYPE_NVGRE_LBN 1 +#define MC_CMD_MAE_GET_CAPS_V2_OUT_ENCAP_TYPE_NVGRE_WIDTH 1 +#define MC_CMD_MAE_GET_CAPS_V2_OUT_ENCAP_TYPE_GENEVE_OFST 4 +#define MC_CMD_MAE_GET_CAPS_V2_OUT_ENCAP_TYPE_GENEVE_LBN 2 +#define MC_CMD_MAE_GET_CAPS_V2_OUT_ENCAP_TYPE_GENEVE_WIDTH 1 +#define MC_CMD_MAE_GET_CAPS_V2_OUT_ENCAP_TYPE_L2GRE_OFST 4 +#define MC_CMD_MAE_GET_CAPS_V2_OUT_ENCAP_TYPE_L2GRE_LBN 3 +#define MC_CMD_MAE_GET_CAPS_V2_OUT_ENCAP_TYPE_L2GRE_WIDTH 1 +/* Deprecated alias for AR_COUNTERS. */ +#define MC_CMD_MAE_GET_CAPS_V2_OUT_COUNTERS_OFST 8 +#define MC_CMD_MAE_GET_CAPS_V2_OUT_COUNTERS_LEN 4 +/* The total number of AR counters available to allocate. */ +#define MC_CMD_MAE_GET_CAPS_V2_OUT_AR_COUNTERS_OFST 8 +#define MC_CMD_MAE_GET_CAPS_V2_OUT_AR_COUNTERS_LEN 4 +/* The total number of counter lists available to allocate. A value of zero + * indicates that counter lists are not supported by the NIC. (But single + * counters may still be.) + */ +#define MC_CMD_MAE_GET_CAPS_V2_OUT_COUNTER_LISTS_OFST 12 +#define MC_CMD_MAE_GET_CAPS_V2_OUT_COUNTER_LISTS_LEN 4 +/* The total number of encap header structures available to allocate. */ +#define MC_CMD_MAE_GET_CAPS_V2_OUT_ENCAP_HEADER_LIMIT_OFST 16 +#define MC_CMD_MAE_GET_CAPS_V2_OUT_ENCAP_HEADER_LIMIT_LEN 4 +/* Reserved. Should be zero. */ +#define MC_CMD_MAE_GET_CAPS_V2_OUT_RSVD_OFST 20 +#define MC_CMD_MAE_GET_CAPS_V2_OUT_RSVD_LEN 4 +/* The total number of action sets available to allocate. */ +#define MC_CMD_MAE_GET_CAPS_V2_OUT_ACTION_SETS_OFST 24 +#define MC_CMD_MAE_GET_CAPS_V2_OUT_ACTION_SETS_LEN 4 +/* The total number of action set lists available to allocate. */ +#define MC_CMD_MAE_GET_CAPS_V2_OUT_ACTION_SET_LISTS_OFST 28 +#define MC_CMD_MAE_GET_CAPS_V2_OUT_ACTION_SET_LISTS_LEN 4 +/* The total number of outer rules available to allocate. */ +#define MC_CMD_MAE_GET_CAPS_V2_OUT_OUTER_RULES_OFST 32 +#define MC_CMD_MAE_GET_CAPS_V2_OUT_OUTER_RULES_LEN 4 +/* The total number of action rules available to allocate. */ +#define MC_CMD_MAE_GET_CAPS_V2_OUT_ACTION_RULES_OFST 36 +#define MC_CMD_MAE_GET_CAPS_V2_OUT_ACTION_RULES_LEN 4 +/* The number of priorities available for ACTION_RULE filters. It is invalid to + * install a MATCH_ACTION filter with a priority number >= ACTION_PRIOS. + */ +#define MC_CMD_MAE_GET_CAPS_V2_OUT_ACTION_PRIOS_OFST 40 +#define MC_CMD_MAE_GET_CAPS_V2_OUT_ACTION_PRIOS_LEN 4 +/* The number of priorities available for OUTER_RULE filters. It is invalid to + * install an OUTER_RULE filter with a priority number >= OUTER_PRIOS. + */ +#define MC_CMD_MAE_GET_CAPS_V2_OUT_OUTER_PRIOS_OFST 44 +#define MC_CMD_MAE_GET_CAPS_V2_OUT_OUTER_PRIOS_LEN 4 +/* MAE API major version. Currently 1. If this field is not present in the + * response (i.e. response shorter than 384 bits), then its value is zero. If + * the value does not match the client's expectations, the client should raise + * a fatal error.
+ */ +#define MC_CMD_MAE_GET_CAPS_V2_OUT_API_VER_OFST 48 +#define MC_CMD_MAE_GET_CAPS_V2_OUT_API_VER_LEN 4 +/* Mask of supported counter types. Each bit position corresponds to a value of + * the MAE_COUNTER_TYPE enum. If this field is missing (i.e. V1 response), + * clients must assume that only AR counters are supported (i.e. + * COUNTER_TYPES_SUPPORTED==0x1). See also + * MC_CMD_MAE_COUNTERS_STREAM_START/COUNTER_TYPES_MASK. + */ +#define MC_CMD_MAE_GET_CAPS_V2_OUT_COUNTER_TYPES_SUPPORTED_OFST 52 +#define MC_CMD_MAE_GET_CAPS_V2_OUT_COUNTER_TYPES_SUPPORTED_LEN 4 +/* The total number of conntrack counters available to allocate. */ +#define MC_CMD_MAE_GET_CAPS_V2_OUT_CT_COUNTERS_OFST 56 +#define MC_CMD_MAE_GET_CAPS_V2_OUT_CT_COUNTERS_LEN 4 + +/* MC_CMD_MAE_GET_CAPS_V3_OUT msgresponse */ +#define MC_CMD_MAE_GET_CAPS_V3_OUT_LEN 64 +/* The number of field IDs that the NIC supports. Any field with an ID greater + * than or equal to the value returned in this field must be treated as having + * a support level of MAE_FIELD_UNSUPPORTED in all requests. + */ +#define MC_CMD_MAE_GET_CAPS_V3_OUT_MATCH_FIELD_COUNT_OFST 0 +#define MC_CMD_MAE_GET_CAPS_V3_OUT_MATCH_FIELD_COUNT_LEN 4 +#define MC_CMD_MAE_GET_CAPS_V3_OUT_ENCAP_TYPES_SUPPORTED_OFST 4 +#define MC_CMD_MAE_GET_CAPS_V3_OUT_ENCAP_TYPES_SUPPORTED_LEN 4 +#define MC_CMD_MAE_GET_CAPS_V3_OUT_ENCAP_TYPE_VXLAN_OFST 4 +#define MC_CMD_MAE_GET_CAPS_V3_OUT_ENCAP_TYPE_VXLAN_LBN 0 +#define MC_CMD_MAE_GET_CAPS_V3_OUT_ENCAP_TYPE_VXLAN_WIDTH 1 +#define MC_CMD_MAE_GET_CAPS_V3_OUT_ENCAP_TYPE_NVGRE_OFST 4 +#define MC_CMD_MAE_GET_CAPS_V3_OUT_ENCAP_TYPE_NVGRE_LBN 1 +#define MC_CMD_MAE_GET_CAPS_V3_OUT_ENCAP_TYPE_NVGRE_WIDTH 1 +#define MC_CMD_MAE_GET_CAPS_V3_OUT_ENCAP_TYPE_GENEVE_OFST 4 +#define MC_CMD_MAE_GET_CAPS_V3_OUT_ENCAP_TYPE_GENEVE_LBN 2 +#define MC_CMD_MAE_GET_CAPS_V3_OUT_ENCAP_TYPE_GENEVE_WIDTH 1 +#define MC_CMD_MAE_GET_CAPS_V3_OUT_ENCAP_TYPE_L2GRE_OFST 4 +#define MC_CMD_MAE_GET_CAPS_V3_OUT_ENCAP_TYPE_L2GRE_LBN 3 +#define MC_CMD_MAE_GET_CAPS_V3_OUT_ENCAP_TYPE_L2GRE_WIDTH 1 +/* Deprecated alias for AR_COUNTERS. */ +#define MC_CMD_MAE_GET_CAPS_V3_OUT_COUNTERS_OFST 8 +#define MC_CMD_MAE_GET_CAPS_V3_OUT_COUNTERS_LEN 4 +/* The total number of AR counters available to allocate. */ +#define MC_CMD_MAE_GET_CAPS_V3_OUT_AR_COUNTERS_OFST 8 +#define MC_CMD_MAE_GET_CAPS_V3_OUT_AR_COUNTERS_LEN 4 +/* The total number of counter lists available to allocate. A value of zero + * indicates that counter lists are not supported by the NIC. (But single + * counters may still be.) + */ +#define MC_CMD_MAE_GET_CAPS_V3_OUT_COUNTER_LISTS_OFST 12 +#define MC_CMD_MAE_GET_CAPS_V3_OUT_COUNTER_LISTS_LEN 4 +/* The total number of encap header structures available to allocate. */ +#define MC_CMD_MAE_GET_CAPS_V3_OUT_ENCAP_HEADER_LIMIT_OFST 16 +#define MC_CMD_MAE_GET_CAPS_V3_OUT_ENCAP_HEADER_LIMIT_LEN 4 +/* Reserved. Should be zero. */ +#define MC_CMD_MAE_GET_CAPS_V3_OUT_RSVD_OFST 20 +#define MC_CMD_MAE_GET_CAPS_V3_OUT_RSVD_LEN 4 +/* The total number of action sets available to allocate. */ +#define MC_CMD_MAE_GET_CAPS_V3_OUT_ACTION_SETS_OFST 24 +#define MC_CMD_MAE_GET_CAPS_V3_OUT_ACTION_SETS_LEN 4 +/* The total number of action set lists available to allocate. */ +#define MC_CMD_MAE_GET_CAPS_V3_OUT_ACTION_SET_LISTS_OFST 28 +#define MC_CMD_MAE_GET_CAPS_V3_OUT_ACTION_SET_LISTS_LEN 4 +/* The total number of outer rules available to allocate.
*/ +#define MC_CMD_MAE_GET_CAPS_V3_OUT_OUTER_RULES_OFST 32 +#define MC_CMD_MAE_GET_CAPS_V3_OUT_OUTER_RULES_LEN 4 +/* The total number of action rules available to allocate. */ +#define MC_CMD_MAE_GET_CAPS_V3_OUT_ACTION_RULES_OFST 36 +#define MC_CMD_MAE_GET_CAPS_V3_OUT_ACTION_RULES_LEN 4 +/* The number of priorities available for ACTION_RULE filters. It is invalid to + * install a MATCH_ACTION filter with a priority number >= ACTION_PRIOS. + */ +#define MC_CMD_MAE_GET_CAPS_V3_OUT_ACTION_PRIOS_OFST 40 +#define MC_CMD_MAE_GET_CAPS_V3_OUT_ACTION_PRIOS_LEN 4 +/* The number of priorities available for OUTER_RULE filters. It is invalid to + * install an OUTER_RULE filter with a priority number >= OUTER_PRIOS. + */ +#define MC_CMD_MAE_GET_CAPS_V3_OUT_OUTER_PRIOS_OFST 44 +#define MC_CMD_MAE_GET_CAPS_V3_OUT_OUTER_PRIOS_LEN 4 +/* MAE API major version. Currently 1. If this field is not present in the + * response (i.e. response shorter than 384 bits), then its value is zero. If + * the value does not match the client's expectations, the client should raise + * a fatal error. + */ +#define MC_CMD_MAE_GET_CAPS_V3_OUT_API_VER_OFST 48 +#define MC_CMD_MAE_GET_CAPS_V3_OUT_API_VER_LEN 4 +/* Mask of supported counter types. Each bit position corresponds to a value of + * the MAE_COUNTER_TYPE enum. If this field is missing (i.e. V1 response), + * clients must assume that only AR counters are supported (i.e. + * COUNTER_TYPES_SUPPORTED==0x1). See also + * MC_CMD_MAE_COUNTERS_STREAM_START/COUNTER_TYPES_MASK. + */ +#define MC_CMD_MAE_GET_CAPS_V3_OUT_COUNTER_TYPES_SUPPORTED_OFST 52 +#define MC_CMD_MAE_GET_CAPS_V3_OUT_COUNTER_TYPES_SUPPORTED_LEN 4 +/* The total number of conntrack counters available to allocate. */ +#define MC_CMD_MAE_GET_CAPS_V3_OUT_CT_COUNTERS_OFST 56 +#define MC_CMD_MAE_GET_CAPS_V3_OUT_CT_COUNTERS_LEN 4 +/* The total number of Outer Rule counters available to allocate. */ +#define MC_CMD_MAE_GET_CAPS_V3_OUT_OR_COUNTERS_OFST 60 +#define MC_CMD_MAE_GET_CAPS_V3_OUT_OR_COUNTERS_LEN 4 + + +/***********************************/ +/* MC_CMD_MAE_GET_AR_CAPS + * Get a level of support for match fields when used in match-action rules + */ +#define MC_CMD_MAE_GET_AR_CAPS 0x141 +#undef MC_CMD_0x141_PRIVILEGE_CTG + +#define MC_CMD_0x141_PRIVILEGE_CTG SRIOV_CTG_MAE + +/* MC_CMD_MAE_GET_AR_CAPS_IN msgrequest */ +#define MC_CMD_MAE_GET_AR_CAPS_IN_LEN 0 + +/* MC_CMD_MAE_GET_AR_CAPS_OUT msgresponse */ +#define MC_CMD_MAE_GET_AR_CAPS_OUT_LENMIN 4 +#define MC_CMD_MAE_GET_AR_CAPS_OUT_LENMAX 252 +#define MC_CMD_MAE_GET_AR_CAPS_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_MAE_GET_AR_CAPS_OUT_LEN(num) (4+4*(num)) +#define MC_CMD_MAE_GET_AR_CAPS_OUT_FIELD_FLAGS_NUM(len) (((len)-4)/4) +/* Number of fields actually returned in FIELD_FLAGS. */ +#define MC_CMD_MAE_GET_AR_CAPS_OUT_COUNT_OFST 0 +#define MC_CMD_MAE_GET_AR_CAPS_OUT_COUNT_LEN 4 +/* Array of values indicating the NIC's support for a given field, indexed by + * field id. The driver must ensure space for + * MC_CMD_MAE_GET_CAPS.MATCH_FIELD_COUNT entries in the array. + */ +#define MC_CMD_MAE_GET_AR_CAPS_OUT_FIELD_FLAGS_OFST 4 +#define MC_CMD_MAE_GET_AR_CAPS_OUT_FIELD_FLAGS_LEN 4 +#define MC_CMD_MAE_GET_AR_CAPS_OUT_FIELD_FLAGS_MINNUM 0 +#define MC_CMD_MAE_GET_AR_CAPS_OUT_FIELD_FLAGS_MAXNUM 62 +#define MC_CMD_MAE_GET_AR_CAPS_OUT_FIELD_FLAGS_MAXNUM_MCDI2 254 + + +/***********************************/ +/* MC_CMD_MAE_GET_OR_CAPS + * Get a level of support for fields used in outer rule keys.
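Since the V1/V2/V3 responses above are distinguished only by length, a client issues MC_CMD_MAE_GET_CAPS with the largest buffer it understands and inspects how many bytes the firmware actually returned. A minimal sketch of that probing, assuming the MCDI helpers (MCDI_DECLARE_BUF, MCDI_DWORD, efx_mcdi_rpc) declared by mcdi.h elsewhere in this patch; the struct and function names are illustrative:

/* Probe MAE capabilities and fall back to V1 semantics when the
 * firmware returns a short (pre-V2) response.
 */
struct example_mae_caps {
	u32 match_field_count;
	u32 action_prios;
	u32 counter_types;
};

static int example_mae_get_caps(struct efx_nic *efx,
				struct example_mae_caps *caps)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_GET_CAPS_V3_OUT_LEN);
	size_t outlen;
	int rc;

	/* The request has no payload (MC_CMD_MAE_GET_CAPS_IN_LEN == 0). */
	rc = efx_mcdi_rpc(efx, MC_CMD_MAE_GET_CAPS, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_MAE_GET_CAPS_OUT_LEN)
		return -EIO;
	caps->match_field_count =
		MCDI_DWORD(outbuf, MAE_GET_CAPS_OUT_MATCH_FIELD_COUNT);
	caps->action_prios = MCDI_DWORD(outbuf, MAE_GET_CAPS_OUT_ACTION_PRIOS);
	/* Per the comment above: a V1-length response implies AR counters
	 * only, i.e. COUNTER_TYPES_SUPPORTED == 0x1.
	 */
	if (outlen >= MC_CMD_MAE_GET_CAPS_V2_OUT_LEN)
		caps->counter_types = MCDI_DWORD(outbuf,
				MAE_GET_CAPS_V2_OUT_COUNTER_TYPES_SUPPORTED);
	else
		caps->counter_types = 0x1;
	return 0;
}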
+ */ +#define MC_CMD_MAE_GET_OR_CAPS 0x142 +#undef MC_CMD_0x142_PRIVILEGE_CTG + +#define MC_CMD_0x142_PRIVILEGE_CTG SRIOV_CTG_MAE + +/* MC_CMD_MAE_GET_OR_CAPS_IN msgrequest */ +#define MC_CMD_MAE_GET_OR_CAPS_IN_LEN 0 + +/* MC_CMD_MAE_GET_OR_CAPS_OUT msgresponse */ +#define MC_CMD_MAE_GET_OR_CAPS_OUT_LENMIN 4 +#define MC_CMD_MAE_GET_OR_CAPS_OUT_LENMAX 252 +#define MC_CMD_MAE_GET_OR_CAPS_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_MAE_GET_OR_CAPS_OUT_LEN(num) (4+4*(num)) +#define MC_CMD_MAE_GET_OR_CAPS_OUT_FIELD_FLAGS_NUM(len) (((len)-4)/4) +/* Number of fields actually returned in FIELD_FLAGS. */ +#define MC_CMD_MAE_GET_OR_CAPS_OUT_COUNT_OFST 0 +#define MC_CMD_MAE_GET_OR_CAPS_OUT_COUNT_LEN 4 +/* Same semantics as MC_CMD_MAE_GET_AR_CAPS.MAE_FIELD_FLAGS */ +#define MC_CMD_MAE_GET_OR_CAPS_OUT_FIELD_FLAGS_OFST 4 +#define MC_CMD_MAE_GET_OR_CAPS_OUT_FIELD_FLAGS_LEN 4 +#define MC_CMD_MAE_GET_OR_CAPS_OUT_FIELD_FLAGS_MINNUM 0 +#define MC_CMD_MAE_GET_OR_CAPS_OUT_FIELD_FLAGS_MAXNUM 62 +#define MC_CMD_MAE_GET_OR_CAPS_OUT_FIELD_FLAGS_MAXNUM_MCDI2 254 + + +/***********************************/ +/* MC_CMD_MAE_COUNTER_ALLOC + * Allocate match-action-engine counters, which can be referenced in various + * tables. + */ +#define MC_CMD_MAE_COUNTER_ALLOC 0x143 +#undef MC_CMD_0x143_PRIVILEGE_CTG + +#define MC_CMD_0x143_PRIVILEGE_CTG SRIOV_CTG_MAE + +/* MC_CMD_MAE_COUNTER_ALLOC_IN msgrequest: Using this is equivalent to using V2 + * with COUNTER_TYPE=AR. + */ +#define MC_CMD_MAE_COUNTER_ALLOC_IN_LEN 4 +/* The number of counters that the driver would like allocated */ +#define MC_CMD_MAE_COUNTER_ALLOC_IN_REQUESTED_COUNT_OFST 0 +#define MC_CMD_MAE_COUNTER_ALLOC_IN_REQUESTED_COUNT_LEN 4 + +/* MC_CMD_MAE_COUNTER_ALLOC_V2_IN msgrequest */ +#define MC_CMD_MAE_COUNTER_ALLOC_V2_IN_LEN 8 +/* The number of counters that the driver would like allocated */ +#define MC_CMD_MAE_COUNTER_ALLOC_V2_IN_REQUESTED_COUNT_OFST 0 +#define MC_CMD_MAE_COUNTER_ALLOC_V2_IN_REQUESTED_COUNT_LEN 4 +/* Which type of counter to allocate. */ +#define MC_CMD_MAE_COUNTER_ALLOC_V2_IN_COUNTER_TYPE_OFST 4 +#define MC_CMD_MAE_COUNTER_ALLOC_V2_IN_COUNTER_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* MAE_COUNTER_TYPE */ + +/* MC_CMD_MAE_COUNTER_ALLOC_OUT msgresponse */ +#define MC_CMD_MAE_COUNTER_ALLOC_OUT_LENMIN 12 +#define MC_CMD_MAE_COUNTER_ALLOC_OUT_LENMAX 252 +#define MC_CMD_MAE_COUNTER_ALLOC_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_MAE_COUNTER_ALLOC_OUT_LEN(num) (8+4*(num)) +#define MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_NUM(len) (((len)-8)/4) +/* Generation count. Packets with generation count >= GENERATION_COUNT will + * contain valid counter values for counter IDs allocated in this call, unless + * the counter values are zero and zero squash is enabled. Note that there is + * an independent GENERATION_COUNT object per counter type, and that generation + * counts wrap from 0xffffffff to 1. + */ +#define MC_CMD_MAE_COUNTER_ALLOC_OUT_GENERATION_COUNT_OFST 0 +#define MC_CMD_MAE_COUNTER_ALLOC_OUT_GENERATION_COUNT_LEN 4 +/* enum: Generation counter 0 is reserved and unused. */ +#define MC_CMD_MAE_COUNTER_ALLOC_OUT_GENERATION_COUNT_INVALID 0x0 +/* The number of counter IDs that the NIC allocated. It is never less than 1; + * failure to allocate a single counter will cause an error to be returned. It + * is never greater than REQUESTED_COUNT, but may be less. + */ +#define MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_COUNT_OFST 4 +#define MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_COUNT_LEN 4 +/* An array containing the IDs for the counters allocated. 
*/ +#define MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_OFST 8 +#define MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_LEN 4 +#define MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_MINNUM 1 +#define MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_MAXNUM 61 +#define MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_MAXNUM_MCDI2 253 +/* enum: A counter ID that is guaranteed never to represent a real counter */ +#define MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_NULL 0xffffffff +/* Other enum values, see field(s): */ +/* MAE_COUNTER_ID */ + + +/***********************************/ +/* MC_CMD_MAE_COUNTER_FREE + * Free match-action-engine counters + */ +#define MC_CMD_MAE_COUNTER_FREE 0x144 +#undef MC_CMD_0x144_PRIVILEGE_CTG + +#define MC_CMD_0x144_PRIVILEGE_CTG SRIOV_CTG_MAE + +/* MC_CMD_MAE_COUNTER_FREE_IN msgrequest: Using this is equivalent to using V2 + * with COUNTER_TYPE=AR. + */ +#define MC_CMD_MAE_COUNTER_FREE_IN_LENMIN 8 +#define MC_CMD_MAE_COUNTER_FREE_IN_LENMAX 132 +#define MC_CMD_MAE_COUNTER_FREE_IN_LENMAX_MCDI2 132 +#define MC_CMD_MAE_COUNTER_FREE_IN_LEN(num) (4+4*(num)) +#define MC_CMD_MAE_COUNTER_FREE_IN_FREE_COUNTER_ID_NUM(len) (((len)-4)/4) +/* The number of counter IDs to be freed. */ +#define MC_CMD_MAE_COUNTER_FREE_IN_COUNTER_ID_COUNT_OFST 0 +#define MC_CMD_MAE_COUNTER_FREE_IN_COUNTER_ID_COUNT_LEN 4 +/* An array containing the counter IDs to be freed. */ +#define MC_CMD_MAE_COUNTER_FREE_IN_FREE_COUNTER_ID_OFST 4 +#define MC_CMD_MAE_COUNTER_FREE_IN_FREE_COUNTER_ID_LEN 4 +#define MC_CMD_MAE_COUNTER_FREE_IN_FREE_COUNTER_ID_MINNUM 1 +#define MC_CMD_MAE_COUNTER_FREE_IN_FREE_COUNTER_ID_MAXNUM 32 +#define MC_CMD_MAE_COUNTER_FREE_IN_FREE_COUNTER_ID_MAXNUM_MCDI2 32 + +/* MC_CMD_MAE_COUNTER_FREE_V2_IN msgrequest */ +#define MC_CMD_MAE_COUNTER_FREE_V2_IN_LEN 136 +/* The number of counter IDs to be freed. */ +#define MC_CMD_MAE_COUNTER_FREE_V2_IN_COUNTER_ID_COUNT_OFST 0 +#define MC_CMD_MAE_COUNTER_FREE_V2_IN_COUNTER_ID_COUNT_LEN 4 +/* An array containing the counter IDs to be freed. */ +#define MC_CMD_MAE_COUNTER_FREE_V2_IN_FREE_COUNTER_ID_OFST 4 +#define MC_CMD_MAE_COUNTER_FREE_V2_IN_FREE_COUNTER_ID_LEN 4 +#define MC_CMD_MAE_COUNTER_FREE_V2_IN_FREE_COUNTER_ID_MINNUM 1 +#define MC_CMD_MAE_COUNTER_FREE_V2_IN_FREE_COUNTER_ID_MAXNUM 32 +#define MC_CMD_MAE_COUNTER_FREE_V2_IN_FREE_COUNTER_ID_MAXNUM_MCDI2 32 +/* Which type of counter to free. */ +#define MC_CMD_MAE_COUNTER_FREE_V2_IN_COUNTER_TYPE_OFST 132 +#define MC_CMD_MAE_COUNTER_FREE_V2_IN_COUNTER_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* MAE_COUNTER_TYPE */ + +/* MC_CMD_MAE_COUNTER_FREE_OUT msgresponse */ +#define MC_CMD_MAE_COUNTER_FREE_OUT_LENMIN 12 +#define MC_CMD_MAE_COUNTER_FREE_OUT_LENMAX 136 +#define MC_CMD_MAE_COUNTER_FREE_OUT_LENMAX_MCDI2 136 +#define MC_CMD_MAE_COUNTER_FREE_OUT_LEN(num) (8+4*(num)) +#define MC_CMD_MAE_COUNTER_FREE_OUT_FREED_COUNTER_ID_NUM(len) (((len)-8)/4) +/* Generation count. A packet with generation count == GENERATION_COUNT will + * contain the final values for these counter IDs, unless the counter values + * are zero and zero squash is enabled. Note that the GENERATION_COUNT value is + * specific to the COUNTER_TYPE (IDENTIFIER field in packet header). Receiving + * a packet with generation count > GENERATION_COUNT guarantees that no more + * values will be written for these counters. If values for these counter IDs + * are present, the counter ID has been reallocated. A counter ID will not be + * reallocated within a single read cycle as this would merge increments from + * the 'old' and 'new' counters. 
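Allocating a single counter of a given MAE_COUNTER_TYPE, and capturing the generation count the comments above refer to, then looks roughly as follows. A sketch under the same assumed MCDI helpers (plus MCDI_SET_DWORD); names are illustrative:

/* Allocate one MAE counter of the given type via the V2 request. */
static int example_mae_counter_alloc(struct efx_nic *efx, u32 type,
				     u32 *fw_id, u32 *gen_count)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_COUNTER_ALLOC_OUT_LEN(1));
	MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_COUNTER_ALLOC_V2_IN_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, MAE_COUNTER_ALLOC_V2_IN_REQUESTED_COUNT, 1);
	MCDI_SET_DWORD(inbuf, MAE_COUNTER_ALLOC_V2_IN_COUNTER_TYPE, type);
	rc = efx_mcdi_rpc(efx, MC_CMD_MAE_COUNTER_ALLOC, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	/* COUNTER_ID_COUNT is never less than 1 on success, so a response
	 * shorter than LEN(1) would be a protocol violation.
	 */
	if (outlen < MC_CMD_MAE_COUNTER_ALLOC_OUT_LEN(1))
		return -EIO;
	*fw_id = MCDI_DWORD(outbuf, MAE_COUNTER_ALLOC_OUT_COUNTER_ID);
	/* Stream packets carry valid values for this ID once their
	 * generation count reaches this value (see GENERATION_COUNT above).
	 */
	*gen_count = MCDI_DWORD(outbuf, MAE_COUNTER_ALLOC_OUT_GENERATION_COUNT);
	return 0;
}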
GENERATION_COUNT_INVALID is reserved and + * unused. + */ +#define MC_CMD_MAE_COUNTER_FREE_OUT_GENERATION_COUNT_OFST 0 +#define MC_CMD_MAE_COUNTER_FREE_OUT_GENERATION_COUNT_LEN 4 +/* The number of counter IDs actually freed. It is never less than 1; failure + * to free a single counter will cause an error to be returned. It is never + * greater than the number that were requested to be freed, but may be less if + * counters could not be freed. + */ +#define MC_CMD_MAE_COUNTER_FREE_OUT_COUNTER_ID_COUNT_OFST 4 +#define MC_CMD_MAE_COUNTER_FREE_OUT_COUNTER_ID_COUNT_LEN 4 +/* An array containing the IDs for the counters that were freed. Note that + * failure to free a counter can only occur on incorrect driver behaviour, so + * asserting that the expected counters were freed is reasonable. When + * debugging, attempting to free a single counter at a time will provide a + * reason for the failure to free said counter. + */ +#define MC_CMD_MAE_COUNTER_FREE_OUT_FREED_COUNTER_ID_OFST 8 +#define MC_CMD_MAE_COUNTER_FREE_OUT_FREED_COUNTER_ID_LEN 4 +#define MC_CMD_MAE_COUNTER_FREE_OUT_FREED_COUNTER_ID_MINNUM 1 +#define MC_CMD_MAE_COUNTER_FREE_OUT_FREED_COUNTER_ID_MAXNUM 32 +#define MC_CMD_MAE_COUNTER_FREE_OUT_FREED_COUNTER_ID_MAXNUM_MCDI2 32 + + +/***********************************/ +/* MC_CMD_MAE_COUNTERS_STREAM_START + * Start streaming counter values, specifying an RxQ to deliver packets to. + * Counters allocated to the calling function will be written in a round robin + * at a fixed cycle rate, assuming sufficient credits are available. The driver + * may cause the counter values to be written at a slower rate by constraining + * the availability of credits. Note that if the driver wishes to deliver + * packets to a different queue, it must call MAE_COUNTERS_STREAM_STOP to stop + * delivering packets to the current queue first. + */ +#define MC_CMD_MAE_COUNTERS_STREAM_START 0x151 +#undef MC_CMD_0x151_PRIVILEGE_CTG + +#define MC_CMD_0x151_PRIVILEGE_CTG SRIOV_CTG_MAE + +/* MC_CMD_MAE_COUNTERS_STREAM_START_IN msgrequest: Using V1 is equivalent to V2 + * with COUNTER_TYPES_MASK=0x1 (i.e. AR counters only). + */ +#define MC_CMD_MAE_COUNTERS_STREAM_START_IN_LEN 8 +/* The RxQ to write packets to. */ +#define MC_CMD_MAE_COUNTERS_STREAM_START_IN_QID_OFST 0 +#define MC_CMD_MAE_COUNTERS_STREAM_START_IN_QID_LEN 2 +/* Maximum size in bytes of packets that may be written to the RxQ. */ +#define MC_CMD_MAE_COUNTERS_STREAM_START_IN_PACKET_SIZE_OFST 2 +#define MC_CMD_MAE_COUNTERS_STREAM_START_IN_PACKET_SIZE_LEN 2 +/* Optional flags. */ +#define MC_CMD_MAE_COUNTERS_STREAM_START_IN_FLAGS_OFST 4 +#define MC_CMD_MAE_COUNTERS_STREAM_START_IN_FLAGS_LEN 4 +#define MC_CMD_MAE_COUNTERS_STREAM_START_IN_ZERO_SQUASH_DISABLE_OFST 4 +#define MC_CMD_MAE_COUNTERS_STREAM_START_IN_ZERO_SQUASH_DISABLE_LBN 0 +#define MC_CMD_MAE_COUNTERS_STREAM_START_IN_ZERO_SQUASH_DISABLE_WIDTH 1 +#define MC_CMD_MAE_COUNTERS_STREAM_START_IN_COUNTER_STALL_EN_OFST 4 +#define MC_CMD_MAE_COUNTERS_STREAM_START_IN_COUNTER_STALL_EN_LBN 1 +#define MC_CMD_MAE_COUNTERS_STREAM_START_IN_COUNTER_STALL_EN_WIDTH 1 + +/* MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN msgrequest */ +#define MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_LEN 12 +/* The RxQ to write packets to. */ +#define MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_QID_OFST 0 +#define MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_QID_LEN 2 +/* Maximum size in bytes of packets that may be written to the RxQ.
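Freeing follows the same shape as allocation; per the MC_CMD_MAE_COUNTER_FREE comments above, a mismatch between the requested and freed IDs indicates a driver bug, so treating it as an I/O error is reasonable. A sketch with the same assumed helpers:

/* Free a single MAE counter via the V2 request. 'type' is the
 * MAE_COUNTER_TYPE value the counter was allocated with.
 */
static int example_mae_counter_free(struct efx_nic *efx, u32 type, u32 fw_id)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_COUNTER_FREE_V2_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_COUNTER_FREE_OUT_LEN(1));
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, MAE_COUNTER_FREE_V2_IN_COUNTER_ID_COUNT, 1);
	MCDI_SET_DWORD(inbuf, MAE_COUNTER_FREE_V2_IN_FREE_COUNTER_ID, fw_id);
	MCDI_SET_DWORD(inbuf, MAE_COUNTER_FREE_V2_IN_COUNTER_TYPE, type);
	rc = efx_mcdi_rpc(efx, MC_CMD_MAE_COUNTER_FREE, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	/* Failure to free can only result from driver error, so verify
	 * that the ID the firmware reports freed is the one we passed.
	 */
	if (outlen < MC_CMD_MAE_COUNTER_FREE_OUT_LEN(1) ||
	    MCDI_DWORD(outbuf, MAE_COUNTER_FREE_OUT_FREED_COUNTER_ID) != fw_id)
		return -EIO;
	return 0;
}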
*/ +#define MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_PACKET_SIZE_OFST 2 +#define MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_PACKET_SIZE_LEN 2 +/* Optional flags. */ +#define MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_FLAGS_OFST 4 +#define MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_FLAGS_LEN 4 +#define MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_ZERO_SQUASH_DISABLE_OFST 4 +#define MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_ZERO_SQUASH_DISABLE_LBN 0 +#define MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_ZERO_SQUASH_DISABLE_WIDTH 1 +#define MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_COUNTER_STALL_EN_OFST 4 +#define MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_COUNTER_STALL_EN_LBN 1 +#define MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_COUNTER_STALL_EN_WIDTH 1 +/* Mask of which counter types should be reported. Each bit position + * corresponds to a value of the MAE_COUNTER_TYPE enum. For example a value of + * 0x3 requests both AR and CT counters. A value of zero is invalid. Counter + * types not selected by the mask value won't be included in the stream. If a + * client wishes to change which counter types are reported, it must first call + * MAE_COUNTERS_STREAM_STOP, then restart it with the new mask value. + * Requesting a counter type which isn't supported by firmware (reported in + * MC_CMD_MAE_GET_CAPS/COUNTER_TYPES_SUPPORTED) will result in ENOTSUP. + */ +#define MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_COUNTER_TYPES_MASK_OFST 8 +#define MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_COUNTER_TYPES_MASK_LEN 4 + +/* MC_CMD_MAE_COUNTERS_STREAM_START_OUT msgresponse */ +#define MC_CMD_MAE_COUNTERS_STREAM_START_OUT_LEN 4 +#define MC_CMD_MAE_COUNTERS_STREAM_START_OUT_FLAGS_OFST 0 +#define MC_CMD_MAE_COUNTERS_STREAM_START_OUT_FLAGS_LEN 4 +#define MC_CMD_MAE_COUNTERS_STREAM_START_OUT_USES_CREDITS_OFST 0 +#define MC_CMD_MAE_COUNTERS_STREAM_START_OUT_USES_CREDITS_LBN 0 +#define MC_CMD_MAE_COUNTERS_STREAM_START_OUT_USES_CREDITS_WIDTH 1 + + +/***********************************/ +/* MC_CMD_MAE_COUNTERS_STREAM_STOP + * Stop streaming counter values to the specified RxQ. + */ +#define MC_CMD_MAE_COUNTERS_STREAM_STOP 0x152 +#undef MC_CMD_0x152_PRIVILEGE_CTG + +#define MC_CMD_0x152_PRIVILEGE_CTG SRIOV_CTG_MAE + +/* MC_CMD_MAE_COUNTERS_STREAM_STOP_IN msgrequest */ +#define MC_CMD_MAE_COUNTERS_STREAM_STOP_IN_LEN 2 +/* The RxQ to stop writing packets to. */ +#define MC_CMD_MAE_COUNTERS_STREAM_STOP_IN_QID_OFST 0 +#define MC_CMD_MAE_COUNTERS_STREAM_STOP_IN_QID_LEN 2 + +/* MC_CMD_MAE_COUNTERS_STREAM_STOP_OUT msgresponse */ +#define MC_CMD_MAE_COUNTERS_STREAM_STOP_OUT_LEN 4 +/* Generation count for AR counters. The final set of AR counter values will be + * written out in packets with count == GENERATION_COUNT. An empty packet with + * count > GENERATION_COUNT indicates that no more counter values of this type + * will be written to this stream. GENERATION_COUNT_INVALID is reserved and + * unused. + */ +#define MC_CMD_MAE_COUNTERS_STREAM_STOP_OUT_GENERATION_COUNT_OFST 0 +#define MC_CMD_MAE_COUNTERS_STREAM_STOP_OUT_GENERATION_COUNT_LEN 4 + +/* MC_CMD_MAE_COUNTERS_STREAM_STOP_V2_OUT msgresponse */ +#define MC_CMD_MAE_COUNTERS_STREAM_STOP_V2_OUT_LENMIN 4 +#define MC_CMD_MAE_COUNTERS_STREAM_STOP_V2_OUT_LENMAX 32 +#define MC_CMD_MAE_COUNTERS_STREAM_STOP_V2_OUT_LENMAX_MCDI2 32 +#define MC_CMD_MAE_COUNTERS_STREAM_STOP_V2_OUT_LEN(num) (0+4*(num)) +#define MC_CMD_MAE_COUNTERS_STREAM_STOP_V2_OUT_GENERATION_COUNT_NUM(len) (((len)-0)/4) +/* Array of generation counts, indexed by MAE_COUNTER_TYPE. 
Note that since + * MAE_COUNTER_TYPE_AR==0, this response is backwards-compatible with V1. The + * final set of counter values will be written out in packets with count == + * GENERATION_COUNT. An empty packet with count > GENERATION_COUNT indicates + * that no more counter values of this type will be written to this stream. + * GENERATION_COUNT_INVALID is reserved and unused. + */ +#define MC_CMD_MAE_COUNTERS_STREAM_STOP_V2_OUT_GENERATION_COUNT_OFST 0 +#define MC_CMD_MAE_COUNTERS_STREAM_STOP_V2_OUT_GENERATION_COUNT_LEN 4 +#define MC_CMD_MAE_COUNTERS_STREAM_STOP_V2_OUT_GENERATION_COUNT_MINNUM 1 +#define MC_CMD_MAE_COUNTERS_STREAM_STOP_V2_OUT_GENERATION_COUNT_MAXNUM 8 +#define MC_CMD_MAE_COUNTERS_STREAM_STOP_V2_OUT_GENERATION_COUNT_MAXNUM_MCDI2 8 + + +/***********************************/ +/* MC_CMD_MAE_COUNTERS_STREAM_GIVE_CREDITS + * Give a number of credits to the packetiser. Each credit received allows the + * MC to write one packet to the RxQ, therefore for each credit the driver must + * have written sufficient descriptors for a packet of length + * MAE_COUNTERS_PACKETISER_STREAM_START/PACKET_SIZE and rung the doorbell. + */ +#define MC_CMD_MAE_COUNTERS_STREAM_GIVE_CREDITS 0x153 +#undef MC_CMD_0x153_PRIVILEGE_CTG + +#define MC_CMD_0x153_PRIVILEGE_CTG SRIOV_CTG_MAE + +/* MC_CMD_MAE_COUNTERS_STREAM_GIVE_CREDITS_IN msgrequest */ +#define MC_CMD_MAE_COUNTERS_STREAM_GIVE_CREDITS_IN_LEN 4 +/* Number of credits to give to the packetiser. */ +#define MC_CMD_MAE_COUNTERS_STREAM_GIVE_CREDITS_IN_NUM_CREDITS_OFST 0 +#define MC_CMD_MAE_COUNTERS_STREAM_GIVE_CREDITS_IN_NUM_CREDITS_LEN 4 + +/* MC_CMD_MAE_COUNTERS_STREAM_GIVE_CREDITS_OUT msgresponse */ +#define MC_CMD_MAE_COUNTERS_STREAM_GIVE_CREDITS_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_MAE_ENCAP_HEADER_ALLOC + * Allocate an encapsulation header to be used in an Action Rule response. The + * header must be constructed as a valid packet with 0-length payload. + * Specifically, the L3/L4 lengths & checksums will only be incrementally fixed + * by the NIC, rather than recomputed entirely. Currently only IPv4, IPv6 and + * UDP are supported. If the maximum number of headers have already been + * allocated then the command will fail with MC_CMD_ERR_ENOSPC. 
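Credits gate how quickly the packetiser may write into the RxQ: for each credit granted, the driver must already have posted descriptors for one PACKET_SIZE-sized packet and rung the doorbell. A minimal sketch of the grant itself, under the same assumed MCDI helpers:

/* Grant the counter packetiser permission to write 'credits' more
 * packets to the RxQ; only meaningful if STREAM_START reported
 * USES_CREDITS in its response flags.
 */
static int example_mae_give_credits(struct efx_nic *efx, u32 credits)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_COUNTERS_STREAM_GIVE_CREDITS_IN_LEN);

	MCDI_SET_DWORD(inbuf, MAE_COUNTERS_STREAM_GIVE_CREDITS_IN_NUM_CREDITS,
		       credits);
	/* No response payload is defined, so no out buffer is needed. */
	return efx_mcdi_rpc(efx, MC_CMD_MAE_COUNTERS_STREAM_GIVE_CREDITS,
			    inbuf, sizeof(inbuf), NULL, 0, NULL);
}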
+ */ +#define MC_CMD_MAE_ENCAP_HEADER_ALLOC 0x148 +#undef MC_CMD_0x148_PRIVILEGE_CTG + +#define MC_CMD_0x148_PRIVILEGE_CTG SRIOV_CTG_MAE + +/* MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN msgrequest */ +#define MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN_LENMIN 4 +#define MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN_LENMAX 252 +#define MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN_LENMAX_MCDI2 1020 +#define MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN_LEN(num) (4+1*(num)) +#define MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN_HDR_DATA_NUM(len) (((len)-4)/1) +#define MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN_ENCAP_TYPE_OFST 0 +#define MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN_ENCAP_TYPE_LEN 4 +#define MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN_HDR_DATA_OFST 4 +#define MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN_HDR_DATA_LEN 1 +#define MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN_HDR_DATA_MINNUM 0 +#define MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN_HDR_DATA_MAXNUM 248 +#define MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN_HDR_DATA_MAXNUM_MCDI2 1016 + +/* MC_CMD_MAE_ENCAP_HEADER_ALLOC_OUT msgresponse */ +#define MC_CMD_MAE_ENCAP_HEADER_ALLOC_OUT_LEN 4 +#define MC_CMD_MAE_ENCAP_HEADER_ALLOC_OUT_ENCAP_HEADER_ID_OFST 0 +#define MC_CMD_MAE_ENCAP_HEADER_ALLOC_OUT_ENCAP_HEADER_ID_LEN 4 +/* enum: An encap metadata ID that is guaranteed never to represent real encap + * metadata + */ +#define MC_CMD_MAE_ENCAP_HEADER_ALLOC_OUT_ENCAP_HEADER_ID_NULL 0xffffffff + + +/***********************************/ +/* MC_CMD_MAE_ENCAP_HEADER_UPDATE + * Update encap action metadata. See comments for MAE_ENCAP_HEADER_ALLOC. + */ +#define MC_CMD_MAE_ENCAP_HEADER_UPDATE 0x149 +#undef MC_CMD_0x149_PRIVILEGE_CTG + +#define MC_CMD_0x149_PRIVILEGE_CTG SRIOV_CTG_MAE + +/* MC_CMD_MAE_ENCAP_HEADER_UPDATE_IN msgrequest */ +#define MC_CMD_MAE_ENCAP_HEADER_UPDATE_IN_LENMIN 8 +#define MC_CMD_MAE_ENCAP_HEADER_UPDATE_IN_LENMAX 252 +#define MC_CMD_MAE_ENCAP_HEADER_UPDATE_IN_LENMAX_MCDI2 1020 +#define MC_CMD_MAE_ENCAP_HEADER_UPDATE_IN_LEN(num) (8+1*(num)) +#define MC_CMD_MAE_ENCAP_HEADER_UPDATE_IN_HDR_DATA_NUM(len) (((len)-8)/1) +#define MC_CMD_MAE_ENCAP_HEADER_UPDATE_IN_EH_ID_OFST 0 +#define MC_CMD_MAE_ENCAP_HEADER_UPDATE_IN_EH_ID_LEN 4 +#define MC_CMD_MAE_ENCAP_HEADER_UPDATE_IN_ENCAP_TYPE_OFST 4 +#define MC_CMD_MAE_ENCAP_HEADER_UPDATE_IN_ENCAP_TYPE_LEN 4 +#define MC_CMD_MAE_ENCAP_HEADER_UPDATE_IN_HDR_DATA_OFST 8 +#define MC_CMD_MAE_ENCAP_HEADER_UPDATE_IN_HDR_DATA_LEN 1 +#define MC_CMD_MAE_ENCAP_HEADER_UPDATE_IN_HDR_DATA_MINNUM 0 +#define MC_CMD_MAE_ENCAP_HEADER_UPDATE_IN_HDR_DATA_MAXNUM 244 +#define MC_CMD_MAE_ENCAP_HEADER_UPDATE_IN_HDR_DATA_MAXNUM_MCDI2 1012 + +/* MC_CMD_MAE_ENCAP_HEADER_UPDATE_OUT msgresponse */ +#define MC_CMD_MAE_ENCAP_HEADER_UPDATE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_MAE_ENCAP_HEADER_FREE + * Free encap action metadata + */ +#define MC_CMD_MAE_ENCAP_HEADER_FREE 0x14a +#undef MC_CMD_0x14a_PRIVILEGE_CTG + +#define MC_CMD_0x14a_PRIVILEGE_CTG SRIOV_CTG_MAE + +/* MC_CMD_MAE_ENCAP_HEADER_FREE_IN msgrequest */ +#define MC_CMD_MAE_ENCAP_HEADER_FREE_IN_LENMIN 4 +#define MC_CMD_MAE_ENCAP_HEADER_FREE_IN_LENMAX 128 +#define MC_CMD_MAE_ENCAP_HEADER_FREE_IN_LENMAX_MCDI2 128 +#define MC_CMD_MAE_ENCAP_HEADER_FREE_IN_LEN(num) (0+4*(num)) +#define MC_CMD_MAE_ENCAP_HEADER_FREE_IN_EH_ID_NUM(len) (((len)-0)/4) +/* Same semantics as MC_CMD_MAE_COUNTER_FREE */ +#define MC_CMD_MAE_ENCAP_HEADER_FREE_IN_EH_ID_OFST 0 +#define MC_CMD_MAE_ENCAP_HEADER_FREE_IN_EH_ID_LEN 4 +#define MC_CMD_MAE_ENCAP_HEADER_FREE_IN_EH_ID_MINNUM 1 +#define MC_CMD_MAE_ENCAP_HEADER_FREE_IN_EH_ID_MAXNUM 32 +#define MC_CMD_MAE_ENCAP_HEADER_FREE_IN_EH_ID_MAXNUM_MCDI2 32 + +/* 
MC_CMD_MAE_ENCAP_HEADER_FREE_OUT msgresponse */ +#define MC_CMD_MAE_ENCAP_HEADER_FREE_OUT_LENMIN 4 +#define MC_CMD_MAE_ENCAP_HEADER_FREE_OUT_LENMAX 128 +#define MC_CMD_MAE_ENCAP_HEADER_FREE_OUT_LENMAX_MCDI2 128 +#define MC_CMD_MAE_ENCAP_HEADER_FREE_OUT_LEN(num) (0+4*(num)) +#define MC_CMD_MAE_ENCAP_HEADER_FREE_OUT_FREED_EH_ID_NUM(len) (((len)-0)/4) +/* Same semantics as MC_CMD_MAE_COUNTER_FREE */ +#define MC_CMD_MAE_ENCAP_HEADER_FREE_OUT_FREED_EH_ID_OFST 0 +#define MC_CMD_MAE_ENCAP_HEADER_FREE_OUT_FREED_EH_ID_LEN 4 +#define MC_CMD_MAE_ENCAP_HEADER_FREE_OUT_FREED_EH_ID_MINNUM 1 +#define MC_CMD_MAE_ENCAP_HEADER_FREE_OUT_FREED_EH_ID_MAXNUM 32 +#define MC_CMD_MAE_ENCAP_HEADER_FREE_OUT_FREED_EH_ID_MAXNUM_MCDI2 32 + + +/***********************************/ +/* MC_CMD_MAE_MAC_ADDR_ALLOC + * Allocate MAC address. Hardware implementations have MAC addresses programmed + * into an indirection table, and clients should take care not to allocate the + * same MAC address twice (but instead reuse its ID). If the maximum number of + * MAC addresses have already been allocated then the command will fail with + * MC_CMD_ERR_ENOSPC. + */ +#define MC_CMD_MAE_MAC_ADDR_ALLOC 0x15e +#undef MC_CMD_0x15e_PRIVILEGE_CTG + +#define MC_CMD_0x15e_PRIVILEGE_CTG SRIOV_CTG_MAE + +/* MC_CMD_MAE_MAC_ADDR_ALLOC_IN msgrequest */ +#define MC_CMD_MAE_MAC_ADDR_ALLOC_IN_LEN 6 +/* MAC address as bytes in network order. */ +#define MC_CMD_MAE_MAC_ADDR_ALLOC_IN_MAC_ADDR_OFST 0 +#define MC_CMD_MAE_MAC_ADDR_ALLOC_IN_MAC_ADDR_LEN 6 + +/* MC_CMD_MAE_MAC_ADDR_ALLOC_OUT msgresponse */ +#define MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_LEN 4 +#define MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_MAC_ID_OFST 0 +#define MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_MAC_ID_LEN 4 +/* enum: A MAC address ID that is guaranteed never to represent a real MAC + * address. + */ +#define MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_MAC_ID_NULL 0xffffffff + + +/***********************************/ +/* MC_CMD_MAE_MAC_ADDR_FREE + * Free MAC address.
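Because the hardware stores each address once in an indirection table, callers are expected to deduplicate addresses and reuse the returned MAC_ID rather than allocating the same address twice. A sketch of the allocation, assuming ether_addr_copy() from <linux/etherdevice.h> and the MCDI_PTR() helper from mcdi.h:

/* Register a MAC address for use in SRC/DST MAC replacement actions. */
static int example_mae_mac_addr_alloc(struct efx_nic *efx,
				      const u8 *addr, u32 *mac_id)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_MAC_ADDR_ALLOC_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_LEN);
	size_t outlen;
	int rc;

	/* The 6 address bytes go into the request in network order. */
	ether_addr_copy(MCDI_PTR(inbuf, MAE_MAC_ADDR_ALLOC_IN_MAC_ADDR), addr);
	rc = efx_mcdi_rpc(efx, MC_CMD_MAE_MAC_ADDR_ALLOC, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_LEN)
		return -EIO;
	*mac_id = MCDI_DWORD(outbuf, MAE_MAC_ADDR_ALLOC_OUT_MAC_ID);
	return 0;
}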
+ */ +#define MC_CMD_MAE_MAC_ADDR_FREE 0x15f +#undef MC_CMD_0x15f_PRIVILEGE_CTG + +#define MC_CMD_0x15f_PRIVILEGE_CTG SRIOV_CTG_MAE + +/* MC_CMD_MAE_MAC_ADDR_FREE_IN msgrequest */ +#define MC_CMD_MAE_MAC_ADDR_FREE_IN_LENMIN 4 +#define MC_CMD_MAE_MAC_ADDR_FREE_IN_LENMAX 128 +#define MC_CMD_MAE_MAC_ADDR_FREE_IN_LENMAX_MCDI2 128 +#define MC_CMD_MAE_MAC_ADDR_FREE_IN_LEN(num) (0+4*(num)) +#define MC_CMD_MAE_MAC_ADDR_FREE_IN_MAC_ID_NUM(len) (((len)-0)/4) +/* Same semantics as MC_CMD_MAE_COUNTER_FREE */ +#define MC_CMD_MAE_MAC_ADDR_FREE_IN_MAC_ID_OFST 0 +#define MC_CMD_MAE_MAC_ADDR_FREE_IN_MAC_ID_LEN 4 +#define MC_CMD_MAE_MAC_ADDR_FREE_IN_MAC_ID_MINNUM 1 +#define MC_CMD_MAE_MAC_ADDR_FREE_IN_MAC_ID_MAXNUM 32 +#define MC_CMD_MAE_MAC_ADDR_FREE_IN_MAC_ID_MAXNUM_MCDI2 32 + +/* MC_CMD_MAE_MAC_ADDR_FREE_OUT msgresponse */ +#define MC_CMD_MAE_MAC_ADDR_FREE_OUT_LENMIN 4 +#define MC_CMD_MAE_MAC_ADDR_FREE_OUT_LENMAX 128 +#define MC_CMD_MAE_MAC_ADDR_FREE_OUT_LENMAX_MCDI2 128 +#define MC_CMD_MAE_MAC_ADDR_FREE_OUT_LEN(num) (0+4*(num)) +#define MC_CMD_MAE_MAC_ADDR_FREE_OUT_FREED_MAC_ID_NUM(len) (((len)-0)/4) +/* Same semantics as MC_CMD_MAE_COUNTER_FREE */ +#define MC_CMD_MAE_MAC_ADDR_FREE_OUT_FREED_MAC_ID_OFST 0 +#define MC_CMD_MAE_MAC_ADDR_FREE_OUT_FREED_MAC_ID_LEN 4 +#define MC_CMD_MAE_MAC_ADDR_FREE_OUT_FREED_MAC_ID_MINNUM 1 +#define MC_CMD_MAE_MAC_ADDR_FREE_OUT_FREED_MAC_ID_MAXNUM 32 +#define MC_CMD_MAE_MAC_ADDR_FREE_OUT_FREED_MAC_ID_MAXNUM_MCDI2 32 + + +/***********************************/ +/* MC_CMD_MAE_ACTION_SET_ALLOC + * Allocate an action set, which can be referenced either in response to an + * Action Rule, or as part of an Action Set List. If the maximum number of + * action sets have already been allocated then the command will fail with + * MC_CMD_ERR_ENOSPC.
+ */ +#define MC_CMD_MAE_ACTION_SET_ALLOC 0x14d +#undef MC_CMD_0x14d_PRIVILEGE_CTG + +#define MC_CMD_0x14d_PRIVILEGE_CTG SRIOV_CTG_MAE + +/* MC_CMD_MAE_ACTION_SET_ALLOC_IN msgrequest */ +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_LEN 44 +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_FLAGS_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_FLAGS_LEN 4 +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN_PUSH_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN_PUSH_LBN 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN_PUSH_WIDTH 2 +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN_POP_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN_POP_LBN 4 +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN_POP_WIDTH 2 +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DECAP_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DECAP_LBN 8 +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DECAP_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_MARK_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_MARK_LBN 9 +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_MARK_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_FLAG_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_FLAG_LBN 10 +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_FLAG_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_NAT_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_NAT_LBN 11 +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_NAT_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_DECR_IP_TTL_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_DECR_IP_TTL_LBN 12 +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_DECR_IP_TTL_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_SET_SRC_MPORT_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_SET_SRC_MPORT_LBN 13 +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_SET_SRC_MPORT_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_SUPPRESS_SELF_DELIVERY_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_SUPPRESS_SELF_DELIVERY_LBN 14 +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_SUPPRESS_SELF_DELIVERY_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_REPLACE_RDP_C_PL_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_REPLACE_RDP_C_PL_LBN 15 +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_REPLACE_RDP_C_PL_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_REPLACE_RDP_D_PL_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_REPLACE_RDP_D_PL_LBN 16 +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_REPLACE_RDP_D_PL_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_LBN 17 +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_SET_NET_CHAN_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_SET_NET_CHAN_LBN 18 +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_SET_NET_CHAN_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_LACP_PLUGIN_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_LACP_PLUGIN_LBN 19 +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_LACP_PLUGIN_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_LACP_INC_L4_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_LACP_INC_L4_LBN 20 +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_LACP_INC_L4_WIDTH 1 +/* If VLAN_PUSH >= 1, TCI value to be inserted as outermost VLAN. */ +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN0_TCI_BE_OFST 4 +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN0_TCI_BE_LEN 2 +/* If VLAN_PUSH >= 1, TPID value to be inserted as outermost VLAN. */ +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN0_PROTO_BE_OFST 6 +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN0_PROTO_BE_LEN 2 +/* If VLAN_PUSH == 2, inner TCI value to be inserted. 
*/ +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN1_TCI_BE_OFST 8 +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN1_TCI_BE_LEN 2 +/* If VLAN_PUSH == 2, inner TPID value to be inserted. */ +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN1_PROTO_BE_OFST 10 +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN1_PROTO_BE_LEN 2 +/* Reserved. Ignored by firmware. Should be set to zero or 0xffffffff. */ +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_RSVD_OFST 12 +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_RSVD_LEN 4 +/* Set to ENCAP_HEADER_ID_NULL to request no encap action */ +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_ENCAP_HEADER_ID_OFST 16 +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_ENCAP_HEADER_ID_LEN 4 +/* An m-port selector identifying the m-port that the modified packet should be + * delivered to. Set to MPORT_SELECTOR_NULL to request no delivery of the + * packet. + */ +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DELIVER_OFST 20 +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DELIVER_LEN 4 +/* Allows an action set to trigger several counter updates. Set to + * MAE_COUNTER_ID_NULL to request no counter action. + */ +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_COUNTER_LIST_ID_OFST 24 +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_COUNTER_LIST_ID_LEN 4 +/* Enum values, see field(s): */ +/* MAE_COUNTER_ID */ +/* If a driver only wished to update one counter within this action set, then + * it can supply a COUNTER_ID instead of allocating a single-element counter + * list. The ID must have been allocated with COUNTER_TYPE=AR. This field + * should be set to MAE_COUNTER_ID_NULL if this behaviour is not required. It + * is not valid to supply a non-NULL value for both COUNTER_LIST_ID and + * COUNTER_ID. + */ +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_COUNTER_ID_OFST 28 +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_COUNTER_ID_LEN 4 +/* Enum values, see field(s): */ +/* MAE_COUNTER_ID */ +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_MARK_VALUE_OFST 32 +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_MARK_VALUE_LEN 4 +/* Set to MAC_ID_NULL to request no source MAC replacement. */ +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_SRC_MAC_ID_OFST 36 +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_SRC_MAC_ID_LEN 4 +/* Set to MAC_ID_NULL to request no destination MAC replacement. */ +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DST_MAC_ID_OFST 40 +#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DST_MAC_ID_LEN 4 + +/* MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN msgrequest: Only supported if + * MAE_ACTION_SET_ALLOC_V2_SUPPORTED is advertised in + * MC_CMD_GET_CAPABILITIES_V7_OUT. 
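A deliver-only action set exercises the NULL conventions spelled out above: every optional resource reference is set to its _NULL value and only DELIVER carries a real m-port selector. A sketch with the same assumed MCDI helpers; it additionally assumes that MCDI_DECLARE_BUF zero-fills the request (so FLAGS, the VLAN fields, RSVD and MARK_VALUE are all zero) and that the response, defined further down this file, returns the new set's ID in an AS_ID dword at offset 0:

/* Allocate a minimal action set that just forwards to 'deliver_mport'
 * (an MAE_MPORT_SELECTOR value): no VLAN ops, no decap/encap, no
 * counters, no MAC rewrites.
 */
static int example_mae_action_set_alloc(struct efx_nic *efx,
					u32 deliver_mport, u32 *as_id)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_ACTION_SET_ALLOC_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_SET_ALLOC_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_ENCAP_HEADER_ID,
		       MC_CMD_MAE_ENCAP_HEADER_ALLOC_OUT_ENCAP_HEADER_ID_NULL);
	MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_COUNTER_LIST_ID,
		       MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_NULL);
	MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_COUNTER_ID,
		       MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_NULL);
	MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_SRC_MAC_ID,
		       MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_MAC_ID_NULL);
	MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_DST_MAC_ID,
		       MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_MAC_ID_NULL);
	MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_DELIVER, deliver_mport);
	rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_SET_ALLOC, inbuf,
			  sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_MAE_ACTION_SET_ALLOC_OUT_LEN)
		return -EIO;
	*as_id = MCDI_DWORD(outbuf, MAE_ACTION_SET_ALLOC_OUT_AS_ID);
	return 0;
}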
+ */ +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_LEN 51 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_FLAGS_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_FLAGS_LEN 4 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN_PUSH_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN_PUSH_LBN 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN_PUSH_WIDTH 2 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN_POP_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN_POP_LBN 4 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN_POP_WIDTH 2 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DECAP_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DECAP_LBN 8 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DECAP_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_MARK_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_MARK_LBN 9 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_MARK_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_FLAG_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_FLAG_LBN 10 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_FLAG_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_NAT_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_NAT_LBN 11 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_NAT_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_DECR_IP_TTL_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_DECR_IP_TTL_LBN 12 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_DECR_IP_TTL_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_SET_SRC_MPORT_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_SET_SRC_MPORT_LBN 13 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_SET_SRC_MPORT_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_SUPPRESS_SELF_DELIVERY_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_SUPPRESS_SELF_DELIVERY_LBN 14 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_SUPPRESS_SELF_DELIVERY_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_C_PL_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_C_PL_LBN 15 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_C_PL_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_D_PL_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_D_PL_LBN 16 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_D_PL_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_LBN 17 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_SET_NET_CHAN_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_SET_NET_CHAN_LBN 18 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_SET_NET_CHAN_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_LACP_PLUGIN_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_LACP_PLUGIN_LBN 19 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_LACP_PLUGIN_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_LACP_INC_L4_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_LACP_INC_L4_LBN 20 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_LACP_INC_L4_WIDTH 1 +/* If VLAN_PUSH >= 1, TCI value to be inserted as outermost VLAN. */ +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN0_TCI_BE_OFST 4 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN0_TCI_BE_LEN 2 +/* If VLAN_PUSH >= 1, TPID value to be inserted as outermost VLAN. */ +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN0_PROTO_BE_OFST 6 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN0_PROTO_BE_LEN 2 +/* If VLAN_PUSH == 2, inner TCI value to be inserted. 
*/ +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN1_TCI_BE_OFST 8 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN1_TCI_BE_LEN 2 +/* If VLAN_PUSH == 2, inner TPID value to be inserted. */ +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN1_PROTO_BE_OFST 10 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN1_PROTO_BE_LEN 2 +/* Reserved. Ignored by firmware. Should be set to zero or 0xffffffff. */ +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_RSVD_OFST 12 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_RSVD_LEN 4 +/* Set to ENCAP_HEADER_ID_NULL to request no encap action */ +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_ENCAP_HEADER_ID_OFST 16 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_ENCAP_HEADER_ID_LEN 4 +/* An m-port selector identifying the m-port that the modified packet should be + * delivered to. Set to MPORT_SELECTOR_NULL to request no delivery of the + * packet. + */ +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DELIVER_OFST 20 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DELIVER_LEN 4 +/* Allows an action set to trigger several counter updates. Set to + * MAE_COUNTER_ID_NULL to request no counter action. + */ +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_COUNTER_LIST_ID_OFST 24 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_COUNTER_LIST_ID_LEN 4 +/* Enum values, see field(s): */ +/* MAE_COUNTER_ID */ +/* If a driver only wished to update one counter within this action set, then + * it can supply a COUNTER_ID instead of allocating a single-element counter + * list. The ID must have been allocated with COUNTER_TYPE=AR. This field + * should be set to MAE_COUNTER_ID_NULL if this behaviour is not required. It + * is not valid to supply a non-NULL value for both COUNTER_LIST_ID and + * COUNTER_ID. + */ +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_COUNTER_ID_OFST 28 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_COUNTER_ID_LEN 4 +/* Enum values, see field(s): */ +/* MAE_COUNTER_ID */ +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_MARK_VALUE_OFST 32 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_MARK_VALUE_LEN 4 +/* Set to MAC_ID_NULL to request no source MAC replacement. */ +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_SRC_MAC_ID_OFST 36 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_SRC_MAC_ID_LEN 4 +/* Set to MAC_ID_NULL to request no destination MAC replacement. */ +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DST_MAC_ID_OFST 40 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DST_MAC_ID_LEN 4 +/* Source m-port ID to be reported for DO_SET_SRC_MPORT action. */ +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_REPORTED_SRC_MPORT_OFST 44 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_REPORTED_SRC_MPORT_LEN 4 +/* Actions for modifying the Differentiated Services Code-Point (DSCP) bits + * within IPv4 and IPv6 headers. 
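+ *
+ * For example (illustrative only): to rewrite the DSCP field to 46
+ * (Expedited Forwarding), set DO_REPLACE_DSCP=1 and DSCP_VALUE=46, so the
+ * 16-bit DSCP_CONTROL word becomes (1 << 2) | (46 << 3) = 0x174.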
+ */ +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DSCP_CONTROL_OFST 48 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DSCP_CONTROL_LEN 2 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_DSCP_ENCAP_COPY_OFST 48 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_DSCP_ENCAP_COPY_LBN 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_DSCP_ENCAP_COPY_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_DSCP_DECAP_COPY_OFST 48 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_DSCP_DECAP_COPY_LBN 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_DSCP_DECAP_COPY_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_DSCP_OFST 48 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_DSCP_LBN 2 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_DSCP_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DSCP_VALUE_OFST 48 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DSCP_VALUE_LBN 3 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DSCP_VALUE_WIDTH 6 +/* Actions for modifying the Explicit Congestion Notification (ECN) bits within + * IPv4 and IPv6 headers. + */ +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_ECN_CONTROL_OFST 50 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_ECN_CONTROL_LEN 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_ECN_ENCAP_COPY_OFST 50 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_ECN_ENCAP_COPY_LBN 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_ECN_ENCAP_COPY_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_ECN_DECAP_COPY_OFST 50 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_ECN_DECAP_COPY_LBN 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_ECN_DECAP_COPY_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_ECN_OFST 50 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_ECN_LBN 2 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_ECN_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_ECN_VALUE_OFST 50 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_ECN_VALUE_LBN 3 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_ECN_VALUE_WIDTH 2 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_ECN_ECT_0_TO_CE_OFST 50 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_ECN_ECT_0_TO_CE_LBN 5 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_ECN_ECT_0_TO_CE_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_ECN_ECT_1_TO_CE_OFST 50 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_ECN_ECT_1_TO_CE_LBN 6 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_ECN_ECT_1_TO_CE_WIDTH 1 + +/* MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN msgrequest: Only supported if + * MAE_ACTION_SET_ALLOC_V3_SUPPORTED is advertised in + * MC_CMD_GET_CAPABILITIES_V10_OUT. 
+ */ +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_LEN 53 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_FLAGS_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_FLAGS_LEN 4 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN_PUSH_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN_PUSH_LBN 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN_PUSH_WIDTH 2 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN_POP_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN_POP_LBN 4 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN_POP_WIDTH 2 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DECAP_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DECAP_LBN 8 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DECAP_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_MARK_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_MARK_LBN 9 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_MARK_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_FLAG_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_FLAG_LBN 10 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_FLAG_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_NAT_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_NAT_LBN 11 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_NAT_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DECR_IP_TTL_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DECR_IP_TTL_LBN 12 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DECR_IP_TTL_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_SET_SRC_MPORT_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_SET_SRC_MPORT_LBN 13 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_SET_SRC_MPORT_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_SUPPRESS_SELF_DELIVERY_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_SUPPRESS_SELF_DELIVERY_LBN 14 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_SUPPRESS_SELF_DELIVERY_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_C_PL_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_C_PL_LBN 15 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_C_PL_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_D_PL_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_D_PL_LBN 16 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_D_PL_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_LBN 17 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_SET_NET_CHAN_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_SET_NET_CHAN_LBN 18 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_SET_NET_CHAN_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_LACP_PLUGIN_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_LACP_PLUGIN_LBN 19 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_LACP_PLUGIN_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_LACP_INC_L4_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_LACP_INC_L4_LBN 20 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_LACP_INC_L4_WIDTH 1 +/* If VLAN_PUSH >= 1, TCI value to be inserted as outermost VLAN. */ +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN0_TCI_BE_OFST 4 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN0_TCI_BE_LEN 2 +/* If VLAN_PUSH >= 1, TPID value to be inserted as outermost VLAN. */ +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN0_PROTO_BE_OFST 6 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN0_PROTO_BE_LEN 2 +/* If VLAN_PUSH == 2, inner TCI value to be inserted. 
*/ +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN1_TCI_BE_OFST 8 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN1_TCI_BE_LEN 2 +/* If VLAN_PUSH == 2, inner TPID value to be inserted. */ +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN1_PROTO_BE_OFST 10 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN1_PROTO_BE_LEN 2 +/* Reserved. Ignored by firmware. Should be set to zero or 0xffffffff. */ +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RSVD_OFST 12 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RSVD_LEN 4 +/* Set to ENCAP_HEADER_ID_NULL to request no encap action */ +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ENCAP_HEADER_ID_OFST 16 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ENCAP_HEADER_ID_LEN 4 +/* An m-port selector identifying the m-port that the modified packet should be + * delivered to. Set to MPORT_SELECTOR_NULL to request no delivery of the + * packet. + */ +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DELIVER_OFST 20 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DELIVER_LEN 4 +/* Allows an action set to trigger several counter updates. Set to + * MAE_COUNTER_ID_NULL to request no counter action. + */ +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_COUNTER_LIST_ID_OFST 24 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_COUNTER_LIST_ID_LEN 4 +/* Enum values, see field(s): */ +/* MAE_COUNTER_ID */ +/* If a driver only wished to update one counter within this action set, then + * it can supply a COUNTER_ID instead of allocating a single-element counter + * list. The ID must have been allocated with COUNTER_TYPE=AR. This field + * should be set to MAE_COUNTER_ID_NULL if this behaviour is not required. It + * is not valid to supply a non-NULL value for both COUNTER_LIST_ID and + * COUNTER_ID. + */ +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_COUNTER_ID_OFST 28 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_COUNTER_ID_LEN 4 +/* Enum values, see field(s): */ +/* MAE_COUNTER_ID */ +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_MARK_VALUE_OFST 32 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_MARK_VALUE_LEN 4 +/* Set to MAC_ID_NULL to request no source MAC replacement. */ +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_SRC_MAC_ID_OFST 36 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_SRC_MAC_ID_LEN 4 +/* Set to MAC_ID_NULL to request no destination MAC replacement. */ +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DST_MAC_ID_OFST 40 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DST_MAC_ID_LEN 4 +/* Source m-port ID to be reported for DO_SET_SRC_MPORT action. */ +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_REPORTED_SRC_MPORT_OFST 44 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_REPORTED_SRC_MPORT_LEN 4 +/* Actions for modifying the Differentiated Services Code-Point (DSCP) bits + * within IPv4 and IPv6 headers. 
+ */ +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DSCP_CONTROL_OFST 48 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DSCP_CONTROL_LEN 2 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DSCP_ENCAP_COPY_OFST 48 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DSCP_ENCAP_COPY_LBN 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DSCP_ENCAP_COPY_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DSCP_DECAP_COPY_OFST 48 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DSCP_DECAP_COPY_LBN 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DSCP_DECAP_COPY_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_DSCP_OFST 48 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_DSCP_LBN 2 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_DSCP_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DSCP_VALUE_OFST 48 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DSCP_VALUE_LBN 3 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DSCP_VALUE_WIDTH 6 +/* Actions for modifying the Explicit Congestion Notification (ECN) bits within + * IPv4 and IPv6 headers. + */ +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_CONTROL_OFST 50 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_CONTROL_LEN 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_ECN_ENCAP_COPY_OFST 50 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_ECN_ENCAP_COPY_LBN 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_ECN_ENCAP_COPY_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_ECN_DECAP_COPY_OFST 50 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_ECN_DECAP_COPY_LBN 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_ECN_DECAP_COPY_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_ECN_OFST 50 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_ECN_LBN 2 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_ECN_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_VALUE_OFST 50 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_VALUE_LBN 3 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_VALUE_WIDTH 2 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_ECT_0_TO_CE_OFST 50 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_ECT_0_TO_CE_LBN 5 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_ECT_0_TO_CE_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_ECT_1_TO_CE_OFST 50 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_ECT_1_TO_CE_LBN 6 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_ECT_1_TO_CE_WIDTH 1 +/* Actions for overwriting CH_ROUTE subfields. */ +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_OVERWRITE_OFST 51 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_OVERWRITE_LEN 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_C_PL_OFST 51 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_C_PL_LBN 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_C_PL_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_D_PL_OFST 51 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_D_PL_LBN 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_D_PL_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_PL_CHAN_OFST 51 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_PL_CHAN_LBN 2 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_PL_CHAN_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_OUT_HOST_CHAN_OFST 51 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_OUT_HOST_CHAN_LBN 3 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_OUT_HOST_CHAN_WIDTH 1 +/* Override outgoing CH_VC to network port for DO_SET_NET_CHAN action. Cannot + * be used in conjunction with DO_SET_SRC_MPORT action. 
+ */ +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_NET_CHAN_OFST 52 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_NET_CHAN_LEN 1 + +/* MC_CMD_MAE_ACTION_SET_ALLOC_OUT msgresponse */ +#define MC_CMD_MAE_ACTION_SET_ALLOC_OUT_LEN 4 +/* The MSB of the AS_ID is guaranteed to be clear if the ID is not + * ACTION_SET_ID_NULL. This allows an AS_ID to be distinguished from an ASL_ID + * returned from MC_CMD_MAE_ACTION_SET_LIST_ALLOC. + */ +#define MC_CMD_MAE_ACTION_SET_ALLOC_OUT_AS_ID_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_OUT_AS_ID_LEN 4 +/* enum: An action set ID that is guaranteed never to represent an action set + */ +#define MC_CMD_MAE_ACTION_SET_ALLOC_OUT_ACTION_SET_ID_NULL 0xffffffff + + +/***********************************/ +/* MC_CMD_MAE_ACTION_SET_FREE + */ +#define MC_CMD_MAE_ACTION_SET_FREE 0x14e +#undef MC_CMD_0x14e_PRIVILEGE_CTG + +#define MC_CMD_0x14e_PRIVILEGE_CTG SRIOV_CTG_MAE + +/* MC_CMD_MAE_ACTION_SET_FREE_IN msgrequest */ +#define MC_CMD_MAE_ACTION_SET_FREE_IN_LENMIN 4 +#define MC_CMD_MAE_ACTION_SET_FREE_IN_LENMAX 128 +#define MC_CMD_MAE_ACTION_SET_FREE_IN_LENMAX_MCDI2 128 +#define MC_CMD_MAE_ACTION_SET_FREE_IN_LEN(num) (0+4*(num)) +#define MC_CMD_MAE_ACTION_SET_FREE_IN_AS_ID_NUM(len) (((len)-0)/4) +/* Same semantics as MC_CMD_MAE_COUNTER_FREE */ +#define MC_CMD_MAE_ACTION_SET_FREE_IN_AS_ID_OFST 0 +#define MC_CMD_MAE_ACTION_SET_FREE_IN_AS_ID_LEN 4 +#define MC_CMD_MAE_ACTION_SET_FREE_IN_AS_ID_MINNUM 1 +#define MC_CMD_MAE_ACTION_SET_FREE_IN_AS_ID_MAXNUM 32 +#define MC_CMD_MAE_ACTION_SET_FREE_IN_AS_ID_MAXNUM_MCDI2 32 + +/* MC_CMD_MAE_ACTION_SET_FREE_OUT msgresponse */ +#define MC_CMD_MAE_ACTION_SET_FREE_OUT_LENMIN 4 +#define MC_CMD_MAE_ACTION_SET_FREE_OUT_LENMAX 128 +#define MC_CMD_MAE_ACTION_SET_FREE_OUT_LENMAX_MCDI2 128 +#define MC_CMD_MAE_ACTION_SET_FREE_OUT_LEN(num) (0+4*(num)) +#define MC_CMD_MAE_ACTION_SET_FREE_OUT_FREED_AS_ID_NUM(len) (((len)-0)/4) +/* Same semantics as MC_CMD_MAE_COUNTER_FREE */ +#define MC_CMD_MAE_ACTION_SET_FREE_OUT_FREED_AS_ID_OFST 0 +#define MC_CMD_MAE_ACTION_SET_FREE_OUT_FREED_AS_ID_LEN 4 +#define MC_CMD_MAE_ACTION_SET_FREE_OUT_FREED_AS_ID_MINNUM 1 +#define MC_CMD_MAE_ACTION_SET_FREE_OUT_FREED_AS_ID_MAXNUM 32 +#define MC_CMD_MAE_ACTION_SET_FREE_OUT_FREED_AS_ID_MAXNUM_MCDI2 32 + + +/***********************************/ +/* MC_CMD_MAE_ACTION_SET_LIST_ALLOC + * Allocate an action set list (ASL) that can be referenced by an ID. The ASL + * ID can be used when inserting an action rule, so that for each packet + * matching the rule every action set in the list is applied. If the maximum + * number of ASLs have already been allocated then the command will fail with + * MC_CMD_ERR_ENOSPC. + */ +#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC 0x14f +#undef MC_CMD_0x14f_PRIVILEGE_CTG + +#define MC_CMD_0x14f_PRIVILEGE_CTG SRIOV_CTG_MAE + +/* MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN msgrequest */ +#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_LENMIN 8 +#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_LENMAX 252 +#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_LENMAX_MCDI2 1020 +#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_LEN(num) (4+4*(num)) +#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_AS_IDS_NUM(len) (((len)-4)/4) +/* Number of elements in the AS_IDS field. */ +#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_COUNT_OFST 0 +#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_COUNT_LEN 4 +/* The IDs of the action sets in this list. The last element of this list may + * be the ID of an already allocated ASL. 
In this case the action sets from the + * already allocated ASL will be applied after the action sets supplied by this + * request. This mechanism can be used to reduce resource usage in the case + * where one ASL is a sublist of another ASL. The sublist should be allocated + * first, then the superlist should be allocated by supplying all required + * action set IDs that are not in the sublist followed by the ID of the + * sublist. One sublist can be referenced by multiple superlists. + */ +#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_AS_IDS_OFST 4 +#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_AS_IDS_LEN 4 +#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_AS_IDS_MINNUM 1 +#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_AS_IDS_MAXNUM 62 +#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_AS_IDS_MAXNUM_MCDI2 254 + +/* MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT msgresponse */ +#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_LEN 4 +/* The MSB of the ASL_ID is guaranteed to be set. This allows an ASL_ID to be + * distinguished from an AS_ID returned from MC_CMD_MAE_ACTION_SET_ALLOC. + */ +#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_ASL_ID_OFST 0 +#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_ASL_ID_LEN 4 +/* enum: An action set list ID that is guaranteed never to represent an action + * set list + */ +#define MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_ACTION_SET_LIST_ID_NULL 0xffffffff + + +/***********************************/ +/* MC_CMD_MAE_ACTION_SET_LIST_FREE + * Free match-action-engine redirect_lists + */ +#define MC_CMD_MAE_ACTION_SET_LIST_FREE 0x150 +#undef MC_CMD_0x150_PRIVILEGE_CTG + +#define MC_CMD_0x150_PRIVILEGE_CTG SRIOV_CTG_MAE + +/* MC_CMD_MAE_ACTION_SET_LIST_FREE_IN msgrequest */ +#define MC_CMD_MAE_ACTION_SET_LIST_FREE_IN_LENMIN 4 +#define MC_CMD_MAE_ACTION_SET_LIST_FREE_IN_LENMAX 128 +#define MC_CMD_MAE_ACTION_SET_LIST_FREE_IN_LENMAX_MCDI2 128 +#define MC_CMD_MAE_ACTION_SET_LIST_FREE_IN_LEN(num) (0+4*(num)) +#define MC_CMD_MAE_ACTION_SET_LIST_FREE_IN_ASL_ID_NUM(len) (((len)-0)/4) +/* Same semantics as MC_CMD_MAE_COUNTER_FREE */ +#define MC_CMD_MAE_ACTION_SET_LIST_FREE_IN_ASL_ID_OFST 0 +#define MC_CMD_MAE_ACTION_SET_LIST_FREE_IN_ASL_ID_LEN 4 +#define MC_CMD_MAE_ACTION_SET_LIST_FREE_IN_ASL_ID_MINNUM 1 +#define MC_CMD_MAE_ACTION_SET_LIST_FREE_IN_ASL_ID_MAXNUM 32 +#define MC_CMD_MAE_ACTION_SET_LIST_FREE_IN_ASL_ID_MAXNUM_MCDI2 32 + +/* MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT msgresponse */ +#define MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT_LENMIN 4 +#define MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT_LENMAX 128 +#define MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT_LENMAX_MCDI2 128 +#define MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT_LEN(num) (0+4*(num)) +#define MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT_FREED_ASL_ID_NUM(len) (((len)-0)/4) +/* Same semantics as MC_CMD_MAE_COUNTER_FREE */ +#define MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT_FREED_ASL_ID_OFST 0 +#define MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT_FREED_ASL_ID_LEN 4 +#define MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT_FREED_ASL_ID_MINNUM 1 +#define MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT_FREED_ASL_ID_MAXNUM 32 +#define MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT_FREED_ASL_ID_MAXNUM_MCDI2 32 + + +/***********************************/ +/* MC_CMD_MAE_OUTER_RULE_INSERT + * Inserts an Outer Rule, which controls encapsulation parsing, and may + * influence the Lookup Sequence. If the maximum number of rules have already + * been inserted then the command will fail with MC_CMD_ERR_ENOSPC. 
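+ *
+ * Illustrative usage sketch (not normative): the match criteria are carried
+ * as an MAE_ENC_FIELD_PAIRS image after the fixed 16-byte header, so a
+ * driver holding such an image in a local "match" buffer might build the
+ * request as
+ *
+ *   MCDI_DECLARE_BUF(inbuf,
+ *                    MC_CMD_MAE_OUTER_RULE_INSERT_IN_LEN(sizeof(match)));
+ *   MCDI_SET_DWORD(inbuf, MAE_OUTER_RULE_INSERT_IN_ENCAP_TYPE,
+ *                  MAE_MCDI_ENCAP_TYPE_VXLAN);
+ *   MCDI_SET_DWORD(inbuf, MAE_OUTER_RULE_INSERT_IN_PRIO, prio);
+ *   memcpy(MCDI_PTR(inbuf, MAE_OUTER_RULE_INSERT_IN_FIELD_MATCH_CRITERIA),
+ *          &match, sizeof(match));
+ *
+ * with the new rule's OR_ID then read from MC_CMD_MAE_OUTER_RULE_INSERT_OUT.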
+ */
+#define MC_CMD_MAE_OUTER_RULE_INSERT 0x15a
+#undef MC_CMD_0x15a_PRIVILEGE_CTG
+
+#define MC_CMD_0x15a_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_OUTER_RULE_INSERT_IN msgrequest */
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_LENMIN 16
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_LENMAX 252
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_LENMAX_MCDI2 1020
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_LEN(num) (16+1*(num))
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_FIELD_MATCH_CRITERIA_NUM(len) (((len)-16)/1)
+/* Packets matching the rule will be parsed with this encapsulation. */
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_ENCAP_TYPE_OFST 0
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_ENCAP_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MAE_MCDI_ENCAP_TYPE */
+/* Match priority. Lower values have higher priority. Must be less than
+ * MC_CMD_MAE_GET_CAPS_OUT.ENCAP_PRIOS. If a packet matches two filters with
+ * equal priority then it is unspecified which takes priority.
+ */
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_PRIO_OFST 4
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_PRIO_LEN 4
+/* Deprecated alias for ACTION_CONTROL. */
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_LOOKUP_CONTROL_OFST 8
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_LOOKUP_CONTROL_LEN 4
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_DO_CT_OFST 8
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_DO_CT_LBN 0
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_DO_CT_WIDTH 1
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_CT_VNI_MODE_OFST 8
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_CT_VNI_MODE_LBN 1
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_CT_VNI_MODE_WIDTH 2
+/* Enum values, see field(s): */
+/* MAE_CT_VNI_MODE */
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_DO_COUNT_OFST 8
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_DO_COUNT_LBN 3
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_DO_COUNT_WIDTH 1
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_CT_TCP_FLAGS_INHIBIT_OFST 8
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_CT_TCP_FLAGS_INHIBIT_LBN 4
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_CT_TCP_FLAGS_INHIBIT_WIDTH 1
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_RECIRC_ID_OFST 8
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_RECIRC_ID_LBN 8
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_RECIRC_ID_WIDTH 8
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_CT_DOMAIN_OFST 8
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_CT_DOMAIN_LBN 16
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_CT_DOMAIN_WIDTH 16
+/* This field controls the actions that are performed when a rule is hit. */
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_ACTION_CONTROL_OFST 8
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_ACTION_CONTROL_LEN 4
+/* ID of counter to increment when the rule is hit. Only used if the DO_COUNT
+ * flag is set. The ID must have been allocated with COUNTER_TYPE=OR.
+ */
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_COUNTER_ID_OFST 12
+#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_COUNTER_ID_LEN 4
+/* Structure of the format MAE_ENC_FIELD_PAIRS.
*/ +#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_FIELD_MATCH_CRITERIA_OFST 16 +#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_FIELD_MATCH_CRITERIA_LEN 1 +#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_FIELD_MATCH_CRITERIA_MINNUM 0 +#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_FIELD_MATCH_CRITERIA_MAXNUM 236 +#define MC_CMD_MAE_OUTER_RULE_INSERT_IN_FIELD_MATCH_CRITERIA_MAXNUM_MCDI2 1004 + +/* MC_CMD_MAE_OUTER_RULE_INSERT_OUT msgresponse */ +#define MC_CMD_MAE_OUTER_RULE_INSERT_OUT_LEN 4 +#define MC_CMD_MAE_OUTER_RULE_INSERT_OUT_OR_ID_OFST 0 +#define MC_CMD_MAE_OUTER_RULE_INSERT_OUT_OR_ID_LEN 4 +/* enum: An outer match ID that is guaranteed never to represent an outer match + */ +#define MC_CMD_MAE_OUTER_RULE_INSERT_OUT_OUTER_RULE_ID_NULL 0xffffffff + + +/***********************************/ +/* MC_CMD_MAE_OUTER_RULE_REMOVE + */ +#define MC_CMD_MAE_OUTER_RULE_REMOVE 0x15b +#undef MC_CMD_0x15b_PRIVILEGE_CTG + +#define MC_CMD_0x15b_PRIVILEGE_CTG SRIOV_CTG_MAE + +/* MC_CMD_MAE_OUTER_RULE_REMOVE_IN msgrequest */ +#define MC_CMD_MAE_OUTER_RULE_REMOVE_IN_LENMIN 4 +#define MC_CMD_MAE_OUTER_RULE_REMOVE_IN_LENMAX 128 +#define MC_CMD_MAE_OUTER_RULE_REMOVE_IN_LENMAX_MCDI2 128 +#define MC_CMD_MAE_OUTER_RULE_REMOVE_IN_LEN(num) (0+4*(num)) +#define MC_CMD_MAE_OUTER_RULE_REMOVE_IN_OR_ID_NUM(len) (((len)-0)/4) +/* Same semantics as MC_CMD_MAE_COUNTER_FREE */ +#define MC_CMD_MAE_OUTER_RULE_REMOVE_IN_OR_ID_OFST 0 +#define MC_CMD_MAE_OUTER_RULE_REMOVE_IN_OR_ID_LEN 4 +#define MC_CMD_MAE_OUTER_RULE_REMOVE_IN_OR_ID_MINNUM 1 +#define MC_CMD_MAE_OUTER_RULE_REMOVE_IN_OR_ID_MAXNUM 32 +#define MC_CMD_MAE_OUTER_RULE_REMOVE_IN_OR_ID_MAXNUM_MCDI2 32 + +/* MC_CMD_MAE_OUTER_RULE_REMOVE_OUT msgresponse */ +#define MC_CMD_MAE_OUTER_RULE_REMOVE_OUT_LENMIN 4 +#define MC_CMD_MAE_OUTER_RULE_REMOVE_OUT_LENMAX 128 +#define MC_CMD_MAE_OUTER_RULE_REMOVE_OUT_LENMAX_MCDI2 128 +#define MC_CMD_MAE_OUTER_RULE_REMOVE_OUT_LEN(num) (0+4*(num)) +#define MC_CMD_MAE_OUTER_RULE_REMOVE_OUT_REMOVED_OR_ID_NUM(len) (((len)-0)/4) +/* Same semantics as MC_CMD_MAE_COUNTER_FREE */ +#define MC_CMD_MAE_OUTER_RULE_REMOVE_OUT_REMOVED_OR_ID_OFST 0 +#define MC_CMD_MAE_OUTER_RULE_REMOVE_OUT_REMOVED_OR_ID_LEN 4 +#define MC_CMD_MAE_OUTER_RULE_REMOVE_OUT_REMOVED_OR_ID_MINNUM 1 +#define MC_CMD_MAE_OUTER_RULE_REMOVE_OUT_REMOVED_OR_ID_MAXNUM 32 +#define MC_CMD_MAE_OUTER_RULE_REMOVE_OUT_REMOVED_OR_ID_MAXNUM_MCDI2 32 + + +/***********************************/ +/* MC_CMD_MAE_OUTER_RULE_UPDATE + * Atomically change the response of an Outer Rule. + */ +#define MC_CMD_MAE_OUTER_RULE_UPDATE 0x17d +#undef MC_CMD_0x17d_PRIVILEGE_CTG + +#define MC_CMD_0x17d_PRIVILEGE_CTG SRIOV_CTG_MAE + +/* MC_CMD_MAE_OUTER_RULE_UPDATE_IN msgrequest */ +#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_LEN 16 +/* ID of outer rule to update */ +#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_OR_ID_OFST 0 +#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_OR_ID_LEN 4 +/* Packets matching the rule will be parsed with this encapsulation. */ +#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_ENCAP_TYPE_OFST 4 +#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_ENCAP_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* MAE_MCDI_ENCAP_TYPE */ +/* This field controls the actions that are performed when a rule is hit. 
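+ *
+ * For instance (sketch only), to switch a rule to count-only behaviour a
+ * driver could populate this word as
+ *
+ *   MCDI_POPULATE_DWORD_2(inbuf, MAE_OUTER_RULE_UPDATE_IN_ACTION_CONTROL,
+ *                         MAE_OUTER_RULE_UPDATE_IN_DO_COUNT, 1,
+ *                         MAE_OUTER_RULE_UPDATE_IN_DO_CT, 0);
+ *
+ * remembering that COUNTER_ID must then name a COUNTER_TYPE=OR counter.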
*/ +#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_ACTION_CONTROL_OFST 8 +#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_ACTION_CONTROL_LEN 4 +#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_DO_CT_OFST 8 +#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_DO_CT_LBN 0 +#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_DO_CT_WIDTH 1 +#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_VNI_MODE_OFST 8 +#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_VNI_MODE_LBN 1 +#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_VNI_MODE_WIDTH 2 +/* Enum values, see field(s): */ +/* MAE_CT_VNI_MODE */ +#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_DO_COUNT_OFST 8 +#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_DO_COUNT_LBN 3 +#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_DO_COUNT_WIDTH 1 +#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_TCP_FLAGS_INHIBIT_OFST 8 +#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_TCP_FLAGS_INHIBIT_LBN 4 +#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_TCP_FLAGS_INHIBIT_WIDTH 1 +#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_RECIRC_ID_OFST 8 +#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_RECIRC_ID_LBN 8 +#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_RECIRC_ID_WIDTH 8 +#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_DOMAIN_OFST 8 +#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_DOMAIN_LBN 16 +#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_DOMAIN_WIDTH 16 +/* ID of counter to increment when the rule is hit. Only used if the DO_COUNT + * flag is set. The ID must have been allocated with COUNTER_TYPE=OR. + */ +#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_COUNTER_ID_OFST 12 +#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_COUNTER_ID_LEN 4 + +/* MC_CMD_MAE_OUTER_RULE_UPDATE_OUT msgresponse */ +#define MC_CMD_MAE_OUTER_RULE_UPDATE_OUT_LEN 0 + +/* MAE_ACTION_RULE_RESPONSE structuredef */ +#define MAE_ACTION_RULE_RESPONSE_LEN 16 +#define MAE_ACTION_RULE_RESPONSE_ASL_ID_OFST 0 +#define MAE_ACTION_RULE_RESPONSE_ASL_ID_LEN 4 +#define MAE_ACTION_RULE_RESPONSE_ASL_ID_LBN 0 +#define MAE_ACTION_RULE_RESPONSE_ASL_ID_WIDTH 32 +/* Only one of ASL_ID or AS_ID may have a non-NULL value. */ +#define MAE_ACTION_RULE_RESPONSE_AS_ID_OFST 4 +#define MAE_ACTION_RULE_RESPONSE_AS_ID_LEN 4 +#define MAE_ACTION_RULE_RESPONSE_AS_ID_LBN 32 +#define MAE_ACTION_RULE_RESPONSE_AS_ID_WIDTH 32 +/* Controls lookup flow when this rule is hit. See sub-fields for details. More + * info on the lookup sequence can be found in SF-122976-TC. It is an error to + * set both DO_CT and DO_RECIRC. + */ +#define MAE_ACTION_RULE_RESPONSE_LOOKUP_CONTROL_OFST 8 +#define MAE_ACTION_RULE_RESPONSE_LOOKUP_CONTROL_LEN 4 +#define MAE_ACTION_RULE_RESPONSE_DO_CT_OFST 8 +#define MAE_ACTION_RULE_RESPONSE_DO_CT_LBN 0 +#define MAE_ACTION_RULE_RESPONSE_DO_CT_WIDTH 1 +#define MAE_ACTION_RULE_RESPONSE_DO_RECIRC_OFST 8 +#define MAE_ACTION_RULE_RESPONSE_DO_RECIRC_LBN 1 +#define MAE_ACTION_RULE_RESPONSE_DO_RECIRC_WIDTH 1 +#define MAE_ACTION_RULE_RESPONSE_CT_VNI_MODE_OFST 8 +#define MAE_ACTION_RULE_RESPONSE_CT_VNI_MODE_LBN 2 +#define MAE_ACTION_RULE_RESPONSE_CT_VNI_MODE_WIDTH 2 +/* Enum values, see field(s): */ +/* MAE_CT_VNI_MODE */ +#define MAE_ACTION_RULE_RESPONSE_RECIRC_ID_OFST 8 +#define MAE_ACTION_RULE_RESPONSE_RECIRC_ID_LBN 8 +#define MAE_ACTION_RULE_RESPONSE_RECIRC_ID_WIDTH 8 +#define MAE_ACTION_RULE_RESPONSE_CT_DOMAIN_OFST 8 +#define MAE_ACTION_RULE_RESPONSE_CT_DOMAIN_LBN 16 +#define MAE_ACTION_RULE_RESPONSE_CT_DOMAIN_WIDTH 16 +#define MAE_ACTION_RULE_RESPONSE_LOOKUP_CONTROL_LBN 64 +#define MAE_ACTION_RULE_RESPONSE_LOOKUP_CONTROL_WIDTH 32 +/* Counter ID to increment if DO_CT or DO_RECIRC is set. Must be set to + * COUNTER_ID_NULL otherwise. 
Counter ID must have been allocated with
+ * COUNTER_TYPE=AR.
+ */
+#define MAE_ACTION_RULE_RESPONSE_COUNTER_ID_OFST 12
+#define MAE_ACTION_RULE_RESPONSE_COUNTER_ID_LEN 4
+#define MAE_ACTION_RULE_RESPONSE_COUNTER_ID_LBN 96
+#define MAE_ACTION_RULE_RESPONSE_COUNTER_ID_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_MAE_ACTION_RULE_INSERT
+ * Insert a rule specifying that packets matching a filter are processed
+ * according to a previously allocated action. Masks can be set as indicated
+ * by MC_CMD_MAE_GET_MATCH_FIELD_CAPABILITIES. If the maximum number of rules
+ * has already been inserted then the command will fail with
+ * MC_CMD_ERR_ENOSPC.
+ */
+#define MC_CMD_MAE_ACTION_RULE_INSERT 0x15c
+#undef MC_CMD_0x15c_PRIVILEGE_CTG
+
+#define MC_CMD_0x15c_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_ACTION_RULE_INSERT_IN msgrequest */
+#define MC_CMD_MAE_ACTION_RULE_INSERT_IN_LENMIN 28
+#define MC_CMD_MAE_ACTION_RULE_INSERT_IN_LENMAX 252
+#define MC_CMD_MAE_ACTION_RULE_INSERT_IN_LENMAX_MCDI2 1020
+#define MC_CMD_MAE_ACTION_RULE_INSERT_IN_LEN(num) (28+1*(num))
+#define MC_CMD_MAE_ACTION_RULE_INSERT_IN_MATCH_CRITERIA_NUM(len) (((len)-28)/1)
+/* See MC_CMD_MAE_OUTER_RULE_INSERT_IN/PRIO. */
+#define MC_CMD_MAE_ACTION_RULE_INSERT_IN_PRIO_OFST 0
+#define MC_CMD_MAE_ACTION_RULE_INSERT_IN_PRIO_LEN 4
+/* Structure of the format MAE_ACTION_RULE_RESPONSE */
+#define MC_CMD_MAE_ACTION_RULE_INSERT_IN_RESPONSE_OFST 4
+#define MC_CMD_MAE_ACTION_RULE_INSERT_IN_RESPONSE_LEN 20
+/* Reserved for future use. Must be set to zero. */
+#define MC_CMD_MAE_ACTION_RULE_INSERT_IN_RSVD_OFST 24
+#define MC_CMD_MAE_ACTION_RULE_INSERT_IN_RSVD_LEN 4
+/* Structure of the format MAE_FIELD_MASK_VALUE_PAIRS */
+#define MC_CMD_MAE_ACTION_RULE_INSERT_IN_MATCH_CRITERIA_OFST 28
+#define MC_CMD_MAE_ACTION_RULE_INSERT_IN_MATCH_CRITERIA_LEN 1
+#define MC_CMD_MAE_ACTION_RULE_INSERT_IN_MATCH_CRITERIA_MINNUM 0
+#define MC_CMD_MAE_ACTION_RULE_INSERT_IN_MATCH_CRITERIA_MAXNUM 224
+#define MC_CMD_MAE_ACTION_RULE_INSERT_IN_MATCH_CRITERIA_MAXNUM_MCDI2 992
+
+/* MC_CMD_MAE_ACTION_RULE_INSERT_OUT msgresponse */
+#define MC_CMD_MAE_ACTION_RULE_INSERT_OUT_LEN 4
+#define MC_CMD_MAE_ACTION_RULE_INSERT_OUT_AR_ID_OFST 0
+#define MC_CMD_MAE_ACTION_RULE_INSERT_OUT_AR_ID_LEN 4
+/* enum: An action rule ID that is guaranteed never to represent an action rule
+ */
+#define MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL 0xffffffff
+
+
+/***********************************/
+/* MC_CMD_MAE_ACTION_RULE_UPDATE
+ * Atomically change the response of an action rule. Firmware may return
+ * ENOTSUP, in which case the driver should DELETE/INSERT.
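+ *
+ * A sketch of that fallback (illustrative only; the MCDI layer is assumed
+ * to translate the ENOTSUP response to -EOPNOTSUPP):
+ *
+ *   rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_RULE_UPDATE, inbuf,
+ *                     sizeof(inbuf), NULL, 0, NULL);
+ *   if (rc == -EOPNOTSUPP)
+ *           rc = resync_rule(efx, ar_id, &response);
+ *
+ * where resync_rule() stands for a hypothetical driver helper that issues
+ * MC_CMD_MAE_ACTION_RULE_DELETE followed by MC_CMD_MAE_ACTION_RULE_INSERT
+ * with the new MAE_ACTION_RULE_RESPONSE.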
+ */ +#define MC_CMD_MAE_ACTION_RULE_UPDATE 0x15d +#undef MC_CMD_0x15d_PRIVILEGE_CTG + +#define MC_CMD_0x15d_PRIVILEGE_CTG SRIOV_CTG_MAE + +/* MC_CMD_MAE_ACTION_RULE_UPDATE_IN msgrequest */ +#define MC_CMD_MAE_ACTION_RULE_UPDATE_IN_LEN 24 +/* ID of action rule to update */ +#define MC_CMD_MAE_ACTION_RULE_UPDATE_IN_AR_ID_OFST 0 +#define MC_CMD_MAE_ACTION_RULE_UPDATE_IN_AR_ID_LEN 4 +/* Structure of the format MAE_ACTION_RULE_RESPONSE */ +#define MC_CMD_MAE_ACTION_RULE_UPDATE_IN_RESPONSE_OFST 4 +#define MC_CMD_MAE_ACTION_RULE_UPDATE_IN_RESPONSE_LEN 20 + +/* MC_CMD_MAE_ACTION_RULE_UPDATE_OUT msgresponse */ +#define MC_CMD_MAE_ACTION_RULE_UPDATE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_MAE_ACTION_RULE_DELETE + */ +#define MC_CMD_MAE_ACTION_RULE_DELETE 0x155 +#undef MC_CMD_0x155_PRIVILEGE_CTG + +#define MC_CMD_0x155_PRIVILEGE_CTG SRIOV_CTG_MAE + +/* MC_CMD_MAE_ACTION_RULE_DELETE_IN msgrequest */ +#define MC_CMD_MAE_ACTION_RULE_DELETE_IN_LENMIN 4 +#define MC_CMD_MAE_ACTION_RULE_DELETE_IN_LENMAX 128 +#define MC_CMD_MAE_ACTION_RULE_DELETE_IN_LENMAX_MCDI2 128 +#define MC_CMD_MAE_ACTION_RULE_DELETE_IN_LEN(num) (0+4*(num)) +#define MC_CMD_MAE_ACTION_RULE_DELETE_IN_AR_ID_NUM(len) (((len)-0)/4) +/* Same semantics as MC_CMD_MAE_COUNTER_FREE */ +#define MC_CMD_MAE_ACTION_RULE_DELETE_IN_AR_ID_OFST 0 +#define MC_CMD_MAE_ACTION_RULE_DELETE_IN_AR_ID_LEN 4 +#define MC_CMD_MAE_ACTION_RULE_DELETE_IN_AR_ID_MINNUM 1 +#define MC_CMD_MAE_ACTION_RULE_DELETE_IN_AR_ID_MAXNUM 32 +#define MC_CMD_MAE_ACTION_RULE_DELETE_IN_AR_ID_MAXNUM_MCDI2 32 + +/* MC_CMD_MAE_ACTION_RULE_DELETE_OUT msgresponse */ +#define MC_CMD_MAE_ACTION_RULE_DELETE_OUT_LENMIN 4 +#define MC_CMD_MAE_ACTION_RULE_DELETE_OUT_LENMAX 128 +#define MC_CMD_MAE_ACTION_RULE_DELETE_OUT_LENMAX_MCDI2 128 +#define MC_CMD_MAE_ACTION_RULE_DELETE_OUT_LEN(num) (0+4*(num)) +#define MC_CMD_MAE_ACTION_RULE_DELETE_OUT_DELETED_AR_ID_NUM(len) (((len)-0)/4) +/* Same semantics as MC_CMD_MAE_COUNTER_FREE */ +#define MC_CMD_MAE_ACTION_RULE_DELETE_OUT_DELETED_AR_ID_OFST 0 +#define MC_CMD_MAE_ACTION_RULE_DELETE_OUT_DELETED_AR_ID_LEN 4 +#define MC_CMD_MAE_ACTION_RULE_DELETE_OUT_DELETED_AR_ID_MINNUM 1 +#define MC_CMD_MAE_ACTION_RULE_DELETE_OUT_DELETED_AR_ID_MAXNUM 32 +#define MC_CMD_MAE_ACTION_RULE_DELETE_OUT_DELETED_AR_ID_MAXNUM_MCDI2 32 + + +/***********************************/ +/* MC_CMD_MAE_MPORT_LOOKUP + * Return the m-port corresponding to a selector. + */ +#define MC_CMD_MAE_MPORT_LOOKUP 0x160 +#undef MC_CMD_0x160_PRIVILEGE_CTG + +#define MC_CMD_0x160_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_MAE_MPORT_LOOKUP_IN msgrequest */ +#define MC_CMD_MAE_MPORT_LOOKUP_IN_LEN 4 +#define MC_CMD_MAE_MPORT_LOOKUP_IN_MPORT_SELECTOR_OFST 0 +#define MC_CMD_MAE_MPORT_LOOKUP_IN_MPORT_SELECTOR_LEN 4 + +/* MC_CMD_MAE_MPORT_LOOKUP_OUT msgresponse */ +#define MC_CMD_MAE_MPORT_LOOKUP_OUT_LEN 4 +#define MC_CMD_MAE_MPORT_LOOKUP_OUT_MPORT_ID_OFST 0 +#define MC_CMD_MAE_MPORT_LOOKUP_OUT_MPORT_ID_LEN 4 + + +/***********************************/ +/* MC_CMD_MAE_MPORT_ALLOC + * Allocates a m-port, which can subsequently be used in action rules as a + * match or delivery argument. + */ +#define MC_CMD_MAE_MPORT_ALLOC 0x163 +#undef MC_CMD_0x163_PRIVILEGE_CTG + +#define MC_CMD_0x163_PRIVILEGE_CTG SRIOV_CTG_MAE + +/* MC_CMD_MAE_MPORT_ALLOC_IN msgrequest */ +#define MC_CMD_MAE_MPORT_ALLOC_IN_LEN 20 +/* The type of m-port to allocate. Firmware may return ENOTSUP for certain + * types. 
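+ *
+ * Sketch of allocating an alias m-port that delivers to the caller's own
+ * VNIC (illustrative only; "mport_id", "label", "rc" and "outlen" are
+ * hypothetical locals):
+ *
+ *   MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_MPORT_ALLOC_ALIAS_IN_LEN);
+ *   MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_MPORT_ALLOC_ALIAS_OUT_LEN);
+ *   MCDI_SET_DWORD(inbuf, MAE_MPORT_ALLOC_ALIAS_IN_TYPE,
+ *                  MC_CMD_MAE_MPORT_ALLOC_ALIAS_IN_MPORT_TYPE_ALIAS);
+ *   MCDI_SET_DWORD(inbuf, MAE_MPORT_ALLOC_ALIAS_IN_DELIVER_MPORT,
+ *                  MAE_MPORT_SELECTOR_ASSIGNED);
+ *   rc = efx_mcdi_rpc(efx, MC_CMD_MAE_MPORT_ALLOC, inbuf, sizeof(inbuf),
+ *                     outbuf, sizeof(outbuf), &outlen);
+ *   if (!rc) {
+ *           mport_id = MCDI_DWORD(outbuf, MAE_MPORT_ALLOC_ALIAS_OUT_MPORT_ID);
+ *           label = MCDI_DWORD(outbuf, MAE_MPORT_ALLOC_ALIAS_OUT_LABEL);
+ *   }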
+ */
+#define MC_CMD_MAE_MPORT_ALLOC_IN_TYPE_OFST 0
+#define MC_CMD_MAE_MPORT_ALLOC_IN_TYPE_LEN 4
+/* enum: Traffic can be sent to this type of m-port using an override
+ * descriptor. Traffic received on this type of m-port will go to the VNIC on a
+ * nominated m-port, and will be delivered with metadata identifying the alias
+ * m-port.
+ */
+#define MC_CMD_MAE_MPORT_ALLOC_IN_MPORT_TYPE_ALIAS 0x1
+/* enum: This type of m-port has a VNIC attached. Queues can be created on this
+ * VNIC by specifying the created m-port as an m-port selector at queue
+ * creation time.
+ */
+#define MC_CMD_MAE_MPORT_ALLOC_IN_MPORT_TYPE_VNIC 0x2
+/* 128-bit value for use by the driver. */
+#define MC_CMD_MAE_MPORT_ALLOC_IN_UUID_OFST 4
+#define MC_CMD_MAE_MPORT_ALLOC_IN_UUID_LEN 16
+
+/* MC_CMD_MAE_MPORT_ALLOC_ALIAS_IN msgrequest */
+#define MC_CMD_MAE_MPORT_ALLOC_ALIAS_IN_LEN 24
+/* The type of m-port to allocate. Firmware may return ENOTSUP for certain
+ * types.
+ */
+#define MC_CMD_MAE_MPORT_ALLOC_ALIAS_IN_TYPE_OFST 0
+#define MC_CMD_MAE_MPORT_ALLOC_ALIAS_IN_TYPE_LEN 4
+/* enum: Traffic can be sent to this type of m-port using an override
+ * descriptor. Traffic received on this type of m-port will go to the VNIC on a
+ * nominated m-port, and will be delivered with metadata identifying the alias
+ * m-port.
+ */
+#define MC_CMD_MAE_MPORT_ALLOC_ALIAS_IN_MPORT_TYPE_ALIAS 0x1
+/* enum: This type of m-port has a VNIC attached. Queues can be created on this
+ * VNIC by specifying the created m-port as an m-port selector at queue
+ * creation time.
+ */
+#define MC_CMD_MAE_MPORT_ALLOC_ALIAS_IN_MPORT_TYPE_VNIC 0x2
+/* 128-bit value for use by the driver. */
+#define MC_CMD_MAE_MPORT_ALLOC_ALIAS_IN_UUID_OFST 4
+#define MC_CMD_MAE_MPORT_ALLOC_ALIAS_IN_UUID_LEN 16
+/* An m-port selector identifying the VNIC to which traffic should be
+ * delivered. This must currently be set to MAE_MPORT_SELECTOR_ASSIGNED (i.e.
+ * the m-port assigned to the calling client).
+ */
+#define MC_CMD_MAE_MPORT_ALLOC_ALIAS_IN_DELIVER_MPORT_OFST 20
+#define MC_CMD_MAE_MPORT_ALLOC_ALIAS_IN_DELIVER_MPORT_LEN 4
+
+/* MC_CMD_MAE_MPORT_ALLOC_VNIC_IN msgrequest */
+#define MC_CMD_MAE_MPORT_ALLOC_VNIC_IN_LEN 20
+/* The type of m-port to allocate. Firmware may return ENOTSUP for certain
+ * types.
+ */
+#define MC_CMD_MAE_MPORT_ALLOC_VNIC_IN_TYPE_OFST 0
+#define MC_CMD_MAE_MPORT_ALLOC_VNIC_IN_TYPE_LEN 4
+/* enum: Traffic can be sent to this type of m-port using an override
+ * descriptor. Traffic received on this type of m-port will go to the VNIC on a
+ * nominated m-port, and will be delivered with metadata identifying the alias
+ * m-port.
+ */
+#define MC_CMD_MAE_MPORT_ALLOC_VNIC_IN_MPORT_TYPE_ALIAS 0x1
+/* enum: This type of m-port has a VNIC attached. Queues can be created on this
+ * VNIC by specifying the created m-port as an m-port selector at queue
+ * creation time.
+ */
+#define MC_CMD_MAE_MPORT_ALLOC_VNIC_IN_MPORT_TYPE_VNIC 0x2
+/* 128-bit value for use by the driver. */
+#define MC_CMD_MAE_MPORT_ALLOC_VNIC_IN_UUID_OFST 4
+#define MC_CMD_MAE_MPORT_ALLOC_VNIC_IN_UUID_LEN 16
+
+/* MC_CMD_MAE_MPORT_ALLOC_OUT msgresponse */
+#define MC_CMD_MAE_MPORT_ALLOC_OUT_LEN 4
+/* ID of newly-allocated m-port. */
+#define MC_CMD_MAE_MPORT_ALLOC_OUT_MPORT_ID_OFST 0
+#define MC_CMD_MAE_MPORT_ALLOC_OUT_MPORT_ID_LEN 4
+
+/* MC_CMD_MAE_MPORT_ALLOC_ALIAS_OUT msgresponse */
+#define MC_CMD_MAE_MPORT_ALLOC_ALIAS_OUT_LEN 24
+/* ID of newly-allocated m-port.
*/
+#define MC_CMD_MAE_MPORT_ALLOC_ALIAS_OUT_MPORT_ID_OFST 0
+#define MC_CMD_MAE_MPORT_ALLOC_ALIAS_OUT_MPORT_ID_LEN 4
+/* A value that will appear in the packet metadata for any packets delivered
+ * using an alias type m-port. This value is guaranteed unique on the VNIC
+ * being delivered to, and is guaranteed not to exceed the range of values
+ * representable in the relevant metadata field.
+ */
+#define MC_CMD_MAE_MPORT_ALLOC_ALIAS_OUT_LABEL_OFST 20
+#define MC_CMD_MAE_MPORT_ALLOC_ALIAS_OUT_LABEL_LEN 4
+
+/* MC_CMD_MAE_MPORT_ALLOC_VNIC_OUT msgresponse */
+#define MC_CMD_MAE_MPORT_ALLOC_VNIC_OUT_LEN 4
+/* ID of newly-allocated m-port. */
+#define MC_CMD_MAE_MPORT_ALLOC_VNIC_OUT_MPORT_ID_OFST 0
+#define MC_CMD_MAE_MPORT_ALLOC_VNIC_OUT_MPORT_ID_LEN 4
+
+
+/***********************************/
+/* MC_CMD_MAE_MPORT_FREE
+ * Free an m-port which was previously allocated by the driver.
+ */
+#define MC_CMD_MAE_MPORT_FREE 0x164
+#undef MC_CMD_0x164_PRIVILEGE_CTG
+
+#define MC_CMD_0x164_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_MPORT_FREE_IN msgrequest */
+#define MC_CMD_MAE_MPORT_FREE_IN_LEN 4
+/* MPORT_ID as returned by MC_CMD_MAE_MPORT_ALLOC. */
+#define MC_CMD_MAE_MPORT_FREE_IN_MPORT_ID_OFST 0
+#define MC_CMD_MAE_MPORT_FREE_IN_MPORT_ID_LEN 4
+
+/* MC_CMD_MAE_MPORT_FREE_OUT msgresponse */
+#define MC_CMD_MAE_MPORT_FREE_OUT_LEN 0
+
+/* MAE_MPORT_DESC structuredef */
+#define MAE_MPORT_DESC_LEN 52
+#define MAE_MPORT_DESC_MPORT_ID_OFST 0
+#define MAE_MPORT_DESC_MPORT_ID_LEN 4
+#define MAE_MPORT_DESC_MPORT_ID_LBN 0
+#define MAE_MPORT_DESC_MPORT_ID_WIDTH 32
+/* Reserved for future purposes, contains information independent of caller */
+#define MAE_MPORT_DESC_FLAGS_OFST 4
+#define MAE_MPORT_DESC_FLAGS_LEN 4
+#define MAE_MPORT_DESC_FLAGS_LBN 32
+#define MAE_MPORT_DESC_FLAGS_WIDTH 32
+#define MAE_MPORT_DESC_CALLER_FLAGS_OFST 8
+#define MAE_MPORT_DESC_CALLER_FLAGS_LEN 4
+#define MAE_MPORT_DESC_CAN_RECEIVE_ON_OFST 8
+#define MAE_MPORT_DESC_CAN_RECEIVE_ON_LBN 0
+#define MAE_MPORT_DESC_CAN_RECEIVE_ON_WIDTH 1
+#define MAE_MPORT_DESC_CAN_DELIVER_TO_OFST 8
+#define MAE_MPORT_DESC_CAN_DELIVER_TO_LBN 1
+#define MAE_MPORT_DESC_CAN_DELIVER_TO_WIDTH 1
+#define MAE_MPORT_DESC_CAN_DELETE_OFST 8
+#define MAE_MPORT_DESC_CAN_DELETE_LBN 2
+#define MAE_MPORT_DESC_CAN_DELETE_WIDTH 1
+#define MAE_MPORT_DESC_IS_ZOMBIE_OFST 8
+#define MAE_MPORT_DESC_IS_ZOMBIE_LBN 3
+#define MAE_MPORT_DESC_IS_ZOMBIE_WIDTH 1
+#define MAE_MPORT_DESC_CALLER_FLAGS_LBN 64
+#define MAE_MPORT_DESC_CALLER_FLAGS_WIDTH 32
+/* Not the ideal name; it's really the type of thing connected to the m-port */
+#define MAE_MPORT_DESC_MPORT_TYPE_OFST 12
+#define MAE_MPORT_DESC_MPORT_TYPE_LEN 4
+/* enum: Connected to a MAC... */
+#define MAE_MPORT_DESC_MPORT_TYPE_NET_PORT 0x0
+/* enum: Adds metadata and delivers to another m-port */
+#define MAE_MPORT_DESC_MPORT_TYPE_ALIAS 0x1
+/* enum: Connected to a VNIC. */
+#define MAE_MPORT_DESC_MPORT_TYPE_VNIC 0x2
+#define MAE_MPORT_DESC_MPORT_TYPE_LBN 96
+#define MAE_MPORT_DESC_MPORT_TYPE_WIDTH 32
+/* 128-bit value available to drivers for m-port identification.
*/ +#define MAE_MPORT_DESC_UUID_OFST 16 +#define MAE_MPORT_DESC_UUID_LEN 16 +#define MAE_MPORT_DESC_UUID_LBN 128 +#define MAE_MPORT_DESC_UUID_WIDTH 128 +/* Big wadge of space reserved for other common properties */ +#define MAE_MPORT_DESC_RESERVED_OFST 32 +#define MAE_MPORT_DESC_RESERVED_LEN 8 +#define MAE_MPORT_DESC_RESERVED_LO_OFST 32 +#define MAE_MPORT_DESC_RESERVED_LO_LEN 4 +#define MAE_MPORT_DESC_RESERVED_LO_LBN 256 +#define MAE_MPORT_DESC_RESERVED_LO_WIDTH 32 +#define MAE_MPORT_DESC_RESERVED_HI_OFST 36 +#define MAE_MPORT_DESC_RESERVED_HI_LEN 4 +#define MAE_MPORT_DESC_RESERVED_HI_LBN 288 +#define MAE_MPORT_DESC_RESERVED_HI_WIDTH 32 +#define MAE_MPORT_DESC_RESERVED_LBN 256 +#define MAE_MPORT_DESC_RESERVED_WIDTH 64 +/* Logical port index. Only valid when type NET Port. */ +#define MAE_MPORT_DESC_NET_PORT_IDX_OFST 40 +#define MAE_MPORT_DESC_NET_PORT_IDX_LEN 4 +#define MAE_MPORT_DESC_NET_PORT_IDX_LBN 320 +#define MAE_MPORT_DESC_NET_PORT_IDX_WIDTH 32 +/* The m-port delivered to */ +#define MAE_MPORT_DESC_ALIAS_DELIVER_MPORT_ID_OFST 40 +#define MAE_MPORT_DESC_ALIAS_DELIVER_MPORT_ID_LEN 4 +#define MAE_MPORT_DESC_ALIAS_DELIVER_MPORT_ID_LBN 320 +#define MAE_MPORT_DESC_ALIAS_DELIVER_MPORT_ID_WIDTH 32 +/* The type of thing that owns the VNIC */ +#define MAE_MPORT_DESC_VNIC_CLIENT_TYPE_OFST 40 +#define MAE_MPORT_DESC_VNIC_CLIENT_TYPE_LEN 4 +#define MAE_MPORT_DESC_VNIC_CLIENT_TYPE_FUNCTION 0x1 /* enum */ +#define MAE_MPORT_DESC_VNIC_CLIENT_TYPE_PLUGIN 0x2 /* enum */ +#define MAE_MPORT_DESC_VNIC_CLIENT_TYPE_LBN 320 +#define MAE_MPORT_DESC_VNIC_CLIENT_TYPE_WIDTH 32 +/* The PCIe interface on which the function lives. CJK: We need an enumeration + * of interfaces that we extend as new interface (types) appear. This belongs + * elsewhere and should be referenced from here + */ +#define MAE_MPORT_DESC_VNIC_FUNCTION_INTERFACE_OFST 44 +#define MAE_MPORT_DESC_VNIC_FUNCTION_INTERFACE_LEN 4 +#define MAE_MPORT_DESC_VNIC_FUNCTION_INTERFACE_LBN 352 +#define MAE_MPORT_DESC_VNIC_FUNCTION_INTERFACE_WIDTH 32 +#define MAE_MPORT_DESC_VNIC_FUNCTION_PF_IDX_OFST 48 +#define MAE_MPORT_DESC_VNIC_FUNCTION_PF_IDX_LEN 2 +#define MAE_MPORT_DESC_VNIC_FUNCTION_PF_IDX_LBN 384 +#define MAE_MPORT_DESC_VNIC_FUNCTION_PF_IDX_WIDTH 16 +#define MAE_MPORT_DESC_VNIC_FUNCTION_VF_IDX_OFST 50 +#define MAE_MPORT_DESC_VNIC_FUNCTION_VF_IDX_LEN 2 +/* enum: Indicates that the function is a PF */ +#define MAE_MPORT_DESC_VF_IDX_NULL 0xffff +#define MAE_MPORT_DESC_VNIC_FUNCTION_VF_IDX_LBN 400 +#define MAE_MPORT_DESC_VNIC_FUNCTION_VF_IDX_WIDTH 16 +/* Reserved. Should be ignored for now. 
*/ +#define MAE_MPORT_DESC_VNIC_PLUGIN_TBD_OFST 44 +#define MAE_MPORT_DESC_VNIC_PLUGIN_TBD_LEN 4 +#define MAE_MPORT_DESC_VNIC_PLUGIN_TBD_LBN 352 +#define MAE_MPORT_DESC_VNIC_PLUGIN_TBD_WIDTH 32 + +/* MAE_MPORT_DESC_V2 structuredef */ +#define MAE_MPORT_DESC_V2_LEN 56 +#define MAE_MPORT_DESC_V2_MPORT_ID_OFST 0 +#define MAE_MPORT_DESC_V2_MPORT_ID_LEN 4 +#define MAE_MPORT_DESC_V2_MPORT_ID_LBN 0 +#define MAE_MPORT_DESC_V2_MPORT_ID_WIDTH 32 +/* Reserved for future purposes, contains information independent of caller */ +#define MAE_MPORT_DESC_V2_FLAGS_OFST 4 +#define MAE_MPORT_DESC_V2_FLAGS_LEN 4 +#define MAE_MPORT_DESC_V2_FLAGS_LBN 32 +#define MAE_MPORT_DESC_V2_FLAGS_WIDTH 32 +#define MAE_MPORT_DESC_V2_CALLER_FLAGS_OFST 8 +#define MAE_MPORT_DESC_V2_CALLER_FLAGS_LEN 4 +#define MAE_MPORT_DESC_V2_CAN_RECEIVE_ON_OFST 8 +#define MAE_MPORT_DESC_V2_CAN_RECEIVE_ON_LBN 0 +#define MAE_MPORT_DESC_V2_CAN_RECEIVE_ON_WIDTH 1 +#define MAE_MPORT_DESC_V2_CAN_DELIVER_TO_OFST 8 +#define MAE_MPORT_DESC_V2_CAN_DELIVER_TO_LBN 1 +#define MAE_MPORT_DESC_V2_CAN_DELIVER_TO_WIDTH 1 +#define MAE_MPORT_DESC_V2_CAN_DELETE_OFST 8 +#define MAE_MPORT_DESC_V2_CAN_DELETE_LBN 2 +#define MAE_MPORT_DESC_V2_CAN_DELETE_WIDTH 1 +#define MAE_MPORT_DESC_V2_IS_ZOMBIE_OFST 8 +#define MAE_MPORT_DESC_V2_IS_ZOMBIE_LBN 3 +#define MAE_MPORT_DESC_V2_IS_ZOMBIE_WIDTH 1 +#define MAE_MPORT_DESC_V2_CALLER_FLAGS_LBN 64 +#define MAE_MPORT_DESC_V2_CALLER_FLAGS_WIDTH 32 +/* Not the ideal name; it's really the type of thing connected to the m-port */ +#define MAE_MPORT_DESC_V2_MPORT_TYPE_OFST 12 +#define MAE_MPORT_DESC_V2_MPORT_TYPE_LEN 4 +/* enum: Connected to a MAC... */ +#define MAE_MPORT_DESC_V2_MPORT_TYPE_NET_PORT 0x0 +/* enum: Adds metadata and delivers to another m-port */ +#define MAE_MPORT_DESC_V2_MPORT_TYPE_ALIAS 0x1 +/* enum: Connected to a VNIC. */ +#define MAE_MPORT_DESC_V2_MPORT_TYPE_VNIC 0x2 +#define MAE_MPORT_DESC_V2_MPORT_TYPE_LBN 96 +#define MAE_MPORT_DESC_V2_MPORT_TYPE_WIDTH 32 +/* 128-bit value available to drivers for m-port identification. */ +#define MAE_MPORT_DESC_V2_UUID_OFST 16 +#define MAE_MPORT_DESC_V2_UUID_LEN 16 +#define MAE_MPORT_DESC_V2_UUID_LBN 128 +#define MAE_MPORT_DESC_V2_UUID_WIDTH 128 +/* Big wadge of space reserved for other common properties */ +#define MAE_MPORT_DESC_V2_RESERVED_OFST 32 +#define MAE_MPORT_DESC_V2_RESERVED_LEN 8 +#define MAE_MPORT_DESC_V2_RESERVED_LO_OFST 32 +#define MAE_MPORT_DESC_V2_RESERVED_LO_LEN 4 +#define MAE_MPORT_DESC_V2_RESERVED_LO_LBN 256 +#define MAE_MPORT_DESC_V2_RESERVED_LO_WIDTH 32 +#define MAE_MPORT_DESC_V2_RESERVED_HI_OFST 36 +#define MAE_MPORT_DESC_V2_RESERVED_HI_LEN 4 +#define MAE_MPORT_DESC_V2_RESERVED_HI_LBN 288 +#define MAE_MPORT_DESC_V2_RESERVED_HI_WIDTH 32 +#define MAE_MPORT_DESC_V2_RESERVED_LBN 256 +#define MAE_MPORT_DESC_V2_RESERVED_WIDTH 64 +/* Logical port index. Only valid when type NET Port. 
*/
+#define MAE_MPORT_DESC_V2_NET_PORT_IDX_OFST 40
+#define MAE_MPORT_DESC_V2_NET_PORT_IDX_LEN 4
+#define MAE_MPORT_DESC_V2_NET_PORT_IDX_LBN 320
+#define MAE_MPORT_DESC_V2_NET_PORT_IDX_WIDTH 32
+/* The m-port delivered to */
+#define MAE_MPORT_DESC_V2_ALIAS_DELIVER_MPORT_ID_OFST 40
+#define MAE_MPORT_DESC_V2_ALIAS_DELIVER_MPORT_ID_LEN 4
+#define MAE_MPORT_DESC_V2_ALIAS_DELIVER_MPORT_ID_LBN 320
+#define MAE_MPORT_DESC_V2_ALIAS_DELIVER_MPORT_ID_WIDTH 32
+/* The type of thing that owns the VNIC */
+#define MAE_MPORT_DESC_V2_VNIC_CLIENT_TYPE_OFST 40
+#define MAE_MPORT_DESC_V2_VNIC_CLIENT_TYPE_LEN 4
+#define MAE_MPORT_DESC_V2_VNIC_CLIENT_TYPE_FUNCTION 0x1 /* enum */
+#define MAE_MPORT_DESC_V2_VNIC_CLIENT_TYPE_PLUGIN 0x2 /* enum */
+#define MAE_MPORT_DESC_V2_VNIC_CLIENT_TYPE_LBN 320
+#define MAE_MPORT_DESC_V2_VNIC_CLIENT_TYPE_WIDTH 32
+/* The PCIe interface on which the function lives. CJK: We need an enumeration
+ * of interfaces that we extend as new interface (types) appear. This belongs
+ * elsewhere and should be referenced from here
+ */
+#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_INTERFACE_OFST 44
+#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_INTERFACE_LEN 4
+#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_INTERFACE_LBN 352
+#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_INTERFACE_WIDTH 32
+#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_PF_IDX_OFST 48
+#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_PF_IDX_LEN 2
+#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_PF_IDX_LBN 384
+#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_PF_IDX_WIDTH 16
+#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_VF_IDX_OFST 50
+#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_VF_IDX_LEN 2
+/* enum: Indicates that the function is a PF */
+#define MAE_MPORT_DESC_V2_VF_IDX_NULL 0xffff
+#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_VF_IDX_LBN 400
+#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_VF_IDX_WIDTH 16
+/* Reserved. Should be ignored for now. */
+#define MAE_MPORT_DESC_V2_VNIC_PLUGIN_TBD_OFST 44
+#define MAE_MPORT_DESC_V2_VNIC_PLUGIN_TBD_LEN 4
+#define MAE_MPORT_DESC_V2_VNIC_PLUGIN_TBD_LBN 352
+#define MAE_MPORT_DESC_V2_VNIC_PLUGIN_TBD_WIDTH 32
+/* A client handle for the VNIC's owner. Only valid for type VNIC. */
+#define MAE_MPORT_DESC_V2_VNIC_CLIENT_HANDLE_OFST 52
+#define MAE_MPORT_DESC_V2_VNIC_CLIENT_HANDLE_LEN 4
+#define MAE_MPORT_DESC_V2_VNIC_CLIENT_HANDLE_LBN 416
+#define MAE_MPORT_DESC_V2_VNIC_CLIENT_HANDLE_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_MAE_MPORT_ENUMERATE
+ * Deprecated in favour of MAE_MPORT_READ_JOURNAL. Support for this command
+ * will be removed at some future point.
+ */
+#define MC_CMD_MAE_MPORT_ENUMERATE 0x17c
+#undef MC_CMD_0x17c_PRIVILEGE_CTG
+
+#define MC_CMD_0x17c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_MAE_MPORT_ENUMERATE_IN msgrequest */
+#define MC_CMD_MAE_MPORT_ENUMERATE_IN_LEN 0
+
+/* MC_CMD_MAE_MPORT_ENUMERATE_OUT msgresponse */
+#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_LENMIN 8
+#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_LENMAX 252
+#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_LEN(num) (8+1*(num))
+#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_DATA_NUM(len) (((len)-8)/1)
+#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_COUNT_OFST 0
+#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_COUNT_LEN 4
+#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_SIZEOF_MPORT_DESC_OFST 4
+#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_SIZEOF_MPORT_DESC_LEN 4
+/* An array of MAE_MPORT_DESC structures. The MAE_MPORT_DESC structure may
+ * grow in future versions of this command.
Drivers should use a stride of
+ * SIZEOF_MPORT_DESC. Fields beyond SIZEOF_MPORT_DESC are not present.
+ */
+#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_DATA_OFST 8
+#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_DATA_LEN 1
+#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_DATA_MINNUM 0
+#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_DATA_MAXNUM 244
+#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_DATA_MAXNUM_MCDI2 1012
+
+
+/***********************************/
+/* MC_CMD_MAE_MPORT_READ_JOURNAL
+ * Firmware maintains a per-client journal of mport creations and deletions.
+ * This journal is clear-on-read, i.e. repeated calls of this command will
+ * drain the buffer. Whenever the caller resets its function via FLR or
+ * MC_CMD_ENTITY_RESET, the journal is regenerated from a blank start.
+ */
+#define MC_CMD_MAE_MPORT_READ_JOURNAL 0x147
+#undef MC_CMD_0x147_PRIVILEGE_CTG
+
+#define MC_CMD_0x147_PRIVILEGE_CTG SRIOV_CTG_MAE
+
+/* MC_CMD_MAE_MPORT_READ_JOURNAL_IN msgrequest */
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_IN_LEN 4
+/* Any unused flags are reserved and must be set to zero. */
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_IN_FLAGS_OFST 0
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_IN_FLAGS_LEN 4
+
+/* MC_CMD_MAE_MPORT_READ_JOURNAL_OUT msgresponse */
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_LENMIN 12
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_LENMAX 252
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_LEN(num) (12+1*(num))
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_MPORT_DESC_DATA_NUM(len) (((len)-12)/1)
+/* Any unused flags are reserved and must be ignored. */
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_FLAGS_OFST 0
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_FLAGS_LEN 4
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_MORE_OFST 0
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_MORE_LBN 0
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_MORE_WIDTH 1
+/* The number of MAE_MPORT_DESC structures in MPORT_DESC_DATA. May be zero. */
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_MPORT_DESC_COUNT_OFST 4
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_MPORT_DESC_COUNT_LEN 4
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_SIZEOF_MPORT_DESC_OFST 8
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_SIZEOF_MPORT_DESC_LEN 4
+/* An array of MAE_MPORT_DESC structures. The MAE_MPORT_DESC structure may
+ * grow in future versions of this command. Drivers should use a stride of
+ * SIZEOF_MPORT_DESC. Fields beyond SIZEOF_MPORT_DESC are not present.
+ */
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_MPORT_DESC_DATA_OFST 12
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_MPORT_DESC_DATA_LEN 1
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_MPORT_DESC_DATA_MINNUM 0
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_MPORT_DESC_DATA_MAXNUM 240
+#define MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_MPORT_DESC_DATA_MAXNUM_MCDI2 1008
+
+/* TABLE_FIELD_DESCR structuredef: An individual table field descriptor. This
+ * describes the location and properties of one N-bit field within a wider
+ * M-bit key/mask/response value.
+ */
+#define TABLE_FIELD_DESCR_LEN 8
+/* Identifier for this field. */
+#define TABLE_FIELD_DESCR_FIELD_ID_OFST 0
+#define TABLE_FIELD_DESCR_FIELD_ID_LEN 2
+/* Enum values, see field(s): */
+/* TABLE_FIELD_ID */
+#define TABLE_FIELD_DESCR_FIELD_ID_LBN 0
+#define TABLE_FIELD_DESCR_FIELD_ID_WIDTH 16
+/* Lowest (least significant) bit number of the bits of this field.
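+ *
+ * That is, a field with LBN=L and WIDTH=W occupies bits [L, L+W-1] of the
+ * overall key/mask/response value. As a sketch, packing a value "v" into a
+ * little-endian key image "key" (a u8 array) might look like
+ *
+ *   for (b = 0; b < width; b++)
+ *           if (v & (1ULL << b))
+ *                   key[(lbn + b) / 8] |= 1 << ((lbn + b) % 8);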
+/* TABLE_FIELD_DESCR structuredef: An individual table field descriptor. This
+ * describes the location and properties of one N-bit field within a wider
+ * M-bit key/mask/response value.
+ */
+#define TABLE_FIELD_DESCR_LEN 8
+/* Identifier for this field. */
+#define TABLE_FIELD_DESCR_FIELD_ID_OFST 0
+#define TABLE_FIELD_DESCR_FIELD_ID_LEN 2
+/* Enum values, see field(s): */
+/* TABLE_FIELD_ID */
+#define TABLE_FIELD_DESCR_FIELD_ID_LBN 0
+#define TABLE_FIELD_DESCR_FIELD_ID_WIDTH 16
+/* Lowest (least significant) bit number of the bits of this field. */
+#define TABLE_FIELD_DESCR_LBN_OFST 2
+#define TABLE_FIELD_DESCR_LBN_LEN 2
+#define TABLE_FIELD_DESCR_LBN_LBN 16
+#define TABLE_FIELD_DESCR_LBN_WIDTH 16
+/* Width of this field in bits. */
+#define TABLE_FIELD_DESCR_WIDTH_OFST 4
+#define TABLE_FIELD_DESCR_WIDTH_LEN 2
+#define TABLE_FIELD_DESCR_WIDTH_LBN 32
+#define TABLE_FIELD_DESCR_WIDTH_WIDTH 16
+/* The mask type for this field. (Note that masking is relevant to keys; fields
+ * of responses are always reported with the EXACT type.)
+ */
+#define TABLE_FIELD_DESCR_MASK_TYPE_OFST 6
+#define TABLE_FIELD_DESCR_MASK_TYPE_LEN 1
+/* enum: Field must never be selected in the mask. */
+#define TABLE_FIELD_DESCR_MASK_NEVER 0x0
+/* enum: Exact match: field must always be selected in the mask. */
+#define TABLE_FIELD_DESCR_MASK_EXACT 0x1
+/* enum: Ternary match: arbitrary mask bits are allowed. */
+#define TABLE_FIELD_DESCR_MASK_TERNARY 0x2
+/* enum: Whole field match: mask must be all 1 bits, or all 0 bits. */
+#define TABLE_FIELD_DESCR_MASK_WHOLE_FIELD 0x3
+/* enum: Longest prefix match: mask must be 1 bit(s) followed by 0 bit(s). */
+#define TABLE_FIELD_DESCR_MASK_LPM 0x4
+#define TABLE_FIELD_DESCR_MASK_TYPE_LBN 48
+#define TABLE_FIELD_DESCR_MASK_TYPE_WIDTH 8
+/* A version code that allows field semantics to be extended. All fields
+ * currently use version 0.
+ */
+#define TABLE_FIELD_DESCR_SCHEME_OFST 7
+#define TABLE_FIELD_DESCR_SCHEME_LEN 1
+#define TABLE_FIELD_DESCR_SCHEME_LBN 56
+#define TABLE_FIELD_DESCR_SCHEME_WIDTH 8
+
+
+/***********************************/
+/* MC_CMD_TABLE_LIST
+ * Return the list of tables which may be accessed via this table API.
+ */
+#define MC_CMD_TABLE_LIST 0x1c9
+#undef MC_CMD_0x1c9_PRIVILEGE_CTG
+
+#define MC_CMD_0x1c9_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_TABLE_LIST_IN msgrequest */
+#define MC_CMD_TABLE_LIST_IN_LEN 4
+/* Index of the first item to be returned in the TABLE_ID sequence. (Set to 0
+ * for the first call; further calls are only required if the whole sequence
+ * does not fit within the maximum MCDI message size.)
+ */
+#define MC_CMD_TABLE_LIST_IN_FIRST_TABLE_ID_INDEX_OFST 0
+#define MC_CMD_TABLE_LIST_IN_FIRST_TABLE_ID_INDEX_LEN 4
+
+/* MC_CMD_TABLE_LIST_OUT msgresponse */
+#define MC_CMD_TABLE_LIST_OUT_LENMIN 4
+#define MC_CMD_TABLE_LIST_OUT_LENMAX 252
+#define MC_CMD_TABLE_LIST_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_TABLE_LIST_OUT_LEN(num) (4+4*(num))
+#define MC_CMD_TABLE_LIST_OUT_TABLE_ID_NUM(len) (((len)-4)/4)
+/* The total number of tables. */
+#define MC_CMD_TABLE_LIST_OUT_N_TABLES_OFST 0
+#define MC_CMD_TABLE_LIST_OUT_N_TABLES_LEN 4
+/* A sequence of table identifiers. If all N_TABLES items do not fit, further
+ * items can be obtained by repeating the call with a non-zero
+ * FIRST_TABLE_ID_INDEX.
+ */
+#define MC_CMD_TABLE_LIST_OUT_TABLE_ID_OFST 4
+#define MC_CMD_TABLE_LIST_OUT_TABLE_ID_LEN 4
+#define MC_CMD_TABLE_LIST_OUT_TABLE_ID_MINNUM 0
+#define MC_CMD_TABLE_LIST_OUT_TABLE_ID_MAXNUM 62
+#define MC_CMD_TABLE_LIST_OUT_TABLE_ID_MAXNUM_MCDI2 254
+/* Enum values, see field(s): */
+/* TABLE_ID */
+
+
+/***********************************/
+/* MC_CMD_TABLE_DESCRIPTOR
+ * Request the table descriptor for a particular table. This describes
+ * properties of the table and the format of the key and response. May return
+ * EINVAL for unknown table ID.
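+ * Like MC_CMD_TABLE_LIST, the FIELDS output is paged: if all
+ * N_KEY_FIELDS+N_RESP_FIELDS descriptors do not fit in one response, the
+ * caller repeats the call with FIRST_FIELDS_INDEX advanced by the number of
+ * items already received.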
+ */
+#define MC_CMD_TABLE_DESCRIPTOR 0x1ca
+#undef MC_CMD_0x1ca_PRIVILEGE_CTG
+
+#define MC_CMD_0x1ca_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_TABLE_DESCRIPTOR_IN msgrequest */
+#define MC_CMD_TABLE_DESCRIPTOR_IN_LEN 8
+/* Identifier for the table. */
+#define MC_CMD_TABLE_DESCRIPTOR_IN_TABLE_ID_OFST 0
+#define MC_CMD_TABLE_DESCRIPTOR_IN_TABLE_ID_LEN 4
+/* Enum values, see field(s): */
+/* TABLE_ID */
+/* Index of the first item to be returned in the FIELDS sequence. (Set to 0 for
+ * the first call; further calls are only required if the whole sequence does
+ * not fit within the maximum MCDI message size.)
+ */
+#define MC_CMD_TABLE_DESCRIPTOR_IN_FIRST_FIELDS_INDEX_OFST 4
+#define MC_CMD_TABLE_DESCRIPTOR_IN_FIRST_FIELDS_INDEX_LEN 4
+
+/* MC_CMD_TABLE_DESCRIPTOR_OUT msgresponse */
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_LENMIN 28
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_LENMAX 252
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_LEN(num) (20+8*(num))
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_FIELDS_NUM(len) (((len)-20)/8)
+/* Maximum number of entries in this table. */
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_MAX_ENTRIES_OFST 0
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_MAX_ENTRIES_LEN 4
+/* The type of table. (This is really just informational; the important
+ * properties of a table that affect programming can be deduced from other
+ * items in the table or field descriptor.)
+ */
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_TYPE_OFST 4
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_TYPE_LEN 2
+/* enum: Direct table (essentially just an array). Behaves like a BCAM for
+ * programming purposes, where the fact that the key is actually used as an
+ * array index is really just an implementation detail.
+ */
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_TYPE_DIRECT 0x1
+/* enum: BCAM (binary CAM) table: exact match on all key fields. */
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_TYPE_BCAM 0x2
+/* enum: TCAM (ternary CAM) table: matches fields with a mask. Each entry may
+ * have its own different mask.
+ */
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_TYPE_TCAM 0x3
+/* enum: STCAM (semi-TCAM) table: like a TCAM but entries share a limited
+ * number of unique masks.
+ */
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_TYPE_STCAM 0x4
+/* Width of key (and corresponding mask, for TCAM or STCAM) in bits. */
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_KEY_WIDTH_OFST 6
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_KEY_WIDTH_LEN 2
+/* Width of response in bits. */
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_RESP_WIDTH_OFST 8
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_RESP_WIDTH_LEN 2
+/* The total number of fields in the key. */
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_N_KEY_FIELDS_OFST 10
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_N_KEY_FIELDS_LEN 2
+/* The total number of fields in the response. */
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_N_RESP_FIELDS_OFST 12
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_N_RESP_FIELDS_LEN 2
+/* Number of priorities for STCAM or TCAM; otherwise 0. The priority of a table
+ * entry (relevant when more than one masked entry matches) ranges from
+ * 0=highest to N_PRIORITIES-1=lowest.
+ */
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_N_PRIORITIES_OFST 14
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_N_PRIORITIES_LEN 2
+/* Maximum number of masks for STCAM; otherwise 0. */
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_MAX_MASKS_OFST 16
+#define MC_CMD_TABLE_DESCRIPTOR_OUT_MAX_MASKS_LEN 2
+/* Flags.
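+ * ALLOC_MASKS (bit 0): when set, this (STCAM) table uses allocated masks,
+ * and MC_CMD_TABLE_INSERT/UPDATE/DELETE take a MASK_ID in place of inline
+ * mask data.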
*/ +#define MC_CMD_TABLE_DESCRIPTOR_OUT_FLAGS_OFST 18 +#define MC_CMD_TABLE_DESCRIPTOR_OUT_FLAGS_LEN 1 +#define MC_CMD_TABLE_DESCRIPTOR_OUT_ALLOC_MASKS_OFST 18 +#define MC_CMD_TABLE_DESCRIPTOR_OUT_ALLOC_MASKS_LBN 0 +#define MC_CMD_TABLE_DESCRIPTOR_OUT_ALLOC_MASKS_WIDTH 1 +/* Access scheme version code, allowing the method of accessing table entries + * to change semantics in future. A client which does not understand the value + * of this field should assume that it cannot program this table. Currently + * always set to 0 indicating the original MC_CMD_TABLE_INSERT/UPDATE/DELETE + * semantics. + */ +#define MC_CMD_TABLE_DESCRIPTOR_OUT_SCHEME_OFST 19 +#define MC_CMD_TABLE_DESCRIPTOR_OUT_SCHEME_LEN 1 +/* A sequence of TABLE_FIELD_DESCR structures: N_KEY_FIELDS items describing + * the key, followed by N_RESP_FIELDS items describing the response. If all + * N_KEY_FIELDS+N_RESP_FIELDS items do not fit, further items can be obtained + * by repeating the call with a non-zero FIRST_FIELDS_INDEX. + */ +#define MC_CMD_TABLE_DESCRIPTOR_OUT_FIELDS_OFST 20 +#define MC_CMD_TABLE_DESCRIPTOR_OUT_FIELDS_LEN 8 +#define MC_CMD_TABLE_DESCRIPTOR_OUT_FIELDS_LO_OFST 20 +#define MC_CMD_TABLE_DESCRIPTOR_OUT_FIELDS_LO_LEN 4 +#define MC_CMD_TABLE_DESCRIPTOR_OUT_FIELDS_LO_LBN 160 +#define MC_CMD_TABLE_DESCRIPTOR_OUT_FIELDS_LO_WIDTH 32 +#define MC_CMD_TABLE_DESCRIPTOR_OUT_FIELDS_HI_OFST 24 +#define MC_CMD_TABLE_DESCRIPTOR_OUT_FIELDS_HI_LEN 4 +#define MC_CMD_TABLE_DESCRIPTOR_OUT_FIELDS_HI_LBN 192 +#define MC_CMD_TABLE_DESCRIPTOR_OUT_FIELDS_HI_WIDTH 32 +#define MC_CMD_TABLE_DESCRIPTOR_OUT_FIELDS_MINNUM 1 +#define MC_CMD_TABLE_DESCRIPTOR_OUT_FIELDS_MAXNUM 29 +#define MC_CMD_TABLE_DESCRIPTOR_OUT_FIELDS_MAXNUM_MCDI2 125 + + +/***********************************/ +/* MC_CMD_TABLE_INSERT + * Insert a new entry into a table. The entry must not currently exist. May + * return EINVAL for unknown table ID or other bad request parameters, EEXIST + * if the entry already exists, ENOSPC if there is no space or EPERM if the + * operation is not permitted. In case of an error, the additional MCDI error + * argument field returns the raw error code from the underlying CAM driver. + */ +#define MC_CMD_TABLE_INSERT 0x1cd +#undef MC_CMD_0x1cd_PRIVILEGE_CTG + +#define MC_CMD_0x1cd_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_TABLE_INSERT_IN msgrequest */ +#define MC_CMD_TABLE_INSERT_IN_LENMIN 16 +#define MC_CMD_TABLE_INSERT_IN_LENMAX 252 +#define MC_CMD_TABLE_INSERT_IN_LENMAX_MCDI2 1020 +#define MC_CMD_TABLE_INSERT_IN_LEN(num) (12+4*(num)) +#define MC_CMD_TABLE_INSERT_IN_DATA_NUM(len) (((len)-12)/4) +/* Table identifier. */ +#define MC_CMD_TABLE_INSERT_IN_TABLE_ID_OFST 0 +#define MC_CMD_TABLE_INSERT_IN_TABLE_ID_LEN 4 +/* Enum values, see field(s): */ +/* TABLE_ID */ +/* Width in bits of supplied key data (must match table properties). */ +#define MC_CMD_TABLE_INSERT_IN_KEY_WIDTH_OFST 4 +#define MC_CMD_TABLE_INSERT_IN_KEY_WIDTH_LEN 2 +/* Width in bits of supplied mask data (0 for direct/BCAM tables, or for STCAM + * when allocated MASK_ID is used instead). + */ +#define MC_CMD_TABLE_INSERT_IN_MASK_WIDTH_OFST 6 +#define MC_CMD_TABLE_INSERT_IN_MASK_WIDTH_LEN 2 +/* Width in bits of supplied response data (for INSERT and UPDATE operations + * this must match the table properties; for DELETE operations, no response + * data is required and this must be 0). 
+ */
+#define MC_CMD_TABLE_INSERT_IN_RESP_WIDTH_OFST 8
+#define MC_CMD_TABLE_INSERT_IN_RESP_WIDTH_LEN 2
+/* Mask ID for STCAM table - used instead of mask data if the table descriptor
+ * reports ALLOC_MASKS==1. Otherwise set to 0.
+ */
+#define MC_CMD_TABLE_INSERT_IN_MASK_ID_OFST 6
+#define MC_CMD_TABLE_INSERT_IN_MASK_ID_LEN 2
+/* Priority for TCAM or STCAM, in range 0..N_PRIORITIES-1, otherwise 0. */
+#define MC_CMD_TABLE_INSERT_IN_PRIORITY_OFST 8
+#define MC_CMD_TABLE_INSERT_IN_PRIORITY_LEN 2
+/* (32-bit alignment padding - set to 0) */
+#define MC_CMD_TABLE_INSERT_IN_RESERVED_OFST 10
+#define MC_CMD_TABLE_INSERT_IN_RESERVED_LEN 2
+/* Sequence of key, mask (if MASK_WIDTH > 0), and response (if RESP_WIDTH > 0)
+ * data values. Each of these items is logically treated as a single wide N-bit
+ * value, in which the individual fields have been placed within that value per
+ * the LBN and WIDTH information from the table field descriptors. The wide
+ * N-bit value is padded with 0 bits at the MSB end if necessary to make a
+ * multiple of 32 bits. The value is then packed into this command as a
+ * sequence of 32-bit words, bits [31:0] first, then bits [63:32], etc.
+ */
+#define MC_CMD_TABLE_INSERT_IN_DATA_OFST 12
+#define MC_CMD_TABLE_INSERT_IN_DATA_LEN 4
+#define MC_CMD_TABLE_INSERT_IN_DATA_MINNUM 1
+#define MC_CMD_TABLE_INSERT_IN_DATA_MAXNUM 60
+#define MC_CMD_TABLE_INSERT_IN_DATA_MAXNUM_MCDI2 252
+
+/* MC_CMD_TABLE_INSERT_OUT msgresponse */
+#define MC_CMD_TABLE_INSERT_OUT_LEN 0
+
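+/* Worked example of the DATA packing rule above (illustrative only): for a
+ * BCAM table with KEY_WIDTH=72 and RESP_WIDTH=40 (so MASK_WIDTH=0), the
+ * 72-bit key is zero-padded at the MSB end to 96 bits and supplied as three
+ * 32-bit words (key bits [31:0], then [63:32], then [71:64]); the 40-bit
+ * response is padded to 64 bits and supplied as two further words. DATA then
+ * holds five words in total and the request length is
+ * MC_CMD_TABLE_INSERT_IN_LEN(5) = 12 + 4*5 = 32 bytes.
+ */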
+
+/***********************************/
+/* MC_CMD_TABLE_UPDATE
+ * Update an existing entry in a table with a new response value. May return
+ * EINVAL for unknown table ID or other bad request parameters, ENOENT if the
+ * entry does not already exist, or EPERM if the operation is not permitted. In
+ * case of an error, the additional MCDI error argument field returns the raw
+ * error code from the underlying CAM driver.
+ */
+#define MC_CMD_TABLE_UPDATE 0x1ce
+#undef MC_CMD_0x1ce_PRIVILEGE_CTG
+
+#define MC_CMD_0x1ce_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_TABLE_UPDATE_IN msgrequest */
+#define MC_CMD_TABLE_UPDATE_IN_LENMIN 16
+#define MC_CMD_TABLE_UPDATE_IN_LENMAX 252
+#define MC_CMD_TABLE_UPDATE_IN_LENMAX_MCDI2 1020
+#define MC_CMD_TABLE_UPDATE_IN_LEN(num) (12+4*(num))
+#define MC_CMD_TABLE_UPDATE_IN_DATA_NUM(len) (((len)-12)/4)
+/* Table identifier. */
+#define MC_CMD_TABLE_UPDATE_IN_TABLE_ID_OFST 0
+#define MC_CMD_TABLE_UPDATE_IN_TABLE_ID_LEN 4
+/* Enum values, see field(s): */
+/* TABLE_ID */
+/* Width in bits of supplied key data (must match table properties). */
+#define MC_CMD_TABLE_UPDATE_IN_KEY_WIDTH_OFST 4
+#define MC_CMD_TABLE_UPDATE_IN_KEY_WIDTH_LEN 2
+/* Width in bits of supplied mask data (0 for direct/BCAM tables, or for STCAM
+ * when allocated MASK_ID is used instead).
+ */
+#define MC_CMD_TABLE_UPDATE_IN_MASK_WIDTH_OFST 6
+#define MC_CMD_TABLE_UPDATE_IN_MASK_WIDTH_LEN 2
+/* Width in bits of supplied response data (for INSERT and UPDATE operations
+ * this must match the table properties; for DELETE operations, no response
+ * data is required and this must be 0).
+ */
+#define MC_CMD_TABLE_UPDATE_IN_RESP_WIDTH_OFST 8
+#define MC_CMD_TABLE_UPDATE_IN_RESP_WIDTH_LEN 2
+/* Mask ID for STCAM table - used instead of mask data if the table descriptor
+ * reports ALLOC_MASKS==1. Otherwise set to 0.
+ */
+#define MC_CMD_TABLE_UPDATE_IN_MASK_ID_OFST 6
+#define MC_CMD_TABLE_UPDATE_IN_MASK_ID_LEN 2
+/* Priority for TCAM or STCAM, in range 0..N_PRIORITIES-1, otherwise 0. */
+#define MC_CMD_TABLE_UPDATE_IN_PRIORITY_OFST 8
+#define MC_CMD_TABLE_UPDATE_IN_PRIORITY_LEN 2
+/* (32-bit alignment padding - set to 0) */
+#define MC_CMD_TABLE_UPDATE_IN_RESERVED_OFST 10
+#define MC_CMD_TABLE_UPDATE_IN_RESERVED_LEN 2
+/* Sequence of key, mask (if MASK_WIDTH > 0), and response (if RESP_WIDTH > 0)
+ * data values. Each of these items is logically treated as a single wide N-bit
+ * value, in which the individual fields have been placed within that value per
+ * the LBN and WIDTH information from the table field descriptors. The wide
+ * N-bit value is padded with 0 bits at the MSB end if necessary to make a
+ * multiple of 32 bits. The value is then packed into this command as a
+ * sequence of 32-bit words, bits [31:0] first, then bits [63:32], etc.
+ */
+#define MC_CMD_TABLE_UPDATE_IN_DATA_OFST 12
+#define MC_CMD_TABLE_UPDATE_IN_DATA_LEN 4
+#define MC_CMD_TABLE_UPDATE_IN_DATA_MINNUM 1
+#define MC_CMD_TABLE_UPDATE_IN_DATA_MAXNUM 60
+#define MC_CMD_TABLE_UPDATE_IN_DATA_MAXNUM_MCDI2 252
+
+/* MC_CMD_TABLE_UPDATE_OUT msgresponse */
+#define MC_CMD_TABLE_UPDATE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_TABLE_DELETE
+ * Delete an existing entry in a table. May return EINVAL for unknown table ID
+ * or other bad request parameters, ENOENT if the entry does not exist, or
+ * EPERM if the operation is not permitted. In case of an error, the additional
+ * MCDI error argument field returns the raw error code from the underlying CAM
+ * driver.
+ */
+#define MC_CMD_TABLE_DELETE 0x1cf
+#undef MC_CMD_0x1cf_PRIVILEGE_CTG
+
+#define MC_CMD_0x1cf_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_TABLE_DELETE_IN msgrequest */
+#define MC_CMD_TABLE_DELETE_IN_LENMIN 16
+#define MC_CMD_TABLE_DELETE_IN_LENMAX 252
+#define MC_CMD_TABLE_DELETE_IN_LENMAX_MCDI2 1020
+#define MC_CMD_TABLE_DELETE_IN_LEN(num) (12+4*(num))
+#define MC_CMD_TABLE_DELETE_IN_DATA_NUM(len) (((len)-12)/4)
+/* Table identifier. */
+#define MC_CMD_TABLE_DELETE_IN_TABLE_ID_OFST 0
+#define MC_CMD_TABLE_DELETE_IN_TABLE_ID_LEN 4
+/* Enum values, see field(s): */
+/* TABLE_ID */
+/* Width in bits of supplied key data (must match table properties). */
+#define MC_CMD_TABLE_DELETE_IN_KEY_WIDTH_OFST 4
+#define MC_CMD_TABLE_DELETE_IN_KEY_WIDTH_LEN 2
+/* Width in bits of supplied mask data (0 for direct/BCAM tables, or for STCAM
+ * when allocated MASK_ID is used instead).
+ */
+#define MC_CMD_TABLE_DELETE_IN_MASK_WIDTH_OFST 6
+#define MC_CMD_TABLE_DELETE_IN_MASK_WIDTH_LEN 2
+/* Width in bits of supplied response data (for INSERT and UPDATE operations
+ * this must match the table properties; for DELETE operations, no response
+ * data is required and this must be 0).
+ */
+#define MC_CMD_TABLE_DELETE_IN_RESP_WIDTH_OFST 8
+#define MC_CMD_TABLE_DELETE_IN_RESP_WIDTH_LEN 2
+/* Mask ID for STCAM table - used instead of mask data if the table descriptor
+ * reports ALLOC_MASKS==1. Otherwise set to 0.
+ */
+#define MC_CMD_TABLE_DELETE_IN_MASK_ID_OFST 6
+#define MC_CMD_TABLE_DELETE_IN_MASK_ID_LEN 2
+/* Priority for TCAM or STCAM, in range 0..N_PRIORITIES-1, otherwise 0. */
+#define MC_CMD_TABLE_DELETE_IN_PRIORITY_OFST 8
+#define MC_CMD_TABLE_DELETE_IN_PRIORITY_LEN 2
+/* (32-bit alignment padding - set to 0) */
+#define MC_CMD_TABLE_DELETE_IN_RESERVED_OFST 10
+#define MC_CMD_TABLE_DELETE_IN_RESERVED_LEN 2
+/* Sequence of key, mask (if MASK_WIDTH > 0), and response (if RESP_WIDTH > 0)
+ * data values. Each of these items is logically treated as a single wide N-bit
+ * value, in which the individual fields have been placed within that value per
+ * the LBN and WIDTH information from the table field descriptors. The wide
+ * N-bit value is padded with 0 bits at the MSB end if necessary to make a
+ * multiple of 32 bits. The value is then packed into this command as a
+ * sequence of 32-bit words, bits [31:0] first, then bits [63:32], etc.
+ */
+#define MC_CMD_TABLE_DELETE_IN_DATA_OFST 12
+#define MC_CMD_TABLE_DELETE_IN_DATA_LEN 4
+#define MC_CMD_TABLE_DELETE_IN_DATA_MINNUM 1
+#define MC_CMD_TABLE_DELETE_IN_DATA_MAXNUM 60
+#define MC_CMD_TABLE_DELETE_IN_DATA_MAXNUM_MCDI2 252
+
+/* MC_CMD_TABLE_DELETE_OUT msgresponse */
+#define MC_CMD_TABLE_DELETE_OUT_LEN 0
+
+
+#endif /* MCDI_PCOL_H */
diff --git a/src/drivers/net/ethernet/sfc/mcdi_pcol_mae.h b/src/drivers/net/ethernet/sfc/mcdi_pcol_mae.h
new file mode 100644
index 0000000..51089e1
--- /dev/null
+++ b/src/drivers/net/ethernet/sfc/mcdi_pcol_mae.h
@@ -0,0 +1,23 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2019 Solarflare Communications Inc.
+ * Copyright 2019 Xilinx, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef MCDI_PCOL_MAE_H
+#define MCDI_PCOL_MAE_H
+/* MCDI definitions for Match-Action Engine functionality that are
+ * missing from the main mcdi_pcol.h
+ */
+
+/* MC_CMD_MAE_COUNTER_LIST_ALLOC is not (yet) a released API, but the
+ * following value is needed as an argument to MC_CMD_MAE_ACTION_SET_ALLOC.
+ */
+/* enum: A counter ID that is guaranteed never to represent a real counter */
+#define MC_CMD_MAE_COUNTER_LIST_ALLOC_OUT_COUNTER_LIST_ID_NULL 0xffffffff
+
+#endif /* MCDI_PCOL_MAE_H */
diff --git a/src/drivers/net/ethernet/sfc/mcdi_port.c b/src/drivers/net/ethernet/sfc/mcdi_port.c
new file mode 100644
index 0000000..7e19228
--- /dev/null
+++ b/src/drivers/net/ethernet/sfc/mcdi_port.c
@@ -0,0 +1,116 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2009-2017 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+/*
+ * Driver for PHY related operations via MCDI.
+ */
+
+#include <linux/slab.h>
+#include "efx.h"
+#include "mcdi_port.h"
+#include "nic.h"
+#include "efx_common.h"
+#include "selftest.h"
+#include "mcdi_port_common.h"
+
+static void efx_mcdi_fwalert_event(struct efx_nic *efx, efx_qword_t *ev)
+{
+	unsigned int reason, data;
+
+	reason = EFX_QWORD_FIELD(*ev, MCDI_EVENT_FWALERT_REASON);
+	data = EFX_QWORD_FIELD(*ev, MCDI_EVENT_FWALERT_DATA);
+
+	switch (reason) {
+	case MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS:
+		netif_err(efx, hw, efx->net_dev,
+			  "Error - controller firmware has detected a write"
+			  " to an illegal SRAM address\n");
+		break;
+	default:
+		netif_err(efx, hw, efx->net_dev,
+			  "Firmware alert reason %u: 0x%x\n", reason, data);
+	}
+}
+
+bool efx_mcdi_port_process_event(struct efx_channel *channel, efx_qword_t *event,
+				 int *rc, int budget)
+{
+	struct efx_nic *efx = channel->efx;
+	int code = EFX_QWORD_FIELD(*event, MCDI_EVENT_CODE);
+
+	switch (code) {
+	case MCDI_EVENT_CODE_MAC_STATS_DMA:
+		/* MAC stats are gathered lazily. We can ignore this. */
+		return true;
+	case MCDI_EVENT_CODE_FWALERT:
+		efx_mcdi_fwalert_event(efx, event);
+		return true;
+	case MCDI_EVENT_CODE_TX_ERR:
+	case MCDI_EVENT_CODE_RX_ERR:
+		netif_err(efx, hw, efx->net_dev,
+			  "%s DMA error (event: "EFX_QWORD_FMT")\n",
+			  code == MCDI_EVENT_CODE_TX_ERR ? "TX" : "RX",
+			  EFX_QWORD_VAL(*event));
+		efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
+		return true;
+	case MCDI_EVENT_CODE_PTP_FAULT:
+	case MCDI_EVENT_CODE_PTP_PPS:
+	case MCDI_EVENT_CODE_HW_PPS:
+		efx_ptp_event(efx, event);
+		return true;
+	case MCDI_EVENT_CODE_PTP_TIME:
+		efx_time_sync_event(channel, event);
+		return true;
+	}
+
+	return false;
+}
+
+u32 efx_mcdi_phy_get_caps(struct efx_nic *efx)
+{
+	struct efx_mcdi_phy_data *phy_data = efx->phy_data;
+
+	return phy_data->supported_cap;
+}
+
+bool efx_mcdi_mac_check_fault(struct efx_nic *efx)
+{
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
+	size_t outlength;
+	int rc;
+
+	BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
+			  outbuf, sizeof(outbuf), &outlength);
+	if (rc)
+		return true;
+
+	return MCDI_DWORD(outbuf, GET_LINK_OUT_MAC_FAULT) != 0;
+}
+
+int efx_mcdi_port_probe(struct efx_nic *efx)
+{
+	int rc;
+
+	/* Fill out loopback modes and initial link state */
+	rc = efx_mcdi_phy_probe(efx);
+
+	if (rc)
+		return rc;
+
+	return efx_mcdi_mac_init_stats(efx);
+}
+
+void efx_mcdi_port_remove(struct efx_nic *efx)
+{
+	efx_mcdi_port_reconfigure(efx);
+	efx_mcdi_phy_remove(efx);
+	efx_mcdi_mac_fini_stats(efx);
+}
diff --git a/src/drivers/net/ethernet/sfc/mcdi_port.h b/src/drivers/net/ethernet/sfc/mcdi_port.h
new file mode 100644
index 0000000..07863dd
--- /dev/null
+++ b/src/drivers/net/ethernet/sfc/mcdi_port.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2008-2013 Solarflare Communications Inc.
+ * Copyright 2019-2020 Xilinx Inc.
+ */
+
+#ifndef EFX_MCDI_PORT_H
+#define EFX_MCDI_PORT_H
+
+#include "net_driver.h"
+
+u32 efx_mcdi_phy_get_caps(struct efx_nic *efx);
+bool efx_mcdi_mac_check_fault(struct efx_nic *efx);
+int efx_mcdi_port_probe(struct efx_nic *efx);
+void efx_mcdi_port_remove(struct efx_nic *efx);
+
+#endif /* EFX_MCDI_PORT_H */
diff --git a/src/drivers/net/ethernet/sfc/mcdi_port_common.c b/src/drivers/net/ethernet/sfc/mcdi_port_common.c
new file mode 100644
index 0000000..426308d
--- /dev/null
+++ b/src/drivers/net/ethernet/sfc/mcdi_port_common.c
@@ -0,0 +1,2128 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+#ifdef CONFIG_DEBUG_FS
+#include <linux/seq_file.h>
+#endif
+#include "efx.h"
+#include "nic.h"
+#include "efx_common.h"
+#include "mcdi_port_common.h"
+#include "debugfs.h"
+
+static int efx_mcdi_phy_diag_type(struct efx_nic *efx);
+static int efx_mcdi_phy_sff_8472_level(struct efx_nic *efx);
+static u32 efx_mcdi_phy_module_type(struct efx_nic *efx);
+
+
+#ifdef CONFIG_DEBUG_FS
+
+/* DMA all of the phy statistics, and return a single statistic out of the
+ * block. This means we can't view a snapshot of all the statistics, but
+ * they're not populated in zero time anyway.
+ */
+static int efx_mcdi_phy_stats_read(struct seq_file *file, void *data)
+{
+	u8 pos = *((u8 *)data);
+	struct efx_mcdi_phy_data *phy_data =
+		container_of(data, struct efx_mcdi_phy_data, index[pos]);
+	struct efx_nic *efx = phy_data->efx;
+	efx_dword_t *value;
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_PHY_STATS_IN_LEN);
+	int rc;
+
+	MCDI_SET_QWORD(inbuf, PHY_STATS_IN_DMA_ADDR, phy_data->stats_addr);
+	BUILD_BUG_ON(MC_CMD_PHY_STATS_OUT_DMA_LEN != 0);
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_PHY_STATS, inbuf, MC_CMD_PHY_STATS_IN_LEN,
+			  NULL, 0, NULL);
+	if (rc)
+		return rc;
+
+	value = (efx_dword_t *)phy_data->stats + pos;
+
+	seq_printf(file, "%d\n", EFX_DWORD_FIELD(*value, EFX_DWORD_0));
+	return 0;
+}
+
+#define PHY_STAT_PARAMETER(_index, _name)				\
+	[_index] = EFX_NAMED_PARAMETER(_name,				\
+				       struct efx_mcdi_phy_data,	\
+				       index[_index], u8,		\
+				       efx_mcdi_phy_stats_read)
+
+static const struct efx_debugfs_parameter debug_entries[] = {
+	PHY_STAT_PARAMETER(MC_CMD_OUI, oui),
+	PHY_STAT_PARAMETER(MC_CMD_PMA_PMD_LINK_UP, pma_pmd_link_up),
+	PHY_STAT_PARAMETER(MC_CMD_PMA_PMD_RX_FAULT, pma_pmd_rx_fault),
+	PHY_STAT_PARAMETER(MC_CMD_PMA_PMD_TX_FAULT, pma_pmd_tx_fault),
+	PHY_STAT_PARAMETER(MC_CMD_PMA_PMD_SIGNAL, pma_pmd_signal),
+	PHY_STAT_PARAMETER(MC_CMD_PMA_PMD_SNR_A, pma_pmd_snr_a),
+	PHY_STAT_PARAMETER(MC_CMD_PMA_PMD_SNR_B, pma_pmd_snr_b),
+	PHY_STAT_PARAMETER(MC_CMD_PMA_PMD_SNR_C, pma_pmd_snr_c),
+	PHY_STAT_PARAMETER(MC_CMD_PMA_PMD_SNR_D, pma_pmd_snr_d),
+	PHY_STAT_PARAMETER(MC_CMD_PCS_LINK_UP, pcs_link_up),
+	PHY_STAT_PARAMETER(MC_CMD_PCS_RX_FAULT, pcs_rx_fault),
+	PHY_STAT_PARAMETER(MC_CMD_PCS_TX_FAULT, pcs_tx_fault),
+	PHY_STAT_PARAMETER(MC_CMD_PCS_BER, pcs_ber),
+	PHY_STAT_PARAMETER(MC_CMD_PCS_BLOCK_ERRORS, pcs_block_errors),
+	PHY_STAT_PARAMETER(MC_CMD_PHYXS_LINK_UP, phyxs_link_up),
+	PHY_STAT_PARAMETER(MC_CMD_PHYXS_RX_FAULT, phyxs_rx_fault),
+	PHY_STAT_PARAMETER(MC_CMD_PHYXS_TX_FAULT, phyxs_tx_fault),
+	PHY_STAT_PARAMETER(MC_CMD_PHYXS_ALIGN, phyxs_align),
+	PHY_STAT_PARAMETER(MC_CMD_PHYXS_SYNC, phyxs_sync),
+	PHY_STAT_PARAMETER(MC_CMD_AN_LINK_UP, an_link_up),
+	PHY_STAT_PARAMETER(MC_CMD_AN_COMPLETE, an_complete),
+	PHY_STAT_PARAMETER(MC_CMD_AN_10GBT_STATUS, an_10gbt_status),
+	PHY_STAT_PARAMETER(MC_CMD_CL22_LINK_UP, cl22_link_up),
+	{NULL},
+};
+
+static int efx_mcdi_phy_stats_init(struct efx_nic *efx)
+{
+	struct efx_mcdi_phy_data *phy_data = efx->phy_data;
+	int pos, rc;
+
+	/* debug_entries[] must be contiguous */
+	BUILD_BUG_ON(ARRAY_SIZE(debug_entries) != MC_CMD_PHY_NSTATS + 1);
+
+	/* Allocate a DMA buffer for phy stats */
+	phy_data->stats = dma_alloc_coherent(&efx->pci_dev->dev, EFX_PAGE_SIZE,
+					     &phy_data->stats_addr, GFP_ATOMIC);
+	if (phy_data->stats == NULL)
+		return -ENOMEM;
+
+	phy_data->efx = efx;
+	for (pos = 0; pos < MC_CMD_PHY_NSTATS; ++pos)
+		phy_data->index[pos] = pos;
+	rc = efx_extend_debugfs_port(efx, phy_data, ~phy_data->stats_mask,
+				     debug_entries);
+	if (rc < 0)
+		goto fail;
+
+	return 0;
+
+fail:
+	dma_free_coherent(&efx->pci_dev->dev, EFX_PAGE_SIZE, phy_data->stats,
+			  phy_data->stats_addr);
+
+	return rc;
+}
+
+static void efx_mcdi_phy_stats_fini(struct efx_nic *efx)
+{
+	struct efx_mcdi_phy_data *phy_data = efx->phy_data;
+
+	efx_trim_debugfs_port(efx, debug_entries);
+	if (phy_data)
+		dma_free_coherent(&efx->pci_dev->dev, EFX_PAGE_SIZE,
+				  phy_data->stats, phy_data->stats_addr);
+}
+#else /* CONFIG_DEBUG_FS */
+static int efx_mcdi_phy_stats_init(struct efx_nic *efx)
+{
+	return 0;
+}
+
+static void efx_mcdi_phy_stats_fini(struct efx_nic *efx) {}
+#endif /* CONFIG_DEBUG_FS */
+
+static u8 efx_mcdi_link_state_flags(struct efx_link_state *link_state)
+{
+	return (link_state->up ? (1 << MC_CMD_GET_LINK_OUT_LINK_UP_LBN) : 0) |
+	       (link_state->fd ? (1 << MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN) : 0);
+}
+
+static u8 efx_mcdi_link_state_fcntl(struct efx_link_state *link_state)
+{
+	switch (link_state->fc) {
+	case EFX_FC_AUTO | EFX_FC_TX | EFX_FC_RX:
+		return MC_CMD_FCNTL_AUTO;
+	case EFX_FC_TX | EFX_FC_RX:
+		return MC_CMD_FCNTL_BIDIR;
+	case EFX_FC_RX:
+		return MC_CMD_FCNTL_RESPOND;
+	default:
+		WARN_ON_ONCE(1);
+		fallthrough;
+	case 0:
+		return MC_CMD_FCNTL_OFF;
+	}
+}
+
+int efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_data *cfg)
+{
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_CFG_OUT_LEN);
+	size_t outlen;
+	int rc;
+
+	BUILD_BUG_ON(MC_CMD_GET_PHY_CFG_IN_LEN != 0);
+	BUILD_BUG_ON(MC_CMD_GET_PHY_CFG_OUT_NAME_LEN != sizeof(cfg->name));
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_CFG, NULL, 0,
+			  outbuf, sizeof(outbuf), &outlen);
+	if (rc)
+		goto fail;
+
+	if (outlen < MC_CMD_GET_PHY_CFG_OUT_LEN) {
+		rc = -EIO;
+		goto fail;
+	}
+
+	cfg->flags = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_FLAGS);
+	cfg->type = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_TYPE);
+	cfg->supported_cap =
+		MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_SUPPORTED_CAP);
+	cfg->channel = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_CHANNEL);
+	cfg->port = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_PRT);
+	cfg->stats_mask = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_STATS_MASK);
+	memcpy(cfg->name, MCDI_PTR(outbuf, GET_PHY_CFG_OUT_NAME),
+	       sizeof(cfg->name));
+	cfg->media = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_MEDIA_TYPE);
+	cfg->mmd_mask = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_MMD_MASK);
+	memcpy(cfg->revision, MCDI_PTR(outbuf, GET_PHY_CFG_OUT_REVISION),
+	       sizeof(cfg->revision));
+
+	return 0;
+
+fail:
+	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+	return rc;
+}
+
+void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc)
+{
+	efx->wanted_fc = wanted_fc;
+	if (efx->link_advertising[0] & ADVERTISED_Autoneg)
{ + if (wanted_fc & EFX_FC_RX) + efx->link_advertising[0] |= (ADVERTISED_Pause | + ADVERTISED_Asym_Pause); + else + efx->link_advertising[0] &= ~(ADVERTISED_Pause | + ADVERTISED_Asym_Pause); + if (wanted_fc & EFX_FC_TX) + efx->link_advertising[0] ^= ADVERTISED_Asym_Pause; + } +} + +void efx_link_set_advertising(struct efx_nic *efx, + const unsigned long *advertising) +{ + memcpy(efx->link_advertising, advertising, + sizeof(__ETHTOOL_DECLARE_LINK_MODE_MASK())); + if (advertising[0] & ADVERTISED_Autoneg) { + if (advertising[0] & ADVERTISED_Pause) + efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX); + else + efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX); + if (advertising[0] & ADVERTISED_Asym_Pause) + efx->wanted_fc ^= EFX_FC_TX; + } +} + +static void efx_mcdi_set_link_completer(struct efx_nic *efx, + unsigned long cookie, int rc, + efx_dword_t *outbuf, + size_t outlen_actual) +{ + struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; + __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising); + + /* EAGAIN means another MODULECHANGE event came in while we were + * doing the SET_LINK. Ignore the failure, we should be + * trying again shortly. + */ + if (rc == -EAGAIN) + return; + + /* For other failures, nothing we can do except log it */ + if (rc) { + if (net_ratelimit()) + netif_err(efx, link, efx->net_dev, + "Failed to set link settings for new module (%#x)\n", + rc); + return; + } + + mcdi_to_ethtool_linkset(efx, phy_cfg->media, cookie, advertising); + efx_link_set_advertising(efx, advertising); +} + +int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities, + u32 flags, u32 loopback_mode, bool async, u8 seq) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_LINK_IN_V2_LEN); + int rc; + + BUILD_BUG_ON(MC_CMD_SET_LINK_OUT_LEN != 0); + + MCDI_SET_DWORD(inbuf, SET_LINK_IN_V2_CAP, capabilities); + MCDI_SET_DWORD(inbuf, SET_LINK_IN_V2_FLAGS, flags); + MCDI_SET_DWORD(inbuf, SET_LINK_IN_V2_LOOPBACK_MODE, loopback_mode); + MCDI_SET_DWORD(inbuf, SET_LINK_IN_V2_LOOPBACK_SPEED, 0 /* auto */); + MCDI_SET_DWORD(inbuf, SET_LINK_IN_V2_MODULE_SEQ, seq); + + if (async) + rc = efx_mcdi_rpc_async(efx, MC_CMD_SET_LINK, inbuf, + sizeof(inbuf), + efx_mcdi_set_link_completer, + capabilities); + else + rc = efx_mcdi_rpc(efx, MC_CMD_SET_LINK, inbuf, sizeof(inbuf), + NULL, 0, NULL); + return rc; +} + +int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LOOPBACK_MODES_OUT_LEN); + size_t outlen; + int rc; + + rc = efx_mcdi_rpc(efx, MC_CMD_GET_LOOPBACK_MODES, NULL, 0, + outbuf, sizeof(outbuf), &outlen); + if (rc) + goto fail; + + if (outlen < (MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_OFST + + MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LEN)) { + rc = -EIO; + goto fail; + } + + *loopback_modes = MCDI_QWORD(outbuf, GET_LOOPBACK_MODES_OUT_SUGGESTED); + + return 0; + +fail: + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); + return rc; +} + +#define MAP_CAP(mcdi_cap, ethtool_cap) do { \ + if (cap & (1 << MC_CMD_PHY_CAP_##mcdi_cap##_LBN)) \ + SET_CAP(ethtool_cap); \ + cap &= ~(1 << MC_CMD_PHY_CAP_##mcdi_cap##_LBN); \ + } while(0) +#define SET_CAP(ethtool_cap) (result |= SUPPORTED_##ethtool_cap) +static u32 mcdi_to_ethtool_cap(struct efx_nic *efx, u32 media, u32 cap) +{ + u32 result = 0; + + switch (media) { + case MC_CMD_MEDIA_KX4: + SET_CAP(Backplane); + MAP_CAP(1000FDX, 1000baseKX_Full); + MAP_CAP(10000FDX, 10000baseKX4_Full); + MAP_CAP(40000FDX, 40000baseKR4_Full); + break; + + case MC_CMD_MEDIA_XFP: + case MC_CMD_MEDIA_SFP_PLUS: + case MC_CMD_MEDIA_QSFP_PLUS: + 
SET_CAP(FIBRE); + MAP_CAP(1000FDX, 1000baseT_Full); + MAP_CAP(10000FDX, 10000baseT_Full); + MAP_CAP(40000FDX, 40000baseCR4_Full); + break; + + case MC_CMD_MEDIA_BASE_T: + SET_CAP(TP); + MAP_CAP(10HDX, 10baseT_Half); + MAP_CAP(10FDX, 10baseT_Full); + MAP_CAP(100HDX, 100baseT_Half); + MAP_CAP(100FDX, 100baseT_Full); + MAP_CAP(1000HDX, 1000baseT_Half); + MAP_CAP(1000FDX, 1000baseT_Full); + MAP_CAP(10000FDX, 10000baseT_Full); + break; + } + + MAP_CAP(PAUSE, Pause); + MAP_CAP(ASYM, Asym_Pause); + MAP_CAP(AN, Autoneg); + +#ifdef EFX_NOT_UPSTREAM + if (cap & MCDI_PORT_SPEED_CAPS) { + static bool warned; + + if (!warned) { + pci_notice(efx->pci_dev, + "This NIC has link speeds that are not supported by your kernel: %#x\n", + cap); + warned = true; + } + } +#endif + + return result; +} +#undef SET_CAP +#undef MAP_CAP + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_ETHTOOL_LINKSETTINGS) +#define SET_CAP(name) __set_bit(ETHTOOL_LINK_MODE_ ## name ## _BIT, \ + linkset) +#define CHECK_CAP(mcdi_cap) \ +({ \ + u32 bit = cap & (1 << MC_CMD_PHY_CAP_##mcdi_cap##_LBN); \ + cap &= ~(1 << MC_CMD_PHY_CAP_##mcdi_cap##_LBN); \ + bit; \ +}) + +void mcdi_to_ethtool_linkset(struct efx_nic *efx, u32 media, u32 cap, + unsigned long *linkset) +{ + bitmap_zero(linkset, __ETHTOOL_LINK_MODE_MASK_NBITS); + switch (media) { + case MC_CMD_MEDIA_KX4: + SET_CAP(Backplane); + if (CHECK_CAP(1000FDX)) + SET_CAP(1000baseKX_Full); + if (CHECK_CAP(10000FDX)) + SET_CAP(10000baseKX4_Full); + if (CHECK_CAP(40000FDX)) + SET_CAP(40000baseKR4_Full); + break; + + case MC_CMD_MEDIA_XFP: + case MC_CMD_MEDIA_SFP_PLUS: + case MC_CMD_MEDIA_QSFP_PLUS: + SET_CAP(FIBRE); + if (CHECK_CAP(1000FDX)) { + SET_CAP(1000baseT_Full); +#if !defined (EFX_USE_KCOMPAT) || defined (EFX_HAVE_LINK_MODE_1000X) + SET_CAP(1000baseX_Full); +#endif + } +#if !defined (EFX_USE_KCOMPAT) || defined (EFX_HAVE_LINK_MODE_1000X) + if (CHECK_CAP(10000FDX)) { + SET_CAP(10000baseCR_Full); + SET_CAP(10000baseLR_Full); + SET_CAP(10000baseSR_Full); + } +#endif + if (CHECK_CAP(40000FDX)) { + SET_CAP(40000baseCR4_Full); + SET_CAP(40000baseSR4_Full); + } +#if !defined (EFX_USE_KCOMPAT) || defined (EFX_HAVE_LINK_MODE_25_50_100) + if (CHECK_CAP(100000FDX)) { + SET_CAP(100000baseCR4_Full); + SET_CAP(100000baseSR4_Full); + } + if (CHECK_CAP(25000FDX)) { + SET_CAP(25000baseCR_Full); + SET_CAP(25000baseSR_Full); + } + if (CHECK_CAP(50000FDX)) + SET_CAP(50000baseCR2_Full); +#endif + break; + + case MC_CMD_MEDIA_BASE_T: + SET_CAP(TP); + if (CHECK_CAP(10HDX)) + SET_CAP(10baseT_Half); + if (CHECK_CAP(10FDX)) + SET_CAP(10baseT_Full); + if (CHECK_CAP(100HDX)) + SET_CAP(100baseT_Half); + if (CHECK_CAP(100FDX)) + SET_CAP(100baseT_Full); + if (CHECK_CAP(1000HDX)) + SET_CAP(1000baseT_Half); + if (CHECK_CAP(1000FDX)) + SET_CAP(1000baseT_Full); + if (CHECK_CAP(10000FDX)) + SET_CAP(10000baseT_Full); + break; + } + + if (CHECK_CAP(PAUSE)) + SET_CAP(Pause); + if (CHECK_CAP(ASYM)) + SET_CAP(Asym_Pause); + if (CHECK_CAP(AN)) + SET_CAP(Autoneg); + +#ifdef EFX_NOT_UPSTREAM + if (cap & MCDI_PORT_SPEED_CAPS) { + static bool warned; + + if (!warned) { + pci_notice(efx->pci_dev, + "This NIC has link speeds that are not supported by your kernel: %#x\n", + cap); + warned = true; + } + } +#endif +} + +#ifdef EFX_HAVE_LINK_MODE_FEC_BITS +static void mcdi_fec_to_ethtool_linkset(u32 cap, unsigned long *linkset) +{ + if (cap & ((1 << MC_CMD_PHY_CAP_BASER_FEC_LBN) | + (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_LBN))) + SET_CAP(FEC_BASER); + if (cap & (1 << MC_CMD_PHY_CAP_RS_FEC_LBN)) + SET_CAP(FEC_RS); + if (!(cap & ((1 << 
MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_LBN) | + (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN) | + (1 << MC_CMD_PHY_CAP_RS_FEC_REQUESTED_LBN)))) + SET_CAP(FEC_NONE); +} +#endif +#undef SET_CAP +#undef CHECK_CAP +#else +void mcdi_to_ethtool_linkset(struct efx_nic *efx, u32 media, u32 cap, + unsigned long *linkset) +{ + linkset[0] = mcdi_to_ethtool_cap(efx, media, cap); +} +#endif +#undef MAP_CAP + +static u32 ethtool_to_mcdi_cap(u32 cap) +{ + u32 result = 0; + + if (cap & SUPPORTED_10baseT_Half) + result |= (1 << MC_CMD_PHY_CAP_10HDX_LBN); + if (cap & SUPPORTED_10baseT_Full) + result |= (1 << MC_CMD_PHY_CAP_10FDX_LBN); + if (cap & SUPPORTED_100baseT_Half) + result |= (1 << MC_CMD_PHY_CAP_100HDX_LBN); + if (cap & SUPPORTED_100baseT_Full) + result |= (1 << MC_CMD_PHY_CAP_100FDX_LBN); + if (cap & SUPPORTED_1000baseT_Half) + result |= (1 << MC_CMD_PHY_CAP_1000HDX_LBN); + if (cap & (SUPPORTED_1000baseT_Full | SUPPORTED_1000baseKX_Full)) + result |= (1 << MC_CMD_PHY_CAP_1000FDX_LBN); + if (cap & (SUPPORTED_10000baseT_Full | SUPPORTED_10000baseKX4_Full)) + result |= (1 << MC_CMD_PHY_CAP_10000FDX_LBN); + if (cap & (SUPPORTED_40000baseCR4_Full | SUPPORTED_40000baseKR4_Full)) + result |= (1 << MC_CMD_PHY_CAP_40000FDX_LBN); + if (cap & SUPPORTED_Pause) + result |= (1 << MC_CMD_PHY_CAP_PAUSE_LBN); + if (cap & SUPPORTED_Asym_Pause) + result |= (1 << MC_CMD_PHY_CAP_ASYM_LBN); + if (cap & SUPPORTED_Autoneg) + result |= (1 << MC_CMD_PHY_CAP_AN_LBN); + + return result; +} + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_ETHTOOL_LINKSETTINGS) +u32 ethtool_linkset_to_mcdi_cap(const unsigned long *linkset) +{ + u32 result = 0; + + #define TEST_BIT(name) test_bit(ETHTOOL_LINK_MODE_ ## name ## _BIT, \ + linkset) + + if (TEST_BIT(10baseT_Half)) + result |= (1 << MC_CMD_PHY_CAP_10HDX_LBN); + if (TEST_BIT(10baseT_Full)) + result |= (1 << MC_CMD_PHY_CAP_10FDX_LBN); + if (TEST_BIT(100baseT_Half)) + result |= (1 << MC_CMD_PHY_CAP_100HDX_LBN); + if (TEST_BIT(100baseT_Full)) + result |= (1 << MC_CMD_PHY_CAP_100FDX_LBN); + if (TEST_BIT(1000baseT_Half)) + result |= (1 << MC_CMD_PHY_CAP_1000HDX_LBN); +#if !defined (EFX_USE_KCOMPAT) || defined (EFX_HAVE_LINK_MODE_1000X) + if (TEST_BIT(1000baseT_Full) || TEST_BIT(1000baseKX_Full) || + TEST_BIT(1000baseX_Full)) + result |= (1 << MC_CMD_PHY_CAP_1000FDX_LBN); + if (TEST_BIT(10000baseT_Full) || TEST_BIT(10000baseKX4_Full) || + TEST_BIT(10000baseCR_Full) || TEST_BIT(10000baseLR_Full) || + TEST_BIT(10000baseSR_Full)) + result |= (1 << MC_CMD_PHY_CAP_10000FDX_LBN); +#else + if (TEST_BIT(1000baseT_Full) || TEST_BIT(1000baseKX_Full)) + result |= (1 << MC_CMD_PHY_CAP_1000FDX_LBN); + if (TEST_BIT(10000baseT_Full) || TEST_BIT(10000baseKX4_Full)) + result |= (1 << MC_CMD_PHY_CAP_10000FDX_LBN); +#endif + if (TEST_BIT(40000baseCR4_Full) || TEST_BIT(40000baseKR4_Full) || + TEST_BIT(40000baseSR4_Full)) + result |= (1 << MC_CMD_PHY_CAP_40000FDX_LBN); +#if !defined (EFX_USE_KCOMPAT) || defined (EFX_HAVE_LINK_MODE_25_50_100) + if (TEST_BIT(100000baseCR4_Full) || TEST_BIT(100000baseSR4_Full)) + result |= (1 << MC_CMD_PHY_CAP_100000FDX_LBN); + if (TEST_BIT(25000baseCR_Full) || TEST_BIT(25000baseSR_Full)) + result |= (1 << MC_CMD_PHY_CAP_25000FDX_LBN); + if (TEST_BIT(50000baseCR2_Full)) + result |= (1 << MC_CMD_PHY_CAP_50000FDX_LBN); +#endif + if (TEST_BIT(Pause)) + result |= (1 << MC_CMD_PHY_CAP_PAUSE_LBN); + if (TEST_BIT(Asym_Pause)) + result |= (1 << MC_CMD_PHY_CAP_ASYM_LBN); + if (TEST_BIT(Autoneg)) + result |= (1 << MC_CMD_PHY_CAP_AN_LBN); + + #undef TEST_BIT + + return result; +} +#else 
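+/* Kernels without ETHTOOL_LINKSETTINGS only track the legacy 32-bit
+ * advertising mask, so only word 0 of the link-mode bitmap is meaningful
+ * here.
+ */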
+u32 ethtool_linkset_to_mcdi_cap(const unsigned long *linkset) +{ + return ethtool_to_mcdi_cap(linkset[0]); +} +#endif + +u32 efx_get_mcdi_phy_flags(struct efx_nic *efx) +{ + struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; + enum efx_phy_mode mode, supported; + u32 flags; + + /* TODO: Advertise the capabilities supported by this PHY */ + supported = 0; + if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_TXDIS_LBN)) + supported |= PHY_MODE_TX_DISABLED; + if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_LBN)) + supported |= PHY_MODE_LOW_POWER; + if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_POWEROFF_LBN)) + supported |= PHY_MODE_OFF; + + mode = efx->phy_mode & supported; + + flags = 0; + if (mode & PHY_MODE_TX_DISABLED) + flags |= (1 << MC_CMD_SET_LINK_IN_TXDIS_LBN); + if (mode & PHY_MODE_LOW_POWER) + flags |= (1 << MC_CMD_SET_LINK_IN_LOWPOWER_LBN); + if (mode & PHY_MODE_OFF) + flags |= (1 << MC_CMD_SET_LINK_IN_POWEROFF_LBN); + + if (efx->state != STATE_UNINIT && !netif_running(efx->net_dev)) + flags |= (1 << MC_CMD_SET_LINK_IN_LINKDOWN_LBN); + + return flags; +} + +void efx_mcdi_phy_decode_link(struct efx_nic *efx, + struct efx_link_state *link_state, + u32 speed, u32 flags, u32 fcntl, + u32 ld_caps, u32 lp_caps) +{ + switch (fcntl) { + case MC_CMD_FCNTL_AUTO: + WARN_ON(1); /* This is not a link mode */ + link_state->fc = EFX_FC_AUTO | EFX_FC_TX | EFX_FC_RX; + break; + case MC_CMD_FCNTL_BIDIR: + link_state->fc = EFX_FC_TX | EFX_FC_RX; + break; + case MC_CMD_FCNTL_RESPOND: + link_state->fc = EFX_FC_RX; + break; + default: + WARN_ON(1); + fallthrough; + case MC_CMD_FCNTL_OFF: + link_state->fc = 0; + break; + } + + link_state->up = !!(flags & (1 << MC_CMD_GET_LINK_OUT_LINK_UP_LBN)); + link_state->fd = !!(flags & (1 << MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN)); + link_state->speed = speed; + link_state->ld_caps = ld_caps; + link_state->lp_caps = lp_caps; +} + +/* The semantics of the ethtool FEC mode bitmask are not well defined, + * particularly the meaning of combinations of bits. Which means we get to + * define our own semantics, as follows: + * OFF overrides any other bits, and means "disable all FEC" (with the + * exception of 25G KR4/CR4, where it is not possible to reject it if AN + * partner requests it). + * AUTO on its own means use cable requirements and link partner autoneg with + * fw-default preferences for the cable type. + * AUTO and either RS or BASER means use the specified FEC type if cable and + * link partner support it, otherwise autoneg/fw-default. + * RS or BASER alone means use the specified FEC type if cable and link partner + * support it and either requests it, otherwise no FEC. + * Both RS and BASER (whether AUTO or not) means use FEC if cable and link + * partner support it, preferring RS to BASER. 
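+ * For example: requesting RS|BASER on a link whose cable and partner support
+ * both selects RS (it is preferred); AUTO alone defers to the firmware
+ * default for the cable type; and OFF disables FEC outright, except where
+ * 25G autonegotiation obliges us to accept it.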
+ */
+#define FEC_BIT(x) (1 << MC_CMD_PHY_CAP_##x##_LBN)
+u32 ethtool_fec_caps_to_mcdi(u32 supported_cap, u32 ethtool_cap)
+{
+	u32 ret = 0;
+
+	if (ethtool_cap & ETHTOOL_FEC_OFF)
+		return 0;
+
+	if (ethtool_cap & ETHTOOL_FEC_AUTO)
+		ret |= (FEC_BIT(BASER_FEC) | FEC_BIT(25G_BASER_FEC) |
+			FEC_BIT(RS_FEC)) &
+		       supported_cap;
+	if (ethtool_cap & ETHTOOL_FEC_RS && supported_cap & FEC_BIT(RS_FEC))
+		ret |= FEC_BIT(RS_FEC) | FEC_BIT(RS_FEC_REQUESTED);
+	if (ethtool_cap & ETHTOOL_FEC_BASER) {
+		if (supported_cap & FEC_BIT(BASER_FEC))
+			ret |= FEC_BIT(BASER_FEC) | FEC_BIT(BASER_FEC_REQUESTED);
+		if (supported_cap & FEC_BIT(25G_BASER_FEC))
+			ret |= FEC_BIT(25G_BASER_FEC) | FEC_BIT(25G_BASER_FEC_REQUESTED);
+	}
+	return ret;
+}
+
+/* Basic validation to ensure that the caps we are going to attempt to
+ * set are in fact supported by the adapter. Note that "no FEC" (i.e.
+ * ETHTOOL_FEC_OFF) is always supported.
+ */
+static int ethtool_fec_supported(u32 supported_cap, u32 ethtool_cap)
+{
+	if (ethtool_cap & ETHTOOL_FEC_OFF)
+		return 0;
+
+	if (ethtool_cap &&
+	    !ethtool_fec_caps_to_mcdi(supported_cap, ethtool_cap))
+		return -EINVAL;
+	return 0;
+}
+
+/* Invert ethtool_fec_caps_to_mcdi. */
+u32 mcdi_fec_caps_to_ethtool(u32 caps, bool is_25g)
+{
+	bool rs = caps & FEC_BIT(RS_FEC),
+	     rs_req = caps & FEC_BIT(RS_FEC_REQUESTED),
+	     baser = is_25g ? caps & FEC_BIT(25G_BASER_FEC)
+			    : caps & FEC_BIT(BASER_FEC),
+	     baser_req = is_25g ? caps & FEC_BIT(25G_BASER_FEC_REQUESTED)
+				: caps & FEC_BIT(BASER_FEC_REQUESTED);
+
+	if (!baser && !rs)
+		return ETHTOOL_FEC_OFF;
+	return (rs_req ? ETHTOOL_FEC_RS : 0) |
+	       (baser_req ? ETHTOOL_FEC_BASER : 0) |
+	       (baser == baser_req && rs == rs_req ? 0 : ETHTOOL_FEC_AUTO);
+}
+#undef FEC_BIT
+
+/* Verify that the forced flow control settings (!EFX_FC_AUTO) are
+ * supported by the link partner.
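+ * (For example, a partner that advertises only Asym_Pause cannot honour a
+ * forced EFX_FC_TX setting.)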
Warn the user if this isn't the case + */ +void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa) +{ + struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; + u32 rmtadv; + + /* The link partner capabilities are only relevant if the + * link supports flow control autonegotiation */ + if (~phy_cfg->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN)) + return; + + /* If flow control autoneg is supported and enabled, then fine */ + if (efx->wanted_fc & EFX_FC_AUTO) + return; + + rmtadv = 0; + if (lpa & (1 << MC_CMD_PHY_CAP_PAUSE_LBN)) + rmtadv |= ADVERTISED_Pause; + if (lpa & (1 << MC_CMD_PHY_CAP_ASYM_LBN)) + rmtadv |= ADVERTISED_Asym_Pause; + + if ((efx->wanted_fc & EFX_FC_TX) && rmtadv == ADVERTISED_Asym_Pause) + netif_err(efx, link, efx->net_dev, + "warning: link partner doesn't support pause frames"); +} + +bool efx_mcdi_phy_poll(struct efx_nic *efx) +{ + struct efx_link_state old_state = efx->link_state; + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN); + int rc; + + WARN_ON(!mutex_is_locked(&efx->mac_lock)); + + BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0); + + rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, + outbuf, sizeof(outbuf), NULL); + if (rc) { + efx->link_state.up = false; + } else { + u32 ld_caps = MCDI_DWORD(outbuf, GET_LINK_OUT_CAP); + u32 lp_caps = ld_caps; + + efx_mcdi_phy_decode_link( + efx, &efx->link_state, + MCDI_DWORD(outbuf, GET_LINK_OUT_LINK_SPEED), + MCDI_DWORD(outbuf, GET_LINK_OUT_FLAGS), + MCDI_DWORD(outbuf, GET_LINK_OUT_FCNTL), + ld_caps, lp_caps); + } + + return !efx_link_state_equal(&efx->link_state, &old_state); +} + +int efx_mcdi_phy_probe(struct efx_nic *efx) +{ + struct efx_mcdi_phy_data *phy_data; + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN); + u32 caps, ld_caps, lp_caps; + int rc; + + /* Initialise and populate phy_data */ + phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL); + if (phy_data == NULL) + return -ENOMEM; + + rc = efx_mcdi_get_phy_cfg(efx, phy_data); + if (rc != 0) + goto fail; + + /* Read initial link advertisement */ + BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0); + rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, + outbuf, sizeof(outbuf), NULL); + if (rc) + goto fail; + /* Fill out nic state */ + efx->phy_data = phy_data; + efx->phy_type = phy_data->type; + strscpy(efx->phy_name, phy_data->name, sizeof(efx->phy_name)); + + caps = ld_caps = lp_caps = MCDI_DWORD(outbuf, GET_LINK_OUT_CAP); +#ifdef EFX_NOT_UPSTREAM + /* If the link isn't advertising any speeds this is almost certainly + * due to an in-distro driver bug, which can have an effect if loaded + * before this out-of-tree driver. It is fixed upstream in: + * 3497ed8c852a ("sfc: report supported link speeds on SFP connections") + * + * Unfortunately this fix is not in a number of released distro kernels. + * In particular: + * RHEL 6.8, kernel 2.6.32-642.el6 (fixed in 2.6.32-642.13.1.el6) + * SLES 12 sp2, kernel 4.4.21-68-default + * + * If no speeds are marked as supported by the link we add all those + * that are supported by the NIC. 
+ */ + if (!(caps & MCDI_PORT_SPEED_CAPS)) + caps |= phy_data->supported_cap & MCDI_PORT_SPEED_CAPS; +#endif + mcdi_to_ethtool_linkset(efx, phy_data->media, caps, + efx->link_advertising); + + /* Assert that we can map efx -> mcdi loopback modes */ + BUILD_BUG_ON(LOOPBACK_NONE != MC_CMD_LOOPBACK_NONE); + BUILD_BUG_ON(LOOPBACK_DATA != MC_CMD_LOOPBACK_DATA); + BUILD_BUG_ON(LOOPBACK_GMAC != MC_CMD_LOOPBACK_GMAC); + BUILD_BUG_ON(LOOPBACK_XGMII != MC_CMD_LOOPBACK_XGMII); + BUILD_BUG_ON(LOOPBACK_XGXS != MC_CMD_LOOPBACK_XGXS); + BUILD_BUG_ON(LOOPBACK_XAUI != MC_CMD_LOOPBACK_XAUI); + BUILD_BUG_ON(LOOPBACK_GMII != MC_CMD_LOOPBACK_GMII); + BUILD_BUG_ON(LOOPBACK_SGMII != MC_CMD_LOOPBACK_SGMII); + BUILD_BUG_ON(LOOPBACK_XGBR != MC_CMD_LOOPBACK_XGBR); + BUILD_BUG_ON(LOOPBACK_XFI != MC_CMD_LOOPBACK_XFI); + BUILD_BUG_ON(LOOPBACK_XAUI_FAR != MC_CMD_LOOPBACK_XAUI_FAR); + BUILD_BUG_ON(LOOPBACK_GMII_FAR != MC_CMD_LOOPBACK_GMII_FAR); + BUILD_BUG_ON(LOOPBACK_SGMII_FAR != MC_CMD_LOOPBACK_SGMII_FAR); + BUILD_BUG_ON(LOOPBACK_XFI_FAR != MC_CMD_LOOPBACK_XFI_FAR); + BUILD_BUG_ON(LOOPBACK_GPHY != MC_CMD_LOOPBACK_GPHY); + BUILD_BUG_ON(LOOPBACK_PHYXS != MC_CMD_LOOPBACK_PHYXS); + BUILD_BUG_ON(LOOPBACK_PCS != MC_CMD_LOOPBACK_PCS); + BUILD_BUG_ON(LOOPBACK_PMAPMD != MC_CMD_LOOPBACK_PMAPMD); + BUILD_BUG_ON(LOOPBACK_XPORT != MC_CMD_LOOPBACK_XPORT); + BUILD_BUG_ON(LOOPBACK_XGMII_WS != MC_CMD_LOOPBACK_XGMII_WS); + BUILD_BUG_ON(LOOPBACK_XAUI_WS != MC_CMD_LOOPBACK_XAUI_WS); + BUILD_BUG_ON(LOOPBACK_XAUI_WS_FAR != MC_CMD_LOOPBACK_XAUI_WS_FAR); + BUILD_BUG_ON(LOOPBACK_XAUI_WS_NEAR != MC_CMD_LOOPBACK_XAUI_WS_NEAR); + BUILD_BUG_ON(LOOPBACK_GMII_WS != MC_CMD_LOOPBACK_GMII_WS); + BUILD_BUG_ON(LOOPBACK_XFI_WS != MC_CMD_LOOPBACK_XFI_WS); + BUILD_BUG_ON(LOOPBACK_XFI_WS_FAR != MC_CMD_LOOPBACK_XFI_WS_FAR); + BUILD_BUG_ON(LOOPBACK_PHYXS_WS != MC_CMD_LOOPBACK_PHYXS_WS); + + rc = efx_mcdi_loopback_modes(efx, &efx->loopback_modes); + if (rc != 0) + goto fail; + /* The MC indicates that LOOPBACK_NONE is a valid loopback mode, + * but by convention we don't + */ + efx->loopback_modes &= ~(1 << LOOPBACK_NONE); + + /* Set the initial link mode */ + efx_mcdi_phy_decode_link(efx, &efx->link_state, + MCDI_DWORD(outbuf, GET_LINK_OUT_LINK_SPEED), + MCDI_DWORD(outbuf, GET_LINK_OUT_FLAGS), + MCDI_DWORD(outbuf, GET_LINK_OUT_FCNTL), + ld_caps, lp_caps); + + efx->fec_config = mcdi_fec_caps_to_ethtool(caps, + efx->link_state.speed == 25000 || + efx->link_state.speed == 50000); + + /* Default to Autonegotiated flow control if the PHY supports it */ + efx->wanted_fc = EFX_FC_RX | EFX_FC_TX; + if (phy_data->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN)) + efx->wanted_fc |= EFX_FC_AUTO; + efx_link_set_wanted_fc(efx, efx->wanted_fc); + + rc = efx_mcdi_phy_stats_init(efx); + if (rc != 0) + goto fail; + + return 0; + +fail: + kfree(phy_data); + return rc; +} + +void efx_mcdi_phy_remove(struct efx_nic *efx) +{ + struct efx_mcdi_phy_data *phy_data = efx->phy_data; + + efx_mcdi_phy_stats_fini(efx); + efx->phy_data = NULL; + kfree(phy_data); +} + +static u32 ethtool_speed_to_mcdi_cap(bool duplex, u32 speed) +{ + if (duplex) { + switch (speed) { + case 10: return 1 << MC_CMD_PHY_CAP_10FDX_LBN; + case 100: return 1 << MC_CMD_PHY_CAP_100FDX_LBN; + case 1000: return 1 << MC_CMD_PHY_CAP_1000FDX_LBN; + case 10000: return 1 << MC_CMD_PHY_CAP_10000FDX_LBN; + case 40000: return 1 << MC_CMD_PHY_CAP_40000FDX_LBN; + case 100000: return 1 << MC_CMD_PHY_CAP_100000FDX_LBN; + case 25000: return 1 << MC_CMD_PHY_CAP_25000FDX_LBN; + case 50000: return 1 << MC_CMD_PHY_CAP_50000FDX_LBN; + } + 
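+		/* (Speeds above 1G are always full duplex, so they have no
+		 * half-duplex cases below.)
+		 */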
} else { + switch (speed) { + case 10: return 1 << MC_CMD_PHY_CAP_10HDX_LBN; + case 100: return 1 << MC_CMD_PHY_CAP_100HDX_LBN; + case 1000: return 1 << MC_CMD_PHY_CAP_1000HDX_LBN; + } + } + + return 0; +} + +int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd, + unsigned long *new_adv) +{ + struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; + unsigned int advertising = ecmd->advertising; + u32 caps; + int rc; + + /* Remove flow control settings that the MAC supports + * but that the PHY can't advertise. + */ + if (~phy_cfg->supported_cap & (1 << MC_CMD_PHY_CAP_PAUSE_LBN)) + advertising &= ~ADVERTISED_Pause; + if (~phy_cfg->supported_cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN)) + advertising &= ~ADVERTISED_Asym_Pause; + + if (ecmd->autoneg) + caps = ethtool_to_mcdi_cap(advertising) | + 1 << MC_CMD_PHY_CAP_AN_LBN; + else + caps = ethtool_speed_to_mcdi_cap(ecmd->duplex, + ethtool_cmd_speed(ecmd)); + if (!caps) + return -EINVAL; + + caps |= ethtool_fec_caps_to_mcdi(phy_cfg->supported_cap, + efx->fec_config); + + rc = efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx), + efx->loopback_mode, false, SET_LINK_SEQ_IGNORE); + if (rc) { + if (rc == -EINVAL) + netif_err(efx, link, efx->net_dev, + "invalid link settings: autoneg=%u" + " advertising=%#x speed=%u duplex=%u" + " translated to caps=%#x\n", + ecmd->autoneg, ecmd->advertising, + ecmd->speed, ecmd->duplex, caps); + return rc; + } + + /* Rather than storing the original advertising mask, we + * convert the capabilities we're actually using back to an + * advertising mask so that (1) get_settings() will report + * correct information (2) we can push the capabilities again + * after an MC reset, or recalculate them on module change. + */ + mcdi_to_ethtool_linkset(efx, phy_cfg->media, caps, new_adv); + + return 1; +} + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_ETHTOOL_LINKSETTINGS) +int efx_mcdi_phy_set_ksettings(struct efx_nic *efx, + const struct ethtool_link_ksettings *settings, + unsigned long *advertising) +{ + const struct ethtool_link_settings *base = &settings->base; + struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; + u32 caps; + int rc; + + memcpy(advertising, settings->link_modes.advertising, + sizeof(__ETHTOOL_DECLARE_LINK_MODE_MASK())); + + /* Remove flow control settings that the MAC supports + * but that the PHY can't advertise. 
+ */ + if (~phy_cfg->supported_cap & (1 << MC_CMD_PHY_CAP_PAUSE_LBN)) + __clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertising); + if (~phy_cfg->supported_cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN)) + __clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertising); + + if (base->autoneg) + caps = ethtool_linkset_to_mcdi_cap(advertising) | + 1 << MC_CMD_PHY_CAP_AN_LBN; + else + caps = ethtool_speed_to_mcdi_cap(base->duplex, base->speed); + if (!caps) + return -EINVAL; + + caps |= ethtool_fec_caps_to_mcdi(phy_cfg->supported_cap, + efx->fec_config); + + rc = efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx), + efx->loopback_mode, false, SET_LINK_SEQ_IGNORE); + if (rc) { + if (rc == -EINVAL) + netif_err(efx, link, efx->net_dev, + "invalid link settings: autoneg=%u" + " advertising=%*pb speed=%u duplex=%u" + " translated to caps=%#x\n", + base->autoneg, __ETHTOOL_LINK_MODE_MASK_NBITS, + settings->link_modes.advertising, base->speed, + base->duplex, caps); + return rc; + } + + /* Rather than storing the original advertising mask, we + * convert the capabilities we're actually using back to an + * advertising mask so that (1) get_settings() will report + * correct information (2) we can push the capabilities again + * after an MC reset. + */ + mcdi_to_ethtool_linkset(efx, phy_cfg->media, caps, advertising); + + return 1; +} +#endif + +int efx_mcdi_phy_get_fecparam(struct efx_nic *efx, struct ethtool_fecparam *fec) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_V2_LEN); + u32 caps, active, speed; /* MCDI format */ + bool is_25g = false; + size_t outlen; + int rc; + + BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0); + rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, + outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + if (outlen < MC_CMD_GET_LINK_OUT_V2_LEN) + return -EOPNOTSUPP; + + /* behaviour for 25G/50G links depends on 25G BASER bit */ + speed = MCDI_DWORD(outbuf, GET_LINK_OUT_V2_LINK_SPEED); + is_25g = speed == 25000 || speed == 50000; + + caps = MCDI_DWORD(outbuf, GET_LINK_OUT_V2_CAP); + fec->fec = mcdi_fec_caps_to_ethtool(caps, is_25g); + /* BASER is never supported on 100G */ + if (speed == 100000) + fec->fec &= ~ETHTOOL_FEC_BASER; + + active = MCDI_DWORD(outbuf, GET_LINK_OUT_V2_FEC_TYPE); + switch (active) { + case MC_CMD_FEC_NONE: + fec->active_fec = ETHTOOL_FEC_OFF; + break; + case MC_CMD_FEC_BASER: + fec->active_fec = ETHTOOL_FEC_BASER; + break; + case MC_CMD_FEC_RS: + fec->active_fec = ETHTOOL_FEC_RS; + break; + default: + netif_warn(efx, hw, efx->net_dev, + "Firmware reports unrecognised FEC_TYPE %u\n", + active); + /* We don't know what firmware has picked. AUTO is as good a + * "can't happen" value as any other. 
+ */ + fec->active_fec = ETHTOOL_FEC_AUTO; + break; + } + + return 0; +} + +int efx_mcdi_phy_set_fecparam(struct efx_nic *efx, + const struct ethtool_fecparam *fec) +{ + u32 caps = ethtool_linkset_to_mcdi_cap(efx->link_advertising); + struct efx_mcdi_phy_data *phy_data = efx->phy_data; + int rc; + + rc = ethtool_fec_supported(phy_data->supported_cap, fec->fec); + if (rc) + return rc; + + caps |= ethtool_fec_caps_to_mcdi(phy_data->supported_cap, fec->fec); + rc = efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx), + efx->loopback_mode, false, SET_LINK_SEQ_IGNORE); + if (rc) + return rc; + + /* Record the new FEC setting for subsequent set_link calls */ + efx->fec_config = fec->fec; + return 0; +} + +int efx_mcdi_phy_test_alive(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_STATE_OUT_LEN); + size_t outlen; + int rc; + + BUILD_BUG_ON(MC_CMD_GET_PHY_STATE_IN_LEN != 0); + + rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_STATE, NULL, 0, + outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + + if (outlen < MC_CMD_GET_PHY_STATE_OUT_LEN) + return -EIO; + if (MCDI_DWORD(outbuf, GET_PHY_STATE_OUT_STATE) != MC_CMD_PHY_STATE_OK) + return -EINVAL; + + return 0; +} + +static const char *const mcdi_sft9001_cable_diag_names[] = { + "cable.pairA.length", + "cable.pairB.length", + "cable.pairC.length", + "cable.pairD.length", + "cable.pairA.status", + "cable.pairB.status", + "cable.pairC.status", + "cable.pairD.status", +}; + +const char *efx_mcdi_phy_test_name(struct efx_nic *efx, + unsigned int index) +{ + struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; + + if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_LBN)) { + if (index == 0) + return "bist"; + --index; + } + + if (phy_cfg->flags & + ((1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_LBN) | + (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN))) { + if (index == 0) + return "cable"; + --index; + + if (efx->phy_type == PHY_TYPE_SFT9001B) { + if (index < ARRAY_SIZE(mcdi_sft9001_cable_diag_names)) + return mcdi_sft9001_cable_diag_names[index]; + index -= ARRAY_SIZE(mcdi_sft9001_cable_diag_names); + } + } + + return NULL; +} + +u32 efx_get_mcdi_caps(struct efx_nic *efx) +{ + struct efx_mcdi_phy_data *phy_data = efx->phy_data; + + return ethtool_linkset_to_mcdi_cap(efx->link_advertising) | + ethtool_fec_caps_to_mcdi(phy_data->supported_cap, + efx->fec_config); +} + +int efx_mcdi_port_reconfigure(struct efx_nic *efx) +{ + /* Avoid using PHY data if unavailable (e.g. probe retry). 
*/ + if (efx->phy_data == NULL) + return -ENETDOWN; + + return efx_mcdi_set_link(efx, efx_get_mcdi_caps(efx), + efx_get_mcdi_phy_flags(efx), + efx->loopback_mode, false, + SET_LINK_SEQ_IGNORE); +} + +static unsigned int efx_calc_mac_mtu(struct efx_nic *efx) +{ + return EFX_MAX_FRAME_LEN(efx->net_dev->mtu); +} + +int efx_mcdi_set_mac(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(cmdbytes, MC_CMD_SET_MAC_IN_LEN); + bool forward_fcs; + u32 fcntl; + + BUILD_BUG_ON(MC_CMD_SET_MAC_OUT_LEN != 0); + + /* This has no effect on EF10 */ + ether_addr_copy(MCDI_PTR(cmdbytes, SET_MAC_IN_ADDR), + efx->net_dev->dev_addr); + + MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_MTU, efx_calc_mac_mtu(efx)); + MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_DRAIN, 0); + +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_ETHTOOL_FCS) + forward_fcs = efx->forward_fcs; +#else + forward_fcs = !!(efx->net_dev->features & NETIF_F_RXFCS); +#endif + MCDI_POPULATE_DWORD_1(cmdbytes, SET_MAC_IN_FLAGS, + SET_MAC_IN_FLAG_INCLUDE_FCS, forward_fcs); + + switch (efx->wanted_fc) { + case EFX_FC_RX | EFX_FC_TX: + fcntl = MC_CMD_FCNTL_BIDIR; + break; + case EFX_FC_RX: + fcntl = MC_CMD_FCNTL_RESPOND; + break; + default: + fcntl = MC_CMD_FCNTL_OFF; + break; + } + if (efx->wanted_fc & EFX_FC_AUTO) + fcntl = MC_CMD_FCNTL_AUTO; + if (efx->fc_disable) + fcntl = MC_CMD_FCNTL_OFF; + + MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_FCNTL, fcntl); + + return efx_mcdi_rpc(efx, MC_CMD_SET_MAC, cmdbytes, sizeof(cmdbytes), + NULL, 0, NULL); +} + +int efx_mcdi_set_mtu(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_MAC_EXT_IN_LEN); + + BUILD_BUG_ON(MC_CMD_SET_MAC_OUT_LEN != 0); + + MCDI_SET_DWORD(inbuf, SET_MAC_EXT_IN_MTU, efx_calc_mac_mtu(efx)); + + MCDI_POPULATE_DWORD_1(inbuf, SET_MAC_EXT_IN_CONTROL, + SET_MAC_EXT_IN_CFG_MTU, 1); + + return efx_mcdi_rpc(efx, MC_CMD_SET_MAC, inbuf, sizeof(inbuf), + NULL, 0, NULL); +} + +/* MAC statistics + */ +enum efx_stats_action { + EFX_STATS_ENABLE, + EFX_STATS_DISABLE, + EFX_STATS_PULL, + EFX_STATS_PERIOD, +}; + +static int efx_mcdi_mac_stats(struct efx_nic *efx, + enum efx_stats_action action, int clear) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN); + int rc; + int dma, change; + dma_addr_t dma_addr = efx->stats_buffer.dma_addr; + u32 dma_len; + + if (action == EFX_STATS_ENABLE) + efx->stats_enabled = true; + else if (action == EFX_STATS_DISABLE) + efx->stats_enabled = false; + + dma = action == EFX_STATS_PULL || efx->stats_enabled; + change = action != EFX_STATS_PULL; + + dma_len = dma ? 
efx->num_mac_stats * sizeof(u64) : 0; + + BUILD_BUG_ON(MC_CMD_MAC_STATS_OUT_DMA_LEN != 0); + + MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, dma_addr); + MCDI_POPULATE_DWORD_7(inbuf, MAC_STATS_IN_CMD, + MAC_STATS_IN_DMA, dma, + MAC_STATS_IN_CLEAR, clear, + MAC_STATS_IN_PERIODIC_CHANGE, change, + MAC_STATS_IN_PERIODIC_ENABLE, efx->stats_enabled, + MAC_STATS_IN_PERIODIC_CLEAR, 0, + MAC_STATS_IN_PERIODIC_NOEVENT, 1, + MAC_STATS_IN_PERIOD_MS, efx->stats_period_ms); + MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len); + + if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) + MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, + efx->vport.vport_id); + + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf), + NULL, 0, NULL); + /* Expect ENOENT if DMA queues have not been set up */ + if (rc && (rc != -ENOENT || atomic_read(&efx->active_queues))) + efx_mcdi_display_error(efx, MC_CMD_MAC_STATS, sizeof(inbuf), + NULL, 0, rc); + return rc; +} + +/* Periodic MAC statistics monitoring + */ +static void efx_mac_stats_schedule_monitor_work(struct efx_nic *efx) +{ + unsigned int period_ms = max(500u, efx->stats_period_ms * 2); + + /* Only run the check when regular DMA is expected */ + if (efx->stats_period_ms > 0) + schedule_delayed_work(&efx->stats_monitor_work, + msecs_to_jiffies(period_ms)); +} + +static void efx_mac_stats_start_monitor(struct efx_nic *efx) +{ + efx_mac_stats_schedule_monitor_work(efx); +} + +static void efx_mac_stats_stop_monitor(struct efx_nic *efx) +{ + cancel_delayed_work_sync(&efx->stats_monitor_work); + efx->stats_monitor_generation = EFX_MC_STATS_GENERATION_INVALID; +} + +void efx_mac_stats_reset_monitor(struct efx_nic *efx) +{ + cancel_delayed_work(&efx->stats_monitor_work); + efx->stats_monitor_generation = EFX_MC_STATS_GENERATION_INVALID; +} + +void efx_mac_stats_monitor(struct work_struct *data) +{ + struct efx_nic *efx = container_of(data, struct efx_nic, + stats_monitor_work.work); + __le64 *dma_stats = efx->stats_buffer.addr; + __le64 generation; + + if (!efx->stats_enabled) + return; + + generation = READ_ONCE(dma_stats[efx->num_mac_stats - 1]); + + if (generation == efx->stats_monitor_generation && + efx->stats_initialised) { + pci_warn(efx->pci_dev, + "Periodic DMA statistics are not updating. 
Restarting periodic collection.\n"); + + cancel_delayed_work(&efx->stats_monitor_work); + efx_mcdi_mac_stats(efx, EFX_STATS_PERIOD, 0); + } else if (generation != EFX_MC_STATS_GENERATION_INVALID) { + efx->stats_monitor_generation = generation; + } + efx_mac_stats_schedule_monitor_work(efx); +} + +void efx_mcdi_mac_start_stats(struct efx_nic *efx) +{ + __le64 *dma_stats = efx->stats_buffer.addr; + + dma_stats[efx->num_mac_stats - 1] = EFX_MC_STATS_GENERATION_INVALID; + + efx_mcdi_mac_stats(efx, EFX_STATS_ENABLE, 0); + efx_mac_stats_start_monitor(efx); +} + +void efx_mcdi_mac_stop_stats(struct efx_nic *efx) +{ + efx_mac_stats_stop_monitor(efx); + efx_mcdi_mac_stats(efx, EFX_STATS_DISABLE, 0); +} + +void efx_mcdi_mac_update_stats_period(struct efx_nic *efx) +{ + efx_mac_stats_stop_monitor(efx); + efx_mcdi_mac_stats(efx, EFX_STATS_PERIOD, 0); + efx_mac_stats_start_monitor(efx); +} + +#define EFX_MAC_STATS_WAIT_MS 1000 + +void efx_mcdi_mac_pull_stats(struct efx_nic *efx) +{ + __le64 *dma_stats = efx->stats_buffer.addr; + unsigned long end; + + if (!dma_stats) + return; + + dma_stats[efx->num_mac_stats - 1] = EFX_MC_STATS_GENERATION_INVALID; + efx_mcdi_mac_stats(efx, EFX_STATS_PULL, 0); + + end = jiffies + msecs_to_jiffies(EFX_MAC_STATS_WAIT_MS); + + while (READ_ONCE(dma_stats[efx->num_mac_stats - 1]) == + EFX_MC_STATS_GENERATION_INVALID && + time_before(jiffies, end)) + usleep_range(100, 1000); +} + +int efx_mcdi_mac_init_stats(struct efx_nic *efx) +{ + int rc; + + if (!efx->num_mac_stats) + return 0; + + /* Allocate buffer for stats */ + rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer, + efx->num_mac_stats * sizeof(u64), GFP_KERNEL); + if (rc) { + pci_warn(efx->pci_dev, + "failed to allocate DMA buffer: %d\n", rc); + return rc; + } + + efx->mc_initial_stats = + kcalloc(efx->num_mac_stats, sizeof(u64), GFP_KERNEL); + if (!efx->mc_initial_stats) { + pci_warn(efx->pci_dev, + "failed to allocate initial MC stats buffer\n"); + rc = -ENOMEM; + goto fail; + } + + pci_dbg(efx->pci_dev, + "stats buffer at %llx (virt %p phys %llx)\n", + (u64) efx->stats_buffer.dma_addr, + efx->stats_buffer.addr, + (u64) virt_to_phys(efx->stats_buffer.addr)); + + return 0; + +fail: + efx_mcdi_mac_fini_stats(efx); + return rc; +} + +void efx_mcdi_mac_fini_stats(struct efx_nic *efx) +{ + kfree(efx->mc_initial_stats); + efx->mc_initial_stats = NULL; + efx_nic_free_buffer(efx, &efx->stats_buffer); +} + +/* Get physical port number (EF10 only; on Siena it is same as PF number) */ +int efx_mcdi_port_get_number(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN); + int rc; + + rc = efx_mcdi_rpc(efx, MC_CMD_GET_PORT_ASSIGNMENT, NULL, 0, + outbuf, sizeof(outbuf), NULL); + if (rc) + return rc; + + return MCDI_DWORD(outbuf, GET_PORT_ASSIGNMENT_OUT_PORT); +} + +/* Event processing + */ +static unsigned int efx_mcdi_event_link_speed[] = { + [MCDI_EVENT_LINKCHANGE_SPEED_100M] = 100, + [MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000, + [MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000, + [MCDI_EVENT_LINKCHANGE_SPEED_40G] = 40000, + [MCDI_EVENT_LINKCHANGE_SPEED_25G] = 25000, + [MCDI_EVENT_LINKCHANGE_SPEED_50G] = 50000, + [MCDI_EVENT_LINKCHANGE_SPEED_100G] = 100000, +}; + +void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev) +{ + u32 flags, fcntl, speed, lpa; + + speed = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_SPEED); + if (speed < ARRAY_SIZE(efx_mcdi_event_link_speed)) { + speed = efx_mcdi_event_link_speed[speed]; + } else { + if (net_ratelimit()) + netif_warn(efx, hw, efx->net_dev, + "Invalid 
speed enum %d in link change event", + speed); + speed = 0; + } + + flags = efx_mcdi_link_state_flags(&efx->link_state) & + ~(1 << MC_CMD_GET_LINK_OUT_LINK_UP_LBN); + flags |= EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LINK_FLAGS); + fcntl = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_FCNTL); + lpa = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LP_CAP); + + /* efx->link_state is only modified by efx_mcdi_phy_get_link(), + * which is only run after flushing the event queues. Therefore, it + * is safe to modify the link state outside of the mac_lock here. + */ + efx_mcdi_phy_decode_link(efx, &efx->link_state, speed, flags, fcntl, + efx->link_state.ld_caps, lpa); + efx_link_status_changed(efx); + + efx_mcdi_phy_check_fcntl(efx, lpa); +} + +void efx_mcdi_process_link_change_v2(struct efx_nic *efx, efx_qword_t *ev) +{ + u32 link_up, flags, fcntl, speed, lpa; + + speed = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_V2_SPEED); + EFX_WARN_ON_PARANOID(speed >= ARRAY_SIZE(efx_mcdi_event_link_speed)); + speed = efx_mcdi_event_link_speed[speed]; + + link_up = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_V2_FLAGS_LINK_UP); + fcntl = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_V2_FCNTL); + lpa = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_V2_LP_CAP); + flags = efx_mcdi_link_state_flags(&efx->link_state) & + ~(1 << MC_CMD_GET_LINK_OUT_LINK_UP_LBN); + flags |= !!link_up << MC_CMD_GET_LINK_OUT_LINK_UP_LBN; + flags |= 1 << MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN; + + efx_mcdi_phy_decode_link(efx, &efx->link_state, speed, flags, fcntl, + efx->link_state.ld_caps, lpa); + + efx_link_status_changed(efx); + + efx_mcdi_phy_check_fcntl(efx, lpa); +} + +void efx_mcdi_process_module_change(struct efx_nic *efx, efx_qword_t *ev) +{ + struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; + u32 caps = efx_get_mcdi_caps(efx); + u8 flags = efx_mcdi_link_state_flags(&efx->link_state); + u32 ld_caps; + u8 seq; + + ld_caps = EFX_QWORD_FIELD(*ev, MCDI_EVENT_MODULECHANGE_LD_CAP); + seq = EFX_DWORD_FIELD(*ev, MCDI_EVENT_MODULECHANGE_SEQ); + + flags |= 1 << MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN; + + /* efx->link_state is only modified by efx_mcdi_phy_get_link(), + * which is only run after flushing the event queues. Therefore, it + * is safe to modify the link state outside of the mac_lock here. + */ + efx_mcdi_phy_decode_link(efx, &efx->link_state, efx->link_state.speed, + flags, + efx_mcdi_link_state_fcntl(&efx->link_state), + ld_caps, 0); + + /* See if efx->link_advertising works with the new module's ld_caps. + * If it doesn't, reset to all speeds rather than go with no speed. + */ + if (ld_caps && caps && !(caps & ld_caps & MCDI_PORT_SPEED_CAPS)) { + caps |= (phy_cfg->supported_cap & MCDI_PORT_SPEED_CAPS); + caps &= ld_caps; + + if (caps & MCDI_PORT_SPEED_CAPS) + netif_info(efx, link, efx->net_dev, + "No configured speeds were supported with the new module. 
Resetting speed to default (caps=%#x)\n", + caps); + else + netif_info(efx, link, efx->net_dev, + "No speeds were supported with the new module (caps=%#x)\n", + caps); + + if (efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx), + efx->loopback_mode, true, seq) == 0) + mcdi_to_ethtool_linkset(efx, phy_cfg->media, caps, + efx->link_advertising); + } + + efx_link_status_changed(efx); +} + +static void efx_handle_drain_event(struct efx_nic *efx) +{ + if (atomic_dec_and_test(&efx->active_queues)) + wake_up(&efx->flush_wq); + + WARN_ON(atomic_read(&efx->active_queues) < 0); +} + +bool efx_mcdi_port_process_event_common(struct efx_channel *channel, + efx_qword_t *event, int *rc, int budget) +{ + struct efx_nic *efx = channel->efx; + int code = EFX_QWORD_FIELD(*event, MCDI_EVENT_CODE); + + switch (code) { + case MCDI_EVENT_CODE_LINKCHANGE: + efx_mcdi_process_link_change(efx, event); + return true; + case MCDI_EVENT_CODE_LINKCHANGE_V2: + efx_mcdi_process_link_change_v2(efx, event); + return true; + case MCDI_EVENT_CODE_MODULECHANGE: + efx_mcdi_process_module_change(efx, event); + return true; + case MCDI_EVENT_CODE_TX_FLUSH: + case MCDI_EVENT_CODE_RX_FLUSH: + /* Two flush events will be sent: one to the same event + * queue as completions, and one to event queue 0. + * In the latter case the {RX,TX}_FLUSH_TO_DRIVER + * flag will be set, and we should ignore the event + * because we want to wait for all completions. + */ + BUILD_BUG_ON(MCDI_EVENT_TX_FLUSH_TO_DRIVER_LBN != + MCDI_EVENT_RX_FLUSH_TO_DRIVER_LBN); + + if (!MCDI_EVENT_FIELD(*event, TX_FLUSH_TO_DRIVER)) + efx_handle_drain_event(efx); +#ifdef EFX_NOT_UPSTREAM +#if IS_MODULE(CONFIG_SFC_DRIVERLINK) + else + *rc = efx_dl_handle_event(&efx->dl_nic, event, budget); +#endif +#endif + return true; + case MCDI_EVENT_CODE_SENSOREVT: + if (efx_nic_has_dynamic_sensors(efx)) + efx_mcdi_dynamic_sensor_event(efx, event); + else + efx_mcdi_sensor_event(efx, event); + return true; + case MCDI_EVENT_CODE_TX_ERR: + case MCDI_EVENT_CODE_RX_ERR: + /* Driver handles ef10 with specific code */ + if (efx_nic_rev(efx) != EFX_REV_EF100) + return false; + + netif_err(efx, hw, efx->net_dev, + "%s DMA error (event: "EFX_QWORD_FMT")\n", + code == MCDI_EVENT_CODE_TX_ERR ? "TX" : "RX", + EFX_QWORD_VAL(*event)); + efx_schedule_reset(efx, RESET_TYPE_DATAPATH); + return true; + } + + return false; +} + +static int efx_mcdi_bist(struct efx_nic *efx, unsigned int bist_mode, + int *results) +{ + unsigned int retry, i, count = 0; + size_t outlen; + u32 status; + MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_POLL_BIST_OUT_SFT9001_LEN); + u8 *ptr; + int rc; + + BUILD_BUG_ON(MC_CMD_START_BIST_OUT_LEN != 0); + MCDI_SET_DWORD(inbuf, START_BIST_IN_TYPE, bist_mode); + rc = efx_mcdi_rpc(efx, MC_CMD_START_BIST, + inbuf, MC_CMD_START_BIST_IN_LEN, NULL, 0, NULL); + if (rc) + goto out; + + /* Wait up to 10s for BIST to finish */ + for (retry = 0; retry < 100; ++retry) { + BUILD_BUG_ON(MC_CMD_POLL_BIST_IN_LEN != 0); + rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0, + outbuf, sizeof(outbuf), &outlen); + if (rc) + goto out; + + status = MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT); + if (status != MC_CMD_POLL_BIST_RUNNING) + goto finished; + + msleep(100); + } + + rc = -ETIMEDOUT; + goto out; + +finished: + results[count++] = (status == MC_CMD_POLL_BIST_PASSED) ? 
1 : -1;
+
+	/* SFT9001 specific cable diagnostics output */
+	if (efx->phy_type == PHY_TYPE_SFT9001B &&
+	    (bist_mode == MC_CMD_PHY_BIST_CABLE_SHORT ||
+	     bist_mode == MC_CMD_PHY_BIST_CABLE_LONG)) {
+		ptr = MCDI_PTR(outbuf, POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A);
+		if (status == MC_CMD_POLL_BIST_PASSED &&
+		    outlen >= MC_CMD_POLL_BIST_OUT_SFT9001_LEN) {
+			for (i = 0; i < 8; i++) {
+				results[count + i] =
+					EFX_DWORD_FIELD(((efx_dword_t *)ptr)[i],
+							EFX_DWORD_0);
+			}
+		}
+		count += 8;
+	}
+	rc = count;
+
+out:
+	return rc;
+}
+
+int efx_mcdi_phy_run_tests(struct efx_nic *efx, int *results,
+			   unsigned int flags)
+{
+	struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
+	u32 mode;
+	int rc;
+
+	if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_LBN)) {
+		rc = efx_mcdi_bist(efx, MC_CMD_PHY_BIST, results);
+		if (rc < 0)
+			return rc;
+
+		results += rc;
+	}
+
+	/* If we support both LONG and SHORT cable BIST, run LONG for an
+	 * offline test and SHORT otherwise. Otherwise, run whichever one
+	 * we do support.
+	 */
+	mode = 0;
+	if (phy_cfg->flags &
+	    (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_LBN)) {
+		if ((flags & ETH_TEST_FL_OFFLINE) &&
+		    (phy_cfg->flags &
+		     (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN)))
+			mode = MC_CMD_PHY_BIST_CABLE_LONG;
+		else
+			mode = MC_CMD_PHY_BIST_CABLE_SHORT;
+	} else if (phy_cfg->flags &
+		   (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN))
+		mode = MC_CMD_PHY_BIST_CABLE_LONG;
+
+	if (mode != 0) {
+		rc = efx_mcdi_bist(efx, mode, results);
+		if (rc < 0)
+			return rc;
+		results += rc;
+	}
+
+	return 0;
+}
+
+#define SFP_PAGE_SIZE		128
+#define SFF_DIAG_TYPE_OFFSET	92
+#define SFF_DIAG_ADDR_CHANGE	(1 << 2)
+#define SFF_DIAG_IMPLEMENTED	(1 << 6)
+#define SFF_8079_NUM_PAGES	2
+#define SFF_8472_NUM_PAGES	4
+#define SFF_8436_NUM_PAGES	5
+#define SFF_DMT_LEVEL_OFFSET	94
+
+/**
+ * efx_mcdi_phy_get_module_eeprom_page() - Get a single page of module eeprom
+ * @efx: NIC context
+ * @page: EEPROM page number
+ * @data: Destination data pointer
+ * @offset: Offset in page to start copying into @data
+ * @space: Space available in @data
+ *
+ * Return:
+ *   >=0 - amount of data copied
+ *   <0 - error
+ */
+static int efx_mcdi_phy_get_module_eeprom_page(struct efx_nic *efx,
+					       unsigned int page,
+					       u8 *data, ssize_t offset,
+					       ssize_t space)
+{
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX);
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN);
+	unsigned int payload_len;
+	unsigned int to_copy;
+	size_t outlen;
+	int rc;
+
+	if (offset > SFP_PAGE_SIZE)
+		return -EINVAL;
+
+	to_copy = min(space, SFP_PAGE_SIZE - offset);
+
+	MCDI_SET_DWORD(inbuf, GET_PHY_MEDIA_INFO_IN_PAGE, page);
+	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_PHY_MEDIA_INFO,
+				inbuf, sizeof(inbuf),
+				outbuf, sizeof(outbuf),
+				&outlen);
+
+	if (rc)
+		return rc;
+
+	if (outlen < (MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST +
+		      SFP_PAGE_SIZE)) {
+		/* There are SFP+ modules that claim to be SFF-8472 compliant
+		 * and do not provide diagnostic information, but they don't
+		 * return all bits 0 as the spec says they should...
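+		 * Synthesise an all-zero diagnostic page for such modules
+		 * rather than failing the whole EEPROM read.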
*/ + if (page >= 2 && page < 4 && + efx_mcdi_phy_module_type(efx) == MC_CMD_MEDIA_SFP_PLUS && + efx_mcdi_phy_sff_8472_level(efx) > 0 && + (efx_mcdi_phy_diag_type(efx) & SFF_DIAG_IMPLEMENTED) == 0) { + memset(data, 0, to_copy); + + return to_copy; + } + + return -EIO; + } + + payload_len = MCDI_DWORD(outbuf, GET_PHY_MEDIA_INFO_OUT_DATALEN); + if (payload_len != SFP_PAGE_SIZE) + return -EIO; + + memcpy(data, MCDI_PTR(outbuf, GET_PHY_MEDIA_INFO_OUT_DATA) + offset, + to_copy); + + return to_copy; +} + +static int efx_mcdi_phy_get_module_eeprom_byte(struct efx_nic *efx, + unsigned int page, + u8 byte) +{ + u8 data; + int rc; + + rc = efx_mcdi_phy_get_module_eeprom_page(efx, page, &data, byte, 1); + if (rc == 1) + return data; + + return rc; +} + +static int efx_mcdi_phy_diag_type(struct efx_nic *efx) +{ + /* Page zero of the EEPROM includes the diagnostic type at byte 92. */ + return efx_mcdi_phy_get_module_eeprom_byte(efx, 0, + SFF_DIAG_TYPE_OFFSET); +} + +static int efx_mcdi_phy_sff_8472_level(struct efx_nic *efx) +{ + /* Page zero of the EEPROM includes the DMT level at byte 94. */ + return efx_mcdi_phy_get_module_eeprom_byte(efx, 0, + SFF_DMT_LEVEL_OFFSET); +} + +static u32 efx_mcdi_phy_module_type(struct efx_nic *efx) +{ + struct efx_mcdi_phy_data *phy_data = efx->phy_data; + + if (phy_data->media != MC_CMD_MEDIA_QSFP_PLUS) + return phy_data->media; + + /* A QSFP+ NIC may actually have an SFP+ module attached. + * The ID is page 0, byte 0. + * QSFP28 is of type SFF_8636, however, this is treated + * the same by ethtool, so we can also treat them the same. + */ + switch (efx_mcdi_phy_get_module_eeprom_byte(efx, 0, 0)) { + case 0x3: // SFP + return MC_CMD_MEDIA_SFP_PLUS; + case 0xc: // QSFP + case 0xd: // QSFP+ + case 0x11: // QSFP28 + return MC_CMD_MEDIA_QSFP_PLUS; + default: + return 0; + } +} + +static u8 efx_mcdi_phy_connector(struct efx_nic *efx) +{ + u8 connector; + + switch (efx_mcdi_phy_module_type(efx)) { + case MC_CMD_MEDIA_SFP_PLUS: + /* SFP/SFP+ (SFF-8472). Connector type is page 0 byte 2 */ + connector = efx_mcdi_phy_get_module_eeprom_byte(efx, 0, 2); + break; + case MC_CMD_MEDIA_QSFP_PLUS: + /* QSFP/QSFP+/QSFP28 (SFF-8636/SFF-8436) + * Connector type is page 0 byte 130 (upper page 0 byte 2). + */ + connector = efx_mcdi_phy_get_module_eeprom_byte(efx, 0, 2); + break; + default: + connector = 0; /* unknown */ + break; + } + + /* Check connector type (SFF-8024 table 4-3). 
*/ + switch (connector) { + case 0x07: /* LC (Lucent connector) */ + case 0x0b: /* Optical pigtail */ + return PORT_FIBRE; + case 0x21: /* Copper pigtail */ + case 0x23: /* No separable connector */ + return PORT_DA; + case 0x22: /* RJ45 */ + return PORT_TP; + default: + return PORT_OTHER; + } +} + +static u8 mcdi_to_ethtool_media(struct efx_nic *efx) +{ + struct efx_mcdi_phy_data *phy_data = efx->phy_data; + + switch (phy_data->media) { + case MC_CMD_MEDIA_XAUI: + case MC_CMD_MEDIA_CX4: + case MC_CMD_MEDIA_KX4: + return PORT_OTHER; + + case MC_CMD_MEDIA_XFP: + case MC_CMD_MEDIA_SFP_PLUS: + case MC_CMD_MEDIA_QSFP_PLUS: + return efx_mcdi_phy_connector(efx); + + case MC_CMD_MEDIA_BASE_T: + return PORT_TP; + + default: + return PORT_OTHER; + } +} + +void efx_mcdi_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) +{ + struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_USE_ETHTOOL_LP_ADVERTISING) + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN); + int rc; +#endif + + ecmd->supported = mcdi_to_ethtool_cap(efx, phy_cfg->media, + phy_cfg->supported_cap); + ecmd->advertising = efx->link_advertising[0]; + ethtool_cmd_speed_set(ecmd, efx->link_state.speed); + ecmd->duplex = efx->link_state.fd; + ecmd->port = mcdi_to_ethtool_media(efx); + ecmd->phy_address = phy_cfg->port; + ecmd->transceiver = XCVR_INTERNAL; + ecmd->autoneg = !!(efx->link_advertising[0] & ADVERTISED_Autoneg); + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_USE_ETHTOOL_LP_ADVERTISING) + BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0); + rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, + outbuf, sizeof(outbuf), NULL); + if (rc) + return; + ecmd->lp_advertising = + mcdi_to_ethtool_cap(efx, phy_cfg->media, + MCDI_DWORD(outbuf, GET_LINK_OUT_LP_CAP)); +#endif +} + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_ETHTOOL_LINKSETTINGS) +void efx_mcdi_phy_get_ksettings(struct efx_nic *efx, + struct ethtool_link_ksettings *out) +{ + struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN); + struct ethtool_link_settings *base = &out->base; + int rc; + + if (netif_carrier_ok(efx->net_dev)) { + base->speed = efx->link_state.speed; + base->duplex = efx->link_state.fd ? DUPLEX_FULL : DUPLEX_HALF; + } else { + base->speed = 0; + base->duplex = DUPLEX_UNKNOWN; + } + base->port = mcdi_to_ethtool_media(efx); + base->phy_address = phy_cfg->port; + base->autoneg = efx->link_advertising[0] & ADVERTISED_Autoneg ? 
+ AUTONEG_ENABLE : + AUTONEG_DISABLE; + mcdi_to_ethtool_linkset(efx, phy_cfg->media, phy_cfg->supported_cap, + out->link_modes.supported); + memcpy(out->link_modes.advertising, efx->link_advertising, + sizeof(__ETHTOOL_DECLARE_LINK_MODE_MASK())); + + BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0); + rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, + outbuf, sizeof(outbuf), NULL); + if (rc) + return; + mcdi_to_ethtool_linkset(efx, phy_cfg->media, + MCDI_DWORD(outbuf, GET_LINK_OUT_LP_CAP), + out->link_modes.lp_advertising); +#ifdef EFX_HAVE_LINK_MODE_FEC_BITS + mcdi_fec_to_ethtool_linkset(MCDI_DWORD(outbuf, GET_LINK_OUT_CAP), + out->link_modes.advertising); + mcdi_fec_to_ethtool_linkset(phy_cfg->supported_cap, + out->link_modes.supported); + mcdi_fec_to_ethtool_linkset(MCDI_DWORD(outbuf, GET_LINK_OUT_LP_CAP), + out->link_modes.lp_advertising); +#endif +} +#endif + +static +int efx_mcdi_phy_get_module_eeprom_locked(struct efx_nic *efx, + struct ethtool_eeprom *ee, u8 *data) +{ + int rc; + ssize_t space_remaining = ee->len; + unsigned int page_off; + bool ignore_missing; + int num_pages; + int page; + + switch (efx_mcdi_phy_module_type(efx)) { + case MC_CMD_MEDIA_SFP_PLUS: + num_pages = efx_mcdi_phy_sff_8472_level(efx) > 0 ? + SFF_8472_NUM_PAGES : SFF_8079_NUM_PAGES; + page = 0; + ignore_missing = false; + break; + case MC_CMD_MEDIA_QSFP_PLUS: + num_pages = SFF_8436_NUM_PAGES; + page = -1; /* We obtain the lower page by asking for -1. */ + ignore_missing = true; /* Ignore missing pages after page 0. */ + break; + default: + return -EOPNOTSUPP; + } + + page_off = ee->offset % SFP_PAGE_SIZE; + page += ee->offset / SFP_PAGE_SIZE; + + while (space_remaining && (page < num_pages)) { + rc = efx_mcdi_phy_get_module_eeprom_page(efx, page, + data, page_off, space_remaining); + + if (rc > 0) { + space_remaining -= rc; + data += rc; + page_off = 0; + page++; + } else if (rc == 0) { + space_remaining = 0; + } else if (ignore_missing && (page > 0)) { + int intended_size = SFP_PAGE_SIZE - page_off; + + space_remaining -= intended_size; + if (space_remaining < 0) { + space_remaining = 0; + } else { + memset(data, 0, intended_size); + data += intended_size; + page_off = 0; + page++; + rc = 0; + } + } else { + return rc; + } + } + + return 0; +} + +int efx_mcdi_phy_get_module_eeprom(struct efx_nic *efx, + struct ethtool_eeprom *ee, + u8 *data) +{ + int ret; + + mutex_lock(&efx->mac_lock); + ret = efx_mcdi_phy_get_module_eeprom_locked(efx, ee, data); + mutex_unlock(&efx->mac_lock); + + return ret; +} + +static int efx_mcdi_phy_get_module_info_locked(struct efx_nic *efx, + struct ethtool_modinfo *modinfo) +{ + int sff_8472_level; + int diag_type; + + switch (efx_mcdi_phy_module_type(efx)) { + case MC_CMD_MEDIA_SFP_PLUS: + sff_8472_level = efx_mcdi_phy_sff_8472_level(efx); + + /* If we can't read the diagnostics level we have none. 
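+		 * In that case we cannot report any module EEPROM
+		 * information.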
*/ + if (sff_8472_level < 0) + return -EOPNOTSUPP; + + /* Check if this module requires the (unsupported) address + * change operation + */ + diag_type = efx_mcdi_phy_diag_type(efx); + + if (sff_8472_level == 0 || + (diag_type & SFF_DIAG_ADDR_CHANGE)) { + modinfo->type = ETH_MODULE_SFF_8079; + modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; + } else { + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + } + break; + + case MC_CMD_MEDIA_QSFP_PLUS: + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN; + break; + + default: + return -EOPNOTSUPP; + } + + return 0; +} + +int efx_mcdi_phy_get_module_info(struct efx_nic *efx, + struct ethtool_modinfo *modinfo) +{ + int ret; + + mutex_lock(&efx->mac_lock); + ret = efx_mcdi_phy_get_module_info_locked(efx, modinfo); + mutex_unlock(&efx->mac_lock); + + return ret; +} + diff --git a/src/drivers/net/ethernet/sfc/mcdi_port_common.h b/src/drivers/net/ethernet/sfc/mcdi_port_common.h new file mode 100644 index 0000000..15949c5 --- /dev/null +++ b/src/drivers/net/ethernet/sfc/mcdi_port_common.h @@ -0,0 +1,121 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2018 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ +#ifndef EFX_MCDI_PORT_COMMON_H +#define EFX_MCDI_PORT_COMMON_H + +#include "net_driver.h" +#include "mcdi.h" +#include "mcdi_pcol.h" + +#define MCDI_PORT_SPEED_CAPS ((1 << MC_CMD_PHY_CAP_10HDX_LBN) | \ + (1 << MC_CMD_PHY_CAP_10FDX_LBN) | \ + (1 << MC_CMD_PHY_CAP_100HDX_LBN) | \ + (1 << MC_CMD_PHY_CAP_100FDX_LBN) | \ + (1 << MC_CMD_PHY_CAP_1000HDX_LBN) | \ + (1 << MC_CMD_PHY_CAP_1000FDX_LBN) | \ + (1 << MC_CMD_PHY_CAP_10000FDX_LBN) | \ + (1 << MC_CMD_PHY_CAP_40000FDX_LBN) | \ + (1 << MC_CMD_PHY_CAP_100000FDX_LBN) | \ + (1 << MC_CMD_PHY_CAP_25000FDX_LBN) | \ + (1 << MC_CMD_PHY_CAP_50000FDX_LBN)) + +#define SET_LINK_SEQ_IGNORE (1 << MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_IGNORE_LBN) + +struct efx_mcdi_phy_data { + u32 flags; + u32 type; + u32 supported_cap; + u32 channel; + u32 port; + u32 stats_mask; + u8 name[20]; + u32 media; + u32 mmd_mask; + u8 revision[20]; +#ifdef CONFIG_DEBUG_FS + struct efx_nic *efx; + void *stats; + dma_addr_t stats_addr; + u8 index[MC_CMD_PHY_NSTATS]; +#endif +}; + +void mcdi_to_ethtool_linkset(struct efx_nic *efx, u32 media, u32 cap, + unsigned long *linkset); +u32 mcdi_fec_caps_to_ethtool(u32 caps, bool is_25g); +u32 ethtool_linkset_to_mcdi_cap(const unsigned long *linkset); +u32 ethtool_fec_caps_to_mcdi(u32 supported_cap, u32 ethtool_cap); + +u32 efx_get_mcdi_phy_flags(struct efx_nic *efx); +void efx_mcdi_phy_decode_link(struct efx_nic *efx, + struct efx_link_state *link_state, + u32 speed, u32 flags, u32 fcntl, + u32 ld_caps, u32 lp_caps); + +int efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_data *cfg); +void efx_link_set_wanted_fc(struct efx_nic *efx, u8); +void efx_link_set_advertising(struct efx_nic *efx, const unsigned long *); +int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities, + u32 flags, u32 loopback_mode, bool async, u8 seq); +int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes); +void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa); +bool efx_mcdi_phy_poll(struct efx_nic *efx); +int 
efx_mcdi_phy_probe(struct efx_nic *efx); +void efx_mcdi_phy_remove(struct efx_nic *efx); +void efx_mcdi_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd); +int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd, + unsigned long *new_adv); +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_ETHTOOL_LINKSETTINGS) +int efx_mcdi_phy_set_ksettings(struct efx_nic *efx, + const struct ethtool_link_ksettings *settings, + unsigned long *advertising); +void efx_mcdi_phy_get_ksettings(struct efx_nic *efx, + struct ethtool_link_ksettings *out); +#endif + +int efx_mcdi_phy_get_fecparam(struct efx_nic *efx, struct ethtool_fecparam *fec); +int efx_mcdi_phy_set_fecparam(struct efx_nic *efx, + const struct ethtool_fecparam *fec); +int efx_mcdi_phy_test_alive(struct efx_nic *efx); +u32 efx_get_mcdi_caps(struct efx_nic *efx); + +int efx_mcdi_set_mac(struct efx_nic *efx); +int efx_mcdi_set_mtu(struct efx_nic *efx); + +void efx_mac_stats_monitor(struct work_struct *data); +void efx_mac_stats_reset_monitor(struct efx_nic *efx); + +void efx_mcdi_mac_update_stats_period(struct efx_nic *efx); +void efx_mcdi_mac_start_stats(struct efx_nic *efx); +void efx_mcdi_mac_stop_stats(struct efx_nic *efx); +void efx_mcdi_mac_pull_stats(struct efx_nic *efx); +int efx_mcdi_mac_init_stats(struct efx_nic *efx); +void efx_mcdi_mac_fini_stats(struct efx_nic *efx); + +int efx_mcdi_port_reconfigure(struct efx_nic *efx); +int efx_mcdi_port_get_number(struct efx_nic *efx); +bool efx_mcdi_port_process_event_common(struct efx_channel *channel, + efx_qword_t *event, int *rc, + int budget); +void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev); +void efx_mcdi_process_link_change_v2(struct efx_nic *efx, efx_qword_t *ev); +void efx_mcdi_process_module_change(struct efx_nic *efx, efx_qword_t *ev); + +int efx_mcdi_phy_run_tests(struct efx_nic *efx, int *results, + unsigned int flags); +const char *efx_mcdi_phy_test_name(struct efx_nic *efx, + unsigned int index); + +int efx_mcdi_phy_get_module_eeprom(struct efx_nic *efx, + struct ethtool_eeprom *ee, u8 *data); +int efx_mcdi_phy_get_module_info(struct efx_nic *efx, + struct ethtool_modinfo *modinfo); + +#endif + diff --git a/src/drivers/net/ethernet/sfc/mcdi_vdpa.c b/src/drivers/net/ethernet/sfc/mcdi_vdpa.c new file mode 100644 index 0000000..e45a07b --- /dev/null +++ b/src/drivers/net/ethernet/sfc/mcdi_vdpa.c @@ -0,0 +1,358 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Driver for Xilinx network controllers and boards + * Copyright 2020 Xilinx Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
+ */
+
+#include <linux/vdpa.h>
+#include "ef100_vdpa.h"
+#include "efx.h"
+#include "nic.h"
+#include "mcdi_vdpa.h"
+#include "mcdi_pcol.h"
+#include "ef100_rep.h"
+
+#if defined(CONFIG_SFC_VDPA)
+
+#ifdef VDPA_TEST
+#include <linux/virtio_net.h>
+static u64 ef100vdpa_features = (1ULL << VIRTIO_F_ANY_LAYOUT) |
+				(1ULL << VIRTIO_F_VERSION_1) |
+#if defined(EFX_USE_KCOMPAT) && defined(EFX_HAVE_VIRTIO_F_IN_ORDER)
+				(1ULL << VIRTIO_F_IN_ORDER) |
+#endif
+				(1ULL << VIRTIO_NET_F_MAC) |
+				(1ULL << VIRTIO_NET_F_MTU) |
+				(1ULL << VIRTIO_NET_F_SPEED_DUPLEX);
+#endif
+
+/* Value of TARGET_VF to use when a virtqueue create, delete or doorbell
+ * offset MCDI call is invoked on a VF itself
+ */
+#define MCDI_TARGET_VF_VAL_VF_NULL 0xFFFF
+
+struct efx_vring_ctx *efx_vdpa_vring_init(struct efx_nic *efx, u32 vi,
+					  enum ef100_vdpa_vq_type vring_type)
+{
+	struct efx_vring_ctx *vring_ctx;
+	u32 queue_cmd;
+
+	vring_ctx = kzalloc(sizeof(*vring_ctx), GFP_KERNEL);
+	if (!vring_ctx)
+		return ERR_PTR(-ENOMEM);
+	if (vring_type == EF100_VDPA_VQ_TYPE_NET_RXQ) {
+		queue_cmd = MC_CMD_VIRTIO_INIT_QUEUE_REQ_NET_RXQ;
+	} else if (vring_type == EF100_VDPA_VQ_TYPE_NET_TXQ) {
+		queue_cmd = MC_CMD_VIRTIO_INIT_QUEUE_REQ_NET_TXQ;
+	} else {
+		pr_err("%s: Invalid Queue type %u\n", __func__, vring_type);
+		kfree(vring_ctx);
+		return ERR_PTR(-EINVAL);
+	}
+
+	vring_ctx->nic = efx;
+	vring_ctx->vf_index = MCDI_TARGET_VF_VAL_VF_NULL;
+	vring_ctx->vi_index = vi;
+	vring_ctx->mcdi_vring_type = queue_cmd;
+	return vring_ctx;
+}
+
+void efx_vdpa_vring_fini(struct efx_vring_ctx *vring_ctx)
+{
+	kfree(vring_ctx);
+}
+
+int efx_vdpa_get_features(struct efx_nic *efx,
+			  enum ef100_vdpa_device_type type,
+			  u64 *featuresp)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_VIRTIO_GET_FEATURES_IN_LEN);
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_VIRTIO_GET_FEATURES_OUT_LEN);
+	u32 high_val = 0, low_val = 0;
+	ssize_t outlen;
+	int rc;
+
+	if (!efx) {
+		pr_err("%s: Invalid NIC pointer\n", __func__);
+		return -EINVAL;
+	}
+	if (!featuresp) {
+		pr_err("%s: Invalid features pointer\n", __func__);
+		return -EINVAL;
+	}
+	if (type != EF100_VDPA_DEVICE_TYPE_NET)
+		return -EINVAL;
+
+	MCDI_SET_DWORD(inbuf, VIRTIO_GET_FEATURES_IN_DEVICE_ID,
+		       MC_CMD_VIRTIO_GET_FEATURES_IN_NET);
+	rc = efx_mcdi_rpc(efx, MC_CMD_VIRTIO_GET_FEATURES, inbuf, sizeof(inbuf),
+			  outbuf, sizeof(outbuf), &outlen);
+	if (rc)
+		return rc;
+	if (outlen < MC_CMD_VIRTIO_GET_FEATURES_OUT_LEN)
+		return -EIO;
+	low_val = MCDI_DWORD(outbuf, VIRTIO_GET_FEATURES_OUT_FEATURES_LO);
+	high_val = MCDI_DWORD(outbuf, VIRTIO_GET_FEATURES_OUT_FEATURES_HI);
+	*featuresp = ((u64)high_val << 32) | low_val;
+	return 0;
+}
+
+int efx_vdpa_verify_features(struct efx_nic *efx,
+			     enum ef100_vdpa_device_type type, u64 features)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_VIRTIO_TEST_FEATURES_IN_LEN);
+	ssize_t outlen;
+	int rc;
+
+	if (!efx) {
+		pr_err("%s: Invalid NIC pointer\n", __func__);
+		return -EINVAL;
+	}
+
+	if (type != EF100_VDPA_DEVICE_TYPE_NET) {
+		pr_err("%s: Device type not supported %d\n", __func__, type);
+		return -EINVAL;
+	}
+
+	BUILD_BUG_ON(MC_CMD_VIRTIO_TEST_FEATURES_OUT_LEN != 0);
+	MCDI_SET_DWORD(inbuf, VIRTIO_TEST_FEATURES_IN_DEVICE_ID,
+		       MC_CMD_VIRTIO_GET_FEATURES_IN_NET);
+	MCDI_SET_DWORD(inbuf, VIRTIO_TEST_FEATURES_IN_FEATURES_LO,
+		       features & 0xFFFFFFFF);
+	MCDI_SET_DWORD(inbuf, VIRTIO_TEST_FEATURES_IN_FEATURES_HI,
+		       (features >> 32) & 0xFFFFFFFF);
+	rc = efx_mcdi_rpc(efx, MC_CMD_VIRTIO_TEST_FEATURES, inbuf, sizeof(inbuf),
+			  NULL, 0, &outlen);
+	if (rc)
+		return rc;
+	if (outlen != MC_CMD_VIRTIO_TEST_FEATURES_OUT_LEN)
+		return -EIO;
+	return 0;
+}
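+
+/* Create the virtqueue via MC_CMD_VIRTIO_INIT_QUEUE. The request carries
+ * the ring addresses, the negotiated feature bits and, optionally, the
+ * initial producer/consumer indices so that a ring can be restored
+ * mid-stream.
+ */
+int 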
efx_vdpa_vring_create(struct efx_vring_ctx *vring_ctx, + struct efx_vring_cfg *vring_cfg, + struct efx_vring_dyn_cfg *vring_dyn_cfg) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_VIRTIO_INIT_QUEUE_REQ_LEN); + struct efx_nic *efx; + ssize_t outlen; + int rc; + +#ifdef EFX_NOT_UPSTREAM + pr_info("%s: vf:%u type:%u vi:%u last_avail: %x last_used: %x\n", + __func__, vring_ctx->vf_index, + vring_ctx->mcdi_vring_type, vring_ctx->vi_index, + vring_dyn_cfg ? vring_dyn_cfg->avail_idx : U32_MAX, + vring_dyn_cfg ? vring_dyn_cfg->used_idx : U32_MAX); +#endif + efx = vring_ctx->nic; + + BUILD_BUG_ON(MC_CMD_VIRTIO_INIT_QUEUE_RESP_LEN != 0); + + MCDI_SET_BYTE(inbuf, VIRTIO_INIT_QUEUE_REQ_QUEUE_TYPE, + vring_ctx->mcdi_vring_type); + MCDI_SET_WORD(inbuf, VIRTIO_INIT_QUEUE_REQ_TARGET_VF, + vring_ctx->vf_index); + MCDI_SET_DWORD(inbuf, VIRTIO_INIT_QUEUE_REQ_INSTANCE, + vring_ctx->vi_index); + + MCDI_SET_DWORD(inbuf, VIRTIO_INIT_QUEUE_REQ_SIZE, vring_cfg->size); + MCDI_SET_DWORD(inbuf, VIRTIO_INIT_QUEUE_REQ_DESC_TBL_ADDR_LO, + vring_cfg->desc & 0xFFFFFFFF); + MCDI_SET_DWORD(inbuf, VIRTIO_INIT_QUEUE_REQ_DESC_TBL_ADDR_HI, + vring_cfg->desc >> 32); + MCDI_SET_DWORD(inbuf, VIRTIO_INIT_QUEUE_REQ_AVAIL_RING_ADDR_LO, + vring_cfg->avail & 0xFFFFFFFF); + MCDI_SET_DWORD(inbuf, VIRTIO_INIT_QUEUE_REQ_AVAIL_RING_ADDR_HI, + vring_cfg->avail >> 32); + MCDI_SET_DWORD(inbuf, VIRTIO_INIT_QUEUE_REQ_USED_RING_ADDR_LO, + vring_cfg->used & 0xFFFFFFFF); + MCDI_SET_DWORD(inbuf, VIRTIO_INIT_QUEUE_REQ_USED_RING_ADDR_HI, + vring_cfg->used >> 32); + + if (vring_cfg->use_pasid) { + MCDI_POPULATE_DWORD_1(inbuf, VIRTIO_INIT_QUEUE_REQ_FLAGS, + VIRTIO_INIT_QUEUE_REQ_USE_PASID, 1); + MCDI_SET_DWORD(inbuf, VIRTIO_INIT_QUEUE_REQ_PASID, + vring_cfg->pasid); + } + MCDI_SET_WORD(inbuf, VIRTIO_INIT_QUEUE_REQ_MSIX_VECTOR, + vring_cfg->msix_vector); + MCDI_SET_DWORD(inbuf, VIRTIO_INIT_QUEUE_REQ_FEATURES_LO, + vring_cfg->features & 0xFFFFFFFF); + MCDI_SET_DWORD(inbuf, VIRTIO_INIT_QUEUE_REQ_FEATURES_HI, + vring_cfg->features >> 32); + + if (vring_dyn_cfg) { + MCDI_SET_DWORD(inbuf, VIRTIO_INIT_QUEUE_REQ_INITIAL_PIDX, + vring_dyn_cfg->avail_idx); + MCDI_SET_DWORD(inbuf, VIRTIO_INIT_QUEUE_REQ_INITIAL_CIDX, + vring_dyn_cfg->used_idx); + } + MCDI_SET_DWORD(inbuf, VIRTIO_INIT_QUEUE_REQ_MPORT_SELECTOR, + MAE_MPORT_SELECTOR_ASSIGNED); + + rc = efx_mcdi_rpc(efx, MC_CMD_VIRTIO_INIT_QUEUE, inbuf, sizeof(inbuf), + NULL, 0, &outlen); + if (rc) + return rc; + if (outlen != MC_CMD_VIRTIO_INIT_QUEUE_RESP_LEN) + return -EMSGSIZE; + return 0; +} + +int efx_vdpa_vring_destroy(struct efx_vring_ctx *vring_ctx, + struct efx_vring_dyn_cfg *vring_dyn_cfg) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_VIRTIO_FINI_QUEUE_REQ_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_VIRTIO_FINI_QUEUE_RESP_LEN); + struct efx_nic *efx; + ssize_t outlen; + int rc; + + efx = vring_ctx->nic; +#ifdef EFX_NOT_UPSTREAM + pr_info("%s: Called for vf:0x%x type:%u vi:%u\n", __func__, + vring_ctx->vf_index, vring_ctx->mcdi_vring_type, + vring_ctx->vi_index); +#endif + + MCDI_SET_BYTE(inbuf, VIRTIO_FINI_QUEUE_REQ_QUEUE_TYPE, + vring_ctx->mcdi_vring_type); + MCDI_SET_WORD(inbuf, VIRTIO_INIT_QUEUE_REQ_TARGET_VF, + vring_ctx->vf_index); + MCDI_SET_DWORD(inbuf, VIRTIO_INIT_QUEUE_REQ_INSTANCE, + vring_ctx->vi_index); + rc = efx_mcdi_rpc(efx, MC_CMD_VIRTIO_FINI_QUEUE, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + + if (rc) + return rc; + + if (outlen < MC_CMD_VIRTIO_FINI_QUEUE_RESP_LEN) + return -EMSGSIZE; + + if (vring_dyn_cfg) { + vring_dyn_cfg->avail_idx = MCDI_DWORD(outbuf, + VIRTIO_FINI_QUEUE_RESP_FINAL_PIDX); + 
vring_dyn_cfg->used_idx = MCDI_DWORD(outbuf,
+						       VIRTIO_FINI_QUEUE_RESP_FINAL_CIDX);
+	}
+
+	return 0;
+}
+
+int efx_vdpa_get_doorbell_offset(struct efx_vring_ctx *vring_ctx,
+				 u32 *offsetp)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_VIRTIO_GET_DOORBELL_OFFSET_REQ_LEN);
+	MCDI_DECLARE_BUF(outbuf,
+			 MC_CMD_VIRTIO_GET_NET_DOORBELL_OFFSET_RESP_LEN);
+	struct efx_nic *efx = vring_ctx->nic;
+	ssize_t outlen;
+	int rc;
+
+#ifdef EFX_NOT_UPSTREAM
+	pr_info("%s: Called for vf:0x%x type:%u vi:%u\n", __func__,
+		vring_ctx->vf_index, vring_ctx->mcdi_vring_type,
+		vring_ctx->vi_index);
+#endif
+	if (!offsetp)
+		return -EINVAL;
+
+	if (vring_ctx->mcdi_vring_type != MC_CMD_VIRTIO_INIT_QUEUE_REQ_NET_RXQ &&
+	    vring_ctx->mcdi_vring_type != MC_CMD_VIRTIO_INIT_QUEUE_REQ_NET_TXQ) {
+		pci_err(efx->pci_dev,
+			"%s: Invalid Queue type %u\n",
+			__func__, vring_ctx->mcdi_vring_type);
+		return -EINVAL;
+	}
+
+	MCDI_SET_BYTE(inbuf, VIRTIO_GET_DOORBELL_OFFSET_REQ_DEVICE_ID,
+		      MC_CMD_VIRTIO_GET_FEATURES_IN_NET);
+	MCDI_SET_WORD(inbuf, VIRTIO_GET_DOORBELL_OFFSET_REQ_TARGET_VF,
+		      vring_ctx->vf_index);
+	MCDI_SET_DWORD(inbuf, VIRTIO_GET_DOORBELL_OFFSET_REQ_INSTANCE,
+		       vring_ctx->vi_index);
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_VIRTIO_GET_DOORBELL_OFFSET, inbuf,
+			  sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
+	if (rc)
+		return rc;
+
+	if (outlen < MC_CMD_VIRTIO_GET_NET_DOORBELL_OFFSET_RESP_LEN)
+		return -EIO;
+
+	if (vring_ctx->mcdi_vring_type == MC_CMD_VIRTIO_INIT_QUEUE_REQ_NET_RXQ)
+		*offsetp = MCDI_DWORD(outbuf,
+				      VIRTIO_GET_NET_DOORBELL_OFFSET_RESP_RX_DBL_OFFSET);
+	else
+		*offsetp = MCDI_DWORD(outbuf,
+				      VIRTIO_GET_NET_DOORBELL_OFFSET_RESP_TX_DBL_OFFSET);
+
+	return 0;
+}
+
+int efx_vdpa_get_mac_address(struct efx_nic *efx, u8 *mac_address)
+{
+	struct ef100_nic_data *nic_data = efx->nic_data;
+	unsigned int vf_index = nic_data->vf_index;
+	efx_qword_t pciefn;
+	u32 clid;
+	int rc;
+
+	/* Construct PCIE_FUNCTION structure for the VF */
+	EFX_POPULATE_QWORD_3(pciefn,
+			     PCIE_FUNCTION_PF, PCIE_FUNCTION_PF_NULL,
+			     PCIE_FUNCTION_VF, vf_index,
+			     PCIE_FUNCTION_INTF, PCIE_INTERFACE_CALLER);
+	/* look up the VF's client ID */
+	rc = efx_ef100_lookup_client_id(efx, pciefn, &clid);
+	if (rc) {
+		pr_err("%s: Failed to get VF %u client ID, rc %d\n",
+		       __func__, vf_index, rc);
+	} else {
+		pr_info("%s: VF %u client ID %#x\n", __func__, vf_index, clid);
+
+		/* Get the assigned MAC address */
+		rc = ef100_get_mac_address(efx, mac_address, clid,
+					   efx->type->is_vf);
+	}
+
+	return rc;
+}
+
+int efx_vdpa_get_mtu(struct efx_nic *efx, u16 *mtu)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_MAC_EXT_IN_LEN);
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_SET_MAC_V2_OUT_LEN);
+	ssize_t outlen;
+	int rc;
+
+	MCDI_SET_DWORD(inbuf, SET_MAC_EXT_IN_CONTROL, 0);
+	rc = efx_mcdi_rpc(efx, MC_CMD_SET_MAC, inbuf, sizeof(inbuf),
+			  outbuf, sizeof(outbuf), &outlen);
+	if (rc)
+		return rc;
+	if (outlen < MC_CMD_SET_MAC_V2_OUT_LEN)
+		return -EIO;
+
+	*mtu = MCDI_DWORD(outbuf, SET_MAC_V2_OUT_MTU);
+	return 0;
+}
+
+int efx_vdpa_get_link_details(struct efx_nic *efx, u16 *link_up,
+			      u32 *link_speed, u8 *duplex)
+{
+	/* TODO: MCDI invocation for getting link details. */
+	*link_up = VIRTIO_NET_S_LINK_UP;
+	/* Report a dummy link speed of 100Mbps until then. */
+	*link_speed = 100;
+	*duplex = DUPLEX_FULL;
+	return 0;
+}
+#endif
diff --git a/src/drivers/net/ethernet/sfc/mcdi_vdpa.h b/src/drivers/net/ethernet/sfc/mcdi_vdpa.h
new file mode 100644
index 0000000..3c6fa80
--- /dev/null
+++ b/src/drivers/net/ethernet/sfc/mcdi_vdpa.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Driver for Xilinx 
network controllers and boards
+ * Copyright 2020 Xilinx Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_MCDI_VDPA_H
+#define EFX_MCDI_VDPA_H
+
+#if defined(CONFIG_SFC_VDPA)
+
+/* MAE port selector value */
+#define MAE_MPORT_SELECTOR_ASSIGNED 0x1000000
+
+/**
+ * struct efx_vring_ctx - The vring context
+ *
+ * @nic: pointer to the VF's efx_nic object
+ * @vf_index: VF index of the vDPA VF
+ * @vi_index: vi index to be used for queue creation
+ * @mcdi_vring_type: corresponding MCDI vring type
+ */
+struct efx_vring_ctx {
+	struct efx_nic *nic;
+	u32 vf_index;
+	u32 vi_index;
+	u32 mcdi_vring_type;
+};
+
+/**
+ * struct efx_vring_cfg - Configuration for vring creation
+ *
+ * @desc: Descriptor area address of the vring
+ * @avail: Available area address of the vring
+ * @used: Device area address of the vring
+ * @size: Size of the vring
+ * @use_pasid: whether to use a PASID for queue creation
+ * @pasid: PASID to use for queue creation
+ * @msix_vector: MSI-X vector for the queue
+ * @features: negotiated feature bits
+ */
+struct efx_vring_cfg {
+	u64 desc;
+	u64 avail;
+	u64 used;
+	u32 size;
+	bool use_pasid;
+	u32 pasid;
+	u16 msix_vector;
+	u64 features;
+};
+
+/**
+ * struct efx_vring_dyn_cfg - dynamic vring configuration
+ *
+ * @avail_idx: last available index of the vring
+ * @used_idx: last used index of the vring
+ */
+struct efx_vring_dyn_cfg {
+	u32 avail_idx;
+	u32 used_idx;
+};
+
+int efx_vdpa_get_features(struct efx_nic *efx, enum ef100_vdpa_device_type type,
+			  u64 *featuresp);
+
+int efx_vdpa_verify_features(struct efx_nic *efx,
+			     enum ef100_vdpa_device_type type, u64 features);
+
+struct efx_vring_ctx *efx_vdpa_vring_init(struct efx_nic *efx, u32 vi,
+					  enum ef100_vdpa_vq_type vring_type);
+
+void efx_vdpa_vring_fini(struct efx_vring_ctx *vring_ctx);
+
+int efx_vdpa_vring_create(struct efx_vring_ctx *vring_ctx,
+			  struct efx_vring_cfg *vring_cfg,
+			  struct efx_vring_dyn_cfg *vring_dyn_cfg);
+
+int efx_vdpa_vring_destroy(struct efx_vring_ctx *vring_ctx,
+			   struct efx_vring_dyn_cfg *vring_dyn_cfg);
+
+int efx_vdpa_get_doorbell_offset(struct efx_vring_ctx *vring_ctx,
+				 u32 *offsetp);
+int efx_vdpa_get_mac_address(struct efx_nic *efx, u8 *mac_address);
+int efx_vdpa_get_link_details(struct efx_nic *efx, u16 *link_up,
+			      u32 *link_speed, u8 *duplex);
+int efx_vdpa_get_mtu(struct efx_nic *efx, u16 *mtu);
+#endif
+
+#endif
diff --git a/src/drivers/net/ethernet/sfc/mtd.c b/src/drivers/net/ethernet/sfc/mtd.c
new file mode 100644
index 0000000..0dcfc78
--- /dev/null
+++ b/src/drivers/net/ethernet/sfc/mtd.c
@@ -0,0 +1,311 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2017 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/module.h>
+#ifndef EFX_USE_KCOMPAT
+#include <linux/mtd/mtd.h>
+#else
+#include "linux_mtd_mtd.h"
+#endif
+#include <linux/slab.h>
+#ifndef EFX_USE_KCOMPAT
+#include <linux/rtnetlink.h>
+#endif
+
+#include "net_driver.h"
+#include "efx.h"
+
+/* Some partitions should only be written during manufacturing.
Not + * only should they not be rewritten later, but exposing all of them + * can easily fill up the MTD table (16 or 32 entries). + */ +bool efx_allow_nvconfig_writes; +#ifdef EFX_NOT_UPSTREAM +module_param(efx_allow_nvconfig_writes, bool, 0644); +MODULE_PARM_DESC(efx_allow_nvconfig_writes, + "Allow access to static config and backup firmware"); +#endif /* EFX_NOT_UPSTREAM */ + +/* MTD interface */ + +int efx_mtd_init(struct efx_nic *efx) +{ + efx->mtd_struct = kzalloc(sizeof(*efx->mtd_struct), GFP_KERNEL); + if (!efx->mtd_struct) + return -ENOMEM; + + efx->mtd_struct->efx = efx; + INIT_LIST_HEAD(&efx->mtd_struct->list); + +#ifdef EFX_WORKAROUND_87308 + atomic_set(&efx->mtd_struct->probed_flag, 0); + INIT_DELAYED_WORK(&efx->mtd_struct->creation_work, + efx_mtd_creation_work); +#endif + + return 0; +} + +void efx_mtd_free(struct efx_nic *efx) +{ + if (efx->mtd_struct) + kfree(efx->mtd_struct); + + efx->mtd_struct = NULL; +} + +static int efx_mtd_erase(struct mtd_info *mtd, struct erase_info *erase) +{ + struct efx_mtd_partition *part = mtd->priv; + struct efx_nic *efx = part->mtd_struct->efx; + int rc; + + rc = efx->type->mtd_erase(mtd, erase->addr, erase->len); +#if defined(EFX_USE_KCOMPAT) && defined(MTD_ERASE_DONE) + erase->state = rc ? MTD_ERASE_FAILED : MTD_ERASE_DONE; + mtd_erase_callback(erase); +#endif + return rc; +} + +static void efx_mtd_sync(struct mtd_info *mtd) +{ + struct efx_mtd_partition *part = mtd->priv; + struct efx_nic *efx = part->mtd_struct->efx; + int rc; + + rc = efx->type->mtd_sync(mtd); + if (rc) + pr_err("%s: %s sync failed (%d)\n", + part->name, part->dev_type_name, rc); +} + +static void efx_mtd_free_parts(struct kref *kref) +{ + struct efx_mtd *mtd_struct = container_of(kref, struct efx_mtd, parts_kref); + + kfree(mtd_struct->parts); + mtd_struct->parts = NULL; +} + +static void efx_mtd_scrub(struct efx_mtd_partition *part) +{ + /* The MTD wrappers check for NULL, except for the read + * function. We render that harmless by setting the + * size to 0. + */ +#if defined(EFX_USE_KCOMPAT) && defined(EFX_HAVE_MTD_DIRECT_ACCESS) + part->mtd.erase = NULL; + part->mtd.write = NULL; + part->mtd.sync = NULL; +#else + part->mtd._erase = NULL; + part->mtd._write = NULL; + part->mtd._sync = NULL; +#endif + part->mtd.priv = NULL; + part->mtd.size = 0; +} + +#ifdef EFX_NOT_UPSTREAM +/* Free the MTD device after all references have gone away. 
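+ * This is the device release callback that efx_mtd_add() installs
+ * below.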
*/ +static void efx_mtd_release_partition(struct device *dev) +{ + struct mtd_info *mtd = dev_get_drvdata(dev); + struct efx_mtd_partition *part = mtd->priv; + + /* Call mtd_release to remove the /dev/mtdXro node */ + if (dev->type && dev->type->release) + (dev->type->release)(dev); + + efx_mtd_scrub(part); + list_del(&part->node); + kref_put(&part->mtd_struct->parts_kref, efx_mtd_free_parts); +} +#endif + +static void efx_mtd_remove_partition(struct efx_mtd *mtd_struct, + struct efx_mtd_partition *part) +{ + int rc; +#ifdef EFX_WORKAROUND_63680 + unsigned retry; + + if (!part->mtd.size) + return; + + for (retry = 15; retry; retry--) { +#else + for (;;) { +#endif + rc = mtd_device_unregister(&part->mtd); + if (rc != -EBUSY) + break; +#ifdef EFX_WORKAROUND_63680 + /* Try to disown the other process */ + if ((retry <= 5) && (part->mtd.usecount > 0)) { + if (mtd_struct->efx) + netif_err(mtd_struct->efx, hw, mtd_struct->efx->net_dev, + "MTD device %s stuck for %d seconds, disowning it\n", + part->name, 15-retry); + else + printk(KERN_ERR + "sfc: MTD device %s stuck for %d seconds, disowning it\n", + part->name, 15-retry); + part->mtd.usecount--; + } +#endif + ssleep(1); + } +#ifdef EFX_WORKAROUND_63680 + if (rc || !retry) { +#else + if (rc) { +#endif + if (mtd_struct->efx) + netif_err(mtd_struct->efx, hw, mtd_struct->efx->net_dev, + "Error %d removing MTD device %s. A reboot is needed to fix this\n", + rc, part->name); + else + printk(KERN_ERR + "sfc: Error %d removing MTD device %s. A reboot is needed to fix this\n", + rc, part->name); + part->name[0] = '\0'; + } + +#ifndef EFX_NOT_UPSTREAM + efx_mtd_scrub(part); + list_del(&part->node); + kref_put(&mtd_struct->parts_kref, efx_mtd_free_parts); +#endif +} + +int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts, + size_t n_parts) +{ + struct efx_mtd_partition *part; + struct efx_mtd *mtd_struct = efx->mtd_struct; + size_t i; + + mtd_struct->parts = parts; + kref_init(&mtd_struct->parts_kref); + + for (i = 0; i < n_parts; i++) { + part = &parts[i]; + part->mtd_struct = mtd_struct; +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_USE_MTD_WRITESIZE) + if (!part->mtd.writesize) + part->mtd.writesize = 1; +#endif + if (efx_allow_nvconfig_writes && + !(part->mtd.flags & MTD_NO_ERASE)) + part->mtd.flags |= MTD_WRITEABLE; + + part->mtd.owner = THIS_MODULE; + part->mtd.priv = part; + part->mtd.name = part->name; +#if defined(EFX_USE_KCOMPAT) && defined(EFX_HAVE_MTD_DIRECT_ACCESS) + part->mtd.erase = efx_mtd_erase; + part->mtd.read = efx->type->mtd_read; + part->mtd.write = efx->type->mtd_write; + part->mtd.sync = efx_mtd_sync; +#else + part->mtd._erase = efx_mtd_erase; + part->mtd._read = efx->type->mtd_read; + part->mtd._write = efx->type->mtd_write; + part->mtd._sync = efx_mtd_sync; +#endif + + efx->type->mtd_rename(part); + + if (mtd_device_register(&part->mtd, NULL, 0)) + goto fail; + + kref_get(&mtd_struct->parts_kref); + +#ifdef EFX_NOT_UPSTREAM + /* The core MTD functionality does not comply completely with + * the device API. When it does we may need to change the way + * our data is cleaned up. 
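+	 * Until then we install our own release handler; the WARN_ON_ONCE
+	 * below fires if the MTD core ever starts setting dev.release
+	 * itself.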
+ */ + WARN_ON_ONCE(part->mtd.dev.release); + part->mtd.dev.release = efx_mtd_release_partition; +#endif + + /* Add to list in order - efx_mtd_remove() depends on this */ + list_add_tail(&part->node, &mtd_struct->list); + } + + return 0; + +fail: + netif_err(mtd_struct->efx, hw, mtd_struct->efx->net_dev, + "MTD device creation for %s FAILED\n", part->name); + while (i--) + efx_mtd_remove_partition(mtd_struct, &parts[i]); + kref_put(&mtd_struct->parts_kref, efx_mtd_free_parts); + +#if defined(EFX_USE_KCOMPAT) && defined(EFX_HAVE_MTD_TABLE) + /* The number of MTDs is limited (to 16 or 32 by default) and + * we probably reached that limit. + */ + return -EBUSY; +#else + /* Failure is unlikely here, but probably means we're out of memory */ + return -ENOMEM; +#endif +} + +void efx_mtd_remove(struct efx_nic *efx) +{ + struct efx_mtd_partition *part, *next; + struct efx_mtd *mtd_struct = efx->mtd_struct; + + /* This is done here because it can't be performed when the + * mtd_struct is actually freed as efx might not exist + */ + + WARN_ON(efx_dev_registered(efx)); + + if (list_empty(&mtd_struct->list)) + return; + + list_for_each_entry_safe(part, next, &mtd_struct->list, node) + efx_mtd_remove_partition(mtd_struct, part); + kref_put(&mtd_struct->parts_kref, efx_mtd_free_parts); + + efx_mtd_free(efx); +} + +void efx_mtd_rename(struct efx_nic *efx) +{ + struct efx_mtd_partition *part; + struct efx_mtd *mtd_struct = efx->mtd_struct; + + ASSERT_RTNL(); + + list_for_each_entry(part, &mtd_struct->list, node) + efx->type->mtd_rename(part); +} + +#if defined(CONFIG_SFC_MTD) && defined(EFX_WORKAROUND_87308) +void efx_mtd_creation_work(struct work_struct *data) +{ + struct efx_mtd *mtd_struct = container_of(data, struct efx_mtd, + creation_work.work); + + if (atomic_xchg(&mtd_struct->probed_flag, 1) != 0) + return; + + rtnl_lock(); + (void)efx_mtd_probe(mtd_struct->efx); + rtnl_unlock(); +} +#endif diff --git a/src/drivers/net/ethernet/sfc/net_driver.h b/src/drivers/net/ethernet/sfc/net_driver.h new file mode 100644 index 0000000..b9d4740 --- /dev/null +++ b/src/drivers/net/ethernet/sfc/net_driver.h @@ -0,0 +1,2855 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2005-2017 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
+ */
+
+/* Common definitions for all Efx net driver code */
+
+#ifndef EFX_NET_DRIVER_H
+#define EFX_NET_DRIVER_H
+
+/* Uncomment this to enable output from netif_vdbg
+#define VERBOSE_DEBUG 1
+ */
+
+#ifdef EFX_NOT_UPSTREAM
+#define SFC_NAPI_DEBUG 1
+#endif
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/timer.h>
+#include <linux/mdio.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#ifndef EFX_USE_KCOMPAT
+#include <linux/highmem.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+#endif
+#include <linux/rwsem.h>
+#include <linux/vmalloc.h>
+#ifndef EFX_USE_KCOMPAT
+#include <linux/mtd/mtd.h>
+#else
+#include "linux_mtd_mtd.h"
+#endif
+#ifndef EFX_USE_KCOMPAT
+#include <net/busy_poll.h>
+#endif
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_USE_DEVLINK)
+#include <net/devlink.h>
+#endif
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_RXQ_INFO)
+#include <net/xdp.h>
+#endif
+#include
+#if !defined(EFX_USE_KCOMPAT)
+#if defined(CONFIG_XDP_SOCKETS)
+#include <net/xdp_sock_drv.h>
+#endif
+#endif
+
+#ifdef EFX_NOT_UPSTREAM
+#include "config.h"
+#endif
+
+#ifdef EFX_USE_KCOMPAT
+/* Must come before other headers */
+#include "kernel_compat.h"
+#endif
+
+#if defined(EFX_USE_KCOMPAT) && defined(EFX_HAVE_BUSY_POLL)
+#include <net/busy_poll.h>
+#endif
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP)
+#include <linux/bpf.h>
+#endif
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_RHASHTABLE)
+#include <linux/rhashtable.h>
+#endif
+
+#include "enum.h"
+#include "bitfield.h"
+#ifdef EFX_NOT_UPSTREAM
+#include "sfctool.h" /* Provides missing 'struct ethtool_*' declarations */
+#if IS_MODULE(CONFIG_SFC_DRIVERLINK)
+#define EFX_DRIVERLINK_API_VERSION_MINOR EFX_DRIVERLINK_API_VERSION_MINOR_MAX
+#include "driverlink_api.h" /* Indirectly includes filter.h */
+#endif
+#endif
+#include "filter.h"
+
+#include "workarounds.h"
+
+/**************************************************************************
+ *
+ * Build definitions
+ *
+ **************************************************************************/
+
+#ifdef EFX_NOT_UPSTREAM
+#define EFX_DRIVER_VERSION	"5.3.16.1004"
+#endif
+
+#ifdef DEBUG
+#define EFX_WARN_ON_ONCE_PARANOID(x) WARN_ON_ONCE(x)
+#define EFX_WARN_ON_PARANOID(x) WARN_ON(x)
+#else
+#define EFX_WARN_ON_ONCE_PARANOID(x) do {} while (0)
+#define EFX_WARN_ON_PARANOID(x) do {} while (0)
+#endif
+
+#if defined(EFX_NOT_UPSTREAM)
+#define EFX_RX_PAGE_SHARE	1
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
+#ifndef EFX_HAVE_CSUM_LEVEL
+#define EFX_USE_FAKE_VLAN_RX_ACCEL 1
+#endif
+#ifdef EFX_HAVE_SKB_VLANTCI
+#define EFX_USE_FAKE_VLAN_TX_ACCEL 1
+#endif
+#endif
+#endif
+
+#if defined(EFX_NOT_UPSTREAM) && defined(CONFIG_SMP) && defined(CONFIG_XPS) && defined(EFX_HAVE_IRQ_NOTIFIERS)
+#define EFX_USE_IRQ_NOTIFIERS
+#endif
+
+/**************************************************************************
+ *
+ * Efx data structures
+ *
+ **************************************************************************/
+
+/* This limit is arbitrary; it only exists to impose a testing limit and
+ * to avoid large memory allocations in case of a bug.
+ */
+#define EFX_MAX_CHANNELS 255U
+#define EFX_MAX_RX_QUEUES EFX_MAX_CHANNELS
+#define EFX_EXTRA_CHANNEL_PTP 0
+#define EFX_EXTRA_CHANNEL_TC 1
+#define EFX_MAX_EXTRA_CHANNELS 2U
+
+/* Checksum generation is a per-queue option in hardware, so each
+ * queue visible to the networking core is backed by two hardware TX
+ * queues.
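+ * The EFX_TXQ_TYPE_* values below effectively form a bitmask: bit 0
+ * selects outer checksum offload and bit 1 inner checksum offload,
+ * so BOTH_CSUM_OFFLOAD (3) enables both.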
*/ +#define EFX_MAX_CORE_TX_QUEUES EFX_MAX_CHANNELS +#define EFX_TXQ_TYPE_NO_OFFLOAD 0 +#define EFX_TXQ_TYPE_CSUM_OFFLOAD 1 +#define EFX_TXQ_TYPE_INNER_CSUM_OFFLOAD 2 +#define EFX_TXQ_TYPE_BOTH_CSUM_OFFLOAD 3 +#define EFX_TXQ_TYPES 3 +#define EFX_MAX_TX_QUEUES (EFX_TXQ_TYPES * EFX_MAX_CORE_TX_QUEUES) + +/* Maximum possible MTU the driver supports */ +#define EFX_MAX_MTU (9 * 1024) +#define EFX_100_MAX_MTU 9600 + +/* Minimum MTU, from RFC791 (IP) */ +#define EFX_MIN_MTU 68 + +/* Maximum total header length for TSOv2 */ +#define EFX_TSO2_MAX_HDRLEN 208 + +#if !defined(EFX_NOT_UPSTREAM) || defined(EFX_RX_PAGE_SHARE) +/* Size of an RX scatter buffer. Small enough to pack 2 into a 4K page, + * and should be a multiple of the cache line size. + */ +#define EFX_RX_USR_BUF_SIZE (2048 - 256 - XDP_PACKET_HEADROOM) +#else +/* Size of an RX scatter buffer. */ +#define EFX_RX_USR_BUF_SIZE (PAGE_SIZE - L1_CACHE_BYTES) +#endif + +/* If possible, we should ensure cache line alignment at start and end + * of every buffer. Otherwise, we just need to ensure 4-byte + * alignment of the network header. + */ +#if NET_IP_ALIGN == 0 +#define EFX_RX_BUF_ALIGNMENT L1_CACHE_BYTES +#else +#define EFX_RX_BUF_ALIGNMENT 4 +#endif + +/* Forward declare Precision Time Protocol (PTP) support structure. */ +struct efx_ptp_data; +struct hwtstamp_config; + +struct efx_self_tests; + +enum efx_rss_mode { + EFX_RSS_PACKAGES, + EFX_RSS_CORES, + EFX_RSS_HYPERTHREADS, + EFX_RSS_NUMA_LOCAL_CORES, + EFX_RSS_NUMA_LOCAL_HYPERTHREADS, + EFX_RSS_CUSTOM, +}; + +#ifdef EFX_NOT_UPSTREAM +enum efx_performance_profile { + EFX_PERFORMANCE_PROFILE_AUTO, + EFX_PERFORMANCE_PROFILE_THROUGHPUT, + EFX_PERFORMANCE_PROFILE_LATENCY, +}; +#endif + +/** + * struct efx_buffer - A general-purpose DMA buffer + * @addr: host base address of the buffer + * @dma_addr: DMA base address of the buffer + * @len: Buffer length, in bytes + * + * The NIC uses these buffers for its interrupt status registers and + * MAC stats dumps. + */ +struct efx_buffer { + void *addr; + dma_addr_t dma_addr; + unsigned int len; +}; + +/** + * struct efx_tx_buffer - buffer state for a TX descriptor + * @skb: When @flags & %EFX_TX_BUF_SKB, the associated socket buffer to be + * freed when descriptor completes + * @buf: When @flags & %EFX_TX_BUF_HEAP, the associated heap buffer to be + * freed when descriptor completes. + * @xdpf: When @flags & %EFX_TX_BUF_XDP, the XDP frame information; its @data + * member is the associated buffer to drop a page reference on. If the + * kernel does not support this (i.e. ifndef EFX_HAVE_XDP_FRAME_API), then + * instead @buf is used, and holds the buffer to drop a page reference on. + * @option: When @flags & %EFX_TX_BUF_OPTION, an EF10-specific option descriptor. + * @dma_addr: DMA address of the fragment. + * @flags: Flags for allocation and DMA mapping type + * @len: Length of this fragment. + * This field is zero when the queue slot is empty. + * @unmap_len: Length of this fragment to unmap + * @dma_offset: Offset of @dma_addr from the address of the backing DMA mapping. + * Only valid if @unmap_len != 0. 
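+ *
+ * A hedged example of how these fields combine (not from the original
+ * text): a buffer holding the last fragment of an skb, mapped with
+ * dma_map_single(), would carry
+ * @flags == EFX_TX_BUF_SKB | EFX_TX_BUF_MAP_SINGLE; EFX_TX_BUF_CONT is
+ * absent precisely because this is the final descriptor of the packet.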
+ */
+struct efx_tx_buffer {
+ union {
+ const struct sk_buff *skb;
+ void *buf;
+ struct xdp_frame *xdpf;
+ };
+ union {
+ efx_qword_t option; /* EF10 */
+ dma_addr_t dma_addr;
+ };
+ unsigned short flags;
+ unsigned short len;
+ unsigned short unmap_len;
+ unsigned short dma_offset;
+};
+#define EFX_TX_BUF_CONT 1 /* not last descriptor of packet */
+#define EFX_TX_BUF_SKB 2 /* buffer is last part of skb */
+#define EFX_TX_BUF_HEAP 4 /* buffer was allocated with kmalloc() */
+#define EFX_TX_BUF_MAP_SINGLE 8 /* buffer was mapped with dma_map_single() */
+#define EFX_TX_BUF_OPTION 0x10 /* empty buffer for option descriptor */
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_TX)
+#define EFX_TX_BUF_XDP 0x20 /* buffer was sent with XDP */
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_SOCK)
+#define EFX_TX_BUF_XSK 0x80 /* buffer was sent for XSK */
+#endif
+#endif
+#define EFX_TX_BUF_TSO_V3 0x40 /* empty buffer for a TSO_V3 descriptor */
+#define EFX_TX_BUF_EFV 0x100 /* buffer was sent from representor */
+
+/**
+ * struct efx_tx_queue - An Efx TX queue
+ * @efx: The associated Efx NIC
+ * @queue: DMA queue number
+ * @label: Queue label - distinguishes this queue from others sharing evq. Used
+ * as an index into %efx_channel->tx_queues
+ * @csum_offload: Is checksum offloading enabled for this queue?
+ * @tso_version: Version of TSO in use for this queue.
+ * @tso_encap: Is encapsulated TSO supported? Supported in TSOv2 on 8000 series.
+ * @channel: The associated channel
+ * @core_txq: The networking core TX queue structure
+ * @buffer: The software buffer ring
+ * @cb_page: Array of pages of copy buffers
+ * @txd: The hardware descriptor ring
+ * @ptr_mask: The size of the ring minus 1.
+ * @piobuf: PIO buffer region for this TX queue (shared with its partner).
+ * Size of the region is efx_piobuf_size.
+ * @piobuf_offset: Buffer offset to be specified in PIO descriptors
+ * @timestamping: Is timestamping enabled for this channel?
+ * @xdp_tx: Is this an XDP tx queue?
+ * @xsk_pool: reference to xsk_buff_pool assigned to this queue
+ * @handle_vlan: VLAN insertion offload handler.
+ * @handle_tso: TSO offload handler.
+ * @read_count: Current read pointer.
+ * This is the number of buffers that have been removed from both rings.
+ * @read_jiffies: Time that @read_count was last updated.
+ * @old_write_count: The value of @write_count when last checked.
+ * This is here for performance reasons. The xmit path will
+ * only get the up-to-date value of @write_count if this
+ * variable indicates that the queue is empty. This is to
+ * avoid cache-line ping-pong between the xmit path and the
+ * completion path.
+ * @merge_events: Number of TX merged completion events
+ * @doorbell_notify_comp: Number of doorbell updates from the completion path.
+ * @bytes_compl: Number of bytes completed to report to BQL
+ * @pkts_compl: Number of packets completed to report to BQL
+ * @completed_timestamp_major: Top part of the most recent tx timestamp.
+ * @completed_timestamp_minor: Low part of the most recent tx timestamp.
+ * @completion_remainder: The number of outstanding descriptors left over after
+ * the last TX completion event.
+ * @insert_count: Current insert pointer
+ * This is the number of buffers that have been added to the
+ * software ring.
+ * @write_count: Current write pointer
+ * This is the number of buffers that have been added to the
+ * hardware ring.
+ * @packet_write_count: Completable write pointer
+ * This is the write pointer of the last packet written.
+ * Normally this will equal @write_count, but as option descriptors + * don't produce completion events, they won't update this. + * Filled in iff @efx->type->option_descriptors; only used for PIO. + * Thus, this is written and used on EF10, and neither on farch. + * @old_read_count: The value of read_count when last checked. + * This is here for performance reasons. The xmit path will + * only get the up-to-date value of read_count if this + * variable indicates that the queue is full. This is to + * avoid cache-line ping-pong between the xmit path and the + * completion path. + * @tso_bursts: Number of times TSO xmit invoked by kernel + * @tso_long_headers: Number of packets with headers too long for standard + * blocks + * @tso_packets: Number of packets via the TSO xmit path + * @tso_fallbacks: Number of times TSO fallback used + * @pushes: Number of times the TX push feature has been used + * @pio_packets: Number of times the TX PIO feature has been used + * @cb_packets: Number of times the TX copybreak feature has been used + * @doorbell_notify_tx: Number of doorbell updates from the xmit path. + * @notify_count: Count of notified descriptors to the NIC + * @notify_jiffies: Time when @notify_count was last updated. + * @tx_bytes: Number of bytes sent. + * @tx_packets: Number of packets sent. + * @xmit_pending: Are any packets waiting to be pushed to the NIC + * @empty_read_count: If the completion path has seen the queue as empty + * and the transmission path has not yet checked this, the value of + * @read_count bitwise-added to %EFX_EMPTY_COUNT_VALID; otherwise 0. + * @flush_outstanding: non-zero if TX queue flush is pending. + * + * This is a ring buffer of TX fragments. + * Since the TX completion path always executes on the same + * CPU and the xmit path can operate on different CPUs, + * performance is increased by ensuring that the completion + * path and the xmit path operate on different cache lines. + * This is particularly important if the xmit path is always + * executing on one CPU which is different from the completion + * path. There is also a cache line for members which are + * read but not written on the fast path. + */ +struct efx_tx_queue { + /* Members which don't change on the fast path */ + struct efx_nic *efx ____cacheline_aligned_in_smp; + unsigned int queue; + unsigned int label; + unsigned int csum_offload; + unsigned int tso_version; + bool tso_encap; + struct efx_channel *channel; + struct netdev_queue *core_txq; + struct efx_tx_buffer *buffer; + struct efx_buffer *cb_page; + struct efx_buffer txd; + unsigned int ptr_mask; + void __iomem *piobuf; + unsigned int piobuf_offset; + bool timestamping; + bool xdp_tx; +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_SOCK) +#if defined(CONFIG_XDP_SOCKETS) +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XSK_POOL) + struct xsk_buff_pool *xsk_pool; +#else + /** @umem: umem assigned to this queue */ + struct xdp_umem *umem; +#endif +#endif +#endif +#ifdef CONFIG_DEBUG_FS + /** @debug_dir: debugfs directory for this queue */ + struct dentry *debug_dir; +#endif + + /* Function pointers used in the fast path. 
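+ * As a sketch of the calling convention, assuming only the signatures
+ * declared just below (the real call sites live in the TX path
+ * sources):
+ *   skb = tx_queue->handle_vlan(tx_queue, skb);
+ *   rc = tx_queue->handle_tso(tx_queue, skb, &data_mapped);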
*/
+ struct sk_buff* (*handle_vlan)(struct efx_tx_queue*, struct sk_buff*);
+ int (*handle_tso)(struct efx_tx_queue*, struct sk_buff*, bool *);
+
+ /* Members used mainly on the completion path */
+ unsigned int read_count ____cacheline_aligned_in_smp;
+ unsigned long read_jiffies;
+ unsigned int old_write_count;
+ unsigned int merge_events;
+ unsigned int doorbell_notify_comp;
+ unsigned int bytes_compl;
+ unsigned int pkts_compl;
+ u32 completed_timestamp_major;
+ u32 completed_timestamp_minor;
+ unsigned int completion_remainder;
+
+ /* Members used only on the xmit path */
+ unsigned int insert_count ____cacheline_aligned_in_smp;
+ unsigned int write_count;
+ unsigned int packet_write_count;
+ unsigned int old_read_count;
+ unsigned int tso_bursts;
+ unsigned int tso_long_headers;
+ unsigned int tso_packets;
+ unsigned int tso_fallbacks;
+ unsigned int pushes;
+ unsigned int pio_packets;
+ unsigned int cb_packets;
+ unsigned int doorbell_notify_tx;
+ unsigned int notify_count;
+ unsigned long notify_jiffies;
+ /* Statistics to supplement MAC stats */
+ u64 tx_bytes;
+ unsigned long tx_packets;
+
+ bool xmit_pending;
+
+ /* Members shared between paths and sometimes updated */
+ unsigned int empty_read_count ____cacheline_aligned_in_smp;
+#define EFX_EMPTY_COUNT_VALID 0x80000000
+ atomic_t flush_outstanding;
+};
+
+/**
+ * struct efx_rx_buffer - An Efx RX data buffer
+ * @dma_addr: DMA base address of the buffer
+ * @page: The associated page buffer.
+ * Will be %NULL if the buffer slot is currently free.
+ * @addr: virtual address of buffer
+ * @handle: handle to the umem buffer
+ * @xsk_buf: umem buffer
+ * @page_offset: If pending this is the offset in @page of the DMA base address.
+ * If completed this is the offset in @page of the Ethernet header.
+ * @len: If pending this is the length of a DMA descriptor.
+ * If completed this is the received length, excluding hash prefix.
+ * @flags: Flags for buffer and packet state. These are only set on the
+ * first buffer of a scattered packet.
+ * @vlan_tci: VLAN tag in host byte order. If the EFX_RX_BUF_VLAN_XTAG
+ * flag is set, the tag has been moved here.
+ */
+struct efx_rx_buffer {
+ dma_addr_t dma_addr;
+ union {
+ struct page *page;
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_SOCK)
+#if defined(CONFIG_XDP_SOCKETS)
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_USE_XSK_BUFFER_ALLOC)
+ struct xdp_buff *xsk_buf;
+#else
+ struct {
+ void *addr;
+ u64 handle;
+ };
+#endif
+#endif
+#endif
+ };
+
+ u16 page_offset;
+ u16 len;
+ u16 flags;
+ u16 vlan_tci;
+};
+#define EFX_RX_BUF_LAST_IN_PAGE 0x0001
+#define EFX_RX_PKT_CSUMMED 0x0002
+#define EFX_RX_PKT_DISCARD 0x0004
+#if defined(EFX_NOT_UPSTREAM) && defined(EFX_USE_FAKE_VLAN_RX_ACCEL)
+#define EFX_RX_PKT_VLAN 0x0008
+#endif
+#define EFX_RX_PKT_IPV4 0x0010
+#define EFX_RX_PKT_IPV6 0x0020
+#define EFX_RX_PKT_TCP 0x0040
+#define EFX_RX_PKT_PREFIX_LEN 0x0080 /* length is in prefix only */
+#define EFX_RX_PAGE_IN_RECYCLE_RING 0x0100
+#define EFX_RX_PKT_CSUM_LEVEL 0x0200
+#define EFX_RX_BUF_VLAN_XTAG 0x8000
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_SOCK)
+#define EFX_RX_BUF_ZC 0x0400
+#define EFX_RX_BUF_XSK_REUSE 0x0800
+#endif
+
+/**
+ * struct efx_rx_page_state - Page-based rx buffer state
+ * @dma_addr: The dma address of this page.
+ *
+ * Inserted at the start of every page allocated for receive buffers.
+ * Used to facilitate sharing dma mappings between recycled rx buffers
+ * and those passed up to the kernel.
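+ *
+ * As a minimal sketch implied by the field descriptions (not code from
+ * this file), a buffer's DMA address can be recovered from its page
+ * alone:
+ *   struct efx_rx_page_state *state = page_address(rx_buf->page);
+ *   dma_addr_t dma_addr = state->dma_addr + rx_buf->page_offset;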
+ */ +struct efx_rx_page_state { + dma_addr_t dma_addr; +/* private: */ + unsigned int __pad[0] ____cacheline_aligned; +}; + +#if defined(EFX_NOT_UPSTREAM) && defined(EFX_USE_SFC_LRO) + +/** + * struct efx_ssr_conn - Connection state for Soft Segment Reassembly (SSR) aka LRO + * @link: Link for hash table and free list. + * @active_link: Link for active_conns list + * @l2_id: Identifying information from layer 2 + * @conn_hash: Hash of connection 4-tuple + * @source: Source TCP port number + * @dest: Destination TCP port number + * @n_in_order_pkts: Number of in-order packets with payload. + * @next_seq: Next in-order sequence number. + * @last_pkt_jiffies: Time we last saw a packet on this connection. + * @skb: The SKB we are currently holding. + * If %NULL, then all following fields are undefined. + * @skb_tail: The tail of the frag_list of SKBs we're holding. + * Only valid after at least one merge (and when not in page-mode). + * @th_last: The TCP header of the last packet merged. + * @next_buf: The next RX buffer to process. + * @next_eh: Ethernet header of the next buffer. + * @next_iph: IP header of the next buffer. + * @delivered: True if we've delivered a payload packet up this interrupt. + * @sum_len: IPv4 tot_len or IPv6 payload_len for @skb. + */ +struct efx_ssr_conn { + struct list_head link; + struct list_head active_link; + u32 l2_id; + u32 conn_hash; + __be16 source, dest; + int n_in_order_pkts; + unsigned int next_seq; + unsigned int last_pkt_jiffies; + struct sk_buff *skb; + struct sk_buff *skb_tail; + struct tcphdr *th_last; + struct efx_rx_buffer next_buf; + char *next_eh; + void *next_iph; + int delivered; + u16 sum_len; +}; + +/** + * struct efx_ssr_state - Port state for Soft Segment Reassembly (SSR) aka LRO + * @efx: The associated NIC. + * @conns_mask: Number of hash buckets - 1. + * @conns: Hash buckets for tracked connections. + * @conns_n: Length of linked list for each hash bucket. + * @active_conns: Connections that are holding a packet. + * Connections are self-linked when not in this list. + * @free_conns: Free efx_ssr_conn instances. + * @last_purge_jiffies: The value of jiffies last time we purged idle + * connections. + * @n_merges: Number of packets absorbed by SSR. + * @n_bursts: Number of bursts spotted by SSR. + * @n_slow_start: Number of packets not merged because connection may be in + * slow-start. + * @n_misorder: Number of out-of-order packets seen in tracked streams. + * @n_too_many: Incremented when we're trying to track too many streams. + * @n_new_stream: Number of distinct streams we've tracked. + * @n_drop_idle: Number of streams discarded because they went idle. + * @n_drop_closed: Number of streams that have seen a FIN or RST. + */ +struct efx_ssr_state { + struct efx_nic *efx; + unsigned int conns_mask; + struct list_head *conns; + unsigned int *conns_n; + struct list_head active_conns; + struct list_head free_conns; + unsigned int last_purge_jiffies; + unsigned int n_merges; + unsigned int n_bursts; + unsigned int n_slow_start; + unsigned int n_misorder; + unsigned int n_too_many; + unsigned int n_new_stream; + unsigned int n_drop_idle; + unsigned int n_drop_closed; +}; + +#endif + +/** + * struct efx_rx_queue - An Efx RX queue + * @efx: The associated Efx NIC + * @buffer: The software buffer ring + * @rxd: The hardware descriptor ring + * @queue: Hardware RXQ instance number + * @label: Hardware RXQ label + * @core_index: Index of network core RX queue. Will be >= 0 iff this + * is associated with a real RX queue. 
+ * @ptr_mask: The size of the ring minus 1. + * @refill_enabled: Enable refill whenever fill level is low + * @flush_pending: Set when a RX flush is pending. Has the same lifetime as + * @rxq_flush_pending. + * @grant_credits: Posted RX descriptors need to be granted to the MAE with + * %MC_CMD_MAE_COUNTERS_STREAM_GIVE_CREDITS. For %EFX_EXTRA_CHANNEL_TC, + * and only supported on EF100. + * @added_count: Number of buffers added to the receive queue. + * @notified_count: Number of buffers given to NIC (<= @added_count). + * @granted_count: Number of buffers granted to the MAE (<= @notified_count). + * @removed_count: Number of buffers removed from the receive queue. + * @scatter_n: Used by NIC specific receive code. + * @scatter_len: Used by NIC specific receive code. + * @rx_pkt_n_frags: Number of fragments in next packet to be delivered by + * __efx_rx_packet(), or zero if there is none + * @rx_pkt_index: Ring index of first buffer for next packet to be delivered + * by __efx_rx_packet(), if @rx_pkt_n_frags != 0 + * @page_ring: The ring to store DMA mapped pages for reuse. + * @page_add: Counter to calculate the write pointer for the recycle ring. + * @page_remove: Counter to calculate the read pointer for the recycle ring. + * @page_recycle_count: The number of pages that have been recycled. + * @page_recycle_failed: The number of pages that couldn't be recycled because + * the kernel still held a reference to them. + * @page_recycle_full: The number of pages that were released because the + * recycle ring was full. + * @page_repost_count: The number of pages that were reposted to the RX queue. + * @page_ptr_mask: The number of pages in the RX recycle ring minus 1. + * @max_fill: RX descriptor maximum fill level (<= ring size) + * @fast_fill_trigger: RX descriptor fill level that will trigger a fast fill + * (<= @max_fill) + * @min_fill: RX descriptor minimum non-zero fill level. + * This records the minimum fill level observed when a ring + * refill was triggered. + * @recycle_count: RX buffer recycle counter. + * @slow_fill_count: Number of slow fill events sent. + * @slow_fill_work: workitem used to defer ring refill to process context. 
+ * @grant_work: workitem used to grant credits to the MAE if @grant_credits + * @rx_packets: Count of RX packets + * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors + * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors + * @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors + * @n_rx_outer_ip_hdr_chksum_err: Count of RX outer IP header checksum errors + * @n_rx_outer_tcp_udp_chksum_err: Count of RX outer TCP and UDP checksum errors + * @n_rx_inner_ip_hdr_chksum_err: Count of RX inner IP header checksum errors + * @n_rx_inner_tcp_udp_chksum_err: Count of RX inner TCP and UDP checksum errors + * @n_rx_eth_crc_err: Count of RX CRC errors + * @n_rx_mcast_mismatch: Count of unmatched multicast frames + * @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors + * @n_rx_overlength: Count of RX_OVERLENGTH errors + * @n_rx_nodesc_trunc: Number of RX packets truncated and then dropped due to + * lack of descriptors + * @n_rx_merge_events: Number of RX merged completion events + * @n_rx_merge_packets: Number of RX packets completed by merged events + * @n_rx_xdp_drops: Count of RX packets intentionally dropped due to XDP + * @n_rx_xdp_bad_drops: Count of RX packets dropped due to XDP errors + * @n_rx_xdp_tx: Count of RX packets retransmitted due to XDP + * @n_rx_xdp_redirect: Count of RX packets redirected to a different NIC by XDP + * @n_rx_mport_bad: Count of RX packets dropped because their ingress mport was + * not recognised + * @failed_flush_count: Count of failed queue flush attempts. + * @debug_dir: debugfs directory for this queue + * @xsk_pool: reference to xsk_buff_pool assigned to this queue + * @xdp_rxq_info: XDP specific RX queue information. + * @receive_skb: Handle an skb ready to be passed to netif_receive_skb() + * @receive_raw: Handle an RX buffer ready to be passed to __efx_rx_packet(). + * Also takes the value of the USER_MARK extracted from the prefix. 
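+ *
+ * The ring's fill level follows from the counters above (illustrative
+ * arithmetic, not stated explicitly in this file):
+ *   fill_level = added_count - removed_count;
+ * and a refill is triggered once fill_level drops below
+ * @fast_fill_trigger, refilling up to at most @max_fill buffers.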
+ */ +struct efx_rx_queue { + struct efx_nic *efx; + struct efx_rx_buffer *buffer; + struct efx_buffer rxd; + int queue; + int label; + int core_index; + unsigned int ptr_mask; + bool refill_enabled; + bool flush_pending; + bool grant_credits; + unsigned int added_count; + unsigned int notified_count; + unsigned int granted_count; + unsigned int removed_count; + unsigned int scatter_n; + unsigned int scatter_len; + + unsigned int rx_pkt_n_frags; + unsigned int rx_pkt_index; + +#if !defined(EFX_NOT_UPSTREAM) || defined(EFX_RX_PAGE_SHARE) + struct page **page_ring; + unsigned int page_add; + unsigned int page_remove; + unsigned int page_recycle_count; + unsigned int page_recycle_failed; + unsigned int page_recycle_full; + unsigned int page_repost_count; + unsigned int page_ptr_mask; +#endif + unsigned int max_fill; + unsigned int fast_fill_trigger; + unsigned int min_fill; + unsigned int recycle_count; + unsigned int slow_fill_count; + struct delayed_work slow_fill_work; + struct work_struct grant_work; + /* Statistics to supplement MAC stats */ + unsigned long rx_packets; + unsigned int n_rx_tobe_disc; + unsigned int n_rx_ip_hdr_chksum_err; + unsigned int n_rx_tcp_udp_chksum_err; + unsigned int n_rx_outer_ip_hdr_chksum_err; + unsigned int n_rx_outer_tcp_udp_chksum_err; + unsigned int n_rx_inner_ip_hdr_chksum_err; + unsigned int n_rx_inner_tcp_udp_chksum_err; + unsigned int n_rx_eth_crc_err; + unsigned int n_rx_mcast_mismatch; + unsigned int n_rx_frm_trunc; + unsigned int n_rx_overlength; + unsigned int n_rx_nodesc_trunc; + unsigned int n_rx_merge_events; + unsigned int n_rx_merge_packets; +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP) + unsigned int n_rx_xdp_drops; + unsigned int n_rx_xdp_bad_drops; + unsigned int n_rx_xdp_tx; + unsigned int n_rx_xdp_redirect; +#endif + unsigned int n_rx_mport_bad; + unsigned int failed_flush_count; + +#if defined(EFX_NOT_UPSTREAM) && defined(EFX_USE_SFC_LRO) + /** @ssr: state for Soft Segment Reassembly (LRO) */ + struct efx_ssr_state ssr; +#endif + +#ifdef CONFIG_DEBUG_FS + struct dentry *debug_dir; +#endif + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_SOCK) +#if defined(CONFIG_XDP_SOCKETS) +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XSK_POOL) + struct xsk_buff_pool *xsk_pool; +#else + /** @umem: umem assigned to this queue. */ + struct xdp_umem *umem; +#endif +#endif +#endif /* EFX_HAVE_XSK_POOL */ + +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_USE_XSK_BUFFER_ALLOC) +#if defined(CONFIG_XDP_SOCKETS) + /** @zca: zero copy allocator for rx_queue */ + struct zero_copy_allocator zca; +#endif +#endif /* EFX_HAVE_XDP_SOCK */ + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_RXQ_INFO) + struct xdp_rxq_info xdp_rxq_info; +#endif /* EFX_HAVE_XDP_RXQ_INFO */ + + /* Special RX handlers (normally %NULL) */ + bool (*receive_skb)(struct efx_rx_queue *, struct sk_buff *); + bool (*receive_raw)(struct efx_rx_queue *, u32); +}; + +#ifdef CONFIG_SFC_PTP +enum efx_sync_events_state { + SYNC_EVENTS_DISABLED = 0, + SYNC_EVENTS_QUIESCENT, + SYNC_EVENTS_REQUESTED, + SYNC_EVENTS_VALID, +}; +#endif + +/* The reserved RSS context value */ +#define EFX_MCDI_RSS_CONTEXT_INVALID 0xffffffff +/** + * struct efx_rss_context - A user-defined RSS context for filtering + * @list: node of linked list on which this struct is stored + * @context_id: the RSS_CONTEXT_ID returned by MC firmware, or + * %EFX_MCDI_RSS_CONTEXT_INVALID if this context is not present on the NIC. + * For Siena, 0 if RSS is active, else %EFX_MCDI_RSS_CONTEXT_INVALID. 
+ * @user_id: the rss_context ID exposed to userspace over ethtool. + * @flags: Hashing flags for this RSS context + * @rx_hash_key: Toeplitz hash key for this RSS context + * @rx_indir_table: Indirection table for this RSS context + */ +struct efx_rss_context { + struct list_head list; + u32 context_id; + u32 user_id; + u32 flags; +#ifdef EFX_NOT_UPSTREAM + /** + * @num_queues: Number of queues targeted by this context + * (set at alloc time). + */ + u8 num_queues; +#endif + u8 rx_hash_key[40]; + u32 rx_indir_table[128]; +}; + +/** + * struct efx_channel - An Efx channel + * @efx: Associated Efx NIC + * @channel: Channel instance number + * @type: Channel type definition + * @list: Link to the previous and next channel. + * @eventq_init: Event queue initialised flag + * @enabled: Channel enabled indicator + * @tx_coalesce_doorbell: Coalescing of doorbell notifications enabled + * for this channel + * @holdoff_doorbell: Flag indicating that the channel is being processed + * @irq: IRQ number (MSI and MSI-X only) + * @irq_moderation_us: IRQ moderation value (in microseconds) + * @napi_dev: Net device used with NAPI + * @napi_str: NAPI control structure + * @eventq: Event queue buffer + * @eventq_mask: Event queue pointer mask + * @eventq_read_ptr: Event queue read pointer + * @event_test_cpu: Last CPU to handle interrupt or test event for this channel + * @irq_count: Number of IRQs since last adaptive moderation decision + * @irq_mod_score: IRQ moderation score + * @rfs_filter_count: number of accelerated RFS filters currently in place; + * equals the count of @rps_flow_id slots filled + * @rfs_last_expiry: value of jiffies last time some accelerated RFS filters + * were checked for expiry + * @rfs_expire_index: next accelerated RFS filter ID to check for expiry + * @n_rfs_succeeded: number of successful accelerated RFS filter insertions + * @n_rfs_failed: number of failed accelerated RFS filter insertions + * @filter_work: Work item for efx_filter_rfs_expire() + * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS, + * indexed by filter ID + * @debug_dir: debugfs directory for this channel + * @rx_list: list of SKBs from current RX, awaiting processing + * @zc: zero-copy enabled on channel + * @rx_queue: RX queue for this channel + * @tx_queue_count: Number of TX queues pointed to by %tx_queues + * @tx_queues: Pointer to TX queues for this channel + * @sync_events_state: Current state of sync events on this channel + * @sync_timestamp_major: Major part of the last ptp sync event + * @sync_timestamp_minor: Minor part of the last ptp sync event + * @irq_mem_node: Memory NUMA node of interrupt + * @irq_affinity: IRQ affinity notifier context + * @irq_affinity.notifier: IRQ notifier for changes to irq affinity + * @irq_affinity.complete: IRQ notifier completion structure + * + * A channel comprises an event queue, at least one TX queue, at least + * one RX queue, and an associated tasklet for processing the event + * queue. 
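+ *
+ * An illustrative layout (combining the channel-ordering rules
+ * described for &struct efx_nic below): with four combined channels and
+ * two XDP channels, channels 0-3 each drive @rx_queue and @tx_queues,
+ * while channels 4-5 sit at the end of the list and host only XDP TX
+ * queues.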
+ */ +struct efx_channel { + struct efx_nic *efx; + int channel; + const struct efx_channel_type *type; + struct list_head list; + bool eventq_init; + bool enabled; + bool tx_coalesce_doorbell; + bool holdoff_doorbell; + int irq; + unsigned int irq_moderation_us; + struct net_device *napi_dev; + struct napi_struct napi_str; +#if defined(EFX_USE_KCOMPAT) && defined(EFX_WANT_DRIVER_BUSY_POLL) +#ifdef CONFIG_NET_RX_BUSY_POLL + /** @busy_poll_state: busy poll state */ + unsigned long busy_poll_state; + /** @poll_lock: Protect against concurrent busy poll and NAPI */ + spinlock_t poll_lock; +#endif +#endif + struct efx_buffer eventq; + unsigned int eventq_mask; + unsigned int eventq_read_ptr; + int event_test_cpu; + +#ifdef EFX_NOT_UPSTREAM +#ifdef SFC_NAPI_DEBUG + /** @last_irq_jiffies: time of last hardware interrupt */ + int last_irq_jiffies; + /** @last_napi_poll_jiffies: time of last NAPI poll start */ + int last_napi_poll_jiffies; + /** @last_napi_poll_end_jiffies: time of last NAPI poll end */ + int last_napi_poll_end_jiffies; + /** @last_budget: budget for last NAPI poll */ + int last_budget; + /** @last_spent: budget consumed in last NAPI poll */ + int last_spent; + /** @last_complete_done: time of last completed NAPI poll */ + bool last_complete_done; + /** @last_irq_reprime_jiffies: Time of last interrupt reprime */ + int last_irq_reprime_jiffies; +#endif +#endif + + unsigned int irq_count; + unsigned int irq_mod_score; +#ifdef CONFIG_RFS_ACCEL + unsigned int rfs_filter_count; + unsigned int rfs_last_expiry; + unsigned int rfs_expire_index; + unsigned int n_rfs_succeeded; + unsigned int n_rfs_failed; + struct delayed_work filter_work; +#define RPS_FLOW_ID_INVALID 0xFFFFFFFF + u32 *rps_flow_id; +#endif + +#ifdef CONFIG_DEBUG_FS + struct dentry *debug_dir; +#endif +#if defined(EFX_USE_KCOMPAT) && defined(EFX_NEED_SAVE_MSIX_MESSAGES) + /** @msix_msg: saved message data from MSI-X table */ + struct msi_msg msix_msg; +#endif + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_SKB__LIST) + struct list_head *rx_list; +#else + struct sk_buff_head *rx_list; +#endif + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_SOCK) + bool zc; +#endif + struct efx_rx_queue rx_queue; + unsigned int tx_queue_count; + struct efx_tx_queue *tx_queues; + +#ifdef CONFIG_SFC_PTP + enum efx_sync_events_state sync_events_state; + u32 sync_timestamp_major; + u32 sync_timestamp_minor; +#endif + + int irq_mem_node; + +#ifdef EFX_USE_IRQ_NOTIFIERS + struct { + struct irq_affinity_notify notifier; + struct completion complete; + } irq_affinity; +#endif +}; + +#if defined(EFX_USE_KCOMPAT) && defined(EFX_WANT_DRIVER_BUSY_POLL) +#ifdef CONFIG_NET_RX_BUSY_POLL +enum efx_channel_busy_poll_state { + EFX_CHANNEL_STATE_IDLE = 0, + EFX_CHANNEL_STATE_POLL_BIT = 1, + EFX_CHANNEL_STATE_POLL = BIT(1), + EFX_CHANNEL_STATE_DISABLE_BIT = 2, +}; + +static inline void efx_channel_busy_poll_init(struct efx_channel *channel) +{ + WRITE_ONCE(channel->busy_poll_state, EFX_CHANNEL_STATE_IDLE); + spin_lock_init(&channel->poll_lock); +} + +/* Called from efx_busy_poll(). 
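+ * Takes ownership of the channel for busy polling by atomically
+ * stepping @busy_poll_state from IDLE to POLL; a behaviour sketch of
+ * the cmpxchg() below:
+ *   IDLE -> POLL : lock acquired, returns true
+ *   otherwise    : channel is busy or disabled, returns false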
*/
+static inline bool efx_channel_try_lock_poll(struct efx_channel *channel)
+{
+ return cmpxchg(&channel->busy_poll_state, EFX_CHANNEL_STATE_IDLE,
+ EFX_CHANNEL_STATE_POLL) == EFX_CHANNEL_STATE_IDLE;
+}
+
+static inline void efx_channel_unlock_poll(struct efx_channel *channel)
+{
+ clear_bit_unlock(EFX_CHANNEL_STATE_POLL_BIT, &channel->busy_poll_state);
+}
+
+static inline bool efx_channel_busy_polling(struct efx_channel *channel)
+{
+ return test_bit(EFX_CHANNEL_STATE_POLL_BIT, &channel->busy_poll_state);
+}
+
+static inline void efx_channel_enable(struct efx_channel *channel)
+{
+ clear_bit_unlock(EFX_CHANNEL_STATE_DISABLE_BIT,
+ &channel->busy_poll_state);
+}
+
+/* Stop further polling or napi access.
+ * Returns false if the channel is currently busy polling.
+ */
+static inline bool efx_channel_disable(struct efx_channel *channel)
+{
+ set_bit(EFX_CHANNEL_STATE_DISABLE_BIT, &channel->busy_poll_state);
+ /* Implicit barrier in efx_channel_busy_polling() */
+ return !efx_channel_busy_polling(channel);
+}
+
+#else /* CONFIG_NET_RX_BUSY_POLL */
+
+static inline void efx_channel_busy_poll_init(struct efx_channel *channel)
+{
+}
+
+static inline bool efx_channel_lock_napi(struct efx_channel *channel)
+{
+ return true;
+}
+
+static inline void efx_channel_unlock_napi(struct efx_channel *channel)
+{
+}
+
+static inline bool efx_channel_try_lock_poll(struct efx_channel *channel)
+{
+ return false;
+}
+
+static inline void efx_channel_unlock_poll(struct efx_channel *channel)
+{
+}
+
+static inline bool efx_channel_busy_polling(struct efx_channel *channel)
+{
+ return false;
+}
+
+static inline void efx_channel_enable(struct efx_channel *channel)
+{
+}
+
+static inline bool efx_channel_disable(struct efx_channel *channel)
+{
+ return true;
+}
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+#endif /* EFX_WANT_DRIVER_BUSY_POLL */
+
+/**
+ * struct efx_msi_context - Context for each MSI
+ * @efx: The associated NIC
+ * @index: Index of the channel/IRQ
+ * @name: Name of the channel/IRQ
+ *
+ * Unlike &struct efx_channel, this is never reallocated and is always
+ * safe for the IRQ handler to access.
+ */
+struct efx_msi_context {
+ struct efx_nic *efx;
+ unsigned int index;
+ char name[IFNAMSIZ + 6];
+};
+
+/**
+ * struct efx_channel_type - distinguishes traffic and extra channels
+ * @handle_no_channel: Handle failure to allocate an extra channel
+ * @pre_probe: Set up extra state prior to initialisation
+ * @start: called early in efx_start_channels()
+ * @stop: called early in efx_stop_channels()
+ * @post_remove: Tear down extra state after finalisation, if allocated.
+ * May be called on channels that have not been probed.
+ * @get_name: Generate the channel's name (used for its IRQ handler)
+ * @receive_skb: Handle an skb ready to be passed to netif_receive_skb()
+ * @receive_raw: Handle an RX buffer ready to be passed to __efx_rx_packet()
+ * @keep_eventq: Flag for whether event queue should be kept initialised
+ * while the device is stopped
+ * @hide_tx: Flag set if the TX queue is used for internal driver purposes
+ * and is not exposed to the kernel.
+ * @get_queue_name: Get channel RX/TX queue name + */ +struct efx_channel_type { + void (*handle_no_channel)(struct efx_nic *); + int (*pre_probe)(struct efx_channel *); + int (*start)(struct efx_channel *); + void (*stop)(struct efx_channel *); + void (*post_remove)(struct efx_channel *); + void (*get_name)(struct efx_channel *, char *buf, size_t len); + bool (*receive_skb)(struct efx_rx_queue *, struct sk_buff *); + bool (*receive_raw)(struct efx_rx_queue *, u32); + bool keep_eventq; + bool hide_tx; + const char *(*get_queue_name)(struct efx_channel *, bool tx); +}; + +enum efx_led_mode { + EFX_LED_OFF = 0, + EFX_LED_ON = 1, + EFX_LED_DEFAULT = 2 +}; + +#define STRING_TABLE_LOOKUP(val, member) \ + ((val) < member ## _max) && member ## _names[val] ? member ## _names[val] : "(invalid)" + +extern const char *const efx_loopback_mode_names[]; +extern const unsigned int efx_loopback_mode_max; +#define LOOPBACK_MODE(efx) \ + STRING_TABLE_LOOKUP((efx)->loopback_mode, efx_loopback_mode) + +extern const char *const efx_interrupt_mode_names[]; +extern const unsigned int efx_interrupt_mode_max; +#define INT_MODE(efx) \ + STRING_TABLE_LOOKUP(efx->interrupt_mode, efx_interrupt_mode) + +void efx_get_udp_tunnel_type_name(u16 type, char *buf, size_t buflen); + +enum efx_int_mode { + /* Be careful if altering to correct macro below */ + EFX_INT_MODE_MSIX = 0, + EFX_INT_MODE_MSI = 1, +#if defined(EFX_NOT_UPSTREAM) && defined(CONFIG_SFC_BUSYPOLL) + EFX_INT_MODE_POLLED = 2, +#endif + EFX_INT_MODE_MAX /* Insert any new items before this */ +}; + +enum nic_state { + STATE_UNINIT = 0, /* device being probed/removed */ + STATE_PROBED, /* hardware probed */ + STATE_NET_DOWN, /* netdev registered */ + STATE_NET_ALLOCATED, /* resources allocated but no traffic */ + STATE_NET_UP, /* ready for traffic */ + STATE_DISABLED, /* device disabled due to hardware errors */ + STATE_VDPA, /* device bar_config changed to vDPA */ + + STATE_RECOVERY = 0x100,/* recovering from PCI error */ + STATE_FROZEN = 0x200, /* frozen by power management */ +}; + +static inline bool efx_net_active(enum nic_state state) +{ + return state == STATE_NET_DOWN || + state == STATE_NET_UP || + state == STATE_NET_ALLOCATED; +} + +static inline bool efx_net_allocated(enum nic_state state) +{ + return state == STATE_NET_UP || + state == STATE_NET_ALLOCATED; +} + +static inline bool efx_frozen(enum nic_state state) +{ + return state & STATE_FROZEN; +} + +static inline bool efx_recovering(enum nic_state state) +{ + return state & STATE_RECOVERY; +} + +static inline enum nic_state efx_freeze(enum nic_state state) +{ + WARN_ON(!efx_net_active(state)); + return state | STATE_FROZEN; +} + +static inline enum nic_state efx_thaw(enum nic_state state) +{ + WARN_ON(!efx_frozen(state)); + return state & ~STATE_FROZEN; +} + +static inline enum nic_state efx_begin_recovery(enum nic_state state) +{ + WARN_ON(!efx_net_active(state)); + return state | STATE_RECOVERY; +} + +static inline enum nic_state efx_end_recovery(enum nic_state state) +{ + WARN_ON(!efx_recovering(state)); + return state & ~STATE_RECOVERY; +} + +/* Forward declaration */ +struct efx_nic; + +/* Pseudo bit-mask flow control field */ +#define EFX_FC_RX FLOW_CTRL_RX +#define EFX_FC_TX FLOW_CTRL_TX +#define EFX_FC_AUTO 4 + +/** + * struct efx_link_state - Current state of the link + * @up: Link is up + * @fd: Link is full-duplex + * @fc: Actual flow control flags + * @speed: Link speed (Mbps) + * @ld_caps: Local device capabilities + * @lp_caps: Link partner capabilities + */ +struct efx_link_state { 
+ bool up; + bool fd; + u8 fc; + unsigned int speed; + u32 ld_caps; + u32 lp_caps; +}; + +static inline bool efx_link_state_equal(const struct efx_link_state *left, + const struct efx_link_state *right) +{ + return left->up == right->up && left->fd == right->fd && + left->fc == right->fc && left->speed == right->speed; +} + +/** + * enum efx_phy_mode - PHY operating mode flags + * @PHY_MODE_NORMAL: on and should pass traffic + * @PHY_MODE_TX_DISABLED: on with TX disabled + * @PHY_MODE_LOW_POWER: set to low power through ethtool + * @PHY_MODE_OFF: switched off through external control + * @PHY_MODE_SPECIAL: on but will not pass traffic + */ +enum efx_phy_mode { + PHY_MODE_NORMAL = 0, + PHY_MODE_TX_DISABLED = 1, + PHY_MODE_LOW_POWER = 2, + PHY_MODE_OFF = 4, + PHY_MODE_SPECIAL = 8, +}; + +static inline bool efx_phy_mode_disabled(enum efx_phy_mode mode) +{ + return !!(mode & ~PHY_MODE_TX_DISABLED); +} + +/** + * struct efx_hw_stat_desc - Description of a hardware statistic + * @name: Name of the statistic as visible through ethtool, or %NULL if + * it should not be exposed + * @dma_width: Width in bits (0 for non-DMA statistics) + * @offset: Offset within stats (ignored for non-DMA statistics) + */ +struct efx_hw_stat_desc { + const char *name; + u16 dma_width; + u16 offset; +}; + +/* Efx Error condition statistics */ +struct efx_nic_errors { + atomic_t missing_event; + atomic_t rx_reset; + atomic_t rx_desc_fetch; + atomic_t tx_desc_fetch; + atomic_t spurious_tx; + +#ifdef CONFIG_DEBUG_FS + struct dentry *debug_dir; +#endif +}; + +struct vfdi_status; +struct sfc_rdma_dev; + +/* Useful collections of RSS flags. Caller needs mcdi_pcol.h. */ +#define RSS_CONTEXT_FLAGS_DEFAULT (1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN |\ + 1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_LBN |\ + 1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_LBN |\ + 1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_LBN) +#define RSS_CONTEXT_FLAGS_ADDITIONAL_MASK ~0xff +#define RSS_MODE_HASH_ADDRS (1 << RSS_MODE_HASH_SRC_ADDR_LBN |\ + 1 << RSS_MODE_HASH_DST_ADDR_LBN) +#define RSS_MODE_HASH_PORTS (1 << RSS_MODE_HASH_SRC_PORT_LBN |\ + 1 << RSS_MODE_HASH_DST_PORT_LBN) +#define RSS_MODE_HASH_4TUPLE (RSS_MODE_HASH_ADDRS | RSS_MODE_HASH_PORTS) +#define RSS_CONTEXT_FLAGS_DEFAULT_ADDITIONAL (\ + RSS_MODE_HASH_4TUPLE << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_LBN |\ + RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN |\ + RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_LBN |\ + RSS_MODE_HASH_4TUPLE << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_LBN |\ + RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN |\ + RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_LBN) + +#ifdef CONFIG_RFS_ACCEL +/* Order of these is important, since filter_id >= %EFX_ARFS_FILTER_ID_PENDING + * is used to test if filter does or will exist. + */ +#define EFX_ARFS_FILTER_ID_PENDING -1 +#define EFX_ARFS_FILTER_ID_ERROR -2 +#define EFX_ARFS_FILTER_ID_REMOVING -3 +/** + * struct efx_arfs_rule - record of an ARFS filter and its IDs + * @node: linkage into hash table + * @spec: details of the filter (used as key for hash table). Use efx->type to + * determine which member to use. + * @rxq_index: channel to which the filter will steer traffic. + * @arfs_id: filter ID which was returned to ARFS + * @filter_id: index in software filter table. 
May be + * %EFX_ARFS_FILTER_ID_PENDING if filter was not inserted yet, + * %EFX_ARFS_FILTER_ID_ERROR if filter insertion failed, or + * %EFX_ARFS_FILTER_ID_REMOVING if expiry is currently removing the filter. + */ +struct efx_arfs_rule { + struct hlist_node node; + struct efx_filter_spec spec; + u16 rxq_index; + u16 arfs_id; + s32 filter_id; +}; + +/* Size chosen so that the table is one page (4kB) */ +#define EFX_ARFS_HASH_TABLE_SIZE 512 + +/** + * struct efx_async_filter_insertion - Request to asynchronously insert a filter + * @net_dev: Reference to the netdevice + * @spec: The filter to insert + * @work: Workitem for this request + * @rxq_index: Identifies the channel for which this request was made + * @flow_id: Identifies the kernel-side flow for which this request was made + */ +struct efx_async_filter_insertion { + struct net_device *net_dev; + struct efx_filter_spec spec; + struct work_struct work; + u16 rxq_index; + u32 flow_id; +}; + +/* Maximum number of ARFS workitems that may be in flight on an efx_nic */ +#define EFX_RPS_MAX_IN_FLIGHT 8 +#endif /* CONFIG_RFS_ACCEL */ + +/** + * struct efx_ntuple_rule - record of an ntuple filter and its IDs + * @list: linked list node in the ntuple_list + * @spec: details of the filter. + * @user_id: filter ID which was returned to ethtool + * @filter_id: index in software filter table. + * Only valid if efx_net_allocated(efx->state). + */ +struct efx_ntuple_rule { + struct list_head list; + struct efx_filter_spec spec; + u32 user_id; + s32 filter_id; +}; + +/** + * struct efx_vport - A driver-managed virtual port + * @list: node of linked list on which this struct is stored + * @vport_id: the VPORT_ID returned by MC firmware, or %EVB_PORT_ID_NULL if this + * vport is not present on the NIC + * @user_id: the port_id exposed to the user + * @vlan: VID of this vport's VLAN, or %EFX_FILTER_VID_UNSPEC for none + * @vlan_restrict: as per %MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT + */ +struct efx_vport { +#ifdef EFX_NOT_UPSTREAM + /* These are used by Driverlink (and no-one else currently). 
*/
+#endif
+ struct list_head list;
+ u32 vport_id;
+ u16 user_id;
+ u16 vlan;
+ bool vlan_restrict;
+};
+
+
+#ifdef CONFIG_SFC_MTD
+/**
+ * struct efx_mtd - Struct to hold the MTD list and kref for deletion without efx
+ * @efx: The associated efx_nic
+ * @list: List of MTDs attached to the NIC
+ * @parts: Memory allocated for MTD data
+ * @parts_kref: kref object for @parts
+ */
+struct efx_mtd {
+ struct efx_nic *efx;
+ struct list_head list;
+ void *parts;
+ struct kref parts_kref;
+#if defined(EFX_WORKAROUND_87308)
+ /** @probed_flag: Flag set when netdev is registered or renamed */
+ atomic_t probed_flag;
+ /** @creation_work: Delay MTD creation to avoid naming conflicts */
+ struct delayed_work creation_work;
+#endif
+};
+#endif
+
+
+/**
+ * enum efx_buf_alloc_mode - buffer allocation mode
+ * @EFX_BUF_MODE_EF100: buffer setup in ef100 mode
+ * @EFX_BUF_MODE_VDPA: buffer setup in vdpa mode
+ */
+enum efx_buf_alloc_mode {
+ EFX_BUF_MODE_EF100,
+ EFX_BUF_MODE_VDPA
+};
+
+struct efx_mae;
+
+/**
+ * struct efx_nic - an Efx NIC
+ * @name: Device name (net device name or bus id before net device registered)
+ * @pci_dev: The PCI device
+ * @type: Controller type attributes
+ * @mgmt_dev: vDPA Management device
+ * @port_num: Port number as reported by MCDI
+ * @client_id: client ID of this PCIe function
+ * @adapter_base_addr: MAC address of port0 (used as unique identifier)
+ * @reset_work: Scheduled reset workitem
+ * @membase_phys: Memory BAR value as physical address
+ * @membase: Memory BAR value
+ * @rdev: Device information for the virtual bus
+ * @vi_stride: step between per-VI registers / memory regions
+ * @interrupt_mode: Interrupt mode
+ * @timer_quantum_ns: Interrupt timer quantum, in nanoseconds
+ * @timer_max_ns: Interrupt timer maximum value, in nanoseconds
+ * @tc_match_ignore_ttl: Flag for whether TC match should ignore IP TTL value
+ * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues
+ * @xdp_tx: Flag for whether XDP TX queues are supported
+ * @irqs_hooked: Channel interrupts are hooked
+ * @log_tc_errs: Error logging for TC filter insertion is enabled
+ * @irq_mod_step_us: Adaptive IRQ moderation time step for RX event queues
+ * @irq_rx_moderation_us: IRQ moderation time for RX event queues
+ * @rss_mode: RSS spreading mode
+ * @msg_enable: Log message enable flags
+ * @state: Device state number (%STATE_*). Serialised by the rtnl_lock.
+ * @max_irqs: Maximum number of interrupts the device supports.
+ * @reset_pending: Bitmask for pending resets
+ * @last_reset: Time of previous reset (jiffies)
+ * @current_reset: Time of current reset (jiffies)
+ * @reset_count: Count of resets to rate limit reset rescheduling
+ * @channel_list: Channels used by a PCI function.
+ * @msi_context: Context for each MSI
+ * @extra_channel_type: Types of extra (non-traffic) channels that
+ * should be allocated for this NIC
+ * @mae: Details of the Match Action Engine
+ * @xdp_tx_queue_count: Number of entries in %xdp_tx_queues.
+ * @xdp_tx_queues: Array of pointers to tx queues used for XDP transmit.
+ * @rxq_entries: Size of receive queues requested by user.
+ * @txq_entries: Size of transmit queues requested by user.
+ * @txq_stop_thresh: TX queue fill level at or above which we stop it.
+ * @txq_wake_thresh: TX queue fill level at or below which we wake it.
+ * @txq_min_entries: Minimum valid number of TX queue entries.
+ * @tx_dc_entries: Number of entries in each TX queue descriptor cache
+ * @rx_dc_entries: Number of entries in each RX queue descriptor cache
+ * @tx_dc_base: Base qword address in SRAM of TX queue descriptor caches
+ * @rx_dc_base: Base qword address in SRAM of RX queue descriptor caches
+ * @sram_lim_qw: Qword address limit of SRAM
+ * @max_channels: Max available channels
+ * @max_vis: Max available VI resources
+ * @max_tx_channels: Max available TX channels
+ * @supported_bitmap: Bitmap of supported ring sizes (EF100)
+ * @guaranteed_bitmap: Bitmap of guaranteed ring sizes (EF100)
+ * @n_combined_channels: Number of combined RX/TX channels
+ * @n_extra_channels: Number of extra channels (for driver-internal uses)
+ * @n_rx_only_channels: Number of channels used only for RX (after combined)
+ * @n_rss_channels: Number of rx channels available for RSS.
+ * @rss_spread: Number of event queues to spread traffic over.
+ * @n_tx_only_channels: Number of channels used only for TX (after RX)
+ * @tx_channel_offset: Offset of zeroth channel used for TX.
+ * @tx_queues_per_channel: Number of TX queues on a normal (non-XDP) TX channel.
+ * @n_xdp_channels: Number of channels used for XDP TX (after TX)
+ * @xdp_tx_per_channel: Max number of TX queues on an XDP TX channel.
+ * @rx_ip_align: RX DMA address offset to have IP header aligned in
+ * accordance with NET_IP_ALIGN
+ * @rx_dma_len: Current maximum RX DMA length
+ * @rx_buffer_order: Order (log2) of number of pages for each RX buffer
+ * @rx_buffer_truesize: Amortised allocation size of an RX buffer,
+ * for use in &struct sk_buff.truesize
+ * @rx_page_buf_step: Stride between adjacent RX buffers, in bytes
+ * @rx_bufs_per_page: Number of RX buffers per memory page
+ * @rx_pages_per_batch: Preferred number of descriptors to fill at once
+ * @rx_prefix_size: Size of RX prefix before packet data
+ * @rx_packet_hash_offset: Offset of RX flow hash from start of packet data
+ * (valid only if @rx_prefix_size != 0; always negative)
+ * @rx_packet_len_offset: Offset of RX packet length from start of packet data
+ * (valid only for NICs that set %EFX_RX_PKT_PREFIX_LEN; always negative)
+ * @rx_packet_ts_offset: Offset of timestamp from start of packet data
+ * (valid only if channel->sync_timestamps_enabled; always negative)
+ * @rx_scatter: Scatter mode enabled for receives
+ * @rss_context: Main RSS context. Its @list member is the head of the list of
+ * RSS contexts created by user requests
+ * @rss_lock: Protects custom RSS context software state in @rss_context.list
+ * @vport: Main virtual port. Its @list member is the head of a list of vports.
+ * @vport_lock: Protects extra virtual port state in @vport.list
+ * @select_tx_queue: select appropriate TX queue for packet
+ * @errors: Error condition stats
+ * @int_error_count: Number of internal errors seen recently
+ * @int_error_expire: Time at which error count will be expired
+ * @irq_soft_enabled: Are IRQs soft-enabled? If not, IRQ handler will
+ * acknowledge but do nothing else.
+ * @irq_status: Interrupt status buffer
+ * @irq_level: IRQ level/index for IRQs not triggered by an event queue
+ * @selftest_work: Work item for asynchronous self-test
+ * @mtd_struct: A struct for holding combined mtd data for freeing
+ * independently
+ * @nic_data: Hardware dependent state
+ * @mcdi: Management-Controller-to-Driver Interface state
+ * @mac_lock: MAC access lock.
Protects @port_enabled, @link_up, @phy_mode, + * efx_monitor() and efx_mac_work() + * @mac_work: Work item for changing MAC promiscuity and multicast hash + * @port_enabled: Port enabled indicator. + * Serialises efx_stop_all(), efx_start_all(), efx_monitor() and + * efx_mac_work() with kernel interfaces. Safe to read under any + * one of the rtnl_lock, mac_lock, or netif_tx_lock, but all three must + * be held to modify it. + * @datapath_started: Is the datapath running ? + * @mc_bist_for_other_fn: Is NIC unavailable due to BIST on another function ? + * @port_initialized: Port initialized? + * @net_dev: Operating system network device. Consider holding the rtnl lock + * @vlan_filter_available: are VLAN filters available ? + * @fixed_features: Features which cannot be turned off + * @stats_enabled: Is periodic statistics collection enabled ? + * @stats_initialised: Have initial stats counters been fetched ? + * @num_mac_stats: Number of MAC stats reported by firmware (MAC_STATS_NUM_STATS + * field of %MC_CMD_GET_CAPABILITIES_V4 response, or %MC_CMD_MAC_NSTATS) + * @stats_period_ms: Interval between statistic updates in milliseconds. + * Set from ethtool -C parameter stats-block-usecs. + * @stats_monitor_work: Work item to monitor periodic statistics updates + * @stats_monitor_generation: Periodic stats most recent generation count + * @stats_buffer: DMA buffer for statistics + * @mc_initial_stats: Buffer for statistics as they were when probing the device + * @rx_nodesc_drops_total: Count packets dropped when no RX descriptor is + * available on the NIC. + * @rx_nodesc_drops_while_down: Count packets dropped when no RX descriptor is + * available on the NIC and the interface is down. + * @rx_nodesc_drops_prev_state: Was interface up when nodesc drops last updated ? + * @phy_type: PHY type + * @phy_name: PHY name + * @phy_data: PHY data + * @phy_mode: PHY operating mode. Serialised by @mac_lock. + * @link_down_on_reset: force link down on reset + * @phy_power_follows_link: PHY powers off when link is taken down. + * @phy_power_force_off: PHY always powered off - only useful for test. + * @link_advertising: Autonegotiation advertising flags + * @fec_config: Forward Error Correction configuration flags. For bit positions + * see &enum ethtool_fec_config_bits. + * @link_state: Current state of the link + * @n_link_state_changes: Number of times the link has changed state + * @wanted_fc: Wanted flow control flags + * @fc_disable: When non-zero flow control is disabled. Typically used to + * ensure that network back pressure doesn't delay dma queue flushes. + * Serialised by the rtnl lock. + * @loopback_mode: Loopback status + * @loopback_modes: Supported loopback mode bitmask + * @loopback_selftest: Offline self-test private state + * @xdp_prog: Current XDP program for this interface + * @filter_sem: Filter table rw_semaphore, protects existence of @filter_state + * @filter_state: Architecture-dependent filter table state + * @rps_mutex: Protects RPS state of all channels + * @rps_slot_map: bitmap of in-flight entries in @rps_slot + * @rps_slot: array of ARFS insertion requests for efx_filter_rfs_work() + * @rps_hash_lock: Protects ARFS filter mapping state (@rps_hash_table and + * @rps_next_id). + * @rps_hash_table: Mapping between ARFS filters and their various IDs + * @rps_next_id: next arfs_id for an ARFS filter + * @ntuple_list: List of ntuple filters + * @active_queues: Count of RX and TX queues that haven't been flushed and drained. 
+ * @rxq_flush_pending: Count of number of receive queues that need to be flushed.
+ * Decremented when efx_flush_rx_queue() is called.
+ * @rxq_flush_outstanding: Count of number of RX flushes started but not yet
+ * completed (either success or failure). Not used when MCDI is used to
+ * flush receive queues.
+ * @flush_wq: wait queue used by efx_nic_flush_queues() to wait for flush completions.
+ * @ptp_data: PTP state data
+ * @phc_ptp_data: PTP state data of the adapter exposing the PHC clock.
+ * @node_ptp_all_funcs: List node for maintaining list of all functions.
+ * Serialised by ptp_all_funcs_list_lock
+ * @ptp_unavailable_warned: If PTP is unavailable has a warning been issued ?
+ * @ptp_capability: PTP capability flags
+ * @dump_data: state for NIC state dumping support
+ * @netdev_notifier: Netdevice notifier.
+ * @netevent_notifier: Netevent notifier (for neighbour updates).
+ * @tc: state for TC offload (EF100).
+ * @mem_bar: The BAR that is mapped into membase.
+ * @reg_base: Offset from the start of the bar to the function control window.
+ * @mcdi_buf_mode: mcdi buffer allocation mode
+ * @vdpa_nic: State when operating as a VDPA device (EF100)
+ * @reflash_mutex: Mutex for serialising firmware reflash operations.
+ * @monitor_work: Hardware monitor workitem
+ * @biu_lock: BIU (bus interface unit) lock
+ * @last_irq_cpu: Last CPU to handle a possible test interrupt. This
+ * field is used by efx_test_interrupts() to verify that an
+ * interrupt has occurred.
+ * @stats_lock: Statistics update lock. Used to serialize access to
+ * statistics-related NIC data. Obtained in efx_nic_type::update_stats
+ * and must be released by caller after statistics processing/copying
+ * if required.
+ * @n_rx_noskb_drops: Count of RX packets dropped due to failure to allocate an skb
+ *
+ * This is stored in the private area of the &struct net_device.
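+ *
+ * (More precisely, judging from efx_netdev_priv() at the end of this
+ * file: the netdev private area holds a pointer to
+ * &struct efx_probe_data, which embeds this structure.)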
+ */ +struct efx_nic { + + /* The following fields should be written very rarely */ + char name[IFNAMSIZ]; + struct pci_dev *pci_dev; + const struct efx_nic_type *type; +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_VDPA_MGMT_INTERFACE) + struct vdpa_mgmt_dev *mgmt_dev; +#endif + unsigned int port_num; + u32 client_id; + /* Aligned for efx_mcdi_get_board_cfg()+ether_addr_copy() */ + u8 adapter_base_addr[ETH_ALEN] __aligned(2); + struct work_struct reset_work; +#ifdef EFX_NOT_UPSTREAM + /** + * @schedule_all_channels_work: work item to schedule NAPI on all + * channels + */ + struct work_struct schedule_all_channels_work; +#endif + resource_size_t membase_phys; + void __iomem *membase; + struct sfc_rdma_dev *rdev; + + unsigned int vi_stride; + + enum efx_int_mode interrupt_mode; + unsigned int timer_quantum_ns; + unsigned int timer_max_ns; + bool tc_match_ignore_ttl; + bool irq_rx_adaptive; + bool xdp_tx; + bool irqs_hooked; + bool log_tc_errs; + unsigned int irq_mod_step_us; + unsigned int irq_rx_moderation_us; +#if !defined(EFX_NOT_UPSTREAM) + enum efx_rss_mode rss_mode; +#endif + u32 msg_enable; +#ifdef EFX_NOT_UPSTREAM + /** @performance_profile: Performance tuning profile */ + enum efx_performance_profile performance_profile; +#endif + + enum nic_state state; + u16 max_irqs; + unsigned long reset_pending; + unsigned long last_reset; + unsigned long current_reset; + unsigned int reset_count; + + struct list_head channel_list; + struct efx_msi_context *msi_context; + const struct efx_channel_type * + extra_channel_type[EFX_MAX_EXTRA_CHANNELS]; + struct efx_mae *mae; + + unsigned int xdp_tx_queue_count; + struct efx_tx_queue **xdp_tx_queues; + + unsigned int rxq_entries; + unsigned int txq_entries; + unsigned int txq_stop_thresh; + unsigned int txq_wake_thresh; + unsigned int txq_min_entries; + + unsigned int tx_dc_entries; + unsigned int rx_dc_entries; + unsigned int tx_dc_base; + unsigned int rx_dc_base; + unsigned int sram_lim_qw; +#ifdef EFX_NOT_UPSTREAM +#if IS_MODULE(CONFIG_SFC_DRIVERLINK) + /** @n_dl_irqs: Number of IRQs to reserve for driverlink */ + int n_dl_irqs; + /** @ef10_resources: EF10 driverlink parameters */ + struct efx_dl_ef10_resources ef10_resources; + /** @irq_resources: IRQ driverlink parameters */ + struct efx_dl_irq_resources *irq_resources; +#endif +#endif + + u16 max_channels; + u16 max_vis; + unsigned int max_tx_channels; + unsigned long supported_bitmap; + unsigned long guaranteed_bitmap; + + /* RX and TX channel ranges must be contiguous. + * other channels are always combined channels. + * combined channels are first, followed by other channels OR + * TX-only channels OR RX-only channels. + * OR we can have RX-only then TX-only, but no combined or other. + * XDP is a special set of TX channels that go on the end and can + * coexist with anything. 
+	 * So we can have:
+	 * combined and other
+	 * combined and RX-only
+	 * combined and TX-only
+	 * RX-only and TX-only
+	 */
+	unsigned int n_combined_channels;
+	unsigned int n_extra_channels;
+	unsigned int n_rx_only_channels;
+	unsigned int n_rss_channels;
+	unsigned int rss_spread;
+	unsigned int n_tx_only_channels;
+	unsigned int tx_channel_offset;
+	unsigned int tx_queues_per_channel;
+	unsigned int n_xdp_channels;
+	unsigned int xdp_tx_per_channel;
+	unsigned int rx_ip_align;
+	unsigned int rx_dma_len;
+	unsigned int rx_buffer_order;
+	unsigned int rx_buffer_truesize;
+	unsigned int rx_page_buf_step;
+	unsigned int rx_bufs_per_page;
+	unsigned int rx_pages_per_batch;
+	unsigned int rx_prefix_size;
+	int rx_packet_hash_offset;
+	int rx_packet_len_offset;
+	int rx_packet_ts_offset;
+	bool rx_scatter;
+	struct efx_rss_context rss_context;
+	struct mutex rss_lock;
+
+	struct efx_vport vport;
+	struct mutex vport_lock;
+
+	struct efx_tx_queue *(*select_tx_queue)(struct efx_channel *channel,
+						struct sk_buff *skb);
+
+	struct efx_nic_errors errors;
+	unsigned int_error_count;
+	unsigned long int_error_expire;
+
+	bool irq_soft_enabled;
+	struct efx_buffer irq_status;
+	unsigned long irq_level;
+	struct delayed_work selftest_work;
+
+#ifdef CONFIG_SFC_MTD
+	struct efx_mtd *mtd_struct;
+#endif
+
+	void *nic_data;
+	struct efx_mcdi_data *mcdi;
+
+	struct mutex mac_lock;
+	struct work_struct mac_work;
+#ifdef EFX_NOT_UPSTREAM
+#if IS_MODULE(CONFIG_SFC_DRIVERLINK)
+	/** @open_count: Count netdev opens */
+	u16 open_count;
+#endif
+#endif
+	bool port_enabled;
+	bool datapath_started;
+
+	bool mc_bist_for_other_fn;
+	bool port_initialized;
+	struct net_device *net_dev;
+#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_NDO_SET_FEATURES) && !defined(EFX_HAVE_EXT_NDO_SET_FEATURES)
+	/** @rx_checksum_enabled: Is RX checksum offload enabled? */
+	bool rx_checksum_enabled;
+#endif
+#if defined(EFX_NOT_UPSTREAM) && defined(EFX_USE_SFC_LRO)
+	/** @lro_available: Is LRO supported? */
+	bool lro_available;
+#ifndef NETIF_F_LRO
+	/** @lro_enabled: Is LRO enabled? */
+	bool lro_enabled;
+#endif
+#endif
+	bool vlan_filter_available;
+
+	netdev_features_t fixed_features;
+#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_NETDEV_HW_FEATURES) && !defined(EFX_HAVE_NETDEV_EXTENDED_HW_FEATURES)
+	/** @hw_features: Features which may be enabled/disabled */
+	netdev_features_t hw_features;
+#endif
+
+	bool stats_enabled;
+	bool stats_initialised;
+	u16 num_mac_stats;
+	unsigned int stats_period_ms;
+
+	struct delayed_work stats_monitor_work;
+	__le64 stats_monitor_generation;
+
+	struct efx_buffer stats_buffer;
+	__le64 *mc_initial_stats;
+	u64 rx_nodesc_drops_total;
+	u64 rx_nodesc_drops_while_down;
+	bool rx_nodesc_drops_prev_state;
+
+#if defined(EFX_NOT_UPSTREAM) && defined(EFX_HAVE_VLAN_RX_PATH)
+	/** @vlan_group: VLAN group */
+	struct vlan_group *vlan_group;
+#endif
+
+	unsigned int phy_type;
+	char phy_name[20];
+	void *phy_data;
+	enum efx_phy_mode phy_mode;
+	bool link_down_on_reset;
+	bool phy_power_follows_link;
+	bool phy_power_force_off;
+
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(link_advertising);
+	u32 fec_config;
+	struct efx_link_state link_state;
+	unsigned int n_link_state_changes;
+
+	u8 wanted_fc;
+	unsigned int fc_disable;
+
+	enum efx_loopback_mode loopback_mode;
+	u64 loopback_modes;
+	void *loopback_selftest;
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP)
+	/* We access loopback_selftest immediately before running XDP,
+	 * so we want them next to each other.
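	 * Keeping the two adjacent makes it likely that they share a
	 * cache line, saving a cache miss on the XDP fast path.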
+ */ + struct bpf_prog __rcu *xdp_prog; +#endif + + struct rw_semaphore filter_sem; + void *filter_state; +#ifdef CONFIG_RFS_ACCEL + struct mutex rps_mutex; + unsigned long rps_slot_map; + struct efx_async_filter_insertion rps_slot[EFX_RPS_MAX_IN_FLIGHT]; + spinlock_t rps_hash_lock; + struct hlist_head *rps_hash_table; + u32 rps_next_id; +#endif + struct list_head ntuple_list; + +#ifdef EFX_NOT_UPSTREAM +#if IS_MODULE(CONFIG_SFC_DRIVERLINK) + /** @dl_nic: Efx driverlink nic */ + struct efx_dl_nic dl_nic; + /** + * @dl_block_kernel_mutex: Mutex protecting @dl_block_kernel_count + * and corresponding per-client state + */ + struct mutex dl_block_kernel_mutex; + /** + * @dl_block_kernel_count: Number of times Driverlink clients are + * blocking the kernel stack from receiving packets + */ + unsigned int dl_block_kernel_count[EFX_DL_FILTER_BLOCK_KERNEL_MAX]; +#endif +#endif + +#ifdef CONFIG_DEBUG_FS + /** @debug_dir: NIC debugfs directory */ + struct dentry *debug_dir; + /** @debug_symlink: NIC debugfs symlink (``nic_eth%d``) */ + struct dentry *debug_symlink; + /** @debug_port_dir: Port debugfs directory */ + struct dentry *debug_port_dir; + /** @debug_port_symlink: Port debugfs symlink (``if_eth%d``) */ + struct dentry *debug_port_symlink; +#endif + + atomic_t active_queues; + atomic_t rxq_flush_pending; + atomic_t rxq_flush_outstanding; + wait_queue_head_t flush_wq; + +#ifdef CONFIG_SFC_PTP + struct efx_ptp_data *ptp_data; + struct efx_ptp_data *phc_ptp_data; + struct list_head node_ptp_all_funcs; + bool ptp_unavailable_warned; +#endif + unsigned int ptp_capability; + +#ifdef CONFIG_SFC_DUMP + struct efx_dump_data *dump_data; +#endif + struct notifier_block netdev_notifier; + struct notifier_block netevent_notifier; + struct efx_tc_state *tc; +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_USE_DEVLINK) + /** @devlink: Devlink instance */ + struct devlink *devlink; + /** @devlink_port: Devlink port instance */ + struct devlink_port *devlink_port; +#endif + unsigned int mem_bar; + u32 reg_base; + enum efx_buf_alloc_mode mcdi_buf_mode; +#ifdef CONFIG_SFC_VDPA + struct ef100_vdpa_nic *vdpa_nic; +#endif + struct mutex reflash_mutex; + + /* The following fields may be written more often */ + + struct delayed_work monitor_work ____cacheline_aligned_in_smp; + spinlock_t biu_lock; + int last_irq_cpu; +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_USE_NETDEV_STATS) + /** @stats: net device statistics */ + struct net_device_stats stats; +#endif + spinlock_t stats_lock; + atomic_t n_rx_noskb_drops; +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_ETHTOOL_FCS) + /** + * @forward_fcs: Flag to enable appending of 4-byte Frame Checksum + * to Rx packet + */ + bool forward_fcs; +#endif + +#ifdef CONFIG_DEBUG_FS + /** + * @debugfs_symlink_mutex: Protect debugfs @debug_symlink and + * @debug_port_symlink + */ + struct mutex debugfs_symlink_mutex; +#endif +}; + +/** + * struct efx_probe_data - State after hardware probe + * @pci_dev: The PCI device + * @efx: Efx NIC details + */ +struct efx_probe_data { + struct pci_dev *pci_dev; + struct efx_nic efx; +}; + +static inline struct efx_nic *efx_netdev_priv(struct net_device *dev) +{ + struct efx_probe_data **probe_ptr = netdev_priv(dev); + struct efx_probe_data *probe_data = *probe_ptr; + + return &probe_data->efx; +} + +static inline unsigned int efx_xdp_channels(struct efx_nic *efx) +{ + return efx->xdp_tx ? 
efx->n_xdp_channels : 0; +} + +static inline unsigned int efx_channels(struct efx_nic *efx) +{ + return efx->n_combined_channels + efx->n_extra_channels + + efx->n_rx_only_channels + efx->n_tx_only_channels + + efx_xdp_channels(efx); +} + +static inline unsigned int efx_rx_channels(struct efx_nic *efx) +{ + return efx->n_combined_channels + efx->n_rx_only_channels + + efx->n_extra_channels; +} + +static inline unsigned int efx_tx_channels(struct efx_nic *efx) +{ + return efx->n_combined_channels + efx->n_tx_only_channels + + efx->n_extra_channels; +} + +static inline unsigned int efx_extra_channel_offset(struct efx_nic *efx) +{ + return efx->n_combined_channels; +} + +static inline unsigned int efx_xdp_channel_offset(struct efx_nic *efx) +{ + return efx->tx_channel_offset + efx_tx_channels(efx); +} + +static inline int efx_dev_registered(struct efx_nic *efx) +{ + return efx->net_dev->reg_state == NETREG_REGISTERED; +} + +static inline unsigned int efx_port_num(struct efx_nic *efx) +{ + return efx->port_num; +} + +#ifdef CONFIG_SFC_MTD +struct efx_mtd_partition { + struct list_head node; + struct efx_mtd *mtd_struct; + struct mtd_info mtd; +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_USE_MTD_WRITESIZE) + size_t writesize; +#endif + const char *dev_type_name; + const char *type_name; + char name[IFNAMSIZ + 20]; + /* MCDI related attributes */ + bool updating; + u32 nvram_type; + u32 fw_subtype; +}; +#endif + +struct efx_udp_tunnel { +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_UDP_TUNNEL_NIC_INFO) +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_INVALID 0xffff +#endif + u16 type; /* TUNNEL_ENCAP_UDP_PORT_ENTRY_foo, see mcdi_pcol.h */ + __be16 port; +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_UDP_TUNNEL_NIC_INFO) + /* Current state of slot. Used only inside the list, not in request + * arguments. + */ + u16 adding:1, removing:1, count:14; +#endif +}; +#define EFX_UDP_TUNNEL_COUNT_WARN 0x2000 /* top bit of 14-bit field */ +#define EFX_UDP_TUNNEL_COUNT_MAX 0x3fff /* saturate at this value */ + +#if defined(EFX_USE_KCOMPAT) && defined(EFX_TC_OFFLOAD) && !defined(EFX_HAVE_FLOW_INDR_BLOCK_CB_REGISTER) && !defined(EFX_HAVE_FLOW_INDR_DEV_REGISTER) +struct ef100_udp_tunnel { + enum efx_encap_type type; + __be16 port; + struct list_head list; +}; +#endif + +struct mae_mport_desc; + +/** + * struct efx_nic_type - Efx device type definition + * @is_vf: Tells whether the function is a VF or PF + * @mem_bar: Get the memory BAR + * @mem_map_size: Get memory BAR mapped size + * @probe: Probe the controller + * @dimension_resources: Dimension controller resources (buffer table, + * and VIs once the available interrupt resources are clear) + * @free_resources: Free resources allocated by dimension_resources + * @net_alloc: Bringup shared by netdriver and driverlink clients + * @net_dealloc: Teardown corresponding to @net_alloc() + * @remove: Free resources allocated by probe() + * @init: Initialise the controller + * @fini: Shut down the controller + * @monitor: Periodic function for polling link state and hardware monitor + * @hw_unavailable: Check for uninitialised or disabled hardware + * @map_reset_reason: Map ethtool reset reason to a reset method + * @map_reset_flags: Map ethtool reset flags to a reset method, if possible + * @reset: Reset the controller hardware and possibly the PHY. This will + * be called while the controller is uninitialised. 
+ * @probe_port: Probe the MAC and PHY + * @remove_port: Free resources allocated by probe_port() + * @handle_global_event: Handle a "global" event (may be %NULL) + * @fini_dmaq: Flush and finalise DMA queues (RX and TX queues) + * @prepare_flr: Prepare for an FLR + * @finish_flr: Clean up after an FLR + * @describe_stats: Describe statistics for ethtool + * @update_stats: Update statistics not provided by event handling. + * Must obtain and hold efx_nic::stats_lock on return. + * Either argument may be %NULL. + * @start_stats: Start the regular fetching of statistics + * @pull_stats: Pull stats from the NIC and wait until they arrive. + * @stop_stats: Stop the regular fetching of statistics + * @update_stats_period: Set interval for periodic stats fetching. + * @push_irq_moderation: Apply interrupt moderation value + * @reconfigure_port: Push loopback/power/txdis changes to the MAC and PHY + * @reconfigure_mac: Push MAC address, MTU, flow control and filter settings + * to the hardware. Serialised by the mac_lock. + * Only change MTU if mtu_only is set. + * @check_mac_fault: Check MAC fault state. True if fault present. + * @get_wol: Get WoL configuration from driver state + * @set_wol: Push WoL configuration to the NIC + * @resume_wol: Synchronise WoL state between driver and MC (e.g. after resume) + * @check_caps: Check firmware capability flags + * @test_chip: Test registers and memory. This is expected to reset + * the NIC. + * @test_memory: Test read/write functionality of memory blocks, using + * the given test pattern generator + * @test_nvram: Test validity of NVRAM contents + * @mcdi_request: Send an MCDI request with the given header and SDU. + * The SDU length may be any value from 0 up to the protocol- + * defined maximum, but its buffer will be padded to a multiple + * of 4 bytes. + * @mcdi_poll_response: Test whether an MCDI response is available. + * @mcdi_read_response: Read the MCDI response PDU. The offset will + * be a multiple of 4. The length may not be, but the buffer + * will be padded so it is safe to round up. + * @mcdi_poll_reboot: Test whether the MCDI has rebooted. If so, + * return an appropriate error code for aborting any current + * request; otherwise return 0. + * @mcdi_record_bist_event: Record warm boot count at start of BIST + * @mcdi_poll_bist_end: Record warm boot count at end of BIST + * @mcdi_reboot_detected: Called when the MCDI module detects an MC reboot + * @mcdi_get_buf: Get a free buffer for MCDI + * @mcdi_put_buf: Return a buffer from MCDI + * @irq_enable_master: Enable IRQs on the NIC. Each event queue must + * be separately enabled after this. + * @irq_test_generate: Generate a test IRQ + * @irq_disable_non_ev: Disable non-event IRQs on the NIC. Each event + * queue must be separately disabled before this. + * @irq_handle_msi: Handle MSI for a channel. The @dev_id argument is + * a pointer to the &struct efx_msi_context for the channel. 
+ * @tx_probe: Allocate resources for TX queue + * @tx_init: Initialise TX queue on the NIC + * @tx_write: Write TX descriptors and doorbell + * @tx_notify: Write TX doorbell + * @tx_enqueue: Add an SKB to TX queue + * @tx_limit_len: Max available data length for TX descriptor + * @tx_max_skb_descs: Max descriptors required for a single SKB + * @rx_push_rss_config: Write RSS hash key and indirection table to the NIC + * @rx_pull_rss_config: Read RSS hash key and indirection table back from the NIC + * @rx_push_rss_context_config: Write RSS hash key and indirection table for + * user RSS context to the NIC + * @rx_pull_rss_context_config: Read RSS hash key and indirection table for user + * RSS context back from the NIC + * @rx_restore_rss_contexts: Restore user RSS contexts removed from hardware + * @rx_get_default_rss_flags: Get default RSS flags + * @rx_set_rss_flags: Write RSS flow-hashing flags to the NIC + * @rx_get_rss_flags: Read RSS flow-hashing flags back from the NIC + * @rx_probe: Allocate resources for RX queue + * @rx_init: Initialise RX queue on the NIC + * @rx_remove: Free resources for RX queue + * @rx_write: Write RX descriptors and doorbell + * @rx_defer_refill: Generate a refill reminder event + * @rx_packet: Receive the queued RX buffer on a channel + * @rx_buf_hash_valid: Determine whether the RX prefix contains a valid hash + * @ev_probe: Allocate resources for event queue + * @ev_init: Initialise event queue on the NIC + * @ev_fini: Deinitialise event queue on the NIC + * @ev_remove: Free resources for event queue + * @ev_process: Process events for a queue, up to the given NAPI quota + * @ev_mcdi_pending: Peek at event queue, return whether MCDI event is pending + * @ev_read_ack: Acknowledge read events on a queue, rearming its IRQ + * @ev_test_generate: Generate a test event + * @max_rx_ip_filters: Max receive filters for IP + * @filter_table_probe: Probe filter capabilities and set up filter software state + * @filter_table_up: Insert filters due to interface up + * @filter_table_restore: Restore filters removed from hardware + * @filter_table_down: Remove filters due to interface down + * @filter_table_remove: Remove filters from hardware and tear down software state + * @filter_match_supported: Check if specified filter match supported + * @filter_update_rx_scatter: Update filters after change to rx scatter setting + * @filter_insert: add or replace a filter + * @filter_remove_safe: remove a filter by ID, carefully + * @filter_get_safe: retrieve a filter by ID, carefully + * @filter_clear_rx: Remove all RX filters whose priority is less than or + * equal to the given priority and is not %EFX_FILTER_PRI_AUTO + * @filter_count_rx_used: Get the number of filters in use at a given priority + * @filter_get_rx_id_limit: Get maximum value of a filter id, plus 1 + * @filter_get_rx_ids: Get list of RX filters at a given priority + * @filter_rfs_expire_one: Consider expiring a filter inserted for RFS. + * This must check whether the specified table entry is used by RFS + * and that rps_may_expire_flow() returns true for it. + * @mtd_probe: Probe and add MTD partitions associated with this net device, + * using efx_mtd_add() + * @mtd_rename: Set an MTD partition name using the net device name + * @mtd_read: Read from an MTD partition + * @mtd_erase: Erase part of an MTD partition + * @mtd_write: Write to an MTD partition + * @mtd_sync: Wait for write-back to complete on MTD partition. This + * also notifies the driver that a writer has finished using this + * partition. 
+ * @ptp_write_host_time: Send host time to MC as part of sync protocol
+ * @ptp_set_ts_sync_events: Enable or disable sync events for inline RX
+ *	timestamping, possibly only temporarily for the purposes of a reset.
+ * @ptp_set_ts_config: Set hardware timestamp configuration. The flags
+ *	and tx_type will already have been validated but this operation
+ *	must validate and update rx_filter.
+ * @pps_reset: Re-enable PPS if nic_hw_pps_enabled
+ * @vlan_rx_add_vid: Add RX VLAN filter
+ * @vlan_rx_kill_vid: Delete RX VLAN filter
+ * @get_phys_port_id: Get the underlying physical port id.
+ * @vport_add: Add a vport with specified VLAN parameters. Returns an MCDI id.
+ * @vport_del: Destroy a vport specified by MCDI id.
+ * @sriov_init: Initialise VFs when vf-count is set via module parameter.
+ * @sriov_fini: Disable SR-IOV
+ * @sriov_wanted: Check that max_vf > 0.
+ * @sriov_configure: Enable VFs.
+ * @sriov_set_vf_mac: Performs MCDI commands to delete and add back a new
+ *	vport with the new MAC address.
+ * @sriov_set_vf_vlan: Set up VF VLAN.
+ * @sriov_set_vf_spoofchk: Checks whether spoofchk is supported.
+ * @sriov_get_vf_config: Gets VF config
+ * @sriov_set_vf_link_state: Set VF link state
+ * @vswitching_probe: Allocate vswitches and vports.
+ * @vswitching_restore: Restore vswitching following a reset.
+ * @vswitching_remove: Free the vports and vswitches.
+ * @get_mac_address: Get MAC address from underlying vport/pport.
+ * @set_mac_address: Set the MAC address of the device
+ * @mcdi_rpc_timeout: Select MCDI timeout for command
+ * @udp_tnl_has_port: Check if a port has been added as a UDP tunnel
+ * @udp_tnl_push_ports: Push the list of UDP tunnel ports to the NIC if required.
+ * @udp_tnl_add_port: Add a UDP tunnel port
+ * @udp_tnl_del_port: Remove a UDP tunnel port
+ * @get_vf_rep: Get the VF representor netdevice for given VF index
+ * @detach_reps: Detach (stop TX on) all representors
+ * @attach_reps: Attach (restart TX on) all representors
+ * @add_mport: Process the addition of a new MAE port (e.g. create a repr)
+ * @remove_mport: Process the deletion of an existing MAE port
+ * @has_dynamic_sensors: Check if dynamic sensor capability is set
+ * @rx_recycle_ring_size: Size of the RX recycle ring
+ * @revision: Hardware architecture revision
+ * @txd_ptr_tbl_base: TX descriptor ring base address
+ * @rxd_ptr_tbl_base: RX descriptor ring base address
+ * @buf_tbl_base: Buffer table base address
+ * @evq_ptr_tbl_base: Event queue pointer table base address
+ * @evq_rptr_tbl_base: Event queue read-pointer table base address
+ * @max_dma_mask: Maximum possible DMA mask
+ * @rx_prefix_size: Size of RX prefix before packet data
+ * @rx_hash_offset: Offset of RX flow hash within prefix
+ * @rx_ts_offset: Offset of timestamp within prefix
+ * @rx_buffer_padding: Size of padding at end of RX packet
+ * @can_rx_scatter: NIC is able to scatter packets to multiple buffers
+ * @always_rx_scatter: NIC will always scatter packets to multiple buffers
+ * @option_descriptors: NIC supports TX option descriptors
+ * @copy_break: Driver datapath may perform TX copy break
+ * @supported_interrupt_modes: A set of flags denoting which interrupt
+ *	modes are supported, denoted by a bitshift by values in &enum
+ *	efx_int_mode.
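+ *	For example, a NIC supporting MSI-X and MSI but not legacy
+ *	interrupts would set BIT(EFX_INT_MODE_MSIX) | BIT(EFX_INT_MODE_MSI).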
+ * @timer_period_max: Maximum period of interrupt timer (in ticks) + * @offload_features: net_device feature flags for protocol offload + * features implemented in hardware + * @mcdi_max_ver: Maximum MCDI version supported + * @hwtstamp_filters: Mask of hardware timestamp filter types supported + * @rx_hash_key_size: Size of RSS hash key in bytes + */ +struct efx_nic_type { + bool is_vf; + unsigned int (*mem_bar)(struct efx_nic *efx); + unsigned int (*mem_map_size)(struct efx_nic *efx); + int (*probe)(struct efx_nic *efx); + int (*dimension_resources)(struct efx_nic *efx); + void (*free_resources)(struct efx_nic *efx); + int (*net_alloc)(struct efx_nic *efx); + void (*net_dealloc)(struct efx_nic *efx); + void (*remove)(struct efx_nic *efx); + int (*init)(struct efx_nic *efx); + void (*fini)(struct efx_nic *efx); + void (*monitor)(struct efx_nic *efx); + bool (*hw_unavailable)(struct efx_nic *efx); + enum reset_type (*map_reset_reason)(enum reset_type reason); + int (*map_reset_flags)(u32 *flags); + int (*reset)(struct efx_nic *efx, enum reset_type method); + int (*probe_port)(struct efx_nic *efx); + void (*remove_port)(struct efx_nic *efx); + bool (*handle_global_event)(struct efx_channel *channel, efx_qword_t *); + int (*fini_dmaq)(struct efx_nic *efx); + void (*prepare_flr)(struct efx_nic *efx); + void (*finish_flr)(struct efx_nic *efx); + size_t (*describe_stats)(struct efx_nic *efx, u8 *names); +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_USE_NETDEV_STATS64) + size_t (*update_stats)(struct efx_nic *efx, u64 *full_stats, + struct rtnl_link_stats64 *core_stats) + __acquires(efx->stats_lock); +#else + size_t (*update_stats)(struct efx_nic *efx, u64 *full_stats, + struct net_device_stats *core_stats) + __acquires(efx->stats_lock); +#endif + void (*start_stats)(struct efx_nic *efx); + void (*pull_stats)(struct efx_nic *efx); + void (*stop_stats)(struct efx_nic *efx); + void (*update_stats_period)(struct efx_nic *efx); + void (*push_irq_moderation)(struct efx_channel *channel); + int (*reconfigure_port)(struct efx_nic *efx); + int (*reconfigure_mac)(struct efx_nic *efx, bool mtu_only); + bool (*check_mac_fault)(struct efx_nic *efx); + void (*get_wol)(struct efx_nic *efx, struct ethtool_wolinfo *wol); + int (*set_wol)(struct efx_nic *efx, u32 type); + void (*resume_wol)(struct efx_nic *efx); +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_ETHTOOL_FECSTATS) + /** @get_fec_stats: Report FEC block statistics. 
*/ + void (*get_fec_stats)(struct efx_nic *efx, + struct ethtool_fec_stats *fec_stats); +#endif + unsigned int (*check_caps)(const struct efx_nic *efx, + u8 flag, + u32 offset); + int (*test_chip)(struct efx_nic *efx, struct efx_self_tests *tests); + int (*test_memory)(struct efx_nic *efx, + void (*pattern)(unsigned int, efx_qword_t *, int, int), + int a, int b); + int (*test_nvram)(struct efx_nic *efx); + void (*mcdi_request)(struct efx_nic *efx, u8 bufid, + const efx_dword_t *hdr, size_t hdr_len, + const efx_dword_t *sdu, size_t sdu_len); + bool (*mcdi_poll_response)(struct efx_nic *efx, u8 bufid); + void (*mcdi_read_response)(struct efx_nic *efx, u8 bufid, + efx_dword_t *pdu, size_t pdu_offset, + size_t pdu_len); + int (*mcdi_poll_reboot)(struct efx_nic *efx); + void (*mcdi_record_bist_event)(struct efx_nic *efx); + int (*mcdi_poll_bist_end)(struct efx_nic *efx); + void (*mcdi_reboot_detected)(struct efx_nic *efx); + bool (*mcdi_get_buf)(struct efx_nic *efx, u8 *bufid); + void (*mcdi_put_buf)(struct efx_nic *efx, u8 bufid); + void (*irq_enable_master)(struct efx_nic *efx); + int (*irq_test_generate)(struct efx_nic *efx); + void (*irq_disable_non_ev)(struct efx_nic *efx); + irqreturn_t (*irq_handle_msi)(int irq, void *dev_id); + int (*tx_probe)(struct efx_tx_queue *tx_queue); + int (*tx_init)(struct efx_tx_queue *tx_queue); + void (*tx_write)(struct efx_tx_queue *tx_queue); + void (*tx_notify)(struct efx_tx_queue *tx_queue); + int (*tx_enqueue)(struct efx_tx_queue *tx_queue, struct sk_buff *skb); + unsigned int (*tx_limit_len)(struct efx_tx_queue *tx_queue, + dma_addr_t dma_addr, unsigned int len); + unsigned int (*tx_max_skb_descs)(struct efx_nic *efx); + int (*rx_push_rss_config)(struct efx_nic *efx, bool user, + const u32 *rx_indir_table, const u8 *key); + int (*rx_pull_rss_config)(struct efx_nic *efx); + int (*rx_push_rss_context_config)(struct efx_nic *efx, + struct efx_rss_context *ctx, + const u32 *rx_indir_table, + const u8 *key); + int (*rx_pull_rss_context_config)(struct efx_nic *efx, + struct efx_rss_context *ctx); + void (*rx_restore_rss_contexts)(struct efx_nic *efx); + u32 (*rx_get_default_rss_flags)(struct efx_nic *efx); + int (*rx_set_rss_flags)(struct efx_nic *efx, struct efx_rss_context *ctx, + u32 flags); + int (*rx_get_rss_flags)(struct efx_nic *efx, struct efx_rss_context *ctx); + int (*rx_probe)(struct efx_rx_queue *rx_queue); + int (*rx_init)(struct efx_rx_queue *rx_queue); + void (*rx_remove)(struct efx_rx_queue *rx_queue); + void (*rx_write)(struct efx_rx_queue *rx_queue); + int (*rx_defer_refill)(struct efx_rx_queue *rx_queue); + void (*rx_packet)(struct efx_rx_queue *rx_queue); + bool (*rx_buf_hash_valid)(const u8 *prefix); + int (*ev_probe)(struct efx_channel *channel); + int (*ev_init)(struct efx_channel *channel); + void (*ev_fini)(struct efx_channel *channel); + void (*ev_remove)(struct efx_channel *channel); + int (*ev_process)(struct efx_channel *channel, int quota); + bool (*ev_mcdi_pending)(struct efx_channel *channel); + void (*ev_read_ack)(struct efx_channel *channel); + void (*ev_test_generate)(struct efx_channel *channel); + unsigned int max_rx_ip_filters; + int (*filter_table_probe)(struct efx_nic *efx); + int (*filter_table_up)(struct efx_nic *efx); + void (*filter_table_restore)(struct efx_nic *efx); + void (*filter_table_down)(struct efx_nic *efx); + void (*filter_table_remove)(struct efx_nic *efx); + bool (*filter_match_supported)(struct efx_nic *efx, bool encap, + unsigned int match_flags); + void (*filter_update_rx_scatter)(struct efx_nic 
*efx); + s32 (*filter_insert)(struct efx_nic *efx, + const struct efx_filter_spec *spec, bool replace); + int (*filter_remove_safe)(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 filter_id); + int (*filter_get_safe)(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 filter_id, struct efx_filter_spec *); + int (*filter_clear_rx)(struct efx_nic *efx, + enum efx_filter_priority priority); + u32 (*filter_count_rx_used)(struct efx_nic *efx, + enum efx_filter_priority priority); + u32 (*filter_get_rx_id_limit)(struct efx_nic *efx); + s32 (*filter_get_rx_ids)(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 *buf, u32 size); +#ifdef CONFIG_RFS_ACCEL + bool (*filter_rfs_expire_one)(struct efx_nic *efx, u32 flow_id, + unsigned int index); +#endif +#ifdef EFX_NOT_UPSTREAM + /** + * @filter_redirect: update the queue (and RSS context if not NULL) + * for an existing RX filter + */ + int (*filter_redirect)(struct efx_nic *efx, u32 filter_id, + u32 *rss_context, int rxq_i, int stack_id); +#if IS_MODULE(CONFIG_SFC_DRIVERLINK) + /** + * @filter_block_kernel: Block kernel from receiving packets except + * through explicit configuration, i.e. remove and disable + * filters with priority < MANUAL + */ + int (*filter_block_kernel)(struct efx_nic *efx, + enum efx_dl_filter_block_kernel_type type); + /** + * @filter_unblock_kernel: Unblock kernel, i.e. enable automatic + * and hint filters + */ + void (*filter_unblock_kernel)(struct efx_nic *efx, enum + efx_dl_filter_block_kernel_type type); +#endif +#endif + /** @regionmap_buffer: Check if buffer is in accessible region */ + int (*regionmap_buffer)(struct efx_nic *efx, dma_addr_t *dma_addr); +#ifdef CONFIG_SFC_MTD + int (*mtd_probe)(struct efx_nic *efx); + void (*mtd_rename)(struct efx_mtd_partition *part); + int (*mtd_read)(struct mtd_info *mtd, loff_t start, size_t len, + size_t *retlen, u8 *buffer); + int (*mtd_erase)(struct mtd_info *mtd, loff_t start, size_t len); + int (*mtd_write)(struct mtd_info *mtd, loff_t start, size_t len, + size_t *retlen, const u8 *buffer); + int (*mtd_sync)(struct mtd_info *mtd); +#endif +#ifdef CONFIG_SFC_PTP + void (*ptp_write_host_time)(struct efx_nic *efx, u32 host_time); + int (*ptp_set_ts_sync_events)(struct efx_nic *efx, bool en, bool temp); + int (*ptp_set_ts_config)(struct efx_nic *efx, + struct hwtstamp_config *init); + int (*pps_reset)(struct efx_nic *efx); +#endif + int (*vlan_rx_add_vid)(struct efx_nic *efx, __be16 proto, u16 vid); + int (*vlan_rx_kill_vid)(struct efx_nic *efx, __be16 proto, u16 vid); +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_NEED_GET_PHYS_PORT_ID) + int (*get_phys_port_id)(struct efx_nic *efx, + struct netdev_phys_item_id *ppid); +#endif + int (*vport_add)(struct efx_nic *efx, u16 vlan, bool vlan_restrict, + unsigned int *port_id_out); + int (*vport_del)(struct efx_nic *efx, unsigned int port_id); + int (*sriov_init)(struct efx_nic *efx); + void (*sriov_fini)(struct efx_nic *efx); + bool (*sriov_wanted)(struct efx_nic *efx); + int (*sriov_configure)(struct efx_nic *efx, int num_vfs); + int (*sriov_set_vf_mac)(struct efx_nic *efx, int vf_i, const u8 *mac, + bool *reset); + int (*sriov_set_vf_vlan)(struct efx_nic *efx, int vf_i, u16 vlan, + u8 qos); + int (*sriov_set_vf_spoofchk)(struct efx_nic *efx, int vf_i, + bool spoofchk); +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_NDO_SET_VF_MAC) + int (*sriov_get_vf_config)(struct efx_nic *efx, int vf_i, + struct ifla_vf_info *ivi); +#endif + int (*sriov_set_vf_link_state)(struct efx_nic *efx, int vf_i, 
+ int link_state); + int (*vswitching_probe)(struct efx_nic *efx); + int (*vswitching_restore)(struct efx_nic *efx); + void (*vswitching_remove)(struct efx_nic *efx); + int (*get_mac_address)(struct efx_nic *efx, unsigned char *perm_addr); + int (*set_mac_address)(struct efx_nic *efx); + unsigned int (*mcdi_rpc_timeout)(struct efx_nic *efx, unsigned int cmd); + bool (*udp_tnl_has_port)(struct efx_nic *efx, __be16 port); +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_UDP_TUNNEL_NIC_INFO) + int (*udp_tnl_push_ports)(struct efx_nic *efx); +#else + void (*udp_tnl_push_ports)(struct efx_nic *efx); + void (*udp_tnl_add_port)(struct efx_nic *efx, struct efx_udp_tunnel tnl); + void (*udp_tnl_del_port)(struct efx_nic *efx, struct efx_udp_tunnel tnl); +#endif +#if defined(EFX_USE_KCOMPAT) && defined(EFX_TC_OFFLOAD) && !defined(EFX_HAVE_FLOW_INDR_BLOCK_CB_REGISTER) && !defined(EFX_HAVE_FLOW_INDR_DEV_REGISTER) + /** @udp_tnl_add_port2: Add tunnel offload UDP port (EF100) */ + void (*udp_tnl_add_port2)(struct efx_nic *efx, struct ef100_udp_tunnel tnl); + /** @udp_tnl_lookup_port2: Lookup tunnel offload UDP port (EF100) */ + enum efx_encap_type (*udp_tnl_lookup_port2)(struct efx_nic *efx, __be16 port); + /** @udp_tnl_del_port2: Delete tunnel offload port (EF100) */ + void (*udp_tnl_del_port2)(struct efx_nic *efx, struct ef100_udp_tunnel tnl); +#endif + struct net_device *(*get_vf_rep)(struct efx_nic *efx, unsigned int vf); +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_TC_OFFLOAD) + void (*detach_reps)(struct efx_nic *efx); + void (*attach_reps)(struct efx_nic *efx); +#endif + int (*add_mport)(struct efx_nic *efx, struct mae_mport_desc *mport); + void (*remove_mport)(struct efx_nic *efx, struct mae_mport_desc *mport); + bool (*has_dynamic_sensors)(struct efx_nic *efx); + unsigned int (*rx_recycle_ring_size)(const struct efx_nic *efx); + + int revision; + unsigned int txd_ptr_tbl_base; + unsigned int rxd_ptr_tbl_base; + unsigned int buf_tbl_base; + unsigned int evq_ptr_tbl_base; + unsigned int evq_rptr_tbl_base; + u64 max_dma_mask; + unsigned int rx_prefix_size; + unsigned int rx_hash_offset; + unsigned int rx_ts_offset; + unsigned int rx_buffer_padding; + bool can_rx_scatter; + bool always_rx_scatter; + bool option_descriptors; + bool copy_break; + unsigned int supported_interrupt_modes; + unsigned int timer_period_max; +#ifdef EFX_NOT_UPSTREAM +#if IS_MODULE(CONFIG_SFC_DRIVERLINK) + /** + * @ef10_resources: Resources to be shared via driverlink (copied + * and updated as struct efx_nic.ef10_resources). 
+	 */
+	struct efx_dl_ef10_resources ef10_resources;
+	/** @dl_hash_insertion: RX hash insertion details for driverlink */
+	struct efx_dl_hash_insertion dl_hash_insertion;
+#endif
+#endif
+	netdev_features_t offload_features;
+	int mcdi_max_ver;
+	u32 hwtstamp_filters;
+	unsigned int rx_hash_key_size;
+};
+
+/**************************************************************************
+ *
+ * Prototypes and inline functions
+ *
+ *************************************************************************/
+
+struct efx_channel *efx_get_channel(struct efx_nic *efx, unsigned int index);
+
+/* Iterate over all used channels */
+#define efx_for_each_channel(_channel, _efx)				\
+	list_for_each_entry(_channel, &_efx->channel_list, list)
+
+/* Iterate over all used channels in reverse */
+#define efx_for_each_channel_rev(_channel, _efx)			\
+	list_for_each_entry_reverse(_channel, &_efx->channel_list, list)
+
+static inline struct efx_channel *
+efx_get_tx_channel(struct efx_nic *efx, unsigned int index)
+{
+	EFX_WARN_ON_ONCE_PARANOID(index >= efx_tx_channels(efx));
+	return efx_get_channel(efx, efx->tx_channel_offset + index);
+}
+
+static inline struct efx_channel *
+efx_get_xdp_channel(struct efx_nic *efx, unsigned int index)
+{
+	EFX_WARN_ON_ONCE_PARANOID(index >= efx->n_xdp_channels);
+	return efx_get_channel(efx, efx_xdp_channel_offset(efx) + index);
+}
+
+static inline struct efx_channel *
+efx_get_rx_queue_channel(struct efx_rx_queue *rx_queue)
+{
+	struct efx_channel *channel = container_of(rx_queue,
+						   struct efx_channel,
+						   rx_queue);
+	return channel;
+}
+
+static inline bool efx_channel_has_tx_queues(struct efx_channel *channel)
+{
+	return channel->channel - channel->efx->tx_channel_offset <
+	       efx_tx_channels(channel->efx);
+}
+
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_SOCK)
+#if defined(CONFIG_XDP_SOCKETS)
+static inline struct efx_tx_queue *
+efx_channel_get_xsk_tx_queue(struct efx_channel *channel)
+{
+	if (unlikely(channel->tx_queue_count <= 1))
+		return NULL;
+
+	/* The last TX queue is used for XSK transmits */
+	return channel->tx_queues + (channel->tx_queue_count - 1);
+}
+
+static inline bool efx_is_xsk_tx_queue(struct efx_tx_queue *tx_queue)
+{
+	return (tx_queue->channel->tx_queue_count > 1 &&
+		tx_queue->label == (tx_queue->channel->tx_queue_count - 1));
+}
+#endif
+#endif
+
+static inline struct efx_tx_queue *
+efx_channel_get_tx_queue(struct efx_channel *channel, unsigned int label)
+{
+	if (unlikely(label >= channel->tx_queue_count)) {
+		netif_err(channel->efx, drv, channel->efx->net_dev,
+			  "Queue label %d out of range on channel %d (max %d)\n",
+			  label, channel->channel,
+			  channel->tx_queue_count - 1);
+		return NULL;
+	}
+
+	return channel->tx_queues + label;
+}
+
+static inline struct efx_tx_queue *
+efx_get_tx_queue_from_index(struct efx_nic *efx, unsigned int index)
+{
+	struct efx_channel *channel;
+
+	if (index < efx_tx_channels(efx) * efx->tx_queues_per_channel) {
+		channel = efx_get_tx_channel(efx,
+					     index / efx->tx_queues_per_channel);
+		index %= efx->tx_queues_per_channel;
+		return efx_channel_get_tx_queue(channel, index);
+	}
+
+	index -= efx_tx_channels(efx) * efx->tx_queues_per_channel;
+	if (index < efx->n_xdp_channels * efx->xdp_tx_per_channel) {
+		channel = efx_get_xdp_channel(efx,
+					      index / efx->xdp_tx_per_channel);
+		index %= efx->xdp_tx_per_channel;
+		return efx_channel_get_tx_queue(channel, index);
+	}
+
+	return NULL;
+}
+
+/* Iterate over all TX queues belonging to a channel */
+#define efx_for_each_channel_tx_queue(_tx_queue, _channel)		\
+	if (!_channel || !(_channel)->tx_queues)			\
+		;							\
+	else								\
+		for (_tx_queue = (_channel)->tx_queues;			\
+		     _tx_queue < (_channel)->tx_queues +		\
+				 (_channel)->tx_queue_count;		\
+		     _tx_queue++)
+
+static inline bool efx_channel_has_rx_queue(struct efx_channel *channel)
+{
+	return channel && channel->rx_queue.core_index >= 0;
+}
+
+static inline struct efx_rx_queue *
+efx_channel_get_rx_queue(struct efx_channel *channel)
+{
+	EFX_WARN_ON_ONCE_PARANOID(!efx_channel_has_rx_queue(channel));
+	return &channel->rx_queue;
+}
+
+/* Iterate over all RX queues belonging to a channel */
+#define efx_for_each_channel_rx_queue(_rx_queue, _channel)		\
+	if (!efx_channel_has_rx_queue(_channel))			\
+		;							\
+	else								\
+		for (_rx_queue = &(_channel)->rx_queue;			\
+		     _rx_queue;						\
+		     _rx_queue = NULL)
+
+static inline bool efx_channel_is_xdp_tx(struct efx_channel *channel)
+{
+	return channel->channel - efx_xdp_channel_offset(channel->efx) <
+	       efx_xdp_channels(channel->efx);
+}
+
+/* Name formats */
+#define EFX_CHANNEL_NAME(_channel) "chan%d", (_channel)->channel
+#define EFX_TX_QUEUE_NAME(_tx_queue) "txq%d", (_tx_queue)->queue
+#define EFX_RX_QUEUE_NAME(_rx_queue) "rxq%d", efx_rx_queue_index(_rx_queue)
+
+static inline struct efx_channel *
+efx_rx_queue_channel(struct efx_rx_queue *rx_queue)
+{
+	return container_of(rx_queue, struct efx_channel, rx_queue);
+}
+
+/* Software index of the RX queue, meaningful to the driver.
+ * Not necessarily related to hardware RXQ number in any way.
+ */
+static inline int efx_rx_queue_index(struct efx_rx_queue *rx_queue)
+{
+	return rx_queue->label;
+}
+
+/* Hardware RXQ instance number. Relative VI number of the VI backing
+ * this RX queue.
+ */
+static inline int efx_rx_queue_instance(struct efx_rx_queue *rx_queue)
+{
+	return rx_queue->queue;
+}
+
+/* Returns a pointer to the specified receive buffer in the RX
+ * descriptor queue.
+ */
+static inline struct efx_rx_buffer *efx_rx_buffer(struct efx_rx_queue *rx_queue,
+						  unsigned int index)
+{
+	return &rx_queue->buffer[index];
+}
+
+/* Returns a pointer to the receive buffer waiting in the RX pipeline */
+static inline struct efx_rx_buffer *efx_rx_buf_pipe(struct efx_rx_queue *rx_queue)
+{
+	return efx_rx_buffer(rx_queue, rx_queue->rx_pkt_index);
+}
+
+static inline struct efx_rx_buffer *
+efx_rx_buf_next(struct efx_rx_queue *rx_queue, struct efx_rx_buffer *rx_buf)
+{
+	if (unlikely(rx_buf == efx_rx_buffer(rx_queue, rx_queue->ptr_mask)))
+		return efx_rx_buffer(rx_queue, 0);
+	else
+		return rx_buf + 1;
+}
+
+/**
+ * EFX_MAX_FRAME_LEN - calculate maximum frame length
+ * @mtu: maximum transmission unit (ethernet frame payload size).
+ *
+ * This calculates the maximum frame length that will be used for a
+ * given MTU. The frame length will be equal to the MTU plus a
+ * constant amount of header space and padding. This is the quantity
+ * that the net driver will program into the MAC as the maximum frame
+ * length.
+ *
+ * The 10G MAC requires 8-byte alignment on the frame
+ * length, so we round up to the nearest 8.
+ *
+ * Re-clocking by the XGXS on RX can reduce an IPG to 32 bits (half an
+ * XGMII cycle). If the frame length reaches the maximum value in the
+ * same cycle, the XMAC can miss the IPG altogether. We work around
+ * this by adding a further 16 bytes.
+ *
+ * For example, the standard 1500-byte MTU gives
+ * ((1500 + 14 + 4 + 4 + 7) & ~7) + 16 = 1544 bytes.
+ */
+#define EFX_MAX_FRAME_LEN(mtu) \
+	((((mtu) + ETH_HLEN + VLAN_HLEN + 4/* FCS */ + 7) & ~7) + 16)
+
+/*
+ * WARNING: This calculation must match the value of page_offset
+ * calculated in efx_init_rx_buffer().
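+ * Each buffer step therefore covers the per-page state area, the XDP
+ * headroom and IP-alignment padding as well as the DMA area itself,
+ * rounded up to EFX_RX_BUF_ALIGNMENT.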
+ */
+static inline size_t efx_rx_buffer_step(struct efx_nic *efx)
+{
+	return ALIGN(sizeof(struct efx_rx_page_state) + XDP_PACKET_HEADROOM +
+		     efx->rx_dma_len + efx->rx_ip_align, EFX_RX_BUF_ALIGNMENT);
+}
+
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_SKBTX_HW_TSTAMP)
+static inline bool efx_xmit_with_hwtstamp(struct sk_buff *skb)
+{
+	return skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP;
+}
+static inline void efx_xmit_hwtstamp_pending(struct sk_buff *skb)
+{
+	skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+}
+#elif defined(CONFIG_SFC_PTP) && !defined(EFX_HAVE_SKBTX_HW_TSTAMP)
+static inline bool efx_xmit_with_hwtstamp(struct sk_buff *skb)
+{
+	return skb_shinfo(skb)->tx_flags.hardware;
+}
+static inline void efx_xmit_hwtstamp_pending(struct sk_buff *skb)
+{
+	skb_shinfo(skb)->tx_flags.in_progress = 1;
+}
+#elif defined(CONFIG_SFC_PTP)
+/* No kernel timestamping: must examine the sk_buff */
+static inline bool efx_xmit_with_hwtstamp(struct sk_buff *skb)
+{
+	return (likely(skb->protocol == htons(ETH_P_IP)) &&
+		skb_transport_header_was_set(skb) &&
+		skb_network_header_len(skb) >= sizeof(struct iphdr) &&
+		ip_hdr(skb)->protocol == IPPROTO_UDP &&
+		skb_headlen(skb) >=
+		skb_transport_offset(skb) + sizeof(struct udphdr) &&
+		unlikely(udp_hdr(skb)->dest == htons(319) ||
+			 udp_hdr(skb)->dest == htons(320)));
+}
+static inline void efx_xmit_hwtstamp_pending(struct sk_buff *skb)
+{
+}
+#else
+static inline bool efx_xmit_with_hwtstamp(struct sk_buff *skb)
+{
+	return false;
+}
+static inline void efx_xmit_hwtstamp_pending(struct sk_buff *skb)
+{
+}
+#endif
+
+#ifdef EFX_NOT_UPSTREAM
+static inline int efx_skb_encapsulation(const struct sk_buff *skb)
+{
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_SKB_ENCAPSULATION)
+	return skb->encapsulation;
+#else
+	return 0;
+#endif
+}
+#endif
+
+/* Get the max fill level of the TX queues on this channel */
+static inline unsigned int
+efx_channel_tx_fill_level(struct efx_channel *channel)
+{
+	struct efx_tx_queue *tx_queue;
+	unsigned int fill_level = 0;
+
+	efx_for_each_channel_tx_queue(tx_queue, channel)
+		fill_level = max(fill_level,
+				 tx_queue->insert_count - tx_queue->read_count);
+
+	return fill_level;
+}
+
+/* Conservative approximation of efx_channel_tx_fill_level using cached value */
+static inline unsigned int
+efx_channel_tx_old_fill_level(struct efx_channel *channel)
+{
+	struct efx_tx_queue *tx_queue;
+	unsigned int fill_level = 0;
+
+	efx_for_each_channel_tx_queue(tx_queue, channel)
+		fill_level = max(fill_level,
+				 tx_queue->insert_count - tx_queue->old_read_count);
+
+	return fill_level;
+}
+
+/* Get all supported features.
+ * If a feature is not fixed, it is present in hw_features.
+ * If a feature is fixed, it is not present in hw_features, but
+ * always in features.
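+ * A fixed feature therefore cannot be toggled through ethtool, since
+ * the kernel only lets users change bits that appear in hw_features.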
+ */
+static inline netdev_features_t efx_supported_features(const struct efx_nic *efx)
+{
+	const struct net_device *net_dev = efx->net_dev;
+
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_NETDEV_HW_FEATURES)
+	return net_dev->features | net_dev->hw_features;
+#elif defined(EFX_HAVE_NETDEV_EXTENDED_HW_FEATURES)
+	return net_dev->features | netdev_extended(net_dev)->hw_features;
+#else
+	return net_dev->features | efx->hw_features;
+#endif
+}
+
+/* Add one or more hardware offload features */
+static inline void efx_add_hw_features(struct efx_nic *efx,
+				       netdev_features_t features)
+{
+	struct net_device *net_dev = efx->net_dev;
+
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_NETDEV_HW_FEATURES)
+	net_dev->hw_features |= features;
+#elif defined(EFX_HAVE_NETDEV_EXTENDED_HW_FEATURES)
+	netdev_extended(net_dev)->hw_features |= features;
+#else
+	efx->hw_features |= features;
+#endif
+}
+
+/* Get the current TX queue insert index. */
+static inline unsigned int
+efx_tx_queue_get_insert_index(const struct efx_tx_queue *tx_queue)
+{
+	return tx_queue->insert_count & tx_queue->ptr_mask;
+}
+
+/* Get a TX buffer. */
+static inline struct efx_tx_buffer *
+__efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue)
+{
+	return &tx_queue->buffer[efx_tx_queue_get_insert_index(tx_queue)];
+}
+
+/* Get a TX buffer, checking it's not currently in use. */
+static inline struct efx_tx_buffer *
+efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue)
+{
+	struct efx_tx_buffer *buffer =
+		__efx_tx_queue_get_insert_buffer(tx_queue);
+
+	EFX_WARN_ON_ONCE_PARANOID(buffer->len);
+	EFX_WARN_ON_ONCE_PARANOID(buffer->flags);
+	EFX_WARN_ON_ONCE_PARANOID(buffer->unmap_len);
+
+	return buffer;
+}
+
+#endif /* EFX_NET_DRIVER_H */
diff --git a/src/drivers/net/ethernet/sfc/nic.c b/src/drivers/net/ethernet/sfc/nic.c
new file mode 100644
index 0000000..6602ee0
--- /dev/null
+++ b/src/drivers/net/ethernet/sfc/nic.c
@@ -0,0 +1,593 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2017 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#ifndef EFX_USE_KCOMPAT
+#include <linux/cpu_rmap.h>
+#endif
+#include "net_driver.h"
+#if defined(EFX_USE_KCOMPAT) && defined(EFX_HAVE_CPU_RMAP)
+#include <linux/cpu_rmap.h>
+#endif
+#include "bitfield.h"
+#include "efx.h"
+#include "nic.h"
+#include "ef10_regs.h"
+#include "ef100_nic.h"
+#include "io.h"
+#include "workarounds.h"
+#include "mcdi_pcol.h"
+
+/**************************************************************************
+ *
+ * Generic buffer handling
+ * These buffers are used for interrupt status, MAC stats, etc.
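+ * Each buffer is allocated with dma_alloc_coherent() and zeroed before
+ * use, so it is physically contiguous and visible to both the CPU and
+ * the NIC without explicit cache synchronisation.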
+ * + **************************************************************************/ + +int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer, + unsigned int len, gfp_t gfp_flags) +{ + buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len, + &buffer->dma_addr, gfp_flags); + if (!buffer->addr) + return -ENOMEM; + buffer->len = len; + memset(buffer->addr, 0, len); + return 0; +} + +void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer) +{ + if (buffer->addr) { + dma_free_coherent(&efx->pci_dev->dev, buffer->len, + buffer->addr, buffer->dma_addr); + buffer->addr = NULL; + } +} + +/* Check whether an event is present in the eventq at the current + * read pointer. Only useful for self-test. + */ +bool efx_nic_event_present(struct efx_channel *channel) +{ + return efx_event_present(efx_event(channel, channel->eventq_read_ptr)); +} + +void efx_nic_event_test_start(struct efx_channel *channel) +{ + if (!channel->efx->type->ev_test_generate) + return; + + channel->event_test_cpu = -1; + smp_wmb(); + channel->efx->type->ev_test_generate(channel); +} + +int efx_nic_irq_test_start(struct efx_nic *efx) +{ + if (!efx->type->irq_test_generate) + return -EOPNOTSUPP; + + efx->last_irq_cpu = -1; + smp_wmb(); + return efx->type->irq_test_generate(efx); +} + +/* Hook interrupt handler(s) + * Try MSI and then legacy interrupts. + */ +int efx_nic_init_interrupt(struct efx_nic *efx) +{ + struct cpu_rmap *cpu_rmap __maybe_unused = NULL; + struct efx_channel *channel; + unsigned int n_irqs; + int rc; + +#ifdef CONFIG_RFS_ACCEL + if (efx->interrupt_mode == EFX_INT_MODE_MSIX) { + cpu_rmap = alloc_irq_cpu_rmap(efx_channels(efx)); + if (!cpu_rmap) { + rc = -ENOMEM; + goto fail1; + } + } +#endif + + /* Hook MSI or MSI-X interrupt */ + n_irqs = 0; + efx_for_each_channel(channel, efx) { + if (!channel->irq) + continue; + + rc = request_irq(channel->irq, efx->type->irq_handle_msi, 0, + efx->msi_context[channel->channel].name, + &efx->msi_context[channel->channel]); + if (rc) { + netif_err(efx, drv, efx->net_dev, + "failed to hook IRQ %d\n", channel->irq); + goto fail2; + } + ++n_irqs; + +#ifdef CONFIG_RFS_ACCEL + if (efx->interrupt_mode == EFX_INT_MODE_MSIX && + channel->channel < efx_rx_channels(efx)) { + rc = irq_cpu_rmap_add(cpu_rmap, channel->irq); + if (rc) + goto fail2; + } +#endif + } + +#ifdef CONFIG_RFS_ACCEL +#if !defined(EFX_USE_KCOMPAT) || !defined(EFX_HAVE_NETDEV_RFS_INFO) + efx->net_dev->rx_cpu_rmap = cpu_rmap; +#else + netdev_extended(efx->net_dev)->rfs_data.rx_cpu_rmap = cpu_rmap; +#endif +#endif + efx->irqs_hooked = true; + return 0; + + fail2: +#ifdef CONFIG_RFS_ACCEL + free_irq_cpu_rmap(cpu_rmap); +#endif + efx_for_each_channel(channel, efx) { + if (!channel->irq) + continue; + if (n_irqs-- == 0) + break; + free_irq(channel->irq, &efx->msi_context[channel->channel]); + } +#ifdef CONFIG_RFS_ACCEL + fail1: +#endif + return rc; +} + +void efx_nic_fini_interrupt(struct efx_nic *efx) +{ + struct efx_channel *channel; + +#ifdef CONFIG_RFS_ACCEL +#if !defined(EFX_USE_KCOMPAT) || !defined(EFX_HAVE_NETDEV_RFS_INFO) + if (efx->net_dev->rx_cpu_rmap) + free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap); + efx->net_dev->rx_cpu_rmap = NULL; +#else + if (netdev_extended(efx->net_dev)->rfs_data.rx_cpu_rmap) + free_irq_cpu_rmap(netdev_extended(efx->net_dev)->rfs_data.rx_cpu_rmap); + netdev_extended(efx->net_dev)->rfs_data.rx_cpu_rmap = NULL; +#endif +#endif + + if (efx->irqs_hooked) + /* Disable MSI/MSI-X interrupts */ + efx_for_each_channel(channel, efx) { + if (channel->irq) + 
free_irq(channel->irq,
+				 &efx->msi_context[channel->channel]);
+		}
+	efx->irqs_hooked = false;
+}
+
+#ifdef EFX_NOT_UPSTREAM
+static unsigned int
+efx_device_check_pcie_link(struct pci_dev *pdev, unsigned int *actual_width,
+			   unsigned int *max_width, unsigned int *actual_speed,
+			   unsigned int *nic_bandwidth)
+{
+	int cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+	unsigned int nic_speed;
+	u16 lnksta;
+	u16 lnkcap;
+
+	*actual_speed = 0;
+	*actual_width = 0;
+	*max_width = 0;
+	*nic_bandwidth = 0;
+
+	if (!cap ||
+	    pci_read_config_word(pdev, cap + PCI_EXP_LNKSTA, &lnksta) ||
+	    pci_read_config_word(pdev, cap + PCI_EXP_LNKCAP, &lnkcap))
+		return 0;
+
+	*actual_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
+			__ffs(PCI_EXP_LNKSTA_NLW);
+
+	*max_width = (lnkcap & PCI_EXP_LNKCAP_MLW) >> __ffs(PCI_EXP_LNKCAP_MLW);
+	*actual_speed = (lnksta & PCI_EXP_LNKSTA_CLS);
+
+	nic_speed = 1;
+	if (lnkcap & PCI_EXP_LNKCAP_SLS_5_0GB)
+		nic_speed = 2;
+	/* PCIe Gen3 capabilities are in a different config word. */
+	if (!pci_read_config_word(pdev, cap + PCI_EXP_LNKCAP2, &lnkcap)) {
+		if (lnkcap & PCI_EXP_LNKCAP2_SLS_8_0GB)
+			nic_speed = 3;
+	}
+
+	*nic_bandwidth = *max_width << (nic_speed - 1);
+
+	return nic_speed;
+}
+
+/* Return the embedded PCI bridge device if it exists. If the PCI device has
+ * been assigned to a virtual machine, the bridge will not be found so the
+ * pcie link check is done with the NIC PCI device.
+ */
+static struct pci_dev *efx_get_bridge_device(struct pci_dev *nic)
+{
+	struct pci_dev *pdev = NULL;
+	struct pci_dev *p = NULL;
+
+	/* Find PCI device bridging the NIC PCI bus.
+	 * First the bridge downstream port device.
+	 */
+	for_each_pci_dev(p) {
+		if (p->subordinate == nic->bus) {
+			/* Is this the EF100 embedded bridge downstream
+			 * port?
+			 */
+			if ((p->vendor != PCI_VENDOR_ID_XILINX) ||
+			    (p->device != EF100_BRIDGE_DOWNSTREAM_PCI_DEVICE))
+				return nic;
+			/* We got the downstream port. */
+			pdev = p;
+		}
+	}
+
+	/* This should never happen. */
+	if (!pdev)
+		return nic;
+
+	/* We got the embedded bridge downstream port. Let's get the upstream
+	 * port now which is the physical PCI link to the Host.
+	 */
+	p = NULL;
+	for_each_pci_dev(p) {
+		if (p->subordinate == pdev->bus) {
+			if ((p->vendor != PCI_VENDOR_ID_XILINX) ||
+			    (p->device != EF100_BRIDGE_UPSTREAM_PCI_DEVICE)) {
+				WARN_ON(1);
+				return nic;
+			}
+			/* We got the upstream port. This is the device we are
+			 * interested in.
+			 */
+			return p;
+		}
+	}
+	return nic;
+}
+
+void
+efx_nic_check_pcie_link(struct efx_nic *efx, unsigned int desired_bandwidth,
+			unsigned int *actual_width, unsigned int *actual_speed)
+{
+	struct pci_dev *pdev = efx->pci_dev;
+	unsigned int nic_bandwidth;
+	unsigned int bandwidth;
+	unsigned int nic_width;
+	unsigned int nic_speed;
+	unsigned int width;
+	unsigned int speed;
+
+	if (efx_nic_rev(efx) == EFX_REV_EF100)
+		pdev = efx_get_bridge_device(efx->pci_dev);
+
+	nic_speed = efx_device_check_pcie_link(pdev, &width, &nic_width, &speed,
+					       &nic_bandwidth);
+
+	if (!nic_speed)
+		goto out;
+
+	if (width > nic_width)
+		netif_dbg(efx, drv, efx->net_dev,
+			  "PCI Express width is %d, with maximum expected %d. "
+			  "If running on a virtualized platform this is fine, "
+			  "otherwise it indicates a PCI problem.\n",
+			  width, nic_width);
+
+	bandwidth = width << (speed - 1);
+
+	if (desired_bandwidth > nic_bandwidth)
+		/* You can desire all you want, it ain't gonna happen.
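+		 * The requested figure is simply clamped to the device's
+		 * own capability before the comparisons below.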
*/ + desired_bandwidth = nic_bandwidth; + + if (desired_bandwidth && (bandwidth < desired_bandwidth)) + netif_warn(efx, drv, efx->net_dev, + "This Solarflare Network Adapter requires the " + "equivalent of %d lanes at PCI Express %d speed for " + "full throughput, but is currently limited to %d " + "lanes at PCI Express %d speed. Consult your " + "motherboard documentation to find a more " + "suitable slot\n", + desired_bandwidth > EFX_BW_PCIE_GEN3_X8 ? 16 : 8, + nic_speed, width, speed); + else if (bandwidth < nic_bandwidth) + netif_warn(efx, drv, efx->net_dev, + "This Solarflare Network Adapter requires a " + "slot with %d lanes at PCI Express %d speed for " + "optimal latency, but is currently limited to " + "%d lanes at PCI Express %d speed\n", + nic_width, nic_speed, width, speed); + +out: + if (actual_width) + *actual_width = width; + + if (actual_speed) + *actual_speed = speed; +} +#endif + +/* Register dump */ + +#define REGISTER_REVISION_ED 4 +#define REGISTER_REVISION_EZ 4 /* latest EF10 revision */ + +struct efx_nic_reg { + u32 offset:24; + u32 min_revision:3, max_revision:3; +}; + +#define REGISTER(name, arch, min_rev, max_rev) { \ + arch ## R_ ## min_rev ## max_rev ## _ ## name, \ + REGISTER_REVISION_ ## arch ## min_rev, \ + REGISTER_REVISION_ ## arch ## max_rev \ +} +#define REGISTER_DZ(name) REGISTER(name, E, D, Z) + +static const struct efx_nic_reg efx_nic_regs[] = { + REGISTER_DZ(BIU_HW_REV_ID), + REGISTER_DZ(MC_DB_LWRD), + REGISTER_DZ(MC_DB_HWRD), +}; + +struct efx_nic_reg_table { + u32 offset:24; + u32 min_revision:3, max_revision:3; + u32 step:6, rows:21; +}; + +#define REGISTER_TABLE_DIMENSIONS(_, offset, arch, min_rev, max_rev, step, rows) { \ + offset, \ + REGISTER_REVISION_ ## arch ## min_rev, \ + REGISTER_REVISION_ ## arch ## max_rev, \ + step, rows \ +} +#define REGISTER_TABLE(name, arch, min_rev, max_rev) \ + REGISTER_TABLE_DIMENSIONS( \ + name, arch ## R_ ## min_rev ## max_rev ## _ ## name, \ + arch, min_rev, max_rev, \ + arch ## R_ ## min_rev ## max_rev ## _ ## name ## _STEP, \ + arch ## R_ ## min_rev ## max_rev ## _ ## name ## _ROWS) +#define REGISTER_TABLE_DZ(name) REGISTER_TABLE(name, E, D, Z) + +static const struct efx_nic_reg_table efx_nic_reg_tables[] = { + REGISTER_TABLE_DZ(BIU_MC_SFT_STATUS), +}; + +size_t efx_nic_get_regs_len(struct efx_nic *efx) +{ + const struct efx_nic_reg *reg; + const struct efx_nic_reg_table *table; + size_t len = 0; + + for (reg = efx_nic_regs; + reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs); + reg++) + if (efx->type->revision >= reg->min_revision && + efx->type->revision <= reg->max_revision) + len += sizeof(efx_oword_t); + + for (table = efx_nic_reg_tables; + table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables); + table++) + if (efx->type->revision >= table->min_revision && + efx->type->revision <= table->max_revision) + len += table->rows * min_t(size_t, table->step, 16); + + return len; +} + +void efx_nic_get_regs(struct efx_nic *efx, void *buf) +{ + const struct efx_nic_reg *reg; + const struct efx_nic_reg_table *table; + + for (reg = efx_nic_regs; + reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs); + reg++) { + if (efx->type->revision >= reg->min_revision && + efx->type->revision <= reg->max_revision) { + efx_reado(efx, (efx_oword_t *)buf, reg->offset); + buf += sizeof(efx_oword_t); + } + } + + for (table = efx_nic_reg_tables; + table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables); + table++) { + size_t size, i; + + if (!(efx->type->revision >= table->min_revision && + efx->type->revision <= 
table->max_revision)) + continue; + + size = min_t(size_t, table->step, 16); + + for (i = 0; i < table->rows; i++) { + switch (table->step) { + case 4: /* 32-bit SRAM */ + efx_readd(efx, buf, table->offset + 4 * i); + break; + case 16: /* 128-bit-readable register */ + efx_reado_table(efx, buf, table->offset, i); + break; + case 32: /* 128-bit register, interleaved */ + efx_reado_table(efx, buf, table->offset, 2 * i); + break; + default: + WARN_ON(1); + return; + } + buf += size; + } + } +} + +/** + * efx_nic_describe_stats - Describe supported statistics for ethtool + * @desc: Array of &struct efx_hw_stat_desc describing the statistics + * @count: Length of the @desc array + * @mask: Bitmask of which elements of @desc are enabled + * @names: Buffer to copy names to, or %NULL. The names are copied + * starting at intervals of %ETH_GSTRING_LEN bytes. + * + * Return: the number of visible statistics, i.e. the number of set + * bits in the first @count bits of @mask for which a name is defined. + */ +size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count, + const unsigned long *mask, u8 *names) +{ + size_t visible = 0; + size_t index; + + for_each_set_bit(index, mask, count) { + if (desc[index].name) { + if (names) { + strscpy(names, desc[index].name, + ETH_GSTRING_LEN); + names += ETH_GSTRING_LEN; + } + ++visible; + } + } + + return visible; +} + +/** + * efx_nic_copy_stats - Copy stats from the DMA buffer + * @efx: The associated NIC. + * @dest: Destination buffer. Must be the same size as the DMA buffer. + * + * Copy stats from the DMA buffer in to an intermediate buffer. This is + * used to get a consistent set of stats while the DMA buffer can be + * written at any time by the NIC. + * + * Return: a negative error code or 0 on success. + */ +int efx_nic_copy_stats(struct efx_nic *efx, __le64 *dest) +{ + int retry; + __le64 generation_start, generation_end; + __le64 *dma_stats = efx->stats_buffer.addr; + int rc = 0; + + if (!dest) + return 0; + + if (!dma_stats) + goto return_zeroes; + + for (retry = 0; retry < 100; ++retry) { + generation_end = dma_stats[efx->num_mac_stats - 1]; + if (generation_end == EFX_MC_STATS_GENERATION_INVALID) + goto return_zeroes; + rmb(); + memcpy(dest, dma_stats, efx->num_mac_stats * sizeof(__le64)); + rmb(); + generation_start = dma_stats[MC_CMD_MAC_GENERATION_START]; + if (generation_end == generation_start) + return 0; /* return good data */ + udelay(100); + } + + rc = -EIO; + +return_zeroes: + memset(dest, 0, efx->num_mac_stats * sizeof(u64)); + return rc; +} + +/** + * efx_nic_update_stats - Convert statistics DMA buffer to array of u64 + * @desc: Array of &struct efx_hw_stat_desc describing the DMA buffer + * layout. DMA widths of 0, 16, 32 and 64 are supported; where + * the width is specified as 0 the corresponding element of + * @stats is not updated. + * @count: Length of the @desc array + * @mask: Bitmask of which elements of @desc are enabled + * @stats: Buffer to update with the converted statistics. The length + * of this array must be at least @count. + * @mc_initial_stats: Copy of DMA buffer containing initial stats. Subtracted + * from the stats in mc_stats. 
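+ *	Subtracting this baseline makes the reported counters appear to
+ *	start from zero at the point the snapshot was taken.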
+ * @mc_stats: DMA buffer containing hardware statistics + */ +void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count, + const unsigned long *mask, u64 *stats, + const void *mc_initial_stats, const void *mc_stats) +{ + size_t index; + __le64 zero = 0; + + for_each_set_bit(index, mask, count) { + if (desc[index].dma_width) { + const void *addr = + mc_stats ? + mc_stats + desc[index].offset : + &zero; + const void *init = + mc_initial_stats && mc_stats ? + mc_initial_stats + desc[index].offset : + &zero; + + switch (desc[index].dma_width) { + case 16: + stats[index] = le16_to_cpup((__le16 *)addr) - + le16_to_cpup((__le16 *)init); + break; + case 32: + stats[index] = le32_to_cpup((__le32 *)addr) - + le32_to_cpup((__le32 *)init); + break; + case 64: + stats[index] = le64_to_cpup((__le64 *)addr) - + le64_to_cpup((__le64 *)init); + break; + default: + WARN_ON_ONCE(1); + stats[index] = 0; + break; + } + } + } +} + +void efx_nic_fix_nodesc_drop_stat(struct efx_nic *efx, u64 *rx_nodesc_drops) +{ + /* if down, or this is the first update after coming up */ + if (!(efx->net_dev->flags & IFF_UP) || !efx->rx_nodesc_drops_prev_state) + efx->rx_nodesc_drops_while_down += + *rx_nodesc_drops - efx->rx_nodesc_drops_total; + efx->rx_nodesc_drops_total = *rx_nodesc_drops; + efx->rx_nodesc_drops_prev_state = !!(efx->net_dev->flags & IFF_UP); + *rx_nodesc_drops -= efx->rx_nodesc_drops_while_down; +} diff --git a/src/drivers/net/ethernet/sfc/nic.h b/src/drivers/net/ethernet/sfc/nic.h new file mode 100644 index 0000000..924233b --- /dev/null +++ b/src/drivers/net/ethernet/sfc/nic.h @@ -0,0 +1,269 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2006-2017 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
+ */ + +#ifndef EFX_NIC_H +#define EFX_NIC_H + +#include "nic_common.h" +#include "efx.h" + +enum { + PHY_TYPE_NONE = 0, + PHY_TYPE_TXC43128 = 1, + PHY_TYPE_88E1111 = 2, + PHY_TYPE_SFX7101 = 3, + PHY_TYPE_QT2022C2 = 4, + PHY_TYPE_PM8358 = 6, + PHY_TYPE_SFT9001A = 8, + PHY_TYPE_QT2025C = 9, + PHY_TYPE_SFT9001B = 10, +}; + +enum { + EF10_STAT_port_tx_bytes = GENERIC_STAT_COUNT, + EF10_STAT_port_tx_packets, + EF10_STAT_port_tx_pause, + EF10_STAT_port_tx_control, + EF10_STAT_port_tx_unicast, + EF10_STAT_port_tx_multicast, + EF10_STAT_port_tx_broadcast, + EF10_STAT_port_tx_lt64, + EF10_STAT_port_tx_64, + EF10_STAT_port_tx_65_to_127, + EF10_STAT_port_tx_128_to_255, + EF10_STAT_port_tx_256_to_511, + EF10_STAT_port_tx_512_to_1023, + EF10_STAT_port_tx_1024_to_15xx, + EF10_STAT_port_tx_15xx_to_jumbo, + EF10_STAT_port_rx_bytes, + EF10_STAT_port_rx_bytes_minus_good_bytes, + EF10_STAT_port_rx_good_bytes, + EF10_STAT_port_rx_bad_bytes, + EF10_STAT_port_rx_packets, + EF10_STAT_port_rx_good, + EF10_STAT_port_rx_bad, + EF10_STAT_port_rx_pause, + EF10_STAT_port_rx_control, + EF10_STAT_port_rx_unicast, + EF10_STAT_port_rx_multicast, + EF10_STAT_port_rx_broadcast, + EF10_STAT_port_rx_lt64, + EF10_STAT_port_rx_64, + EF10_STAT_port_rx_65_to_127, + EF10_STAT_port_rx_128_to_255, + EF10_STAT_port_rx_256_to_511, + EF10_STAT_port_rx_512_to_1023, + EF10_STAT_port_rx_1024_to_15xx, + EF10_STAT_port_rx_15xx_to_jumbo, + EF10_STAT_port_rx_gtjumbo, + EF10_STAT_port_rx_bad_gtjumbo, + EF10_STAT_port_rx_overflow, + EF10_STAT_port_rx_align_error, + EF10_STAT_port_rx_length_error, + EF10_STAT_port_rx_nodesc_drops, + EF10_STAT_port_rx_pm_trunc_bb_overflow, + EF10_STAT_port_rx_pm_discard_bb_overflow, + EF10_STAT_port_rx_pm_trunc_vfifo_full, + EF10_STAT_port_rx_pm_discard_vfifo_full, + EF10_STAT_port_rx_pm_trunc_qbb, + EF10_STAT_port_rx_pm_discard_qbb, + EF10_STAT_port_rx_pm_discard_mapping, + EF10_STAT_port_rx_dp_q_disabled_packets, + EF10_STAT_port_rx_dp_di_dropped_packets, + EF10_STAT_port_rx_dp_streaming_packets, + EF10_STAT_port_rx_dp_hlb_fetch, + EF10_STAT_port_rx_dp_hlb_wait, + EF10_STAT_port_COUNT, + EF10_STAT_rx_unicast = EF10_STAT_port_COUNT, + EF10_STAT_rx_unicast_bytes, + EF10_STAT_rx_multicast, + EF10_STAT_rx_multicast_bytes, + EF10_STAT_rx_broadcast, + EF10_STAT_rx_broadcast_bytes, + EF10_STAT_rx_bad, + EF10_STAT_rx_bad_bytes, + EF10_STAT_rx_overflow, + EF10_STAT_tx_unicast, + EF10_STAT_tx_unicast_bytes, + EF10_STAT_tx_multicast, + EF10_STAT_tx_multicast_bytes, + EF10_STAT_tx_broadcast, + EF10_STAT_tx_broadcast_bytes, + EF10_STAT_tx_bad, + EF10_STAT_tx_bad_bytes, + EF10_STAT_tx_overflow, + EF10_STAT_V1_COUNT, + EF10_STAT_fec_uncorrected_errors = EF10_STAT_V1_COUNT, + EF10_STAT_fec_corrected_errors, + EF10_STAT_fec_corrected_symbols_lane0, + EF10_STAT_fec_corrected_symbols_lane1, + EF10_STAT_fec_corrected_symbols_lane2, + EF10_STAT_fec_corrected_symbols_lane3, + EF10_STAT_ctpio_vi_busy_fallback, + EF10_STAT_ctpio_long_write_success, + EF10_STAT_ctpio_missing_dbell_fail, + EF10_STAT_ctpio_overflow_fail, + EF10_STAT_ctpio_underflow_fail, + EF10_STAT_ctpio_timeout_fail, + EF10_STAT_ctpio_noncontig_wr_fail, + EF10_STAT_ctpio_frm_clobber_fail, + EF10_STAT_ctpio_invalid_wr_fail, + EF10_STAT_ctpio_vi_clobber_fallback, + EF10_STAT_ctpio_unqualified_fallback, + EF10_STAT_ctpio_runt_fallback, + EF10_STAT_ctpio_success, + EF10_STAT_ctpio_fallback, + EF10_STAT_ctpio_poison, + EF10_STAT_ctpio_erase, + EF10_STAT_COUNT +}; + +/* Maximum number of TX PIO buffers we may allocate to a function. 
+ * This matches the total number of buffers on each SFC9100-family
+ * controller.
+ */
+#define EF10_TX_PIOBUF_COUNT 16
+
+#define EF10_NUM_MCDI_BUFFERS 1
+
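+/* Illustrative sketch, added for this write-up rather than taken from the
+ * driver: the EF10_STAT_* indices above pair with an array of
+ * struct efx_hw_stat_desc and a bitmask of enabled entries when statistics
+ * are described to ethtool via efx_nic_describe_stats(). Passing NULL for
+ * the names buffer merely counts the visible statistics. The helper and
+ * parameter names here are hypothetical.
+ */
+static inline size_t example_count_visible_stats(
+				const struct efx_hw_stat_desc *example_desc)
+{
+	DECLARE_BITMAP(mask, EF10_STAT_COUNT);
+
+	/* Enable every statistic, then count those that carry a name */
+	bitmap_fill(mask, EF10_STAT_COUNT);
+	return efx_nic_describe_stats(example_desc, EF10_STAT_COUNT, mask,
+				      NULL);
+}
+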
+/**
+ * struct efx_ef10_nic_data - EF10 architecture NIC state
+ * @efx: Pointer back to main interface structure
+ * @mcdi_buf: DMA buffer for MCDI
+ * @mcdi_buf_use: bitmap of which MCDI buffers are used
+ * @warm_boot_count: Last seen MC warm boot count
+ * @bist_warm_boot_count: Value of @warm_boot_count at start of BIST
+ * @n_allocated_vis: Number of VIs allocated to this function
+ * @must_realloc_vis: Flag: VIs have yet to be reallocated after MC reboot
+ * @must_restore_vports: Flag: V-ports have yet to be restored after MC reboot
+ * @n_piobufs: Number of PIO buffers allocated to this function
+ * @wc_membase: Base address of write-combining mapping of the memory BAR
+ * @pio_write_base: Base address for writing PIO buffers
+ * @pio_write_vi_base: Relative VI number for @pio_write_base
+ * @piobuf_handle: Handle of each PIO buffer allocated
+ * @piobuf_size: size of a single PIO buffer
+ * @must_restore_piobufs: Flag: PIO buffers have yet to be restored after MC
+ *	reboot
+ * @mc_stats: Scratch buffer for converting statistics to the kernel's format
+ * @stats: Hardware statistics
+ * @vf_stats_work: Work item to poll hardware statistics (VF driver only)
+ * @workaround_35388: Flag: firmware supports workaround for bug 35388
+ * @workaround_26807: Flag: firmware supports workaround for bug 26807
+ * @workaround_61265: Flag: firmware supports workaround for bug 61265
+ * @must_check_datapath_caps: Flag: @datapath_caps needs to be revalidated
+ *	after MC reboot
+ * @datapath_caps: Capabilities of datapath firmware (FLAGS1 field of
+ *	%MC_CMD_GET_CAPABILITIES response)
+ * @datapath_caps2: Further capabilities of datapath firmware (FLAGS2 field of
+ *	%MC_CMD_GET_CAPABILITIES_V2 response)
+ * @must_reprobe_sensors: Flag: sensors have yet to be reprobed after MC reboot
+ * @must_probe_vswitching: Flag: vswitching has yet to be set up after MC reboot
+ * @pf_index: The number for this PF, or the parent PF if this is a VF
+ * @vf_index: The number for this VF, or 0xFFFF if this is a PF
+ * @port_id: Physical port identity
+ * @vport_mac: The MAC address on the vport, only for PFs; VFs will be zero
+ * @udp_tunnels: UDP tunnel port numbers and types.
+ * @licensed_features: Flags for licensed firmware features.
+ */
+struct efx_ef10_nic_data {
+	struct efx_nic *efx;
+	struct efx_buffer mcdi_buf;
+	unsigned long mcdi_buf_use;
+	u16 warm_boot_count;
+	u16 bist_warm_boot_count;
+	unsigned int n_allocated_vis;
+	bool must_realloc_vis;
+	bool must_restore_vports;
+	unsigned int n_piobufs;
+	void __iomem *wc_membase, *pio_write_base;
+	unsigned int pio_write_vi_base;
+	unsigned int piobuf_handle[EF10_TX_PIOBUF_COUNT];
+	u16 piobuf_size;
+	bool must_restore_piobufs;
+	__le64 *mc_stats;
+	u64 stats[EF10_STAT_COUNT];
+	struct delayed_work vf_stats_work;
+	bool workaround_35388;
+	bool workaround_26807;
+	bool workaround_61265;
+	bool must_check_datapath_caps;
+	u32 datapath_caps;
+	u32 datapath_caps2;
+	bool must_reprobe_sensors;
+	bool must_probe_vswitching;
+	unsigned int pf_index;
+	unsigned int vf_index;
+	u8 port_id[ETH_ALEN];
+#ifdef CONFIG_SFC_SRIOV
+	/** @max_vfs: Number of VFs to be initialised by the driver */
+	int max_vfs;
+	/** @vf_count: Number of VFs intended to be enabled */
+	unsigned int vf_count;
+	/** @vf: Pointer to VF data structure */
+	struct ef10_vf *vf;
+#endif
+	u8 vport_mac[ETH_ALEN];
+	struct efx_udp_tunnel udp_tunnels[16];
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_UDP_TUNNEL_NIC_INFO)
+	/**
+	 * @udp_tunnels_dirty: flag indicating a reboot occurred while
+	 * pushing @udp_tunnels to hardware and thus the push must be
+	 * re-done.
+	 */
+	bool udp_tunnels_dirty;
+	/**
+	 * @udp_tunnels_lock: Serialises writes to @udp_tunnels and
+	 * @udp_tunnels_dirty.
+	 */
+	struct mutex udp_tunnels_lock;
+#else
+	/**
+	 * @udp_tunnels_busy: Indicates if efx_ef10_set_udp_tnl_ports()
+	 * is currently running.
+	 */
+	bool udp_tunnels_busy;
+	/**
+	 * @udp_tunnels_lock: Serialises writes to @udp_tunnels and
+	 * @udp_tunnels_busy.
+	 */
+	spinlock_t udp_tunnels_lock;
+	/**
+	 * @udp_tunnel_work: workitem for pushing UDP tunnel ports to the MC.
+	 */
+	struct work_struct udp_tunnel_work;
+#endif
+	u64 licensed_features;
+};
+
+#define efx_ef10_has_cap(caps, flag) \
+	(!!((caps) & BIT_ULL(MC_CMD_GET_CAPABILITIES_V4_OUT_ ## flag ## _LBN)))
+
+#ifdef EFX_NOT_UPSTREAM
+struct efx_update_license2;
+int efx_ef10_update_keys(struct efx_nic *efx,
+			 struct efx_update_license2 *key_stats);
+struct efx_licensed_app_state;
+int efx_ef10_licensed_app_state(struct efx_nic *efx,
+				struct efx_licensed_app_state *app_state);
+#endif
+
+extern const struct efx_nic_type efx_hunt_a0_nic_type;
+extern const struct efx_nic_type efx_hunt_a0_vf_nic_type __attribute__((weak));
+
+/* Only safe to call if protected against rep create/destroy */
+static inline struct net_device *efx_get_vf_rep(struct efx_nic *efx, unsigned int vf)
+{
+	if (efx->type->get_vf_rep == NULL)
+		return ERR_PTR(-EOPNOTSUPP);
+	return efx->type->get_vf_rep(efx, vf);
+}
+
+#endif /* EFX_NIC_H */
diff --git a/src/drivers/net/ethernet/sfc/nic_common.h b/src/drivers/net/ethernet/sfc/nic_common.h
new file mode 100644
index 0000000..dbe6af4
--- /dev/null
+++ b/src/drivers/net/ethernet/sfc/nic_common.h
@@ -0,0 +1,289 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2013 Solarflare Communications Inc.
+ * Copyright 2019-2022 Xilinx Inc.
+ */
+
+#ifndef EFX_NIC_COMMON_H
+#define EFX_NIC_COMMON_H
+
+#include "net_driver.h"
+#include "efx_common.h"
+#include "mcdi.h"
+#include "ptp.h"
+
+enum {
+	/* Revisions 0-2 were Falcon A0, A1 and B0 respectively.
+ * They are not supported by this driver but these revision numbers + * form part of the ethtool API for register dumping. + */ + EFX_REV_SIENA_A0 = 3, + EFX_REV_HUNT_A0 = 4, + EFX_REV_EF100 = 5, +}; + +static inline int efx_nic_rev(struct efx_nic *efx) +{ + return efx->type->revision; +} + +/* Read the current event from the event queue */ +static inline efx_qword_t *efx_event(struct efx_channel *channel, + unsigned int index) +{ + return ((efx_qword_t *) (channel->eventq.addr)) + + (index & channel->eventq_mask); +} + +/* See if an event is present + * + * We check both the high and low dword of the event for all ones. We + * wrote all ones when we cleared the event, and no valid event can + * have all ones in either its high or low dwords. This approach is + * robust against reordering. + * + * Note that using a single 64-bit comparison is incorrect; even + * though the CPU read will be atomic, the DMA write may not be. + */ +static inline int efx_event_present(efx_qword_t *event) +{ + return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) | + EFX_DWORD_IS_ALL_ONES(event->dword[1])); +} + +/* Returns a pointer to the specified transmit descriptor in the TX + * descriptor queue belonging to the specified channel. + */ +static inline efx_qword_t * +efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index) +{ + return ((efx_qword_t *) (tx_queue->txd.addr)) + index; +} + +/* Report whether this TX queue would be empty for the given write_count. + * May return false negative. + */ +static inline bool __efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue, + unsigned int write_count) +{ + unsigned int empty_read_count = READ_ONCE(tx_queue->empty_read_count); + + if (empty_read_count == 0) + return false; + + return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0; +} + +/* Report whether the NIC considers this TX queue empty, using + * packet_write_count (the write count recorded for the last completable + * doorbell push). May return false negative. EF10 only, which is OK + * because only EF10 supports PIO. + */ +static inline bool efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue) +{ + EFX_WARN_ON_ONCE_PARANOID(!tx_queue->efx->type->option_descriptors); + return __efx_nic_tx_is_empty(tx_queue, tx_queue->packet_write_count); +} + +/* Decide whether to push a TX descriptor to the NIC vs merely writing + * the doorbell. This can reduce latency when we are adding a single + * descriptor to an empty queue, but is otherwise pointless. Further, + * Falcon and Siena have hardware bugs (SF bug 33851) that may be + * triggered if we don't check this. + * We use the write_count used for the last doorbell push, to get the + * NIC's view of the tx queue. 
+ */ +static inline bool efx_nic_may_push_tx_desc(struct efx_tx_queue *tx_queue, + unsigned int write_count) +{ + bool was_empty = __efx_nic_tx_is_empty(tx_queue, write_count); + + tx_queue->empty_read_count = 0; + return was_empty && tx_queue->write_count - write_count == 1; +} + +/* Returns a pointer to the specified descriptor in the RX descriptor queue */ +static inline efx_qword_t * +efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index) +{ + return ((efx_qword_t *) (rx_queue->rxd.addr)) + index; +} + +/* Alignment of PCIe DMA boundaries (4KB) */ +#define EFX_PAGE_SIZE 4096 +/* Size and alignment of buffer table entries (same) */ +#define EFX_BUF_SIZE EFX_PAGE_SIZE + +/* NIC-generic software stats */ +enum { + GENERIC_STAT_rx_noskb_drops, + GENERIC_STAT_rx_nodesc_trunc, + GENERIC_STAT_COUNT +}; + +#define EFX_GENERIC_SW_STAT(ext_name) \ + [GENERIC_STAT_ ## ext_name] = { #ext_name, 0, 0 } + +/* TX data path */ +static inline int efx_nic_probe_tx(struct efx_tx_queue *tx_queue) +{ + return tx_queue->efx->type->tx_probe(tx_queue); +} +static inline int efx_nic_init_tx(struct efx_tx_queue *tx_queue) +{ + return tx_queue->efx->type->tx_init(tx_queue); +} +static inline void efx_nic_push_buffers(struct efx_tx_queue *tx_queue) +{ + tx_queue->efx->type->tx_write(tx_queue); +} +static inline void efx_nic_notify_tx_desc(struct efx_tx_queue *tx_queue) +{ + tx_queue->efx->type->tx_notify(tx_queue); +} +int efx_nic_tx_tso_sw(struct efx_tx_queue *tx_queue, struct sk_buff *skb, + bool *data_mapped); + +/* RX data path */ +static inline int efx_nic_probe_rx(struct efx_rx_queue *rx_queue) +{ + return rx_queue->efx->type->rx_probe(rx_queue); +} +static inline int efx_nic_init_rx(struct efx_rx_queue *rx_queue) +{ + return rx_queue->efx->type->rx_init(rx_queue); +} +static inline void efx_nic_remove_rx(struct efx_rx_queue *rx_queue) +{ + rx_queue->efx->type->rx_remove(rx_queue); +} +static inline void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue) +{ + rx_queue->efx->type->rx_write(rx_queue); +} +static inline int efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue) +{ + return rx_queue->efx->type->rx_defer_refill(rx_queue); +} + +/* Event data path */ +static inline int efx_nic_probe_eventq(struct efx_channel *channel) +{ + return channel->efx->type->ev_probe(channel); +} +static inline int efx_nic_init_eventq(struct efx_channel *channel) +{ + return channel->efx->type->ev_init(channel); +} +static inline void efx_nic_fini_eventq(struct efx_channel *channel) +{ + channel->efx->type->ev_fini(channel); +} +static inline void efx_nic_remove_eventq(struct efx_channel *channel) +{ + channel->efx->type->ev_remove(channel); +} +static inline int +efx_nic_process_eventq(struct efx_channel *channel, int quota) +{ + return channel->efx->type->ev_process(channel, quota); +} +static inline bool efx_nic_mcdi_ev_pending(struct efx_channel *channel) +{ + return channel->efx->type->ev_mcdi_pending(channel); +} +static inline void efx_nic_eventq_read_ack(struct efx_channel *channel) +{ + channel->efx->type->ev_read_ack(channel); +} + +static inline bool efx_nic_hw_unavailable(struct efx_nic *efx) +{ + if (efx->type->hw_unavailable) + return efx->type->hw_unavailable(efx); + return false; +} +static inline bool efx_nic_has_dynamic_sensors(struct efx_nic *efx) +{ + if (efx->type->has_dynamic_sensors) + return efx->type->has_dynamic_sensors(efx); + + return false; +} + +void efx_nic_event_test_start(struct efx_channel *channel); +bool efx_nic_event_present(struct efx_channel *channel); + +static inline 
unsigned int efx_rx_recycle_ring_size(const struct efx_nic *efx)
+{
+	return efx->type->rx_recycle_ring_size(efx);
+}
+
+/* Some statistics are computed as A - B where A and B each increase
+ * linearly with some hardware counter(s) and the counters are read
+ * asynchronously. If the counters contributing to B are always read
+ * after those contributing to A, the computed value may be lower than
+ * the true value by some variable amount, and may decrease between
+ * subsequent computations.
+ *
+ * We should never allow statistics to decrease or to exceed the true
+ * value. Since the computed value will never be greater than the true
+ * value, except when the MAC stats are zeroed as a result of a NIC reset,
+ * we can achieve this by only storing the computed value when it
+ * increases, or when it is zeroed.
+ */
+static inline void efx_update_diff_stat(u64 *stat, u64 diff)
+{
+	if (!diff || (s64)(diff - *stat) > 0)
+		*stat = diff;
+}
+
+/* Interrupts */
+int efx_nic_init_interrupt(struct efx_nic *efx);
+int efx_nic_irq_test_start(struct efx_nic *efx);
+void efx_nic_fini_interrupt(struct efx_nic *efx);
+
+static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel)
+{
+	return READ_ONCE(channel->event_test_cpu);
+}
+static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx)
+{
+	return READ_ONCE(efx->last_irq_cpu);
+}
+
+/* Global Resources */
+void efx_nic_check_pcie_link(struct efx_nic *efx,
+			     unsigned int desired_bandwidth,
+			     unsigned int *actual_width,
+			     unsigned int *actual_speed);
+
+int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
+			 unsigned int len, gfp_t gfp_flags);
+void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer);
+
+size_t efx_nic_get_regs_len(struct efx_nic *efx);
+void efx_nic_get_regs(struct efx_nic *efx, void *buf);
+
+#define EFX_MC_STATS_GENERATION_INVALID ((__force __le64)(-1))
+
+size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
+			      const unsigned long *mask, u8 *names);
+int efx_nic_copy_stats(struct efx_nic *efx, __le64 *dest);
+static inline int efx_nic_reset_stats(struct efx_nic *efx)
+{
+	return efx_nic_copy_stats(efx, efx->mc_initial_stats);
+}
+void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
+			  const unsigned long *mask, u64 *stats,
+			  const void *mc_initial_stats, const void *mc_stats);
+void efx_nic_fix_nodesc_drop_stat(struct efx_nic *efx, u64 *stat);
+
+#define EFX_MAX_FLUSH_TIME 5000
+
+bool efx_mcdi_port_process_event(struct efx_channel *channel, efx_qword_t *event,
+				 int *rc, int budget);
+
+#endif /* EFX_NIC_COMMON_H */
diff --git a/src/drivers/net/ethernet/sfc/ptp.c b/src/drivers/net/ethernet/sfc/ptp.c
new file mode 100644
index 0000000..67f4efe
--- /dev/null
+++ b/src/drivers/net/ethernet/sfc/ptp.c
@@ -0,0 +1,3928 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2011-2017 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+/* Theory of operation:
+ *
+ * PTP support is assisted by firmware running on the MC, which provides
+ * the hardware timestamping capabilities. Both transmitted and received
+ * PTP event packets are queued onto internal queues for subsequent
+ * processing; this is because the MC operations are relatively long and
+ * would block NAPI/interrupt operation.
+ *
+ * Receive event processing:
+ *	The event contains the packet's UUID and sequence number, together
+ *	with the hardware timestamp. The PTP receive packet queue is searched
+ *	for this UUID/sequence number and, if found, put on a pending queue.
+ *	Packets not matching are delivered without timestamps (MCDI events
+ *	will always arrive after the actual packet).
+ *	It is important for the operation of the PTP protocol that the
+ *	ordering of packets between the event and general port is maintained.
+ *
+ * Work queue processing:
+ *	If work waiting, synchronise host/hardware time
+ *
+ * Transmit: send packet through MC, which returns the transmission time
+ * that is converted to an appropriate timestamp.
+ *
+ * Receive: the packet's reception time is converted to an appropriate
+ * timestamp.
+ */
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/time.h>
+#include <linux/ktime.h>
+#include <linux/module.h>
+#include <linux/net_tstamp.h>
+#ifndef EFX_USE_KCOMPAT
+#include <linux/pps_kernel.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/timekeeping.h>
+#endif
+#include <linux/slab.h>
+#include "net_driver.h"
+#include "efx.h"
+#include "mcdi.h"
+#include "mcdi_pcol.h"
+#include "io.h"
+#include "nic.h"
+#include "debugfs.h"
+#ifdef EFX_USE_KCOMPAT
+#include "efx_ioctl.h"
+#include "kernel_compat.h"
+#endif
+#ifdef CONFIG_SFC_TRACING
+#include <trace/events/sfc.h>
+#endif
+
+/* Maximum number of events expected to make up a PTP event */
+#define MAX_EVENT_FRAGS 3
+
+/* Maximum delay, ms, to begin synchronisation */
+#define MAX_SYNCHRONISE_WAIT_MS 2
+
+/* How long, at most, to spend synchronising */
+#define SYNCHRONISE_PERIOD_NS 250000
+
+/* How often to update the shared memory time */
+#define SYNCHRONISATION_GRANULARITY_NS 200
+
+/* Minimum permitted length of a (corrected) synchronisation time */
+#define DEFAULT_MIN_SYNCHRONISATION_NS 120
+
+/* Maximum permitted length of a (corrected) synchronisation time */
+#define MAX_SYNCHRONISATION_NS 1000
+
+/* How many (MC) receive events can be queued */
+#define MAX_RECEIVE_EVENTS 8
+
+/* Length of (modified) moving average. */
+#define AVERAGE_LENGTH 16
+
+/* How long an unmatched event or packet can be held */
+#define PKT_EVENT_LIFETIME_MS 10
+
+/* How long unused unicast filters can be held */
+#define UCAST_FILTER_EXPIRY_JIFFIES msecs_to_jiffies(30000)
+
+/* Offsets into PTP packet for identification. These offsets are from the
+ * start of the IP header, not the MAC header. Note that neither PTP V1 nor
+ * PTP V2 permit the use of IPV4 options.
+ */
+#define PTP_DPORT_OFFSET 22
+
+#define PTP_V1_VERSION_LENGTH 2
+#define PTP_V1_VERSION_OFFSET 28
+
+#define PTP_V1_SEQUENCE_LENGTH 2
+#define PTP_V1_SEQUENCE_OFFSET 58
+
+/* The minimum length of a PTP V1 packet for offsets, etc. to be valid:
+ * includes IP header.
+ */
+#define PTP_V1_MIN_LENGTH 64
+
+#define PTP_V2_VERSION_LENGTH 1
+#define PTP_V2_VERSION_OFFSET 29
+
+#define PTP_V2_SEQUENCE_LENGTH 2
+#define PTP_V2_SEQUENCE_OFFSET 58
+
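+/* Illustrative sketch, not from the original driver: because the offsets
+ * above are relative to the IP header, the UDP destination port that
+ * separates PTP event messages (port 319) from general messages (port 320)
+ * sits at PTP_DPORT_OFFSET. The helper name is hypothetical, and a real
+ * caller must first check that the buffer is at least PTP_V1_MIN_LENGTH
+ * bytes long.
+ */
+static inline u16 example_ptp_dport(const u8 *ip_hdr)
+{
+	return ntohs(*(const __be16 *)&ip_hdr[PTP_DPORT_OFFSET]);
+}
+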
+/* The minimum length of a PTP V2 packet for offsets, etc. to be valid:
+ * includes IP header.
+ */
+#define PTP_V2_MIN_LENGTH 63
+
+#define PTP_MIN_LENGTH 63
+
+#define PTP_RXFILTERS_LEN 10
+
+#define PTP_ADDR 0xe0000181 /* 224.0.1.129 */
+#define PTP_PEER_DELAY_ADDR 0xe000006B /* 224.0.0.107 */
+
+/* ff0e::181 */
+static const struct in6_addr ptp_addr_ipv6 = { { {
+	0xff, 0x0e, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x01, 0x81 } } };
+
+/* ff02::6b */
+static const struct in6_addr ptp_peer_delay_addr_ipv6 = { { {
+	0xff, 0x02, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x06, 0x0b } } };
+
+/* 01-1B-19-00-00-00 */
+static const u8 ptp_addr_ether[ETH_ALEN] __aligned(2) = {
+	0x01, 0x1b, 0x19, 0x00, 0x00, 0x00 };
+
+/* 01-80-C2-00-00-0E */
+static const u8 ptp_peer_delay_addr_ether[ETH_ALEN] __aligned(2) = {
+	0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e };
+
+#define PTP_EVENT_PORT 319
+#define PTP_GENERAL_PORT 320
+
+/* Annoyingly the format of the version numbers is different between
+ * versions 1 and 2 so it isn't possible to simply look for 1 or 2.
+ */
+#define PTP_VERSION_V1 1
+
+#define PTP_VERSION_V2 2
+#define PTP_VERSION_V2_MASK 0x0f
+
+#ifdef EFX_NOT_UPSTREAM
+
+/* Mask of VLAN tag in bytes 2 and 3 of 802.1Q header */
+#define VLAN_TAG_MASK 0x0fff
+
+/* Offset into the packet where the PTP header starts */
+#define PTP_LAYER2_LEN 14
+#define PTP_LAYER2_VLAN_LEN (PTP_LAYER2_LEN + VLAN_HLEN)
+
+#endif /* EFX_NOT_UPSTREAM */
+
+enum ptp_packet_state {
+	PTP_PACKET_STATE_UNMATCHED = 0,
+	PTP_PACKET_STATE_MATCHED,
+	PTP_PACKET_STATE_TIMED_OUT,
+	PTP_PACKET_STATE_MATCH_UNWANTED
+};
+
+/* NIC synchronised with single word of time only comprising
+ * partial seconds and full nanoseconds: 10^9 ~ 2^30 so 2 bits for seconds.
+ */
+#define MC_NANOSECOND_BITS 30
+#define MC_NANOSECOND_MASK ((1 << MC_NANOSECOND_BITS) - 1)
+#define MC_SECOND_MASK ((1 << (32 - MC_NANOSECOND_BITS)) - 1)
+
+/* Maximum parts-per-billion adjustment that is acceptable */
+#define MAX_PPB 100000000
+
+/* One billion: maximum ppb adjustment that is sane to express in API */
+#define ONE_BILLION 1000000000
+
+#define PTP_SYNC_ATTEMPTS 4
+
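+/* Illustrative sketch, not from the original driver: unpacking the single
+ * 32-bit MC time word described above. Since 10^9 < 2^30, the nanoseconds
+ * fit in the low MC_NANOSECOND_BITS bits and the two remaining bits carry
+ * partial seconds. The helper name is hypothetical.
+ */
+static inline void example_unpack_mc_time(u32 word, u32 *secs, u32 *nsecs)
+{
+	*nsecs = word & MC_NANOSECOND_MASK;
+	*secs = (word >> MC_NANOSECOND_BITS) & MC_SECOND_MASK;
+}
+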
+/**
+ * struct efx_ptp_match - Matching structure, stored in sk_buff's cb area.
+ * @expiry: Time after which the packet should be delivered irrespective of
+ *	event arrival.
+ * @state: The state of the packet - whether it is ready for processing or
+ *	whether that is of no interest.
+ * @vlan_tci: VLAN tag in host byte order.
+ * @vlan_tagged: Whether the match is VLAN tagged. The @vlan_tci attribute is
+ *	only valid if this is true.
+ */
+struct efx_ptp_match {
+	unsigned long expiry;
+	enum ptp_packet_state state;
+	u16 vlan_tci;
+	bool vlan_tagged;
+};
+
+/**
+ * struct efx_ptp_event_rx - A PTP receive event (from MC)
+ * @link: list of events
+ * @seq0: First part of (PTP) UUID
+ * @seq1: Second part of (PTP) UUID and sequence number
+ * @hwtimestamp: Event timestamp
+ * @expiry: Time at which the packet arrived
+ */
+struct efx_ptp_event_rx {
+	struct list_head link;
+	u32 seq0;
+	u32 seq1;
+	ktime_t hwtimestamp;
+	unsigned long expiry;
+};
+
+/**
+ * struct efx_ptp_timeset - Synchronisation between host and MC
+ * @host_start: Host time immediately before hardware timestamp taken
+ * @major: Hardware timestamp, major
+ * @minor: Hardware timestamp, minor
+ * @host_end: Host time immediately after hardware timestamp taken
+ * @wait: Number of NIC clock ticks between hardware timestamp being read and
+ *	host end time being seen
+ * @window: Difference of host_end and host_start
+ * @mc_host_diff: Difference between firmware time and host time
+ */
+struct efx_ptp_timeset {
+	u32 host_start;
+	u32 major;
+	u32 minor;
+	u32 host_end;
+	u32 wait;
+	u32 window;	/* Derived: end - start, allowing for wrap */
+	s64 mc_host_diff;	/* Derived: mc_time - host_time */
+};
+
+/**
+ * struct efx_ptp_rxfilter - Filter for PTP packets
+ * @list: Node of the list where the filter is added
+ * @ether_type: Network protocol of the filter (ETH_P_IP / ETH_P_IPV6)
+ * @loc_port: UDP port of the filter (PTP_EVENT_PORT / PTP_GENERAL_PORT)
+ * @loc_host: IPv4/v6 address of the filter
+ * @expiry: time when the filter expires, in jiffies
+ * @handle: Handle ID for the MCDI filters table
+ */
+struct efx_ptp_rxfilter {
+	struct list_head list;
+	__be16 ether_type;
+	__be16 loc_port;
+	__be32 loc_host[4];
+	unsigned long expiry;
+	int handle;
+};
+
+#if defined(EFX_NOT_UPSTREAM)
+/* Forward declaration */
+struct efx_ptp_data;
+
+/**
+ * struct efx_pps_data - PPS device node information
+ * @ptp: Pointer to parent ptp structure
+ * @kobj: kobject for stats handling
+ * @read_data: Queue for handling API reads
+ * @s_assert: sys assert time of hw_pps event
+ * @n_assert: nic assert time of hw_pps event
+ * @s_delta: computed delta between nic and sys clocks
+ * @nic_hw_pps_enabled: Are hw_pps events enabled
+ * @fd_count: Number of open fds
+ * @major: device major number
+ * @last_ev: Last event sequence number
+ * @last_ev_taken: Last event sequence number read by API
+ */
+struct efx_pps_data {
+	struct efx_ptp_data *ptp;
+	struct kobject kobj;
+	wait_queue_head_t read_data;
+	struct timespec64 s_assert;
+	ktime_t n_assert;
+	struct timespec64 s_delta;
+	bool nic_hw_pps_enabled;
+	int fd_count;
+	int major;
+	int last_ev;
+	int last_ev_taken;
+};
+
+/**
+ * struct efx_pps_dev_attr - PPS device attr structure
+ * @attr: attribute object
+ * @pos: offset of the stat desired
+ * @show: function pointer to obtain and print the stats
+ */
+struct efx_pps_dev_attr {
+	struct attribute attr;
+	u8 pos;
+	ssize_t (*show)(struct efx_pps_data *, u8 pos, char *);
+};
+#endif
+
+/**
+ * struct efx_ptp_data - Precision Time Protocol (PTP) state
+ * @efx: The NIC context
+ * @ifindex: Interface index of PHC
+ * @adapter_base_addr: MAC address of port0 (used as unique identifier) of PHC
+ * @node_ptp_all_phcs: Node in list of all PHC PTP data structures
+ * @channel: The PTP channel (for Medford and Medford2)
+ * @rxq: Receive SKB queue (awaiting timestamps)
+ * @txq: Transmit SKB queue
+ * @workwq: Work queue for processing pending PTP operations
+ * @work: Work task
+ * @cleanup_work: Work task for periodic cleanup
+ * @kref: Reference count.
+ * @reset_required: A serious error has occurred and the PTP task needs to be
+ *	reset (disable, enable).
+ * @rxfilters_mcast: Receive filters for multicast PTP packets
+ * @rxfilters_ucast: Receive filters for unicast PTP packets
+ * @rxfilters_lock: Serialise access to PTP filters
+ * @config: Current timestamp configuration
+ * @enabled: PTP operation enabled. If this is disabled, normal timestamping
+ *	can still work.
+ * @mode: Mode in which PTP is operating (PTP version)
+ * @ns_to_nic_time: Function to convert from scalar nanoseconds to NIC time
+ * @nic_to_kernel_time: Function to convert from NIC to kernel time
+ * @nic_time: contains time details
+ * @nic_time.minor_max: Wrap point for NIC minor times
+ * @nic_time.sync_event_diff_min: Minimum acceptable difference between time
+ *	in packet prefix and last MCDI time sync event, i.e. how much earlier
+ *	than the last sync event time a packet timestamp can be.
+ * @nic_time.sync_event_diff_max: Maximum acceptable difference between time
+ *	in packet prefix and last MCDI time sync event, i.e. how much later
+ *	than the last sync event time a packet timestamp can be.
+ * @nic_time.sync_event_minor_shift: Shift required to make minor time from
+ *	field in MCDI time sync event.
+ * @min_synchronisation_ns: Minimum acceptable corrected sync window
+ * @capabilities: Capabilities flags from the NIC
+ * @ts_corrections: contains corrections details
+ * @ts_corrections.ptp_tx: Required driver correction of PTP packet transmit
+ *	timestamps
+ * @ts_corrections.ptp_rx: Required driver correction of PTP packet receive
+ *	timestamps
+ * @ts_corrections.pps_out: PPS output error (information only)
+ * @ts_corrections.pps_in: Required driver correction of PPS input timestamps
+ * @ts_corrections.general_tx: Required driver correction of general packet
+ *	transmit timestamps
+ * @ts_corrections.general_rx: Required driver correction of general packet
+ *	receive timestamps
+ * @evt_frags: Partly assembled PTP events
+ * @evt_frag_idx: Current fragment number
+ * @evt_code: Last event code
+ * @start: Address at which MC indicates ready for synchronisation
+ * @host_time_pps: Host time at last PPS
+ * @adjfreq_prec: Fractional bits in the frequency adjustment factor register,
+ *	used when converting a ppb frequency adjustment into the adapter's
+ *	fixed-point fractional nanosecond format.
+ * @max_adjfreq: Maximum ppb adjustment, lives here instead of phc_clock_info
+ *	as it must be accessible without PHC support, using private ioctls.
+ * @max_adjfine: Maximum FP16 ppm adjustment, corresponding to max_adjfreq.
+ * @current_adjfreq: Current ppb adjustment.
+ * @last_mc_time: MC time corresponding to last measurement.
+ * @last_host_time_real: Host real time corresponding to last MC measurement.
+ * @last_host_time_raw: Host raw time corresponding to last MC measurement.
+ * @phc_clock: Pointer to registered phc device
+ * @phc_clock_info: Registration structure for phc device
+ * @pps_work: pps work task for handling pps events
+ * @pps_workwq: pps work queue
+ * @usr_evt_enabled: Flag indicating how NIC-generated TS events are handled
+ * @txbuf: Buffer for use when transmitting (PTP) packets to MC (avoids
+ *	allocations in main data path).
+ * @sw_stats: Driver level statistics.
+ * @sw_stats.good_syncs: Number of successful synchronisations.
+ * @sw_stats.fast_syncs: Number of synchronisations requiring short delay
+ * @sw_stats.bad_syncs: Number of failed synchronisations.
+ * @sw_stats.sync_timeouts: Number of synchronisation timeouts + * @sw_stats.no_time_syncs: Number of synchronisations with no good times. + * @sw_stats.invalid_sync_windows: Number of sync windows with bad durations. + * @sw_stats.undersize_sync_windows: Number of corrected sync windows that + * are too small + * @sw_stats.oversize_sync_windows: Number of corrected sync windows that + * are too large + * @sw_stats.rx_no_timestamp: Number of packets received without a timestamp. + * @initialised_stats: Indicates if @initial_mc_stats has been populated. + * @initial_mc_stats: Firmware statistics. + * @timeset: Last set of synchronisation statistics. + * @xmit_skb: Transmit SKB function. + */ +struct efx_ptp_data { + struct efx_nic *efx; + int ifindex; + u8 adapter_base_addr[ETH_ALEN]; + struct list_head node_ptp_all_phcs; + struct efx_channel *channel; + struct sk_buff_head rxq; + struct sk_buff_head txq; + struct workqueue_struct *workwq; + struct work_struct work; + struct delayed_work cleanup_work; + struct kref kref; + bool reset_required; + struct list_head rxfilters_mcast; + struct list_head rxfilters_ucast; + struct mutex rxfilters_lock; + struct hwtstamp_config config; + bool enabled; + unsigned int mode; + void (*ns_to_nic_time)(s64 ns, u32 *nic_major, u32 *nic_minor); + ktime_t (*nic_to_kernel_time)(u32 nic_major, u32 nic_minor, + s32 correction); + struct { + u32 minor_max; + u32 sync_event_diff_min; + u32 sync_event_diff_max; + unsigned int sync_event_minor_shift; + } nic_time; + unsigned int min_synchronisation_ns; + unsigned int capabilities; + struct { + s32 ptp_tx; + s32 ptp_rx; + s32 pps_out; + s32 pps_in; + s32 general_tx; + s32 general_rx; + } ts_corrections; + efx_qword_t evt_frags[MAX_EVENT_FRAGS]; + int evt_frag_idx; + int evt_code; + struct efx_buffer start; + struct pps_event_time host_time_pps; + unsigned int adjfreq_prec; + s64 max_adjfreq; + s64 max_adjfine; + s64 current_adjfreq; +#if defined(EFX_NOT_UPSTREAM) + /** + * @last_delta: Last measurement of delta between system and NIC + * clocks. + */ + struct timespec64 last_delta; + /** @last_delta_valid: indicates if @last_delta is valid. */ + bool last_delta_valid; +#endif + struct timespec64 last_mc_time; + struct timespec64 last_host_time_real; + struct timespec64 last_host_time_raw; + struct ptp_clock *phc_clock; + struct ptp_clock_info phc_clock_info; + struct work_struct pps_work; + struct workqueue_struct *pps_workwq; +#if defined(EFX_NOT_UPSTREAM) + /** @pps_data: Data associated with optional HW PPS events. */ + struct efx_pps_data *pps_data; +#endif +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_PHC_SUPPORT) +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_PTP_PF_NONE) + /** @pin_config: Function of the EXTTS pin. */ + struct ptp_pin_desc pin_config[1]; +#endif + u8 usr_evt_enabled; +#endif + efx_dword_t txbuf[MCDI_TX_BUF_LEN(MC_CMD_PTP_IN_TRANSMIT_LENMAX)]; + + struct { + unsigned int good_syncs; + unsigned int fast_syncs; + unsigned int bad_syncs; + unsigned int sync_timeouts; + unsigned int no_time_syncs; + unsigned int invalid_sync_windows; + unsigned int undersize_sync_windows; + unsigned int oversize_sync_windows; + unsigned int rx_no_timestamp; + } sw_stats; + bool initialised_stats; + __le16 initial_mc_stats[MC_CMD_PTP_OUT_STATUS_LEN / sizeof(__le16)]; +#ifdef CONFIG_DEBUG_FS + /** @last_sync_time_host: Host nanoseconds at last synchronisation. */ + unsigned int last_sync_time_host; + /** @min_sync_delta: Minimum time between event and synchronisation. 
*/ + unsigned int min_sync_delta; + /** @max_sync_delta: Maximum time between event and synchronisation. */ + unsigned int max_sync_delta; + /** + * @average_sync_delta: Average time between event and synchronisation. + * Modified moving average. + */ + unsigned int average_sync_delta; + /** @last_sync_delta: Last time between event and synchronisation. */ + unsigned int last_sync_delta; + /** @sync_window_last: Last synchronisation window. */ + int sync_window_last[PTP_SYNC_ATTEMPTS]; + /** @sync_window_min: Minimum synchronisation window seen. */ + int sync_window_min; + /** @sync_window_max: Maximum synchronisation window seen. */ + int sync_window_max; + /** @sync_window_average: Average synchronisation window seen. */ + int sync_window_average; + /** + * @corrected_sync_window_last: Last corrected synchronisation window. + */ + int corrected_sync_window_last[PTP_SYNC_ATTEMPTS]; + /** + * @corrected_sync_window_min: Minimum corrected synchronisation + * window seen. + */ + int corrected_sync_window_min; + /** + * @corrected_sync_window_max: Maximum corrected synchronisation + * window seen. + */ + int corrected_sync_window_max; + /** + * @corrected_sync_window_average: Average corrected synchronisation + * window seen. + */ + int corrected_sync_window_average; + /** @mc_stats: Context value for MC statistics. */ + u8 mc_stats[MC_CMD_PTP_OUT_STATUS_LEN / sizeof(u32)]; +#endif + struct efx_ptp_timeset + timeset[MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM]; + void (*xmit_skb)(struct efx_nic *efx, struct sk_buff *skb); +}; + +static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta); +static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts); +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_PHC_SUPPORT) +static int efx_phc_settime(struct ptp_clock_info *ptp, + const struct timespec64 *e_ts); + +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_USE_64BIT_PHC) +static int efx_phc_gettime32(struct ptp_clock_info *ptp, struct timespec *ts); +static int efx_phc_settime32(struct ptp_clock_info *ptp, + const struct timespec *ts); +#endif +#endif + +static LIST_HEAD(efx_all_phcs_list); +static DEFINE_SPINLOCK(ptp_all_phcs_list_lock); + +bool efx_ptp_use_mac_tx_timestamps(struct efx_nic *efx) +{ + return efx_has_cap(efx, TX_MAC_TIMESTAMPING); +} + +static int efx_ptp_insert_unicast_filter(struct efx_nic *efx, + struct sk_buff *skb); + +#define PTP_SW_STAT(ext_name, field_name) \ + { #ext_name, 0, offsetof(struct efx_ptp_data, field_name) } +#define PTP_MC_STAT(ext_name, mcdi_name) \ + { #ext_name, 32, MC_CMD_PTP_OUT_STATUS_STATS_ ## mcdi_name ## _OFST } +static const struct efx_hw_stat_desc efx_ptp_stat_desc[] = { + PTP_SW_STAT(ptp_good_syncs, sw_stats.good_syncs), + PTP_SW_STAT(ptp_fast_syncs, sw_stats.fast_syncs), + PTP_SW_STAT(ptp_bad_syncs, sw_stats.bad_syncs), + PTP_SW_STAT(ptp_sync_timeouts, sw_stats.sync_timeouts), + PTP_SW_STAT(ptp_no_time_syncs, sw_stats.no_time_syncs), + PTP_SW_STAT(ptp_invalid_sync_windows, sw_stats.invalid_sync_windows), + PTP_SW_STAT(ptp_undersize_sync_windows, sw_stats.undersize_sync_windows), + PTP_SW_STAT(ptp_oversize_sync_windows, sw_stats.oversize_sync_windows), + PTP_SW_STAT(ptp_rx_no_timestamp, sw_stats.rx_no_timestamp), + PTP_MC_STAT(ptp_tx_timestamp_packets, TX), + PTP_MC_STAT(ptp_rx_timestamp_packets, RX), + PTP_MC_STAT(ptp_timestamp_packets, TS), + PTP_MC_STAT(ptp_filter_matches, FM), + PTP_MC_STAT(ptp_non_filter_matches, NFM), +}; +#define PTP_STAT_COUNT ARRAY_SIZE(efx_ptp_stat_desc) +static const unsigned long efx_ptp_stat_mask[] = { 
+ [0 ... BITS_TO_LONGS(PTP_STAT_COUNT) - 1] = ~0UL, +}; + +size_t efx_ptp_describe_stats(struct efx_nic *efx, u8 *strings) +{ + if (!efx->ptp_data) + return 0; + + return efx_nic_describe_stats(efx_ptp_stat_desc, PTP_STAT_COUNT, + efx_ptp_stat_mask, strings); +} + +void efx_ptp_reset_stats(struct efx_nic *efx) +{ + struct efx_ptp_data *ptp = efx->ptp_data; + + if (ptp) { + u64 temp[PTP_STAT_COUNT]; + + memset(&ptp->sw_stats, 0, sizeof(ptp->sw_stats)); + + ptp->initialised_stats = false; + efx_ptp_update_stats(efx, temp); + } +} + +size_t efx_ptp_update_stats(struct efx_nic *efx, u64 *stats) +{ + struct efx_ptp_data *ptp = efx->ptp_data; + MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_STATUS_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_STATUS_LEN); + void *initial_mc_stats; + size_t i; + int rc; + + if (!ptp) + return 0; + + /* Copy software statistics */ + for (i = 0; i < PTP_STAT_COUNT; i++) { + if (efx_ptp_stat_desc[i].dma_width) + continue; + stats[i] = *(unsigned int *)((char *)efx->ptp_data + + efx_ptp_stat_desc[i].offset); + } + + /* Fetch MC statistics. We *must* fill in all statistics or + * risk leaking kernel memory to userland, so if the MCDI + * request fails we pretend we got zeroes. + */ + MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_STATUS); + MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); + rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), NULL); + if (rc) + memset(outbuf, 0, sizeof(outbuf)); + + if (IS_ENABLED(CONFIG_DEBUG_FS) && !ptp->initialised_stats) { + memcpy(ptp->initial_mc_stats, _MCDI_PTR(outbuf, 0), + sizeof(ptp->initial_mc_stats)); + ptp->initialised_stats = !rc; + } + + if (ptp->initialised_stats) + initial_mc_stats = ptp->initial_mc_stats; + else + initial_mc_stats = NULL; + efx_nic_update_stats(efx_ptp_stat_desc, PTP_STAT_COUNT, + efx_ptp_stat_mask, stats, initial_mc_stats, + _MCDI_PTR(outbuf, 0)); + + return PTP_STAT_COUNT; +} + + +static void efx_ptp_delete_data(struct kref *kref) +{ + struct efx_ptp_data *ptp = container_of(kref, struct efx_ptp_data, + kref); + +#if defined(EFX_NOT_UPSTREAM) + if (ptp->pps_data) + kfree(ptp->pps_data); +#endif + ptp->efx = NULL; + kfree(ptp); +} +#if defined(EFX_NOT_UPSTREAM) +/* Read one MC PTP related statistic. This actually gathers + * all PTP statistics, throwing away the others. 
+ */ +static int ptp_read_stat(struct efx_ptp_data *ptp, + u8 pos, efx_dword_t *value) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_STATUS_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_STATUS_LEN); + int rc; + + MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_STATUS); + MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); + rc = efx_mcdi_rpc(ptp->efx, MC_CMD_PTP, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), NULL); + if (rc) { + *value->u32 = 0; + return rc; + } + + *value = outbuf[pos/sizeof(efx_dword_t)]; + + return 0; +} + +static ssize_t efx_pps_stats_int(struct efx_pps_data *pps, u8 pos, char *data) +{ + efx_dword_t value; + + ptp_read_stat(pps->ptp, pos, &value); + + return scnprintf(data, PAGE_SIZE, "%d\n", EFX_DWORD_FIELD(value, EFX_DWORD_0)); +} + +static ssize_t efx_pps_stats_show(struct kobject *kobj, + struct attribute *attr, + char *buff) +{ + struct efx_pps_data *pps = container_of(kobj, + struct efx_pps_data, + kobj); + + struct efx_pps_dev_attr *efx_attr = container_of(attr, + struct efx_pps_dev_attr, + attr); + return efx_attr->show(pps, efx_attr->pos, buff); +} + +#define EFX_PPS_DEVICE_ATTR(_name, _mode, _pos) \ + static struct efx_pps_dev_attr efx_pps_attr_##_name = { \ + .attr = {.name = __stringify(_name), .mode = _mode }, \ + .pos = MC_CMD_PTP_OUT_STATUS_STATS_##_pos##_OFST, \ + .show = efx_pps_stats_int, \ + } + +#define EFX_PPS_ATTR_PTR(_name) \ + &efx_pps_attr_##_name.attr + +EFX_PPS_DEVICE_ATTR(pps_oflow, S_IRUGO, PPS_OFLOW); +EFX_PPS_DEVICE_ATTR(pps_bad, S_IRUGO, PPS_BAD); +EFX_PPS_DEVICE_ATTR(pps_per_min, S_IRUGO, PPS_PER_MIN); +EFX_PPS_DEVICE_ATTR(pps_per_max, S_IRUGO, PPS_PER_MAX); +EFX_PPS_DEVICE_ATTR(pps_per_last, S_IRUGO, PPS_PER_LAST); +EFX_PPS_DEVICE_ATTR(pps_per_mean, S_IRUGO, PPS_PER_MEAN); +EFX_PPS_DEVICE_ATTR(pps_off_min, S_IRUGO, PPS_OFF_MIN); +EFX_PPS_DEVICE_ATTR(pps_off_max, S_IRUGO, PPS_OFF_MAX); +EFX_PPS_DEVICE_ATTR(pps_off_last, S_IRUGO, PPS_OFF_LAST); +EFX_PPS_DEVICE_ATTR(pps_off_mean, S_IRUGO, PPS_OFF_MEAN); + +static struct attribute *efx_pps_device_attrs[] = { + EFX_PPS_ATTR_PTR(pps_oflow), + EFX_PPS_ATTR_PTR(pps_bad), + EFX_PPS_ATTR_PTR(pps_per_min), + EFX_PPS_ATTR_PTR(pps_per_max), + EFX_PPS_ATTR_PTR(pps_per_last), + EFX_PPS_ATTR_PTR(pps_per_mean), + EFX_PPS_ATTR_PTR(pps_off_min), + EFX_PPS_ATTR_PTR(pps_off_max), + EFX_PPS_ATTR_PTR(pps_off_last), + EFX_PPS_ATTR_PTR(pps_off_mean), + NULL, +}; +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_KOBJECT_DEFAULT_GROUPS) +ATTRIBUTE_GROUPS(efx_pps_device); +#endif + +/* Expose maximum PPB freq adjustment as a device attribute, allowing + * applications to use correct freq adjustment limit per NIC */ +static ssize_t show_max_adjfreq(struct device *dev, + struct device_attribute *attr, + char *buff) +{ + struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); + struct efx_ptp_data *ptp = efx->phc_ptp_data; + s32 max_adjfreq = 0; + + if (ptp) + max_adjfreq = ptp->max_adjfreq; + + return scnprintf(buff, PAGE_SIZE, "%d\n", max_adjfreq); +} + +static DEVICE_ATTR(max_adjfreq, S_IRUGO, show_max_adjfreq, NULL); + +static void ptp_boardattr_release(struct kobject *kobj) +{ + struct efx_pps_data *pps = container_of(kobj, struct efx_pps_data, + kobj); + struct efx_ptp_data *ptp = pps->ptp; + + kref_put(&ptp->kref, efx_ptp_delete_data); +} + +static const struct sysfs_ops efx_sysfs_ops = { + .show = efx_pps_stats_show, + .store = NULL, +}; + +static struct kobj_type efx_sysfs_ktype = { + .release = ptp_boardattr_release, + /* May need to cast away const */ + .sysfs_ops = (struct sysfs_ops *)&efx_sysfs_ops, +#if 
!defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_KOBJECT_DEFAULT_GROUPS) + .default_groups = efx_pps_device_groups, +#else + .default_attrs = efx_pps_device_attrs, +#endif +}; + +#endif + +#ifdef CONFIG_DEBUG_FS + +#define EFX_PTP_UINT_PARAMETER(container_type, parameter) \ + EFX_NAMED_PARAMETER(ptp_ ## parameter, container_type, parameter, \ + unsigned int, efx_debugfs_read_int) + +#define EFX_PTP_INT_PARAMETER(container_type, parameter) \ + EFX_NAMED_PARAMETER(ptp_ ## parameter, container_type, parameter, \ + int, efx_debugfs_read_int) + +#define EFX_PTP_INT_ARRAY(container_type, parameter, idx) \ + EFX_NAMED_PARAMETER(ptp_ ## parameter ## idx, container_type, \ + parameter[idx], int, efx_debugfs_read_int) + +/* PTP parameters */ +static const struct efx_debugfs_parameter efx_debugfs_ptp_parameters[] = { + EFX_PTP_UINT_PARAMETER(struct efx_ptp_data, last_sync_time_host), + EFX_PTP_UINT_PARAMETER(struct efx_ptp_data, min_sync_delta), + EFX_PTP_UINT_PARAMETER(struct efx_ptp_data, max_sync_delta), + EFX_PTP_UINT_PARAMETER(struct efx_ptp_data, average_sync_delta), + EFX_PTP_UINT_PARAMETER(struct efx_ptp_data, last_sync_delta), + EFX_PTP_INT_ARRAY(struct efx_ptp_data, sync_window_last, 0), + EFX_PTP_INT_ARRAY(struct efx_ptp_data, sync_window_last, 1), + EFX_PTP_INT_ARRAY(struct efx_ptp_data, sync_window_last, 2), + EFX_PTP_INT_ARRAY(struct efx_ptp_data, sync_window_last, 3), + EFX_PTP_INT_PARAMETER(struct efx_ptp_data, sync_window_min), + EFX_PTP_INT_PARAMETER(struct efx_ptp_data, sync_window_max), + EFX_PTP_INT_PARAMETER(struct efx_ptp_data, sync_window_average), + EFX_PTP_INT_ARRAY(struct efx_ptp_data, corrected_sync_window_last, 0), + EFX_PTP_INT_ARRAY(struct efx_ptp_data, corrected_sync_window_last, 1), + EFX_PTP_INT_ARRAY(struct efx_ptp_data, corrected_sync_window_last, 2), + EFX_PTP_INT_ARRAY(struct efx_ptp_data, corrected_sync_window_last, 3), + EFX_PTP_INT_PARAMETER(struct efx_ptp_data, corrected_sync_window_min), + EFX_PTP_INT_PARAMETER(struct efx_ptp_data, corrected_sync_window_max), + EFX_PTP_INT_PARAMETER(struct efx_ptp_data, corrected_sync_window_average), + {NULL}, +}; + +static ssize_t set_ptp_stats(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + bool clear = count > 0 && *buf != '0'; + + if (clear) { + struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); + MCDI_DECLARE_BUF(in_rst_stats, MC_CMD_PTP_IN_RESET_STATS_LEN); + int rc; + + MCDI_SET_DWORD(in_rst_stats, PTP_IN_OP, MC_CMD_PTP_OP_RESET_STATS); + MCDI_SET_DWORD(in_rst_stats, PTP_IN_PERIPH_ID, 0); + + rc = efx_mcdi_rpc(efx, MC_CMD_PTP, in_rst_stats, sizeof(in_rst_stats), + NULL, 0, NULL); + if (rc < 0) + count = (size_t) rc; + } + + return count; +} + +static DEVICE_ATTR(ptp_stats, S_IWUSR, NULL, set_ptp_stats); +#else /* CONFIG_DEBUG_FS */ +static const struct efx_debugfs_parameter efx_debugfs_ptp_parameters[] = {}; +#endif /* CONFIG_DEBUG_FS */ + +/* To convert from s27 format to ns we multiply then divide by a power of 2. + * For the conversion from ns to s27, the operation is also converted to a + * multiply and shift. + */ +#define S27_TO_NS_SHIFT (27) +#define NS_TO_S27_MULT (((1ULL << 63) + NSEC_PER_SEC / 2) / NSEC_PER_SEC) +#define NS_TO_S27_SHIFT (63 - S27_TO_NS_SHIFT) +#define S27_MINOR_MAX (1 << S27_TO_NS_SHIFT) + +/* For Huntington platforms NIC time is in seconds and fractions of a second + * where the minor register only uses 27 bits in units of 2^-27s. 
+ */
+static void efx_ptp_ns_to_s27(s64 ns, u32 *nic_major, u32 *nic_minor)
+{
+	struct timespec64 ts = ns_to_timespec64(ns);
+	u32 maj = (u32)ts.tv_sec;
+	u32 min = (u32)(((u64)ts.tv_nsec * NS_TO_S27_MULT +
+			 (1ULL << (NS_TO_S27_SHIFT - 1))) >> NS_TO_S27_SHIFT);
+
+	/* The conversion can result in the minor value exceeding the maximum.
+	 * In this case, round up to the next second.
+	 */
+	if (min >= S27_MINOR_MAX) {
+		min -= S27_MINOR_MAX;
+		maj++;
+	}
+
+	*nic_major = maj;
+	*nic_minor = min;
+}
+
+static inline ktime_t efx_ptp_s27_to_ktime(u32 nic_major, u32 nic_minor)
+{
+	u32 ns = (u32)(((u64)nic_minor * NSEC_PER_SEC +
+			(1ULL << (S27_TO_NS_SHIFT - 1))) >> S27_TO_NS_SHIFT);
+	return ktime_set(nic_major, ns);
+}
+
+static ktime_t efx_ptp_s27_to_ktime_correction(u32 nic_major, u32 nic_minor,
+					       s32 correction)
+{
+	/* Apply the correction and deal with carry */
+	nic_minor += correction;
+	if ((s32)nic_minor < 0) {
+		nic_minor += S27_MINOR_MAX;
+		nic_major--;
+	} else if (nic_minor >= S27_MINOR_MAX) {
+		nic_minor -= S27_MINOR_MAX;
+		nic_major++;
+	}
+
+	return efx_ptp_s27_to_ktime(nic_major, nic_minor);
+}
+
+/* For Medford2 platforms the time is in seconds and quarter nanoseconds. */
+static void efx_ptp_ns_to_s_qns(s64 ns, u32 *nic_major, u32 *nic_minor)
+{
+	struct timespec64 ts = ns_to_timespec64(ns);
+
+	*nic_major = (u32)ts.tv_sec;
+	*nic_minor = ts.tv_nsec * 4;
+}
+
+static ktime_t efx_ptp_s_qns_to_ktime_correction(u32 nic_major, u32 nic_minor,
+						 s32 correction)
+{
+	ktime_t kt;
+
+	nic_minor = DIV_ROUND_CLOSEST(nic_minor, 4);
+	correction = DIV_ROUND_CLOSEST(correction, 4);
+
+	kt = ktime_set(nic_major, nic_minor);
+
+	if (correction >= 0)
+		kt = ktime_add_ns(kt, (u64)correction);
+	else
+		kt = ktime_sub_ns(kt, (u64)-correction);
+	return kt;
+}
+
+struct efx_channel *efx_ptp_channel(struct efx_nic *efx)
+{
+	return efx->ptp_data ? efx->ptp_data->channel : NULL;
+}
+
+static u32 last_sync_timestamp_major(struct efx_nic *efx)
+{
+	struct efx_channel *channel = efx_ptp_channel(efx);
+	u32 major = 0;
+
+	if (channel)
+		major = channel->sync_timestamp_major;
+	return major;
+}
+
+/* The 8XXX series and later can provide the time from the MAC, which is only
+ * 48 bits long and provides meta-information in the top 2 bits.
+ * See SFC bug 57928.
+ */
+static ktime_t
+efx_ptp_mac_nic_to_ktime_correction(struct efx_nic *efx,
+				    struct efx_ptp_data *ptp,
+				    u32 nic_major, u32 nic_minor,
+				    s32 correction)
+{
+	u32 sync_timestamp;
+	ktime_t kt = { 0 };
+	s16 delta;
+
+	if (!(nic_major & 0x80000000)) {
+		WARN_ON_ONCE(nic_major >> 16);
+
+		/* Medford provides 48 bits of timestamp, so we must get the
+		 * top 16 bits from the timesync event state.
+		 *
+		 * We only have the lower 16 bits of the time now, but we do
+		 * have a full resolution timestamp at some point in the past.
+		 * As long as the difference between the (real) now and the
+		 * sync is less than 2^15, then we can reconstruct the
+		 * difference between those two numbers using only the lower
+		 * 16 bits of each.
+		 *
+		 * Put another way
+		 *
+		 *	a - b = ((a mod k) - b) mod k
+		 *
+		 * when -k/2 < (a-b) < k/2. In our case k is 2^16. We know
+		 * (a mod k) and b, so we can calculate the delta, a - b.
+		 */
+		sync_timestamp = last_sync_timestamp_major(efx);
+
+		/* Because delta is s16 this does an implicit mask down to
+		 * 16 bits which is what we need, assuming
+		 * MEDFORD_TX_SECS_EVENT_BITS is 16. delta is signed so that
+		 * we can deal with the (unlikely) case of sync timestamps
+		 * arriving from the future.
+		 */
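+		/* Worked example with invented numbers (editorial, not from
+		 * the original comment): if sync_timestamp = 0x00012345 and
+		 * the packet carries low bits 0x2347, then
+		 * (s16)(0x2347 - 0x2345) = +2 and the reconstructed major
+		 * time is 0x00012347; low bits 0x2343 give -2 and 0x00012343.
+		 * This holds whenever |delta| < 2^15.
+		 */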
+		delta = nic_major - sync_timestamp;
+
+		/* Recover the fully specified time now, by applying the offset
+		 * to the (fully specified) sync time.
+		 */
+		nic_major = sync_timestamp + delta;
+
+		kt = ptp->nic_to_kernel_time(nic_major, nic_minor,
+					     correction);
+	}
+	return kt;
+}
+
+ktime_t efx_ptp_nic_to_kernel_time(struct efx_tx_queue *tx_queue)
+{
+	struct efx_nic *efx = tx_queue->efx;
+	struct efx_ptp_data *ptp = efx->ptp_data;
+	ktime_t kt;
+
+	if (efx_ptp_use_mac_tx_timestamps(efx))
+		kt = efx_ptp_mac_nic_to_ktime_correction(efx, ptp,
+				tx_queue->completed_timestamp_major,
+				tx_queue->completed_timestamp_minor,
+				ptp->ts_corrections.general_tx);
+	else
+		kt = ptp->nic_to_kernel_time(
+				tx_queue->completed_timestamp_major,
+				tx_queue->completed_timestamp_minor,
+				ptp->ts_corrections.general_tx);
+	return kt;
+}
+
+/* Get PTP attributes and set up time conversions */
+int efx_ptp_get_attributes(struct efx_nic *efx)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_GET_ATTRIBUTES_LEN);
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN);
+	struct efx_ptp_data *ptp = efx->ptp_data;
+	int rc;
+	u32 fmt;
+	size_t out_len;
+
+	/* Get the PTP attributes. If the NIC doesn't support the operation,
+	 * we use the default format for compatibility with older NICs, i.e.
+	 * seconds and nanoseconds.
+	 */
+	MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_GET_ATTRIBUTES);
+	MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+				outbuf, sizeof(outbuf), &out_len);
+	if (ptp) {
+		if (rc == 0) {
+			fmt = MCDI_DWORD(outbuf, PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT);
+		} else if (rc == -EINVAL) {
+			fmt = MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS;
+		} else if (rc == -EPERM) {
+			pci_info(efx->pci_dev, "no PTP support\n");
+			return rc;
+		} else {
+			efx_mcdi_display_error(efx, MC_CMD_PTP, sizeof(inbuf),
+					       outbuf, sizeof(outbuf), rc);
+			return rc;
+		}
+
+		switch (fmt) {
+		case MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION:
+			ptp->ns_to_nic_time = efx_ptp_ns_to_s27;
+			ptp->nic_to_kernel_time = efx_ptp_s27_to_ktime_correction;
+			ptp->nic_time.minor_max = (1<<27);
+			ptp->nic_time.sync_event_minor_shift = 19;
+			break;
+		case MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_QTR_NANOSECONDS:
+			ptp->ns_to_nic_time = efx_ptp_ns_to_s_qns;
+			ptp->nic_to_kernel_time = efx_ptp_s_qns_to_ktime_correction;
+			ptp->nic_time.minor_max = 4000000000UL;
+			ptp->nic_time.sync_event_minor_shift = 24;
+			break;
+		default:
+			return -ERANGE;
+		}
+
+		/* Precalculate acceptable difference between the minor time in
+		 * the packet prefix and the last MCDI time sync event. We
+		 * expect the packet prefix timestamp to be after the sync
+		 * event by up to one sync event interval (0.25s) but we allow
+		 * it to exceed this by a fuzz factor of (0.1s)
+		 */
+		ptp->nic_time.sync_event_diff_min = ptp->nic_time.minor_max
+			- (ptp->nic_time.minor_max / 10);
+		ptp->nic_time.sync_event_diff_max = (ptp->nic_time.minor_max / 4)
+			+ (ptp->nic_time.minor_max / 10);
+
+		/* MC_CMD_PTP_OP_GET_ATTRIBUTES has been extended twice from an
+		 * older operation MC_CMD_PTP_OP_GET_TIME_FORMAT. The function
+		 * may now return a value to use for the minimum acceptable
+		 * corrected synchronisation window and may return further
+		 * capabilities.
+		 * If we have the extra information, store it. For older
+		 * firmware that does not implement the extended command, use
+		 * the default value.
+ */ + if (rc == 0 && + out_len >= MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_OFST) + ptp->min_synchronisation_ns = + MCDI_DWORD(outbuf, + PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN); + else + ptp->min_synchronisation_ns = DEFAULT_MIN_SYNCHRONISATION_NS; + } + + if (rc == 0 && + out_len >= MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN) + efx->ptp_capability = + MCDI_DWORD(outbuf, + PTP_OUT_GET_ATTRIBUTES_CAPABILITIES); + else + efx->ptp_capability = 0; + + if (ptp) { + ptp->capabilities = efx->ptp_capability; + + /* Set up the shift for conversion between frequency + * adjustments in parts-per-billion and the fixed-point + * fractional ns format that the adapter uses. + */ + if (ptp->capabilities & (1 << MC_CMD_PTP_OUT_GET_ATTRIBUTES_FP44_FREQ_ADJ_LBN)) + ptp->adjfreq_prec = 44; + else + ptp->adjfreq_prec = 40; + } + + return 0; +} + +/* Get PTP timestamp corrections */ +static int efx_ptp_get_timestamp_corrections(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_LEN); + int rc; + size_t out_len; + + /* Get the timestamp corrections from the NIC. If this operation is + * not supported (older NICs) then no correction is required. + */ + MCDI_SET_DWORD(inbuf, PTP_IN_OP, + MC_CMD_PTP_OP_GET_TIMESTAMP_CORRECTIONS); + MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); + + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &out_len); + if (rc == 0) { + efx->ptp_data->ts_corrections.ptp_tx = MCDI_DWORD(outbuf, + PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT); + efx->ptp_data->ts_corrections.ptp_rx = MCDI_DWORD(outbuf, + PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE); + efx->ptp_data->ts_corrections.pps_out = MCDI_DWORD(outbuf, + PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT); + efx->ptp_data->ts_corrections.pps_in = MCDI_DWORD(outbuf, + PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN); + + if (out_len >= MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_LEN) { + efx->ptp_data->ts_corrections.general_tx = MCDI_DWORD( + outbuf, + PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_TX); + efx->ptp_data->ts_corrections.general_rx = MCDI_DWORD( + outbuf, + PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_RX); + } else { + efx->ptp_data->ts_corrections.general_tx = + efx->ptp_data->ts_corrections.ptp_tx; + efx->ptp_data->ts_corrections.general_rx = + efx->ptp_data->ts_corrections.ptp_rx; + } + } else if (rc == -EINVAL) { + efx->ptp_data->ts_corrections.ptp_tx = 0; + efx->ptp_data->ts_corrections.ptp_rx = 0; + efx->ptp_data->ts_corrections.pps_out = 0; + efx->ptp_data->ts_corrections.pps_in = 0; + efx->ptp_data->ts_corrections.general_tx = 0; + efx->ptp_data->ts_corrections.general_rx = 0; + } else { + efx_mcdi_display_error(efx, MC_CMD_PTP, sizeof(inbuf), outbuf, + sizeof(outbuf), rc); + return rc; + } + + return 0; +} + +static int efx_ptp_adapter_has_support(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_READ_NIC_TIME_LEN); + MCDI_DECLARE_BUF_ERR(outbuf); + int rc; + + MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_READ_NIC_TIME); + MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), NULL); + /* ENOSYS => the NIC doesn't support PTP. + * EPERM => the NIC doesn't have a PTP license. 
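+	 * Any other error is unexpected and is reported, but the function
+	 * still returns "no PTP support" (rc != 0) to its caller.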
+ */
+	if (rc == -ENOSYS || rc == -EPERM)
+		pci_info(efx->pci_dev, "no PTP support (rc=%d)\n", rc);
+	else if (rc)
+		efx_mcdi_display_error(efx, MC_CMD_PTP,
+				       MC_CMD_PTP_IN_READ_NIC_TIME_LEN,
+				       outbuf, sizeof(outbuf), rc);
+	return rc == 0;
+}
+
+/* Enable MCDI PTP support. */
+static int efx_ptp_enable(struct efx_nic *efx)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_ENABLE_LEN);
+	MCDI_DECLARE_BUF_ERR(outbuf);
+	int rc;
+
+	MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ENABLE);
+	MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+	MCDI_SET_DWORD(inbuf, PTP_IN_ENABLE_QUEUE,
+		       efx->ptp_data->channel ?
+		       efx->ptp_data->channel->channel : 0);
+	MCDI_SET_DWORD(inbuf, PTP_IN_ENABLE_MODE, efx->ptp_data->mode);
+
+	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+				outbuf, sizeof(outbuf), NULL);
+	rc = (rc == -EALREADY) ? 0 : rc;
+	if (rc)
+		efx_mcdi_display_error(efx, MC_CMD_PTP,
+				       MC_CMD_PTP_IN_ENABLE_LEN,
+				       outbuf, sizeof(outbuf), rc);
+	return rc;
+}
+
+/* Disable MCDI PTP support. */
+static int efx_ptp_disable(struct efx_nic *efx)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_DISABLE_LEN);
+	MCDI_DECLARE_BUF_ERR(outbuf);
+	int rc;
+
+	MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_DISABLE);
+	MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+				outbuf, sizeof(outbuf), NULL);
+	rc = (rc == -EALREADY) ? 0 : rc;
+	if (rc)
+		efx_mcdi_display_error(efx, MC_CMD_PTP,
+				       MC_CMD_PTP_IN_DISABLE_LEN,
+				       outbuf, sizeof(outbuf), rc);
+	return rc;
+}
+
+static void efx_ptp_deliver_rx_queue(struct efx_nic *efx)
+{
+	struct sk_buff *skb;
+
+	while ((skb = skb_dequeue(&efx->ptp_data->rxq))) {
+#if IS_ENABLED(CONFIG_VLAN_8021Q) || defined(CONFIG_SFC_TRACING)
+		struct efx_ptp_match *match = (struct efx_ptp_match *)skb->cb;
+#endif
+
+		local_bh_disable();
+#ifdef CONFIG_SFC_TRACING
+		trace_sfc_receive(skb, false, match->vlan_tagged,
+				  match->vlan_tci);
+#endif
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
+#if defined(EFX_NOT_UPSTREAM) && defined(EFX_HAVE_VLAN_RX_PATH)
+		if (match->vlan_tagged)
+			vlan_hwaccel_receive_skb(skb, efx->vlan_group,
+						 match->vlan_tci);
+		else
+			/* fall through */
+#else
+		if (match->vlan_tagged)
+			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+					       match->vlan_tci);
+#endif
+#endif
+		netif_receive_skb(skb);
+		local_bh_enable();
+	}
+}
+
+static void efx_ptp_handle_no_channel(struct efx_nic *efx)
+{
+	netif_err(efx, drv, efx->net_dev,
+		  "ERROR: PTP requires MSI-X and 1 additional interrupt "
+		  "vector. PTP disabled\n");
+}
+
+/* Repeatedly send the host time to the MC which will capture the hardware
+ * time.
+ */
+struct efx_ptp_mcdi_data {
+	struct kref ref;
+	bool done;
+	spinlock_t done_lock;
+	int rc;
+	wait_queue_head_t wq;
+	size_t resplen;
+	_MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_SYNCHRONIZE_LENMAX);
+};
+
+static void efx_ptp_mcdi_data_release(struct kref *ref)
+{
+	kfree(container_of(ref, struct efx_ptp_mcdi_data, ref));
+}
+
+static void efx_ptp_send_times(struct efx_nic *efx,
+			       struct timespec64 *last_time_real,
+			       struct timespec64 *last_time_raw,
+			       struct efx_ptp_mcdi_data *mcdi_data)
+{
+	struct efx_ptp_data *ptp = efx->ptp_data;
+	int *mc_running = ptp->start.addr;
+	struct system_time_snapshot now;
+	struct timespec64 ts_real;
+	ktime_t limit;
+
+	ktime_get_snapshot(&now);
+	/* Initialise ts_real in case the MC is very fast and the while loop
+	 * below is skipped.
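+	 * last_time_real and last_time_raw must be valid on return in
+	 * either case, since the caller passes them on to
+	 * efx_ptp_process_times().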
+ */
+	ts_real = ktime_to_timespec64(now.real);
+	limit = ktime_add_ns(now.real, SYNCHRONISE_PERIOD_NS);
+
+	/* Write host time for specified period or until MC is done */
+	while ((ktime_compare(now.real, limit) < 0) &&
+	       READ_ONCE(*mc_running) && !READ_ONCE(mcdi_data->done)) {
+		ktime_t update_time;
+		unsigned int host_time;
+
+		/* Don't update continuously to avoid saturating the PCIe bus */
+		update_time = ktime_add_ns(now.real,
+					   SYNCHRONISATION_GRANULARITY_NS);
+		do {
+			ktime_get_snapshot(&now);
+		} while ((ktime_compare(now.real, update_time) < 0) &&
+			 READ_ONCE(*mc_running));
+
+		/* Synchronize against the MCDI completion to ensure we don't
+		 * trash the MC doorbell on EF10 if the command completes and
+		 * another is issued.
+		 */
+		spin_lock_bh(&mcdi_data->done_lock);
+
+		/* Read time again to make sure we're as up-to-date as possible */
+		ktime_get_snapshot(&now);
+
+		/* Synchronise NIC with single word of time only */
+		ts_real = ktime_to_timespec64(now.real);
+		host_time = (ts_real.tv_sec << MC_NANOSECOND_BITS |
+			     ts_real.tv_nsec);
+
+		/* Update host time in NIC memory */
+		if (!mcdi_data->done)
+			efx->type->ptp_write_host_time(efx, host_time);
+
+		spin_unlock_bh(&mcdi_data->done_lock);
+	}
+	*last_time_real = ts_real;
+	*last_time_raw = ktime_to_timespec64(now.raw);
+#ifdef CONFIG_DEBUG_FS
+	ptp->last_sync_time_host = (unsigned int)ts_real.tv_nsec;
+#endif
+}
+
+/* Read a timeset from the MC's results and partially process it. */
+static void efx_ptp_read_timeset(MCDI_DECLARE_STRUCT_PTR(data),
+				 struct efx_ptp_timeset *timeset)
+{
+	unsigned int start_ns, end_ns;
+
+	timeset->host_start = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTSTART);
+	timeset->major = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_MAJOR);
+	timeset->minor = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_MINOR);
+	timeset->host_end = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTEND);
+	timeset->wait = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_WAITNS);
+
+	/* Ignore seconds */
+	start_ns = timeset->host_start & MC_NANOSECOND_MASK;
+	end_ns = timeset->host_end & MC_NANOSECOND_MASK;
+	/* Allow for rollover */
+	if (end_ns < start_ns)
+		end_ns += NSEC_PER_SEC;
+	/* Determine duration of operation */
+	timeset->window = end_ns - start_ns;
+}
+
+#ifdef CONFIG_DEBUG_FS
+/* Calculate synchronisation window statistics */
+static void efx_ptp_sync_stats_update_window(struct efx_ptp_data *ptp,
+					     unsigned int idx, s32 window,
+					     s32 corrected)
+{
+	if (idx < PTP_SYNC_ATTEMPTS)
+		ptp->sync_window_last[idx] = window;
+	if (window < ptp->sync_window_min)
+		ptp->sync_window_min = window;
+	if (window > ptp->sync_window_max)
+		ptp->sync_window_max = window;
+
+	/* This will underestimate the average because of the truncating
+	 * integer calculations. Attempt to correct by pseudo rounding up.
+	 */
+	ptp->sync_window_average = DIV_ROUND_UP(
+		(AVERAGE_LENGTH - 1) * ptp->sync_window_average + window,
+		AVERAGE_LENGTH);
+
+	if (idx < PTP_SYNC_ATTEMPTS)
+		ptp->corrected_sync_window_last[idx] = corrected;
+	if (corrected < ptp->corrected_sync_window_min)
+		ptp->corrected_sync_window_min = corrected;
+	if (corrected > ptp->corrected_sync_window_max)
+		ptp->corrected_sync_window_max = corrected;
+
+	/* This will underestimate the average because of the truncating
+	 * integer calculations. Attempt to correct by pseudo rounding up.
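+	 *
+	 * Worked example (with a hypothetical AVERAGE_LENGTH of 16):
+	 * average = 100 and corrected = 116 gives
+	 * DIV_ROUND_UP(15 * 100 + 116, 16) = DIV_ROUND_UP(1616, 16) = 101.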
+ */
+	ptp->corrected_sync_window_average = DIV_ROUND_UP(
+		(AVERAGE_LENGTH - 1) * ptp->corrected_sync_window_average +
+		corrected, AVERAGE_LENGTH);
+}
+#else
+static void efx_ptp_sync_stats_update_window(struct efx_ptp_data *ptp,
+					     unsigned int idx, s32 window,
+					     s32 corrected)
+{
+}
+#endif
+
+/* Process times received from MC.
+ *
+ * Extract times from returned results, and establish the minimum value
+ * seen. The minimum value represents the "best" possible time and events
+ * too much greater than this are rejected - the machine is, perhaps, too
+ * busy. A number of readings are taken so that, hopefully, at least one good
+ * synchronisation will be seen in the results.
+ */
+static int
+efx_ptp_process_times(struct efx_nic *efx, MCDI_DECLARE_STRUCT_PTR(synch_buf),
+		      size_t response_length,
+		      const struct timespec64 *last_time_real,
+		      const struct timespec64 *last_time_raw)
+{
+	unsigned int number_readings =
+		MCDI_VAR_ARRAY_LEN(response_length,
+				   PTP_OUT_SYNCHRONIZE_TIMESET);
+	unsigned int i;
+	s32 ngood = 0;
+	unsigned int last_good = 0;
+	struct efx_ptp_data *ptp = efx->ptp_data;
+	u32 last_sec;
+	u32 start_sec;
+	struct timespec64 mc_time;
+	struct timespec64 delta;
+	struct timespec64 diff;
+	s64 diff_min = LONG_MAX;
+	s64 diff_avg = 0;
+	unsigned int good_mask = 0;
+
+	if (number_readings == 0)
+		return -EAGAIN;
+
+	/* Read the set of results and find the last good host-MC
+	 * synchronization result. The MC takes its time when it finishes
+	 * reading the host time, so the corrected window time should be
+	 * fairly constant for a given platform. Increment stats for any
+	 * results that appear to be erroneous.
+	 */
+	for (i = 0; i < number_readings; i++) {
+		s32 window, corrected;
+		struct timespec64 wait;
+
+		efx_ptp_read_timeset(
+			MCDI_ARRAY_STRUCT_PTR(synch_buf,
+					      PTP_OUT_SYNCHRONIZE_TIMESET, i),
+			&ptp->timeset[i]);
+
+		wait = ktime_to_timespec64(
+			ptp->nic_to_kernel_time(0, ptp->timeset[i].wait, 0));
+		window = ptp->timeset[i].window;
+		corrected = window - wait.tv_nsec;
+
+		/* We expect the uncorrected synchronization window to be at
+		 * least as large as the interval between host start and end
+		 * times. If it is smaller than this then it is most likely
+		 * to be a consequence of the host's time being adjusted.
+		 * Check that the corrected sync window is in a reasonable
+		 * range. If it is out of range it is likely to be because an
+		 * interrupt or other delay occurred between reading the system
+		 * time and writing it to MC memory.
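+		 *
+		 * Illustrative numbers: a 2000ns uncorrected window with a
+		 * 1500ns MC wait gives corrected = 500ns, which is counted
+		 * as good only if it lies between min_synchronisation_ns
+		 * and MAX_SYNCHRONISATION_NS.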
+ */ + if (window < SYNCHRONISATION_GRANULARITY_NS) { + ++ptp->sw_stats.invalid_sync_windows; + } else if (corrected >= MAX_SYNCHRONISATION_NS) { + ++ptp->sw_stats.oversize_sync_windows; + } else if (corrected < ptp->min_synchronisation_ns) { + ++ptp->sw_stats.undersize_sync_windows; + } else { + ngood++; + last_good = i; + + /* Compute the average, marking this sample as good */ + good_mask |= 1 << i; + mc_time = ktime_to_timespec64(ptp->nic_to_kernel_time( + ptp->timeset[i].major, ptp->timeset[i].minor, 0)); + mc_time.tv_sec &= MC_SECOND_MASK; + + delta.tv_sec = ptp->timeset[i].host_start >> MC_NANOSECOND_BITS; + delta.tv_nsec = ptp->timeset[i].host_start & MC_NANOSECOND_MASK; + + diff = timespec64_sub(mc_time, delta); + ptp->timeset[i].mc_host_diff = timespec64_to_ns(&diff); + diff_avg += ptp->timeset[i].mc_host_diff; /* Avg normalised below */ + } + efx_ptp_sync_stats_update_window(ptp, i, window, corrected); + } + + if (ngood == 0) + return -EAGAIN; + + if (ngood > 2) { /* No point doing this if only 1-2 valid samples */ + diff_avg = div_s64(diff_avg, ngood); + /* Find the sample which is closest to the average */ + for (i = 0; i < number_readings; i++) { + if (good_mask & (1 << i)) { + s64 d = abs(ptp->timeset[i].mc_host_diff - diff_avg); + if (d < diff_min) { + diff_min = d; + last_good = i; + } + } + } + } + + /* Calculate delay from last good sync (host time) to last_time. + * It is possible that the seconds rolled over between taking + * the start reading and the last value written by the host. The + * timescales are such that a gap of more than one second is never + * expected. delta is *not* normalised. + */ + start_sec = ptp->timeset[last_good].host_start >> MC_NANOSECOND_BITS; + last_sec = last_time_real->tv_sec & MC_SECOND_MASK; + if (start_sec != last_sec && + ((start_sec + 1) & MC_SECOND_MASK) != last_sec) { + netif_warn(efx, hw, efx->net_dev, + "PTP bad synchronisation seconds\n"); + return -EAGAIN; + } + delta.tv_sec = (last_sec - start_sec) & 1; + delta.tv_nsec = + last_time_real->tv_nsec - + (ptp->timeset[last_good].host_start & MC_NANOSECOND_MASK); + + /* Convert the NIC time at last good sync into kernel time. + * No correction is required - this time is the output of a + * firmware process. 
+ */
+	mc_time = ktime_to_timespec64(ptp->nic_to_kernel_time(
+			ptp->timeset[last_good].major,
+			ptp->timeset[last_good].minor, 0));
+	ptp->last_mc_time = mc_time;
+	ptp->last_host_time_real = timespec64_sub(*last_time_real, delta);
+	ptp->last_host_time_raw = timespec64_sub(*last_time_raw, delta);
+
+#if defined(EFX_NOT_UPSTREAM)
+	{
+		struct timespec64 last_delta =
+			timespec64_sub(mc_time,
+				       ptp->last_host_time_real);
+
+		/* Don't let compiler treat last_delta as an alias for
+		 * ptp->last_delta, which would exacerbate bug 41339
+		 */
+		barrier();
+
+		ptp->last_delta = last_delta;
+		ptp->last_delta_valid = true;
+	}
+#endif
+
+	/* Calculate delay from NIC top of second to last_time */
+	delta.tv_nsec += ptp->last_mc_time.tv_nsec;
+
+	/* Set PPS timestamp to match NIC top of second */
+#if defined(EFX_USE_KCOMPAT) && defined(EFX_HAVE_PPS_EVENT_TIME_TIMESPEC)
+	/* struct pps_event_time is old-style struct timespec, so convert */
+	ptp->host_time_pps.ts_real = timespec64_to_timespec(*last_time_real);
+#ifdef CONFIG_NTP_PPS
+	ptp->host_time_pps.ts_raw = timespec64_to_timespec(*last_time_raw);
+#endif /* CONFIG_NTP_PPS */
+	pps_sub_ts(&ptp->host_time_pps, timespec64_to_timespec(delta));
+#else /* pps_event_time uses timespec64 */
+	ptp->host_time_pps.ts_real = *last_time_real;
+#ifdef CONFIG_NTP_PPS
+	ptp->host_time_pps.ts_raw = *last_time_raw;
+#endif /* CONFIG_NTP_PPS */
+	pps_sub_ts(&ptp->host_time_pps, delta);
+#endif
+
+	return 0;
+}
+
+static void efx_ptp_cmd_complete(struct efx_nic *efx, unsigned long cookie,
+				 int rc, efx_dword_t *outbuf,
+				 size_t outlen_actual)
+{
+	struct efx_ptp_mcdi_data *data = (struct efx_ptp_mcdi_data *)cookie;
+
+	data->resplen = min_t(size_t, outlen_actual,
+			      MC_CMD_PTP_OUT_SYNCHRONIZE_LENMAX);
+	memcpy(data->outbuf, outbuf, data->resplen);
+	data->rc = rc;
+	spin_lock_bh(&data->done_lock);
+	data->done = true;
+	spin_unlock_bh(&data->done_lock);
+	wake_up(&data->wq);
+	kref_put(&data->ref, efx_ptp_mcdi_data_release);
+}
+
+/* Synchronize times between the host and the MC */
+static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
+{
+	struct efx_ptp_data *ptp = efx->ptp_data;
+	struct efx_ptp_mcdi_data *mcdi_data;
+	unsigned int mcdi_handle;
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
+	int rc;
+	unsigned long timeout;
+	struct timespec64 last_time_real = {};
+	struct timespec64 last_time_raw = {};
+	unsigned int loops = 0;
+	int *start = ptp->start.addr;
+	static const unsigned int PTP_SYNC_TIMEOUT = 10 * HZ;
+	static const unsigned int PTP_START_TIMEOUT = PTP_SYNC_TIMEOUT * 4;
+	unsigned long started;
+
+	mcdi_data = kmalloc(sizeof(*mcdi_data), GFP_KERNEL);
+	if (!mcdi_data)
+		return -ENOMEM;
+
+	kref_init(&mcdi_data->ref);
+	mcdi_data->done = false;
+	init_waitqueue_head(&mcdi_data->wq);
+	spin_lock_init(&mcdi_data->done_lock);
+
+	MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_SYNCHRONIZE);
+	MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+	MCDI_SET_DWORD(inbuf, PTP_IN_SYNCHRONIZE_NUMTIMESETS,
+		       num_readings);
+	MCDI_SET_QWORD(inbuf, PTP_IN_SYNCHRONIZE_START_ADDR,
+		       ptp->start.dma_addr);
+
+	/* Clear flag that signals MC ready */
+	WRITE_ONCE(*start, 0);
+	started = jiffies;
+	timeout = started + PTP_START_TIMEOUT;
+	/* Get an additional reference - if efx_mcdi_rpc_async_ext returns
+	 * successfully then the reference is no longer owned by us.
+	 */
+	kref_get(&mcdi_data->ref);
+	while ((rc = efx_mcdi_rpc_async_ext(efx, MC_CMD_PTP,
+					    inbuf, MC_CMD_PTP_IN_SYNCHRONIZE_LEN,
+					    efx_ptp_cmd_complete,
+					    NULL,
+					    (unsigned long)mcdi_data,
+					    false, true, &mcdi_handle)) == -EAGAIN &&
+	       time_before(jiffies, timeout))
+		efx_mcdi_wait_for_quiescence(efx, PTP_START_TIMEOUT);
+
+	if (rc)
+		/* Completer won't be called. */
+		kref_put(&mcdi_data->ref, efx_ptp_mcdi_data_release);
+
+	if (rc == -EAGAIN) {
+		netif_err(efx, drv, efx->net_dev,
+			  "MC PTP_OP command timed out trying to send after %u ms\n",
+			  jiffies_to_msecs(jiffies - started));
+		rc = -ETIMEDOUT;
+	}
+
+	if (rc == 0) {
+		/* Wait for start from MC (or timeout) */
+		timeout = jiffies + msecs_to_jiffies(MAX_SYNCHRONISE_WAIT_MS);
+		while (!READ_ONCE(mcdi_data->done) && !READ_ONCE(*start) &&
+		       (time_before(jiffies, timeout))) {
+			udelay(20);	/* The MC usually starts executing quickly */
+			loops++;
+		}
+
+		if (mcdi_data->done || !READ_ONCE(*start))
+			++ptp->sw_stats.sync_timeouts;
+		else if (loops <= 1)
+			++ptp->sw_stats.fast_syncs;
+
+		if (!mcdi_data->done && READ_ONCE(*start))
+			efx_ptp_send_times(efx, &last_time_real,
+					   &last_time_raw, mcdi_data);
+
+		if (!wait_event_timeout(mcdi_data->wq, mcdi_data->done,
+					PTP_SYNC_TIMEOUT) &&
+		    !mcdi_data->done) {
+			efx_mcdi_cancel_cmd(efx, mcdi_handle);
+			rc = -ETIMEDOUT;
+		} else {
+			rc = mcdi_data->rc;
+		}
+	}
+
+	if (rc == 0) {
+		rc = efx_ptp_process_times(efx, mcdi_data->outbuf,
+					   mcdi_data->resplen,
+					   &last_time_real,
+					   &last_time_raw);
+		if (rc == 0)
+			++ptp->sw_stats.good_syncs;
+		else
+			++ptp->sw_stats.no_time_syncs;
+	}
+
+	kref_put(&mcdi_data->ref, efx_ptp_mcdi_data_release);
+
+	/* Increment the bad syncs counter if the synchronize fails, whatever
+	 * the reason.
+	 */
+	if (rc != 0)
+		++ptp->sw_stats.bad_syncs;
+
+	return rc;
+}
+
+#ifdef EFX_NOT_UPSTREAM
+/* Get the host time from a given hardware time */
+static bool efx_ptp_get_host_time(struct efx_nic *efx,
+				  struct skb_shared_hwtstamps *timestamps)
+{
+#ifdef EFX_HAVE_SKB_SYSTSTAMP
+	if (efx->ptp_data->last_delta_valid) {
+		ktime_t diff = timespec64_to_ktime(efx->ptp_data->last_delta);
+		timestamps->syststamp = ktime_sub(timestamps->hwtstamp, diff);
+	} else {
+		timestamps->syststamp = ktime_set(0, 0);
+	}
+
+	return efx->ptp_data->last_delta_valid;
+#else
+	return false;
+#endif
+}
+#endif
+
+/* Transmit a PTP packet via the dedicated hardware timestamped queue. */
+static void efx_ptp_xmit_skb_queue(struct efx_nic *efx, struct sk_buff *skb)
+{
+	struct efx_ptp_data *ptp_data = efx->ptp_data;
+	struct efx_tx_queue *tx_queue;
+
+	tx_queue = efx->select_tx_queue(ptp_data->channel, skb);
+	if (tx_queue && tx_queue->timestamping) {
+		skb_get(skb);
+
+		/* This code invokes normal driver TX code which is always
+		 * protected from softirqs when called from generic TX code,
+		 * which in turn disables preemption. Look at __dev_queue_xmit
+		 * which uses rcu_read_lock_bh disabling preemption for RCU
+		 * plus disabling softirqs. We do not need RCU reader
+		 * protection here.
+		 *
+		 * Although it is theoretically safe for the current PTP TX/RX
+		 * code to run without disabling softirqs, there are three
+		 * good reasons for doing so:
+		 *
+		 * 1) The code invoked is mainly implemented for non-PTP
+		 *    packets and it is always executed with softirqs
+		 *    disabled.
+		 * 2) This being a single PTP packet, better to not
+		 *    interrupt its processing by softirqs which can lead
+		 *    to high latencies.
+		 * 3) netdev_xmit_more checks preemption is disabled and
+		 *    triggers a BUG_ON if not.
+		 */
+		local_bh_disable();
+		efx_enqueue_skb(tx_queue, skb);
+
+		/* If netdev_xmit_more() was true in enqueue_skb() then our
+		 * queue will be waiting for the next packet to push the
+		 * doorbell. Since the next packet might not be coming this
+		 * way (if it doesn't need a timestamp) we need to push it
+		 * directly.
+		 */
+		efx_nic_push_buffers(tx_queue);
+		local_bh_enable();
+
+		/* We need to add the filters after enqueuing the packet.
+		 * Otherwise, there's high latency in sending back the
+		 * timestamp, causing ptp4l timeouts.
+		 */
+		efx_ptp_insert_unicast_filter(efx, skb);
+		dev_consume_skb_any(skb);
+	} else {
+		WARN_ONCE(1, "PTP channel has no timestamped tx queue\n");
+		dev_kfree_skb_any(skb);
+	}
+}
+
+/* Transmit a PTP packet, via the MCDI interface, to the wire. */
+static void efx_ptp_xmit_skb_mc(struct efx_nic *efx, struct sk_buff *skb)
+{
+	MCDI_DECLARE_BUF(txtime, MC_CMD_PTP_OUT_TRANSMIT_LEN);
+	struct efx_ptp_data *ptp_data = efx->ptp_data;
+	struct skb_shared_hwtstamps timestamps;
+	size_t len;
+	int rc;
+
+	MCDI_SET_DWORD(ptp_data->txbuf, PTP_IN_OP, MC_CMD_PTP_OP_TRANSMIT);
+	MCDI_SET_DWORD(ptp_data->txbuf, PTP_IN_PERIPH_ID, 0);
+
+	/* Get the UDP source IP address and use it to set up a unicast receive
+	 * filter for received PTP packets. This enables PTP hybrid mode to
+	 * work.
+	 */
+	efx_ptp_insert_unicast_filter(efx, skb);
+
+	if (skb_shinfo(skb)->nr_frags != 0) {
+		rc = skb_linearize(skb);
+		if (rc != 0)
+			goto fail;
+	}
+
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		rc = skb_checksum_help(skb);
+		if (rc != 0)
+			goto fail;
+	}
+#if defined(EFX_NOT_UPSTREAM) && defined(EFX_USE_FAKE_VLAN_TX_ACCEL)
+	if (skb_vlan_tag_present(skb)) {
+		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
+						skb_vlan_tag_get(skb));
+		if (unlikely(!skb))
+			return;
+	}
+#endif
+	skb_copy_from_linear_data(skb,
+				  MCDI_PTR(ptp_data->txbuf,
+					   PTP_IN_TRANSMIT_PACKET),
+				  skb->len);
+	MCDI_SET_DWORD(ptp_data->txbuf, PTP_IN_TRANSMIT_LENGTH, skb->len);
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_PTP,
+			  ptp_data->txbuf, MC_CMD_PTP_IN_TRANSMIT_LEN(skb->len),
+			  txtime, sizeof(txtime), &len);
+	if (rc != 0)
+		goto fail;
+
+	memset(&timestamps, 0, sizeof(timestamps));
+	timestamps.hwtstamp = ptp_data->nic_to_kernel_time(
+		MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_MAJOR),
+		MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_MINOR),
+		ptp_data->ts_corrections.ptp_tx);
+
+#if defined(EFX_NOT_UPSTREAM) || (defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_PHC_SUPPORT))
+	/* Failure to get the system timestamp is non-fatal */
+	(void)efx_ptp_get_host_time(efx, &timestamps);
+#endif
+	skb_tstamp_tx(skb, &timestamps);
+
+	rc = 0;
+
+fail:
+	dev_kfree_skb_any(skb);
+
+	return;
+}
+
+static enum ptp_packet_state efx_ptp_match_rx(struct efx_nic *efx,
+					      struct sk_buff *skb)
+{
+#ifndef EFX_USE_KCOMPAT
+	WARN_ON_ONCE(true);
+	return PTP_PACKET_STATE_UNMATCHED;
+#else
+	struct efx_ptp_match *match = (struct efx_ptp_match *)skb->cb;
+
+	match->state = PTP_PACKET_STATE_MATCHED;
+	return PTP_PACKET_STATE_MATCHED;
+#endif
+}
+
+/* Process any queued receive events and corresponding packets
+ *
+ * q is returned with all the packets that are ready for delivery.
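+ *
+ * Packets whose timestamp has not arrived yet are put back at the head
+ * of the receive queue and processing stops, so delivery order is
+ * preserved.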
+ */ +static void efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q) +{ + struct efx_ptp_data *ptp = efx->ptp_data; + struct sk_buff *skb; + + while ((skb = skb_dequeue(&ptp->rxq))) { + struct efx_ptp_match *match; + + match = (struct efx_ptp_match *)skb->cb; + if (match->state == PTP_PACKET_STATE_MATCH_UNWANTED) { + __skb_queue_tail(q, skb); + } else if (efx_ptp_match_rx(efx, skb) == + PTP_PACKET_STATE_MATCHED) { + __skb_queue_tail(q, skb); + } else if (time_after(jiffies, match->expiry)) { + match->state = PTP_PACKET_STATE_TIMED_OUT; + ++ptp->sw_stats.rx_no_timestamp; + __skb_queue_tail(q, skb); + } else { + /* Replace unprocessed entry and stop */ + skb_queue_head(&ptp->rxq, skb); + break; + } + } +} + +#ifdef EFX_NOT_UPSTREAM +#ifdef CONFIG_DEBUG_FS +/* Calculate synchronisation delta statistics */ +static void efx_ptp_update_delta_stats(struct efx_nic *efx, + struct skb_shared_hwtstamps *timestamps) +{ + struct efx_ptp_data *ptp = efx->ptp_data; + ktime_t diff; + ktime_t delta = timespec64_to_ktime(ptp->last_delta); + + diff = ktime_sub(timestamps->hwtstamp, delta); + ptp->last_sync_delta = ktime_to_ns(diff); + if (ptp->last_sync_delta < ptp->min_sync_delta) + ptp->min_sync_delta = ptp->last_sync_delta; + + if (ptp->last_sync_delta > ptp->max_sync_delta) + ptp->max_sync_delta = ptp->last_sync_delta; + + /* This will underestimate the average because of the + * truncating integer calculations. Attempt to correct by + * pseudo rounding up. + */ + ptp->average_sync_delta = DIV_ROUND_UP( + (AVERAGE_LENGTH - 1) * ptp->average_sync_delta + + ptp->last_sync_delta, AVERAGE_LENGTH); +} +#else +static void efx_ptp_update_delta_stats(struct efx_nic *efx, + struct skb_shared_hwtstamps *timestamps) +{ +} +#endif +#endif /* EFX_NOT_UPSTREAM */ + +/* Complete processing of a received packet */ +static inline void efx_ptp_process_rx(struct efx_nic *efx, struct sk_buff *skb) +{ +#if defined(CONFIG_SFC_TRACING) || IS_ENABLED(CONFIG_VLAN_8021Q) || \ + defined(EFX_NOT_UPSTREAM) + struct efx_ptp_match *match = (struct efx_ptp_match *)skb->cb; +#endif +#ifdef EFX_NOT_UPSTREAM + struct skb_shared_hwtstamps *timestamps = skb_hwtstamps(skb); + + /* Translate timestamps, as required */ + if (match->state == PTP_PACKET_STATE_MATCHED && +#if !defined(EFX_USE_KCOMPAT) || !defined(EFX_HAVE_KTIME_UNION) + timestamps->hwtstamp) { +#else + timestamps->hwtstamp.tv64) { +#endif + efx_ptp_get_host_time(efx, timestamps); + efx_ptp_update_delta_stats(efx, timestamps); + } + +#endif + local_bh_disable(); +#ifdef CONFIG_SFC_TRACING + trace_sfc_receive(skb, false, match->vlan_tagged, match->vlan_tci); +#endif +#if IS_ENABLED(CONFIG_VLAN_8021Q) +#if defined(EFX_NOT_UPSTREAM) && defined(EFX_HAVE_VLAN_RX_PATH) + if (match->vlan_tagged) + vlan_hwaccel_receive_skb(skb, efx->vlan_group, + match->vlan_tci); + else + /* fall through */ +#else + if (match->vlan_tagged) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + match->vlan_tci); +#endif +#endif + netif_receive_skb(skb); + local_bh_enable(); +} + +static struct efx_ptp_rxfilter * +efx_ptp_filter_exists(struct efx_nic *efx, + struct list_head *filter_list, + struct efx_filter_spec *spec) +{ + struct efx_ptp_data *ptp = efx->ptp_data; + struct efx_ptp_rxfilter *rxfilter; + WARN_ON(!mutex_is_locked(&ptp->rxfilters_lock)); + + list_for_each_entry(rxfilter, filter_list, list) { + if (rxfilter->ether_type == spec->ether_type && + rxfilter->loc_port == spec->loc_port && + !memcmp(rxfilter->loc_host, spec->loc_host, sizeof(spec->loc_host))) + return rxfilter; + } + + 
return NULL; +} + +static void efx_ptp_remove_one_filter(struct efx_nic *efx, + struct efx_ptp_rxfilter *rxfilter) +{ + struct efx_ptp_data *ptp = efx->ptp_data; + + WARN_ON(!mutex_is_locked(&ptp->rxfilters_lock)); + efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, + rxfilter->handle); + list_del(&rxfilter->list); + kfree(rxfilter); +} + +static void efx_ptp_remove_filters(struct efx_nic *efx, + struct list_head *filter_list) +{ + struct efx_ptp_data *ptp = efx->ptp_data; + struct efx_ptp_rxfilter *rxfilter, *tmp; + + mutex_lock(&ptp->rxfilters_lock); + list_for_each_entry_safe(rxfilter, tmp, filter_list, list) + efx_ptp_remove_one_filter(efx, rxfilter); + mutex_unlock(&ptp->rxfilters_lock); +} + +static void efx_ptp_init_filter(struct efx_nic *efx, + struct efx_filter_spec *rxfilter) +{ + struct efx_channel *channel = efx->ptp_data->channel; + struct efx_rx_queue *queue = efx_channel_get_rx_queue(channel); + + efx_filter_init_rx(rxfilter, EFX_FILTER_PRI_REQUIRED, 0, + efx_rx_queue_index(queue)); +} + +static int efx_ptp_insert_filter(struct efx_nic *efx, + struct list_head *filter_list, + struct efx_filter_spec *spec, + unsigned long expiry) +{ + struct efx_ptp_data *ptp = efx->ptp_data; + struct efx_ptp_rxfilter *rxfilter; + int rc = 0; + + mutex_lock(&ptp->rxfilters_lock); + + rxfilter = efx_ptp_filter_exists(efx, filter_list, spec); + if (rxfilter) { + rxfilter->expiry = expiry; + goto out; + } + + rxfilter = kzalloc(sizeof(*rxfilter), GFP_KERNEL); + if (!rxfilter) { + rc = -ENOMEM; + goto out; + } + + rc = efx_filter_insert_filter(efx, spec, true); + if (rc < 0) { + kfree(rxfilter); + goto out; + } + + rxfilter->handle = rc; + rxfilter->ether_type = spec->ether_type; + rxfilter->loc_port = spec->loc_port; + memcpy(rxfilter->loc_host, spec->loc_host, sizeof(spec->loc_host)); + rxfilter->expiry = expiry; + list_add(&rxfilter->list, filter_list); + + if (expiry > 0) + queue_delayed_work(ptp->workwq, &ptp->cleanup_work, + UCAST_FILTER_EXPIRY_JIFFIES + 1); + +out: + mutex_unlock(&ptp->rxfilters_lock); + return rc; +} + +static int efx_ptp_insert_ipv4_filter(struct efx_nic *efx, + struct list_head *filter_list, + __be32 addr, u16 port, + unsigned long expiry) +{ + struct efx_filter_spec spec; + + efx_ptp_init_filter(efx, &spec); + efx_filter_set_ipv4_local(&spec, IPPROTO_UDP, addr, htons(port)); + return efx_ptp_insert_filter(efx, filter_list, &spec, expiry); +} + +static int efx_ptp_insert_ipv6_filter(struct efx_nic *efx, + struct list_head *filter_list, + const struct in6_addr *address, + u16 port, unsigned long expiry) { + + struct efx_filter_spec spec; + + efx_ptp_init_filter(efx, &spec); + + efx_filter_set_ipv6_local(&spec, IPPROTO_UDP, address, htons(port)); + + return efx_ptp_insert_filter(efx, filter_list, &spec, expiry); +} + +static int efx_ptp_insert_eth_multicast_filter(struct efx_nic *efx, const u8 *addr) +{ + struct efx_ptp_data *ptp = efx->ptp_data; + struct efx_filter_spec spec; + + efx_ptp_init_filter(efx, &spec); + efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, addr); + spec.match_flags |= EFX_FILTER_MATCH_ETHER_TYPE; + spec.ether_type = htons(ETH_P_1588); + return efx_ptp_insert_filter(efx, &ptp->rxfilters_mcast, &spec, 0); +} + +static int efx_ptp_insert_multicast_filters(struct efx_nic *efx) +{ + struct efx_ptp_data *ptp = efx->ptp_data; + int rc; + + if (!ptp->channel || !list_empty(&ptp->rxfilters_mcast)) + return 0; + + /* Must filter on both event and general ports to ensure + * that there is no packet re-ordering. 
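+	 *
+	 * (PTP event messages use UDP port 319 and general messages use
+	 * UDP port 320; both are steered to the PTP channel's receive
+	 * queue here.)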
+ */
+	rc = efx_ptp_insert_ipv4_filter(efx, &ptp->rxfilters_mcast,
+					htonl(PTP_ADDR), PTP_EVENT_PORT, 0);
+	if (rc < 0)
+		goto fail;
+
+	rc = efx_ptp_insert_ipv4_filter(efx, &ptp->rxfilters_mcast,
+					htonl(PTP_ADDR), PTP_GENERAL_PORT, 0);
+	if (rc < 0)
+		goto fail;
+
+	rc = efx_ptp_insert_ipv4_filter(efx, &ptp->rxfilters_mcast,
+					htonl(PTP_PEER_DELAY_ADDR),
+					PTP_EVENT_PORT, 0);
+	if (rc < 0)
+		goto fail;
+
+	rc = efx_ptp_insert_ipv4_filter(efx, &ptp->rxfilters_mcast,
+					htonl(PTP_PEER_DELAY_ADDR),
+					PTP_GENERAL_PORT, 0);
+	if (rc < 0)
+		goto fail;
+
+	/* If the NIC supports hardware timestamping in the MAC, we can
+	 * support PTP over IPv6 and Ethernet.
+	 */
+	if (efx_ptp_use_mac_tx_timestamps(efx)) {
+		rc = efx_ptp_insert_ipv6_filter(efx, &ptp->rxfilters_mcast,
+						&ptp_addr_ipv6,
+						PTP_EVENT_PORT, 0);
+		if (rc < 0)
+			goto fail;
+
+		rc = efx_ptp_insert_ipv6_filter(efx, &ptp->rxfilters_mcast,
+						&ptp_addr_ipv6,
+						PTP_GENERAL_PORT, 0);
+		if (rc < 0)
+			goto fail;
+
+		rc = efx_ptp_insert_ipv6_filter(efx, &ptp->rxfilters_mcast,
+						&ptp_peer_delay_addr_ipv6,
+						PTP_EVENT_PORT, 0);
+		if (rc < 0)
+			goto fail;
+
+		rc = efx_ptp_insert_ipv6_filter(efx, &ptp->rxfilters_mcast,
+						&ptp_peer_delay_addr_ipv6,
+						PTP_GENERAL_PORT, 0);
+		if (rc < 0)
+			goto fail;
+
+		/* Not all firmware variants support these filters */
+		rc = efx_ptp_insert_eth_multicast_filter(efx, ptp_addr_ether);
+		if (rc < 0 && rc != -EPROTONOSUPPORT)
+			goto fail;
+
+		rc = efx_ptp_insert_eth_multicast_filter(efx,
+						ptp_peer_delay_addr_ether);
+		if (rc < 0 && rc != -EPROTONOSUPPORT)
+			goto fail;
+	}
+
+	return 0;
+
+fail:
+	efx_ptp_remove_filters(efx, &ptp->rxfilters_mcast);
+
+	return rc;
+}
+
+static bool efx_ptp_valid_mcast_ipv4_event_pkt(struct sk_buff *skb)
+{
+	return skb->protocol == htons(ETH_P_IP) &&
+	       ip_hdr(skb)->daddr == htonl(PTP_ADDR) &&
+	       ip_hdr(skb)->protocol == IPPROTO_UDP &&
+	       udp_hdr(skb)->source == htons(PTP_EVENT_PORT);
+}
+
+static bool efx_ptp_valid_mcast_ipv6_event_pkt(struct sk_buff *skb)
+{
+	return skb->protocol == htons(ETH_P_IPV6) &&
+	       ipv6_addr_equal(&ipv6_hdr(skb)->daddr, &ptp_addr_ipv6) &&
+	       ipv6_hdr(skb)->nexthdr == IPPROTO_UDP &&
+	       udp_hdr(skb)->source == htons(PTP_EVENT_PORT);
+}
+
+static bool efx_ptp_valid_unicast_event_pkt(struct sk_buff *skb)
+{
+	if (efx_ptp_valid_mcast_ipv4_event_pkt(skb) ||
+	    efx_ptp_valid_mcast_ipv6_event_pkt(skb))
+		return false;
+
+	if (skb->protocol == htons(ETH_P_IP))
+		return ip_hdr(skb)->protocol == IPPROTO_UDP &&
+		       udp_hdr(skb)->source == htons(PTP_EVENT_PORT);
+
+	/* The IPv6 header must be checked via ipv6_hdr(); ip_hdr() would
+	 * read the wrong offset for an IPv6 packet.
+	 */
+	if (skb->protocol == htons(ETH_P_IPV6))
+		return ipv6_hdr(skb)->nexthdr == IPPROTO_UDP &&
+		       udp_hdr(skb)->source == htons(PTP_EVENT_PORT);
+
+	return false;
+}
+
+static int efx_ptp_insert_unicast_filter(struct efx_nic *efx,
+					 struct sk_buff *skb)
+{
+	struct efx_ptp_data *ptp = efx->ptp_data;
+	unsigned long expiry;
+	int rc;
+
+	if (!efx_ptp_valid_unicast_event_pkt(skb))
+		return -EINVAL;
+
+	expiry = jiffies + UCAST_FILTER_EXPIRY_JIFFIES;
+
+	if (skb->protocol == htons(ETH_P_IP)) {
+		__be32 addr = ip_hdr(skb)->saddr;
+
+		rc = efx_ptp_insert_ipv4_filter(efx, &ptp->rxfilters_ucast,
+						addr, PTP_EVENT_PORT, expiry);
+		if (rc < 0)
+			goto out;
+
+		rc = efx_ptp_insert_ipv4_filter(efx, &ptp->rxfilters_ucast,
+						addr, PTP_GENERAL_PORT,
+						expiry);
+	} else if (skb->protocol == htons(ETH_P_IPV6) &&
+		   efx_ptp_use_mac_tx_timestamps(efx)) {
+		/* IPv6 PTP only supported by devices with MAC hw timestamp */
+		struct in6_addr *addr = &ipv6_hdr(skb)->saddr;
+
+		rc = efx_ptp_insert_ipv6_filter(efx, &ptp->rxfilters_ucast,
+						addr, PTP_EVENT_PORT, expiry);
+		if (rc < 0)
+			goto out;
+
+		rc = efx_ptp_insert_ipv6_filter(efx, &ptp->rxfilters_ucast,
+						addr, PTP_GENERAL_PORT,
expiry); + } else { + return -EOPNOTSUPP; + } + +out: + return rc; +} + +static void efx_ptp_cleanup_worker(struct work_struct *work) +{ + struct efx_ptp_data *ptp = + container_of(work, struct efx_ptp_data, cleanup_work.work); + struct efx_ptp_rxfilter *rxfilter, *tmp; + + mutex_lock(&ptp->rxfilters_lock); + list_for_each_entry_safe(rxfilter, tmp, &ptp->rxfilters_ucast, list) { + if (time_is_before_jiffies(rxfilter->expiry)) + efx_ptp_remove_one_filter(ptp->efx, rxfilter); + } + mutex_unlock(&ptp->rxfilters_lock); + + if (!list_empty(&ptp->rxfilters_ucast)) { + queue_delayed_work(ptp->workwq, &ptp->cleanup_work, + UCAST_FILTER_EXPIRY_JIFFIES + 1); + } +} + +static int efx_ptp_start(struct efx_nic *efx) +{ + struct efx_ptp_data *ptp = efx->ptp_data; + int rc; + + ptp->reset_required = false; + + rc = efx_ptp_insert_multicast_filters(efx); + if (rc) + return rc; + + rc = efx_ptp_enable(efx); + if (rc != 0) + goto fail; + + ptp->evt_frag_idx = 0; + ptp->current_adjfreq = 0; + + return 0; + +fail: + efx_ptp_remove_filters(efx, &ptp->rxfilters_mcast); + return rc; +} + +static int efx_ptp_stop(struct efx_nic *efx) +{ + struct efx_ptp_data *ptp = efx->ptp_data; + int rc; + + if (ptp == NULL) + return 0; + + rc = efx_ptp_disable(efx); + + efx_ptp_remove_filters(efx, &ptp->rxfilters_mcast); + efx_ptp_remove_filters(efx, &ptp->rxfilters_ucast); + + /* Make sure RX packets are really delivered */ + efx_ptp_deliver_rx_queue(efx); + skb_queue_purge(&efx->ptp_data->txq); +#if defined(EFX_NOT_UPSTREAM) + ptp->last_delta_valid = false; +#endif + + return rc; +} + +static int efx_ptp_restart(struct efx_nic *efx) +{ + if (efx->ptp_data && efx->ptp_data->enabled) +#ifndef EFX_NOT_UPSTREAM + return efx_ptp_start(efx); +#else + { + int rc = efx_ptp_start(efx); + if (!rc) + rc = efx_ptp_synchronize(efx, PTP_SYNC_ATTEMPTS); + return rc; + } +#endif + return 0; +} + +#ifdef EFX_NOT_UPSTREAM +static void efx_ptp_pps_worker(struct work_struct *work) +{ + struct efx_ptp_data *ptp = + container_of(work, struct efx_ptp_data, pps_work); + struct efx_nic *efx = (ptp? 
ptp->efx: NULL); +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_PHC_SUPPORT) + struct ptp_clock_event ptp_evt; +#endif + + if (!ptp || !efx || !ptp->pps_workwq) + return; + kref_get(&ptp->kref); + + if (efx_ptp_synchronize(efx, PTP_SYNC_ATTEMPTS)) + goto out; + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_PHC_SUPPORT) + if (ptp->usr_evt_enabled & (1 << PTP_CLK_REQ_PPS)) { + ptp_evt.type = PTP_CLOCK_PPSUSR; + ptp_evt.pps_times = ptp->host_time_pps; + ptp_clock_event(ptp->phc_clock, &ptp_evt); + } +#endif +out: + kref_put(&ptp->kref, efx_ptp_delete_data); +} + +int efx_ptp_pps_get_event(struct efx_nic *efx, struct efx_ts_get_pps *event) +{ + struct timespec64 nic_time; + struct efx_pps_data *pps_data; + unsigned int ev; + unsigned int err; + unsigned int timeout; + + if (!efx->ptp_data) + return -ENOTTY; + + if (!efx->ptp_data->pps_data) + return -ENOTTY; + + pps_data = efx->ptp_data->pps_data; + timeout = msecs_to_jiffies(event->timeout); + + ev = pps_data->last_ev_taken; + + if (ev == pps_data->last_ev) { + err = wait_event_interruptible_timeout(pps_data->read_data, + ev != pps_data->last_ev, + timeout); + if (err == 0) + return -ETIMEDOUT; + + /* Check for pending signals */ + if (err == -ERESTARTSYS) + return -EINTR; + } + + /* Return the fetched timestamp */ + nic_time = ktime_to_timespec64(pps_data->n_assert); + event->nic_assert.tv_sec = nic_time.tv_sec; + event->nic_assert.tv_nsec = nic_time.tv_nsec; + + event->sys_assert.tv_sec = pps_data->s_assert.tv_sec; + event->sys_assert.tv_nsec = pps_data->s_assert.tv_nsec; + + event->delta.tv_sec = pps_data->s_delta.tv_sec; + event->delta.tv_nsec = pps_data->s_delta.tv_nsec; + event->sequence = pps_data->last_ev; + + pps_data->last_ev_taken = pps_data->last_ev; + + return 0; +} + +static bool efx_is_pps_possible(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_RESET_STATS_LEN); + int rc; + + MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_RESET_STATS); + MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); + + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), + NULL, 0, NULL); + + return rc != -EPERM; +} + +int efx_ptp_hw_pps_enable(struct efx_nic *efx, bool enable) +{ + struct efx_pps_data *pps_data; + MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_PPS_ENABLE_LEN); + int rc; + + if (!efx->ptp_data) + return -ENOTTY; + + if (!efx->ptp_data->pps_data) + return -ENOTTY; + + pps_data = efx->ptp_data->pps_data; + + MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_PPS_ENABLE); + MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); + MCDI_SET_DWORD(inbuf, PTP_IN_PPS_ENABLE_OP, + enable ? MC_CMD_PTP_ENABLE_PPS : + MC_CMD_PTP_DISABLE_PPS); + MCDI_SET_DWORD(inbuf, PTP_IN_PPS_ENABLE_QUEUE_ID, + efx->ptp_data->channel ? 
+ efx->ptp_data->channel->channel : 0); + + rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), + NULL, 0, NULL); + + if (rc && rc != -MC_CMD_ERR_EALREADY) + return rc; + + if (enable) { + pps_data->last_ev = 0; + pps_data->last_ev_taken = 0; + memset(&pps_data->s_delta, 0x0, sizeof(pps_data->s_delta)); + memset(&pps_data->s_assert, 0x0, sizeof(pps_data->s_assert)); + memset(&pps_data->n_assert, 0x0, sizeof(pps_data->n_assert)); + } + + pps_data->nic_hw_pps_enabled = enable; + + return 0; +} + +static inline bool efx_phc_exposed(struct efx_nic *efx) +{ + return efx->phc_ptp_data == efx->ptp_data && efx->ptp_data != NULL; +} + +int efx_ptp_pps_reset(struct efx_nic *efx) +{ + struct efx_pps_data *pps_data; + + if (!efx->ptp_data) + return -ENOTTY; + + if (!efx->ptp_data->pps_data) + return -ENOTTY; + + pps_data = efx->ptp_data->pps_data; + + /* If PPS was enabled before MC reset, re-enable PPS*/ + if (pps_data->nic_hw_pps_enabled && efx_phc_exposed(efx)) + return efx_ptp_hw_pps_enable(efx, true); + + return 0; +} +#endif + +static void efx_ptp_worker(struct work_struct *work) +{ + struct efx_ptp_data *ptp_data = + container_of(work, struct efx_ptp_data, work); + struct efx_nic *efx = ptp_data->efx; + struct sk_buff *skb; + struct sk_buff_head tempq; + + if (ptp_data->reset_required) { + efx_ptp_stop(efx); + efx_ptp_start(efx); + return; + } + + __skb_queue_head_init(&tempq); + efx_ptp_process_events(efx, &tempq); + + while ((skb = skb_dequeue(&ptp_data->txq))) + ptp_data->xmit_skb(efx, skb); + + while ((skb = __skb_dequeue(&tempq))) + efx_ptp_process_rx(efx, skb); +} + +#if defined(EFX_NOT_UPSTREAM) +static int efx_ptp_create_pps(struct efx_ptp_data *ptp) +{ + struct efx_pps_data *pps; + + pps = kzalloc(sizeof(*pps), GFP_ATOMIC); + if (!pps) + return -ENOMEM; + + init_waitqueue_head(&pps->read_data); + pps->nic_hw_pps_enabled = false; + + if (kobject_init_and_add(&pps->kobj, + &efx_sysfs_ktype, + &ptp->efx->pci_dev->dev.kobj, + "pps_stats")) + goto fail1; + + pps->ptp = ptp; + ptp->pps_data = pps; + + if (efx_phc_exposed(ptp->efx)) { + if (efx_ptp_hw_pps_enable(ptp->efx, true)) + goto fail2; + } + + kref_get(&ptp->kref); + + return 0; + +fail2: + kobject_del(&ptp->pps_data->kobj); + kobject_put(&ptp->pps_data->kobj); +fail1: + kfree(pps); + ptp->pps_data = NULL; + + return -ENOMEM; +} + +static void efx_ptp_destroy_pps(struct efx_ptp_data *ptp) +{ + if (!ptp->pps_data) + return; + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_PHC_SUPPORT) + /* Stop generating user space events */ + ptp->usr_evt_enabled = 0; +#endif + if (efx_phc_exposed(ptp->efx)) + efx_ptp_hw_pps_enable(ptp->efx, false); + + kobject_del(&ptp->pps_data->kobj); + kobject_put(&ptp->pps_data->kobj); +} +#endif + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_PTP_CLOCK_GETTIMEX64) +static int efx_phc_gettimex64(struct ptp_clock_info *ptp, struct timespec64 *ts, + struct ptp_system_timestamp *sts) +{ + struct efx_ptp_data *ptp_data = container_of(ptp, + struct efx_ptp_data, + phc_clock_info); + MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_READ_NIC_TIME_LEN); + MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_READ_NIC_TIME_LEN); + struct efx_nic *efx = ptp_data->efx; + struct system_time_snapshot snap; + ktime_t kt; + int rc; + + MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_READ_NIC_TIME); + MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); + + if (sts) { + ktime_get_snapshot(&snap); + sts->pre_ts = ktime_to_timespec64(snap.real); + } + + rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), NULL); + if (rc) 
+ return rc; + + if (sts) { + ktime_get_snapshot(&snap); + sts->post_ts = ktime_to_timespec64(snap.real); + } + + kt = ptp_data->nic_to_kernel_time( + MCDI_DWORD(outbuf, PTP_OUT_READ_NIC_TIME_MAJOR), + MCDI_DWORD(outbuf, PTP_OUT_READ_NIC_TIME_MINOR), 0); + *ts = ktime_to_timespec64(kt); + return 0; +} +#endif + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_PHC_SUPPORT) +static int efx_phc_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *request, + int enable) +{ + struct efx_ptp_data *ptp_data = container_of(ptp, + struct efx_ptp_data, + phc_clock_info); + + switch (request->type) { +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_PTP_PF_NONE) + case PTP_CLK_REQ_EXTTS: + if (ptp->pin_config[0].func != PTP_PF_EXTTS) + enable = false; + fallthrough; +#endif + case PTP_CLK_REQ_PPS: + if (enable) + ptp_data->usr_evt_enabled |= (1 << request->type); + else + ptp_data->usr_evt_enabled &= ~(1 << request->type); + break; + default: + return -EOPNOTSUPP; + } + return 0; +} +#endif + +#if !defined(EFX_USE_KCOMPAT) || (defined(EFX_HAVE_PHC_SUPPORT) && defined(EFX_HAVE_PTP_PF_NONE)) +static int efx_phc_verify(struct ptp_clock_info *ptp, unsigned int pin, + enum ptp_pin_function func, unsigned int chan) +{ + switch (func) { + case PTP_PF_NONE: + case PTP_PF_EXTTS: + break; + default: + return -1; + } + return 0; +} +#endif + +#if !defined(EFX_USE_KCOMPAT) || (defined(EFX_HAVE_PHC_SUPPORT) && defined(EFX_HAVE_PTP_GETCROSSTSTAMP)) +static int efx_phc_getcrosststamp(struct ptp_clock_info *ptp, + struct system_device_crosststamp *cts) +{ + struct efx_ptp_data *ptp_data = container_of(ptp, + struct efx_ptp_data, + phc_clock_info); + struct efx_nic *efx = ptp_data->efx; + int rc; + + if (!efx->ptp_data) + return -ENOTTY; + + rc = efx_ptp_synchronize(efx, PTP_SYNC_ATTEMPTS); + if (rc == -EAGAIN) + return -EBUSY; /* keep phc2sys etc happy */ + if (rc) + return rc; + + cts->device = timespec64_to_ktime(ptp_data->last_mc_time); + cts->sys_realtime = timespec64_to_ktime(ptp_data->last_host_time_real); + cts->sys_monoraw = timespec64_to_ktime(ptp_data->last_host_time_raw); + return 0; +} +#endif + +/* Convert FP16 ppm value to frequency adjustment in specified fixed point form. + * + * Input should be within the range +/- 0.1. If MAX_PPB is increased beyond + * 10^8 then the formula here must be revised to avoid overflow. + * + * Precalculate multipliers to scale FP16 PPM values to a fixed point frequency + * adjustment factor. + * - The required scaling factor is: 1 / (10^6 * 2^16). + * - The resultant factor will be represented in FP40 or FP44 as specified. + * - Required adjustment range is: +/- 0.1. + * - This requires 33 bits to represent FP16 PPM precision plus sign bit. + * - The multiplier width cannot exceed 31 bits, to avoid overflow. + * - 2^50 / 10^6 fits into 31 bits so we precalculate this as the scale word. + * - Cancelling out the 2^16 in the denominator this gives a result in FP66. + * - Shifting this down by 22 gets us the FP44 value required for some hardware. + * - Shifting this down by 26 gets us the FP40 value required by other hardware. + * - A further scale word corrects error in low order bits of large adjustments. + * - This additional scale word arbitrarily use 30 bits of precision. + */ +static s64 ppm_fp16_to_factor(s64 ppm_fp16, unsigned int prec) +{ + /* FP66 multipliers to achieve division of multiplicand */ + static const s64 scale_word_whole = (1LL << 50) / 1000000LL; + static const s64 scale_word_frac = (((1LL << 50) % 1000000LL) << 30) / 1000000LL; + + /* 1. 
Apply whole and fractional scale words to effect division. + * 2. Round to nearest at target precision. + * 3. Shift down to target precision. + */ + return (ppm_fp16 * scale_word_whole + + (ppm_fp16 * scale_word_frac >> 30) + + (1 << (66 - prec - 1))) >> (66 - prec); +} + +static int efx_phc_adjfine(struct ptp_clock_info *ptp, long ppm_fp16) +{ + struct efx_ptp_data *ptp_data = container_of(ptp, + struct efx_ptp_data, + phc_clock_info); + MCDI_DECLARE_BUF(inadj, MC_CMD_PTP_IN_ADJUST_LEN); + struct efx_nic *efx = ptp_data->efx; + s64 adjustment; + int rc; + + ppm_fp16 = clamp_val(ppm_fp16, -ptp_data->max_adjfine, ptp_data->max_adjfine); + adjustment = ppm_fp16_to_factor(ppm_fp16, ptp_data->adjfreq_prec); + + MCDI_SET_DWORD(inadj, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST); + MCDI_SET_DWORD(inadj, PTP_IN_PERIPH_ID, 0); + MCDI_SET_QWORD(inadj, PTP_IN_ADJUST_FREQ, adjustment); + MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_SECONDS, 0); + MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_NANOSECONDS, 0); + rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inadj, sizeof(inadj), + NULL, 0, NULL); + if (rc != 0) + return rc; + + ptp_data->current_adjfreq = adjustment; + return 0; +} + +/* Convert ppb value, clamped to +/- 10^9, to FP16 ppm. */ +static s64 ppb_to_ppm_fp16(s64 ppb) +{ + ppb = clamp_val(ppb, -ONE_BILLION, ONE_BILLION); + + /* Multiply by floor(2^41 / 1000) and shift right by 41 - 16). */ + return (ppb * 2199023255LL) >> (41 - 16); +} + +#if defined(EFX_NOT_UPSTREAM) || (defined(EFX_USE_KCOMPAT) && \ + defined(EFX_HAVE_PHC_SUPPORT) && \ + defined(EFX_HAVE_PTP_CLOCK_INFO_ADJFREQ) && \ + !defined(EFX_HAVE_PTP_CLOCK_INFO_ADJFINE)) +static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 ppb) +{ + return efx_phc_adjfine(ptp, ppb_to_ppm_fp16(ppb)); +} +#endif + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_PHC_SUPPORT) +static const struct ptp_clock_info efx_phc_clock_info = { + .owner = THIS_MODULE, + .name = "sfc", + .max_adj = MAX_PPB, /* unused, ptp_data->max_adjfreq used instead */ + .n_alarm = 0, +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_PTP_PF_NONE) + .n_ext_ts = 1, + .n_pins = 1, +#else + .n_ext_ts = 0, +#endif + .n_per_out = 0, + .pps = 1, +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_PTP_CLOCK_INFO_ADJFINE) + .adjfine = efx_phc_adjfine, +#elif defined(EFX_USE_KCOMPAT) && defined(EFX_HAVE_PTP_CLOCK_INFO_ADJFREQ) + .adjfreq = efx_phc_adjfreq, +#endif + .adjtime = efx_phc_adjtime, +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_USE_64BIT_PHC) + .gettime = efx_phc_gettime32, + .settime = efx_phc_settime32, +#else + .gettime64 = efx_phc_gettime, + .settime64 = efx_phc_settime, +#endif +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_PTP_CLOCK_GETTIMEX64) + .gettimex64 = efx_phc_gettimex64, +#endif +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_PTP_GETCROSSTSTAMP) + .getcrosststamp = efx_phc_getcrosststamp, +#endif + .enable = efx_phc_enable, +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_PTP_PF_NONE) + .verify = efx_phc_verify, +#endif +}; +#endif + +#ifdef EFX_NOT_UPSTREAM +static int efx_create_pps_worker(struct efx_ptp_data *ptp) +{ + char busdevice[11]; + + snprintf(busdevice, sizeof(busdevice), "%04x:%02x:%02x", + pci_domain_nr(ptp->efx->pci_dev->bus), + ptp->efx->pci_dev->bus->number, + ptp->efx->pci_dev->devfn); + + INIT_WORK(&ptp->pps_work, efx_ptp_pps_worker); +#if defined(EFX_NOT_UPSTREAM) + ptp->pps_workwq = efx_alloc_workqueue("sfc_pps_%s", WQ_UNBOUND | + WQ_MEM_RECLAIM | WQ_SYSFS, 1, + busdevice); +#else + ptp->pps_workwq = alloc_workqueue("sfc_pps_%s", WQ_UNBOUND | + WQ_MEM_RECLAIM | 
WQ_SYSFS, 1,
+					  busdevice);
+#endif
+	if (!ptp->pps_workwq)
+		return -ENOMEM;
+	return 0;
+}
+#endif
+
+bool efx_ptp_uses_separate_channel(struct efx_nic *efx)
+{
+	return efx->ptp_capability &
+	       (1 << MC_CMD_PTP_OUT_GET_ATTRIBUTES_RX_TSTAMP_OOB_LBN);
+}
+
+/* Find PTP data of this adapter's PHC or add its own to the list. */
+static void efx_associate_phc(struct efx_nic *efx)
+{
+	struct efx_ptp_data *other, *next;
+	bool associated = false;
+
+	EFX_WARN_ON_PARANOID(efx->phc_ptp_data);
+	spin_lock(&ptp_all_phcs_list_lock);
+
+	list_for_each_entry_safe(other, next, &efx_all_phcs_list,
+				 node_ptp_all_phcs) {
+		EFX_WARN_ON_PARANOID(other == efx->ptp_data);
+		if (ether_addr_equal(other->adapter_base_addr,
+				     efx->adapter_base_addr)) {
+			efx->phc_ptp_data = other;
+			kref_get(&other->kref);
+			goto out;
+		}
+	}
+
+	efx->phc_ptp_data = efx->ptp_data;
+	list_add(&efx->phc_ptp_data->node_ptp_all_phcs, &efx_all_phcs_list);
+	associated = true;
+
+out:
+	spin_unlock(&ptp_all_phcs_list_lock);
+
+	if (associated && !((efx->mcdi->fn_flags) &
+			    (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY))) {
+		netif_warn(efx, hw, efx->net_dev,
+			   "This function was initialised before the primary function (PCI function 0).\n");
+		netif_warn(efx, hw, efx->net_dev,
+			   "PTP may not work if this function is passed through to a VM.\n");
+		netif_warn(efx, hw, efx->net_dev,
+			   "Manually rebind the PCI functions such that function 0 binds first.\n");
+	}
+}
+
+/* Release reference to phc_ptp_data or remove own ptp_data from list. */
+static void efx_dissociate_phc(struct efx_nic *efx)
+{
+	if (!efx->phc_ptp_data)
+		return;
+
+	if (efx->ptp_data == efx->phc_ptp_data) {
+		spin_lock(&ptp_all_phcs_list_lock);
+		list_del(&efx->ptp_data->node_ptp_all_phcs);
+		spin_unlock(&ptp_all_phcs_list_lock);
+	} else {
+		kref_put(&efx->phc_ptp_data->kref, efx_ptp_delete_data);
+		efx->phc_ptp_data = NULL;
+	}
+}
+
+static int efx_ptp_probe_post_io(struct efx_nic *efx)
+{
+	unsigned int __maybe_unused pos;
+	struct efx_ptp_data *ptp;
+#if !defined(EFX_USE_KCOMPAT) || (defined(EFX_HAVE_PTP_PF_NONE) && defined(EFX_HAVE_PHC_SUPPORT))
+	struct ptp_pin_desc *ppd;
+#endif
+#ifdef EFX_NOT_UPSTREAM
+	bool pps_ok;
+#endif
+	int rc = 0;
+
+	ptp = kzalloc(sizeof(struct efx_ptp_data), GFP_KERNEL);
+	if (!ptp)
+		return -ENOMEM;
+	efx->ptp_data = ptp;
+
+	rc = efx_mcdi_get_board_cfg(efx, 0, efx->adapter_base_addr, NULL,
+				    NULL);
+	if (rc < 0)
+		goto fail1;
+
+	efx_associate_phc(efx);
+
+	ptp->efx = efx;
+	ether_addr_copy(ptp->adapter_base_addr, efx->adapter_base_addr);
+	ptp->ifindex = efx->net_dev->ifindex;
+	kref_init(&ptp->kref);
+
+	rc = efx_nic_alloc_buffer(efx, &ptp->start, sizeof(int), GFP_KERNEL);
+	if (rc != 0)
+		goto fail2;
+
+#ifdef CONFIG_DEBUG_FS
+	for (pos = 0; pos < (MC_CMD_PTP_OUT_STATUS_LEN / sizeof(u32)); pos++)
+		ptp->mc_stats[pos] = pos;
+#endif
+	rc = efx_extend_debugfs_port(efx, ptp, 0, efx_debugfs_ptp_parameters);
+	if (rc < 0)
+		goto fail3;
+
+	INIT_WORK(&ptp->work, efx_ptp_worker);
+	INIT_DELAYED_WORK(&ptp->cleanup_work, efx_ptp_cleanup_worker);
+
+	ptp->config.flags = 0;
+	ptp->config.tx_type = HWTSTAMP_TX_OFF;
+	ptp->config.rx_filter = HWTSTAMP_FILTER_NONE;
+
+	mutex_init(&ptp->rxfilters_lock);
+	INIT_LIST_HEAD(&ptp->rxfilters_mcast);
+	INIT_LIST_HEAD(&ptp->rxfilters_ucast);
+
+	/* Get the NIC PTP attributes and set up time conversions */
+	rc = efx_ptp_get_attributes(efx);
+	if (rc < 0)
+		goto fail4;
+
+	/* Get the timestamp corrections */
+	rc = efx_ptp_get_timestamp_corrections(efx);
+	if (rc < 0)
+		goto fail4;
+
+	/* Set the NIC clock maximum
frequency adjustment */ + /* TODO: add MCDI call to get this value from the NIC */ + ptp->max_adjfreq = MAX_PPB; + ptp->max_adjfine = ppb_to_ppm_fp16(ptp->max_adjfreq); + +#ifdef EFX_NOT_UPSTREAM + pps_ok = efx_is_pps_possible(efx); +#endif +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_PHC_SUPPORT) + if (efx_phc_exposed(efx)) { + ptp->phc_clock_info = efx_phc_clock_info; + ptp->phc_clock_info.max_adj = ptp->max_adjfreq; +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_PTP_PF_NONE) + ppd = &ptp->pin_config[0]; + snprintf(ppd->name, sizeof(ppd->name), "pps0"); + ppd->index = 0; + ppd->func = PTP_PF_EXTTS; + ptp->phc_clock_info.pin_config = ptp->pin_config; +#endif + ptp->phc_clock = ptp_clock_register(&ptp->phc_clock_info, + &efx->pci_dev->dev); + if (IS_ERR(ptp->phc_clock)) { + rc = PTR_ERR(ptp->phc_clock); + goto fail4; + } + kref_get(&ptp->kref); +#ifdef EFX_NOT_UPSTREAM + rc = pps_ok ? efx_create_pps_worker(ptp) : 0; + if (rc < 0) + goto fail5; +#endif + } + ptp->usr_evt_enabled = 0; +#else +#ifdef EFX_NOT_UPSTREAM + rc = pps_ok ? efx_create_pps_worker(ptp) : 0; + if (rc < 0) + goto fail4; +#endif +#endif + +#ifdef EFX_NOT_UPSTREAM + rc = pps_ok ? efx_ptp_create_pps(ptp) : 0; + if (rc < 0) + goto fail5; +#ifdef CONFIG_DEBUG_FS + ptp->min_sync_delta = UINT_MAX; + ptp->sync_window_min = INT_MAX; + ptp->sync_window_max = INT_MIN; + ptp->corrected_sync_window_min = INT_MAX; + ptp->corrected_sync_window_max = INT_MIN; + + rc = device_create_file(&efx->pci_dev->dev, + &dev_attr_ptp_stats); + if (rc < 0) + goto fail7; +#endif + + rc = device_create_file(&efx->pci_dev->dev, &dev_attr_max_adjfreq); + if (rc < 0) + goto fail8; +#endif /* EFX_NOT_UPSTREAM */ + + return 0; +#ifdef EFX_NOT_UPSTREAM +fail8: +#ifdef CONFIG_DEBUG_FS + device_remove_file(&efx->pci_dev->dev, &dev_attr_ptp_stats); +fail7: +#endif + if (pps_ok) + efx_ptp_destroy_pps(ptp); +fail5: +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_PHC_SUPPORT) + if (ptp->pps_workwq) + destroy_workqueue(ptp->pps_workwq); +#endif +#endif /* EFX_NOT_UPSTREAM */ +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_PHC_SUPPORT) + if (efx_phc_exposed(efx)) + kref_put(&ptp->kref, efx_ptp_delete_data); + if (ptp->phc_clock) + ptp_clock_unregister(ptp->phc_clock); +#endif + +fail4: + efx_trim_debugfs_port(efx, efx_debugfs_ptp_parameters); +fail3: + efx_nic_free_buffer(efx, &ptp->start); +fail2: + efx_dissociate_phc(efx); +fail1: + efx->ptp_data = NULL; + kfree(ptp); + return rc; +} + +/* Initialise PTP state. */ +int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel) +{ + struct efx_ptp_data *ptp = efx->ptp_data; + + /* No PTP data? if efx_ptp_probe_post_io didn't complain, it means the + * adapter doesn't have PTP. + */ + if (!ptp) + return 0; + + ptp->channel = channel; + + skb_queue_head_init(&ptp->rxq); + skb_queue_head_init(&ptp->txq); + ptp->workwq = create_singlethread_workqueue("sfc_ptp"); + if (!ptp->workwq) + return -ENOMEM; + + if (efx_ptp_use_mac_tx_timestamps(efx)) { + ptp->xmit_skb = efx_ptp_xmit_skb_queue; + /* Request sync events on this channel. */ + channel->sync_events_state = SYNC_EVENTS_QUIESCENT; + } else { + ptp->xmit_skb = efx_ptp_xmit_skb_mc; + } + + return 0; +} + +/* Initialise PTP channel. + * + * Setting core_index to zero causes the queue to be initialised and doesn't + * overlap with 'rxq0' because ptp.c doesn't use skb_record_rx_queue. 
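+ *
+ * Interrupt moderation is also set to zero on this channel (see below),
+ * keeping latency on the timestamping path low.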
+ */ +static int efx_ptp_probe_channel(struct efx_channel *channel) +{ + struct efx_nic *efx = channel->efx; + + channel->irq_moderation_us = 0; + channel->rx_queue.core_index = 0; + + return efx_ptp_probe(efx, channel); +} + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_PHC_SUPPORT) || defined(EFX_NOT_UPSTREAM) +static void efx_ptp_remove_pps_workqueue(struct efx_ptp_data *ptp_data) +{ + struct workqueue_struct *pps_workwq = ptp_data->pps_workwq; + ptp_data->pps_workwq = NULL; /* tells worker to do nothing */ + + if (!pps_workwq) + return; + + cancel_work_sync(&ptp_data->pps_work); + + destroy_workqueue(pps_workwq); +} +#else +static inline void efx_ptp_remove_pps_workqueue(struct efx_ptp_data *ptp_data) +{ + /* nothing to do */ +} +#endif + +void efx_ptp_remove_post_io(struct efx_nic *efx) +{ + struct pci_dev __maybe_unused *pci_dev = efx->pci_dev; + struct efx_ptp_data *ptp_data = efx->ptp_data; + + /* ensure that the work queues are canceled and destroyed only once. */ + if (!ptp_data) + return; + + (void)efx_ptp_disable(efx); + + efx_ptp_remove_filters(efx, &ptp_data->rxfilters_mcast); + efx_ptp_remove_filters(efx, &ptp_data->rxfilters_ucast); +#if defined(EFX_NOT_UPSTREAM) + efx_ptp_destroy_pps(ptp_data); +#endif + +#ifdef EFX_NOT_UPSTREAM + device_remove_file(&pci_dev->dev, &dev_attr_max_adjfreq); +#endif +#ifdef CONFIG_DEBUG_FS + device_remove_file(&pci_dev->dev, &dev_attr_ptp_stats); +#endif + efx_ptp_remove_pps_workqueue(ptp_data); +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_PHC_SUPPORT) + if (ptp_data->phc_clock) + ptp_clock_unregister(ptp_data->phc_clock); +#endif + + efx_dissociate_phc(efx); + efx_nic_free_buffer(efx, &ptp_data->start); + efx_trim_debugfs_port(efx, efx_debugfs_ptp_parameters); + + efx->ptp_data = NULL; + ptp_data->efx = NULL; +} + +void efx_ptp_remove(struct efx_nic *efx) +{ + struct efx_ptp_data *ptp_data = efx->ptp_data; + + /* ensure that the work queues are canceled and destroyed only once. + * Use the workwq pointer to track this. + */ + if (!ptp_data || !ptp_data->workwq) + return; + + cancel_work_sync(&ptp_data->work); + cancel_delayed_work_sync(&ptp_data->cleanup_work); + + skb_queue_purge(&ptp_data->rxq); + skb_queue_purge(&ptp_data->txq); + + destroy_workqueue(ptp_data->workwq); + ptp_data->workwq = NULL; +} + +static void efx_ptp_remove_channel(struct efx_channel *channel) +{ + efx_ptp_remove(channel->efx); +} + +static void efx_ptp_get_channel_name(struct efx_channel *channel, + char *buf, size_t len) +{ + snprintf(buf, len, "%s-ptp", channel->efx->name); +} + +/* Determine whether this packet should be processed by the PTP module + * or transmitted conventionally. + */ +bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb) +{ + return efx->ptp_data && + efx->ptp_data->enabled && + skb->len >= PTP_MIN_LENGTH && + skb->len <= MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM && + likely(skb->protocol == htons(ETH_P_IP)) && + skb_transport_header_was_set(skb) && + skb_network_header_len(skb) >= sizeof(struct iphdr) && + ip_hdr(skb)->protocol == IPPROTO_UDP && + skb_headlen(skb) >= + skb_transport_offset(skb) + sizeof(struct udphdr) && + udp_hdr(skb)->dest == htons(PTP_EVENT_PORT); +} + +/* Receive a PTP packet. Packets are queued until the arrival of + * the receive timestamp from the MC - this will probably occur after the + * packet arrival because of the processing in the MC. 
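+ * Queued packets are matched up with their timestamps by the worker using
+ * the UDP sequence number, which sits at the same offset for PTPv1 and
+ * PTPv2 (asserted by the BUILD_BUG_ON()s below).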
+ */ +static bool efx_ptp_rx(struct efx_rx_queue *rx_queue, struct sk_buff *skb) +{ + struct efx_nic *efx = rx_queue->efx; + struct efx_ptp_data *ptp = efx->ptp_data; + struct efx_ptp_match *match = (struct efx_ptp_match *)skb->cb; + unsigned int version; + u8 *data = skb->data; + + match->expiry = jiffies + msecs_to_jiffies(PKT_EVENT_LIFETIME_MS); + +#if defined(EFX_NOT_UPSTREAM) && defined(EFX_USE_FAKE_VLAN_RX_ACCEL) + /* All vlan info is provided by the "hardware" in this case. */ + match->vlan_tagged = !vlan_get_tag(skb, &match->vlan_tci); +#else + /* In this case, software accounts for lack of receive vlan acceleration */ + if (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX) + match->vlan_tagged = !vlan_get_tag(skb, &match->vlan_tci); + else if ((skb->protocol == htons(ETH_P_8021Q) || + skb->protocol == htons(ETH_P_8021AD)) && pskb_may_pull(skb, VLAN_HLEN)) { + /* required because of the fixed offsets used below */ + data += VLAN_HLEN; + } +#endif + + /* catch up with skb->data if there's a VLAN tag present */ + if (match->vlan_tagged) + data += VLAN_HLEN; + + /* Correct version? */ + if (ptp->mode == MC_CMD_PTP_MODE_V1) { + if (!pskb_may_pull(skb, PTP_V1_MIN_LENGTH)) + return false; + version = ntohs(*(__be16 *)&data[PTP_V1_VERSION_OFFSET]); + if (version != PTP_VERSION_V1) { + return false; + } + } else { + if (!pskb_may_pull(skb, PTP_V2_MIN_LENGTH)) + return false; + version = data[PTP_V2_VERSION_OFFSET]; + if ((version & PTP_VERSION_V2_MASK) != PTP_VERSION_V2) { + return false; + } + } + + /* Does this packet require timestamping? */ + if (ntohs(*(__be16 *)&data[PTP_DPORT_OFFSET]) == PTP_EVENT_PORT) { + match->state = PTP_PACKET_STATE_UNMATCHED; + + /* We expect the sequence number to be in the same position in + * the packet for PTP V1 and V2 + */ + BUILD_BUG_ON(PTP_V1_SEQUENCE_OFFSET != PTP_V2_SEQUENCE_OFFSET); + BUILD_BUG_ON(PTP_V1_SEQUENCE_LENGTH != PTP_V2_SEQUENCE_LENGTH); + } else { + match->state = PTP_PACKET_STATE_MATCH_UNWANTED; + } + + skb_queue_tail(&ptp->rxq, skb); + queue_work(ptp->workwq, &ptp->work); + + return true; +} + +/* Transmit a PTP packet. This has to be transmitted by the MC + * itself, through an MCDI call. MCDI calls aren't permitted + * in the transmit path so defer the actual transmission to a suitable worker. + */ +int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb) +{ + struct efx_ptp_data *ptp = efx->ptp_data; + + skb_queue_tail(&ptp->txq, skb); + efx_xmit_hwtstamp_pending(skb); + queue_work(ptp->workwq, &ptp->work); + + return NETDEV_TX_OK; +} + +int efx_ptp_get_mode(struct efx_nic *efx) +{ + return efx->ptp_data->mode; +} + +int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted, + unsigned int new_mode) +{ + int rc = 0; + + /* If we are being asked to disable PTP we always disable it. + * Otherwise, carry out the enable request unless we are already + * enabled and the mode isn't changing. + */ + if (!enable_wanted) { + rc = efx_ptp_stop(efx); + } else if (!efx->ptp_data->enabled || + (efx->ptp_data->mode != new_mode)) { + /* We need to disable PTP to change modes */ + if (efx->ptp_data->enabled) { + efx->ptp_data->enabled = false; + rc = efx_ptp_stop(efx); + if (rc != 0) + return rc; + } + + /* Set new operating mode and establish baseline + * synchronisation, which must succeed. + */ + efx->ptp_data->mode = new_mode; + if (netif_running(efx->net_dev)) +#ifdef EFX_NOT_UPSTREAM +#if IS_MODULE(CONFIG_SFC_DRIVERLINK) + { + /* if it exists then we *actually* want to check + * open_count. 
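+	 * (The braces around this comment deliberately turn the
+	 * netif_running() test above into a no-op, so that the open_count
+	 * check below takes its place when the driverlink module is built.)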
+ */ + } + if (efx->open_count) +#endif +#endif + rc = efx_ptp_start(efx); + if (rc == 0) { + rc = efx_ptp_synchronize(efx, PTP_SYNC_ATTEMPTS * 2); + if (rc != 0) + efx_ptp_stop(efx); + } + } + + if (rc != 0) + return rc; + + efx->ptp_data->enabled = enable_wanted; + return 0; +} + +static int efx_ptp_ts_init(struct efx_nic *efx, struct hwtstamp_config *init) +{ + int rc; + +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_HWTSTAMP_FLAGS) + if (init->flags) + return -EINVAL; +#endif + + if ((init->tx_type != HWTSTAMP_TX_OFF) && + (init->tx_type != HWTSTAMP_TX_ON)) + return -ERANGE; + + rc = efx->type->ptp_set_ts_config(efx, init); + if (rc) + return rc; + + efx->ptp_data->config = *init; + return 0; +} + +void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info) +{ + struct efx_ptp_data *phc_ptp = efx->phc_ptp_data; + struct efx_ptp_data *ptp = efx->ptp_data; + + ASSERT_RTNL(); + + if (!ptp) + return; + + ts_info->so_timestamping |= (SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE); +#ifdef EFX_NOT_UPSTREAM + ts_info->so_timestamping |= SOF_TIMESTAMPING_SYS_HARDWARE; +#endif + /* Check licensed features. */ + if (efx_ptp_use_mac_tx_timestamps(efx)) { + struct efx_ef10_nic_data *nic_data = efx->nic_data; + + if (!(nic_data->licensed_features & + (1 << LICENSED_V3_FEATURES_TX_TIMESTAMPS_LBN))) + ts_info->so_timestamping &= + ~SOF_TIMESTAMPING_TX_HARDWARE; + } +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_PHC_SUPPORT) + if (ptp->phc_clock) + ts_info->phc_index = ptp_clock_index(ptp->phc_clock); + else if (phc_ptp && phc_ptp->phc_clock) + ts_info->phc_index = ptp_clock_index(phc_ptp->phc_clock); +#else + /* Use phc_ptp's ifindex as a fake clock index */ + if (phc_ptp) + ts_info->phc_index = phc_ptp->ifindex; +#endif + ts_info->tx_types = 1 << HWTSTAMP_TX_OFF | 1 << HWTSTAMP_TX_ON; + ts_info->rx_filters = ptp->efx->type->hwtstamp_filters; +} + +int efx_ptp_set_ts_config(struct efx_nic *efx, struct ifreq *ifr) +{ + struct hwtstamp_config config; + int rc; + + /* Not a PTP enabled port */ + if (!efx->ptp_data) + return -EOPNOTSUPP; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + rc = efx_ptp_ts_init(efx, &config); + if (rc != 0) + return rc; + + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) + ? -EFAULT : 0; +} + +int efx_ptp_get_ts_config(struct efx_nic *efx, struct ifreq *ifr) +{ + if (!efx->ptp_data) + return -EOPNOTSUPP; + + return copy_to_user(ifr->ifr_data, &efx->ptp_data->config, + sizeof(efx->ptp_data->config)) ? 
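-EFAULT : 0;
+}
+
+/* Usage sketch for the two handlers above (userspace, not part of this
+ * driver; the socket fd and interface name "eth0" are placeholders).
+ * Hardware timestamping is switched on with the standard SIOCSHWTSTAMP
+ * ioctl, which lands in efx_ptp_set_ts_config():
+ *
+ *	struct hwtstamp_config cfg = { 0 };
+ *	struct ifreq ifr = { 0 };
+ *
+ *	cfg.tx_type = HWTSTAMP_TX_ON;
+ *	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
+ *	ifr.ifr_data = (void *)&cfg;
+ *	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) != 0)
+ *		perror("SIOCSHWTSTAMP");
+ *
+ * On success cfg is written back with the configuration actually
+ * programmed, which may use a wider RX filter than the one requested.
+ */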
+
+#if defined(EFX_NOT_UPSTREAM)
+static struct ptp_clock_info *efx_ptp_clock_info(struct efx_nic *efx)
+{
+	struct ptp_clock_info *phc_clock_info;
+
+	if (!efx->ptp_data)
+		return NULL;
+
+	/* If another function owns this adapter's PHC, operate on the
+	 * owner's clock info so that clock operations reach the PHC.
+	 */
+	phc_clock_info = &efx->ptp_data->phc_clock_info;
+	if (efx->phc_ptp_data && efx->ptp_data != efx->phc_ptp_data)
+		phc_clock_info = &efx->phc_ptp_data->phc_clock_info;
+
+	return phc_clock_info;
+}
+
+int efx_ptp_ts_settime(struct efx_nic *efx, struct efx_ts_settime *settime)
+{
+	struct ptp_clock_info *phc_clock_info = efx_ptp_clock_info(efx);
+	int ret;
+	struct timespec64 ts;
+	s64 delta;
+
+	if (!phc_clock_info)
+		return -ENOTTY;
+
+	ts.tv_sec = settime->ts.tv_sec;
+	ts.tv_nsec = settime->ts.tv_nsec;
+
+	if (settime->iswrite) {
+		delta = timespec64_to_ns(&ts);
+
+		return efx_phc_adjtime(phc_clock_info, delta);
+	} else {
+		ret = efx_phc_gettime(phc_clock_info, &ts);
+		if (!ret) {
+			settime->ts.tv_sec = ts.tv_sec;
+			settime->ts.tv_nsec = ts.tv_nsec;
+		}
+		return ret;
+	}
+}
+
+int efx_ptp_ts_adjtime(struct efx_nic *efx, struct efx_ts_adjtime *adjtime)
+{
+	struct ptp_clock_info *phc_clock_info = efx_ptp_clock_info(efx);
+
+	if (!phc_clock_info)
+		return -ENOTTY;
+
+	return efx_phc_adjfreq(phc_clock_info, adjtime->adjustment);
+}
+
+int efx_ptp_ts_sync(struct efx_nic *efx, struct efx_ts_sync *sync)
+{
+	int rc;
+
+	if (!efx->ptp_data)
+		return -ENOTTY;
+
+	rc = efx_ptp_synchronize(efx, PTP_SYNC_ATTEMPTS);
+	if (rc == 0) {
+		sync->ts.tv_sec = efx->ptp_data->last_delta.tv_sec;
+		sync->ts.tv_nsec = efx->ptp_data->last_delta.tv_nsec;
+	}
+	return rc;
+}
+
+int efx_ptp_ts_set_sync_status(struct efx_nic *efx,
+			       struct efx_ts_set_sync_status *status)
+{
+	MCDI_DECLARE_BUF(mcdi_req, MC_CMD_PTP_IN_SET_SYNC_STATUS_LEN);
+	u32 flag;
+	int rc;
+
+	if (!efx->ptp_data)
+		return -ENOTTY;
+
+	if (!(efx->ptp_data->capabilities &
+	      (1 << MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_LBN)))
+		return -EOPNOTSUPP;
+
+	if (status->in_sync != 0)
+		flag = MC_CMD_PTP_IN_SET_SYNC_STATUS_IN_SYNC;
+	else
+		flag = MC_CMD_PTP_IN_SET_SYNC_STATUS_NOT_IN_SYNC;
+
+	MCDI_SET_DWORD(mcdi_req, PTP_IN_OP, MC_CMD_PTP_OP_SET_SYNC_STATUS);
+	MCDI_SET_DWORD(mcdi_req, PTP_IN_PERIPH_ID, 0);
+	MCDI_SET_DWORD(mcdi_req, PTP_IN_SET_SYNC_STATUS_STATUS, flag);
+	MCDI_SET_DWORD(mcdi_req, PTP_IN_SET_SYNC_STATUS_TIMEOUT,
+		       status->timeout);
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_PTP, mcdi_req, sizeof(mcdi_req),
+			  NULL, 0, NULL);
+	return rc;
+}
+#endif
+
+static void ptp_event_failure(struct efx_nic *efx, int expected_frag_len)
+{
+	struct efx_ptp_data *ptp = efx->ptp_data;
+
+	netif_err(efx, hw, efx->net_dev,
+		  "PTP unexpected event length: got %d expected %d\n",
+		  ptp->evt_frag_idx, expected_frag_len);
+	ptp->reset_required = true;
+	queue_work(ptp->workwq, &ptp->work);
+}
+
+static void ptp_event_fault(struct efx_nic *efx, struct efx_ptp_data *ptp)
+{
+	int code = EFX_QWORD_FIELD(ptp->evt_frags[0], MCDI_EVENT_DATA);
+
+	if (ptp->evt_frag_idx != 1) {
+		ptp_event_failure(efx, 1);
+		return;
+	}
+
+	netif_err(efx, hw, efx->net_dev, "PTP error %d\n", code);
+}
+
+static void ptp_event_pps(struct efx_nic *efx, struct efx_ptp_data *ptp)
+{
+	if (efx && ptp->pps_workwq)
+		queue_work(ptp->pps_workwq, &ptp->pps_work);
+}
+
+#if defined(EFX_NOT_UPSTREAM)
+static void hw_pps_event_pps(struct efx_nic *efx, struct efx_ptp_data *ptp)
+{
+	struct efx_pps_data *pps = efx->ptp_data->pps_data;
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_PHC_SUPPORT)
+	struct ptp_clock_event ptp_evt;
+#endif
+
+	pps->n_assert = ptp->nic_to_kernel_time(
+
EFX_QWORD_FIELD(ptp->evt_frags[0], MCDI_EVENT_DATA), + EFX_QWORD_FIELD(ptp->evt_frags[1], MCDI_EVENT_DATA), + ptp->ts_corrections.pps_in); + + if (pps->nic_hw_pps_enabled) { + pps->s_assert = timespec64_sub( + ktime_to_timespec64(pps->n_assert), + pps->ptp->last_delta); + pps->s_delta = pps->ptp->last_delta; + pps->last_ev++; + + if (waitqueue_active(&pps->read_data)) + wake_up(&pps->read_data); + } + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_PHC_SUPPORT) + if (ptp->usr_evt_enabled & (1 << PTP_CLK_REQ_EXTTS)) { + struct efx_pps_data *pps = ptp->pps_data; + + ptp_evt.type = PTP_CLOCK_EXTTS; + ptp_evt.index = 0; + ptp_evt.timestamp = ktime_to_ns(pps->n_assert); + ptp_clock_event(ptp->phc_clock, &ptp_evt); + } +#endif +} +#endif + +static bool efx_ptp_warn_once(struct efx_nic *efx) +{ + if (efx->ptp_unavailable_warned) + return false; + efx->ptp_unavailable_warned = true; + return true; +} + +void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev) +{ + int code = EFX_QWORD_FIELD(*ev, MCDI_EVENT_CODE); + struct efx_ptp_data *ptp = efx->phc_ptp_data; + + if (!ptp) { + if (efx_ptp_warn_once(efx)) + netif_warn(efx, drv, efx->net_dev, + "Received PTP event (code %d) but PTP not set up\n", + code); + return; + } else if (!ptp->efx) + return; /* PTP data is being removed, ignore */ + + if (ptp->evt_frag_idx == 0) { + ptp->evt_code = code; + } else if (ptp->evt_code != code) { + netif_err(efx, hw, efx->net_dev, + "PTP out of sequence event %d\n", code); + ptp->evt_frag_idx = 0; + } + /* Relay all events to the PF that administers the hardware */ + efx = ptp->efx; + + ptp->evt_frags[ptp->evt_frag_idx++] = *ev; + if (!MCDI_EVENT_FIELD(*ev, CONT)) { + /* Process resulting event */ + switch (code) { + case MCDI_EVENT_CODE_PTP_FAULT: + ptp_event_fault(efx, ptp); + break; + case MCDI_EVENT_CODE_PTP_PPS: + ptp_event_pps(efx, ptp); + break; +#if defined(EFX_NOT_UPSTREAM) + case MCDI_EVENT_CODE_HW_PPS: + hw_pps_event_pps(efx, ptp); + break; +#endif + default: + netif_err(efx, hw, efx->net_dev, + "PTP unknown event %d\n", code); + break; + } + ptp->evt_frag_idx = 0; + } else if (MAX_EVENT_FRAGS == ptp->evt_frag_idx) { + netif_err(efx, hw, efx->net_dev, + "PTP too many event fragments\n"); + ptp->evt_frag_idx = 0; + } +} + +void efx_time_sync_event(struct efx_channel *channel, efx_qword_t *ev) +{ + struct efx_nic *efx = channel->efx; + struct efx_ptp_data *ptp = efx->ptp_data; + + /* When extracting the sync timestamp minor value, we should discard + * the least significant two bits. These are not required in order + * to reconstruct full-range timestamps and they are optionally used + * to report status depending on the options supplied when subscribing + * for sync events. + */ + channel->sync_timestamp_major = MCDI_EVENT_FIELD(*ev, PTP_TIME_MAJOR); + channel->sync_timestamp_minor = + (MCDI_EVENT_FIELD(*ev, PTP_TIME_MINOR_MS_8BITS) & 0xFC) + << ptp->nic_time.sync_event_minor_shift; + + /* if sync events have been disabled then we want to silently ignore + * this event, so throw away result. 
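+	 * (The cmpxchg() below only moves SYNC_EVENTS_REQUESTED to
+	 * SYNC_EVENTS_VALID; any other state, e.g. after sync events have
+	 * been disabled, is left untouched.)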
+ */ + (void) cmpxchg(&channel->sync_events_state, SYNC_EVENTS_REQUESTED, + SYNC_EVENTS_VALID); +} + +static inline u32 efx_rx_buf_timestamp_minor(struct efx_nic *efx, + const u8 *prefix) +{ +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) + return __le32_to_cpup((const __le32 *)(prefix + + efx->type->rx_ts_offset)); +#else + const u8 *data = prefix + efx->type->rx_ts_offset; + return (u32)data[0] | + (u32)data[1] << 8 | + (u32)data[2] << 16 | + (u32)data[3] << 24; +#endif +} + +void __efx_rx_skb_attach_timestamp(struct efx_channel *channel, + struct sk_buff *skb, + const u8 *prefix) +{ + struct efx_nic *efx = channel->efx; + struct efx_ptp_data *ptp = efx->ptp_data; + u32 pkt_timestamp_major, pkt_timestamp_minor; + u32 diff, carry; + struct skb_shared_hwtstamps *timestamps; + + // TODO do we need to check if ptp is null? + + if (channel->sync_events_state != SYNC_EVENTS_VALID) + return; + + pkt_timestamp_minor = efx_rx_buf_timestamp_minor(efx, prefix); + + /* get the difference between the packet and sync timestamps, + * modulo one second + */ + diff = pkt_timestamp_minor - channel->sync_timestamp_minor; + if (pkt_timestamp_minor < channel->sync_timestamp_minor) + diff += ptp->nic_time.minor_max; + + /* do we roll over a second boundary and need to carry the one? */ + carry = (channel->sync_timestamp_minor >= ptp->nic_time.minor_max - diff) ? + 1 : 0; + + if (diff <= ptp->nic_time.sync_event_diff_max) { + /* packet is ahead of the sync event by a quarter of a second or + * less (allowing for fuzz) + */ + pkt_timestamp_major = channel->sync_timestamp_major + carry; + } else if (diff >= ptp->nic_time.sync_event_diff_min) { + /* packet is behind the sync event but within the fuzz factor. + * This means the RX packet and sync event crossed as they were + * placed on the event queue, which can sometimes happen. + */ + pkt_timestamp_major = channel->sync_timestamp_major - 1 + carry; + } else { + /* it's outside tolerance in both directions. this might be + * indicative of us missing sync events for some reason, so + * we'll call it an error rather than risk giving a bogus + * timestamp. 
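+		 * As a worked example (figures are illustrative, not taken
+		 * from real hardware): with minor_max = 2^27 ticks per
+		 * second, a sync event at major = 41, minor = 0x7fff000 and
+		 * a packet stamped minor = 0x0000800 gives diff = 0x1800
+		 * modulo one second with carry = 1, so the packet is
+		 * reconstructed as major = 42 provided diff is within
+		 * sync_event_diff_max.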
+ */ + netif_vdbg(efx, drv, efx->net_dev, + "packet timestamp %x too far from sync event %x:%x\n", + pkt_timestamp_minor, channel->sync_timestamp_major, + channel->sync_timestamp_minor); + return; + } + + /* attach the timestamps to the skb */ + timestamps = skb_hwtstamps(skb); + timestamps->hwtstamp = + ptp->nic_to_kernel_time(pkt_timestamp_major, + pkt_timestamp_minor, + ptp->ts_corrections.general_rx); +#if defined(EFX_NOT_UPSTREAM) + /* Note the unusual preprocessor condition: + * - setting syststamp is deprecated, so this is EFX_NOT_UPSTREAM + */ + efx_ptp_get_host_time(efx, timestamps); +#endif +} + +static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + u32 nic_major, nic_minor; + struct efx_ptp_data *ptp_data = container_of(ptp, + struct efx_ptp_data, + phc_clock_info); + struct efx_nic *efx = ptp_data->efx; + MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_ADJUST_LEN); +#if defined(EFX_NOT_UPSTREAM) + int rc; + + ptp_data->last_delta_valid = false; +#endif + + efx->ptp_data->ns_to_nic_time(delta, &nic_major, &nic_minor); + + MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST); + MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); + MCDI_SET_QWORD(inbuf, PTP_IN_ADJUST_FREQ, ptp_data->current_adjfreq); + MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_MAJOR, nic_major); + MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_MINOR, nic_minor); +#if defined(EFX_NOT_UPSTREAM) + rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), NULL, 0, NULL); + + if (!rc) + return rc; + return efx_ptp_synchronize(efx, PTP_SYNC_ATTEMPTS); +#else + return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), + NULL, 0, NULL); +#endif +} + +static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) +{ + struct efx_ptp_data *ptp_data = container_of(ptp, + struct efx_ptp_data, + phc_clock_info); + MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_READ_NIC_TIME_LEN); + MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_READ_NIC_TIME_LEN); + struct efx_nic *efx = ptp_data->efx; + ktime_t kt; + int rc; + + MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_READ_NIC_TIME); + MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); + + rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), NULL); + if (rc != 0) + return rc; + + kt = ptp_data->nic_to_kernel_time( + MCDI_DWORD(outbuf, PTP_OUT_READ_NIC_TIME_MAJOR), + MCDI_DWORD(outbuf, PTP_OUT_READ_NIC_TIME_MINOR), 0); + *ts = ktime_to_timespec64(kt); + return 0; +} + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_PHC_SUPPORT) +static int efx_phc_settime(struct ptp_clock_info *ptp, + const struct timespec64 *e_ts) +{ + /* Get the current NIC time, efx_phc_gettime. 
+ * Subtract from the desired time to get the offset + * call efx_phc_adjtime with the offset + */ + int rc; + struct timespec64 time_now; + struct timespec64 delta; + + rc = efx_phc_gettime(ptp, &time_now); + if (rc != 0) + return rc; + + delta = timespec64_sub(*e_ts, time_now); + + rc = efx_phc_adjtime(ptp, timespec64_to_ns(&delta)); + if (rc != 0) + return rc; + + return 0; +} + +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_USE_64BIT_PHC) +static int efx_phc_settime32(struct ptp_clock_info *ptp, + const struct timespec *ts) +{ + struct timespec64 ts64 = timespec_to_timespec64(*ts); + + return efx_phc_settime(ptp, &ts64); +} + +static int efx_phc_gettime32(struct ptp_clock_info *ptp, + struct timespec *ts) +{ + struct timespec64 ts64 = timespec_to_timespec64(*ts); + int rc; + + rc = efx_phc_gettime(ptp, &ts64); + if (rc == 0) + *ts = timespec64_to_timespec(ts64); + + return rc; +} +#endif +#endif + +static const struct efx_channel_type efx_ptp_channel_type = { + .handle_no_channel = efx_ptp_handle_no_channel, + .pre_probe = efx_ptp_probe_channel, + .post_remove = efx_ptp_remove_channel, + .get_name = efx_ptp_get_channel_name, + .receive_skb = efx_ptp_rx, + .keep_eventq = false, + .hide_tx = true, +}; + +int efx_ptp_defer_probe_with_channel(struct efx_nic *efx) +{ + int rc = 0; + + if (efx_ptp_adapter_has_support(efx)) { + efx->extra_channel_type[EFX_EXTRA_CHANNEL_PTP] = + &efx_ptp_channel_type; + + rc = efx_ptp_probe_post_io(efx); + } + + return rc; +} + +void efx_ptp_start_datapath(struct efx_nic *efx) +{ + if (efx_ptp_restart(efx)) + netif_err(efx, drv, efx->net_dev, "Failed to restart PTP.\n"); + /* re-enable timestamping if it was previously enabled */ + if (efx->type->ptp_set_ts_sync_events) + efx->type->ptp_set_ts_sync_events(efx, true, true); +} + +void efx_ptp_stop_datapath(struct efx_nic *efx) +{ + /* temporarily disable timestamping */ + if (efx->type->ptp_set_ts_sync_events) + efx->type->ptp_set_ts_sync_events(efx, false, true); + efx_ptp_stop(efx); +} diff --git a/src/drivers/net/ethernet/sfc/ptp.h b/src/drivers/net/ethernet/sfc/ptp.h new file mode 100644 index 0000000..3d9cc0a --- /dev/null +++ b/src/drivers/net/ethernet/sfc/ptp.h @@ -0,0 +1,156 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2006-2013 Solarflare Communications Inc. + * Copyright 2019-2022 Xilinx Inc. 
+ */
+
+#ifndef EFX_PTP_H
+#define EFX_PTP_H
+
+#include <linux/net_tstamp.h>
+#include "net_driver.h"
+
+struct ethtool_ts_info;
+#ifdef CONFIG_SFC_PTP
+#if defined(EFX_NOT_UPSTREAM)
+struct efx_ts_settime;
+struct efx_ts_adjtime;
+struct efx_ts_sync;
+struct efx_ts_set_sync_status;
+struct efx_ts_set_vlan_filter;
+struct efx_ts_set_uuid_filter;
+struct efx_ts_set_domain_filter;
+int efx_ptp_ts_settime(struct efx_nic *efx, struct efx_ts_settime *settime);
+int efx_ptp_ts_adjtime(struct efx_nic *efx, struct efx_ts_adjtime *adjtime);
+int efx_ptp_ts_sync(struct efx_nic *efx, struct efx_ts_sync *sync);
+int efx_ptp_ts_set_sync_status(struct efx_nic *efx, struct efx_ts_set_sync_status *status);
+#endif
+void efx_ptp_remove_post_io(struct efx_nic *efx);
+int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel);
+int efx_ptp_defer_probe_with_channel(struct efx_nic *efx);
+struct efx_channel *efx_ptp_channel(struct efx_nic *efx);
+void efx_ptp_remove(struct efx_nic *efx);
+int efx_ptp_set_ts_config(struct efx_nic *efx, struct ifreq *ifr);
+int efx_ptp_get_ts_config(struct efx_nic *efx, struct ifreq *ifr);
+void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info);
+int efx_ptp_get_attributes(struct efx_nic *efx);
+bool efx_ptp_uses_separate_channel(struct efx_nic *efx);
+bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
+int efx_ptp_get_mode(struct efx_nic *efx);
+int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
+			unsigned int new_mode);
+int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
+void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
+size_t efx_ptp_describe_stats(struct efx_nic *efx, u8 *strings);
+void efx_ptp_reset_stats(struct efx_nic *efx);
+size_t efx_ptp_update_stats(struct efx_nic *efx, u64 *stats);
+void efx_time_sync_event(struct efx_channel *channel, efx_qword_t *ev);
+void __efx_rx_skb_attach_timestamp(struct efx_channel *channel,
+				   struct sk_buff *skb,
+				   const u8 *prefix);
+static inline void efx_rx_skb_attach_timestamp(struct efx_channel *channel,
+					       struct sk_buff *skb,
+					       const u8 *prefix)
+{
+	__efx_rx_skb_attach_timestamp(channel, skb, prefix);
+}
+void efx_ptp_start_datapath(struct efx_nic *efx);
+void efx_ptp_stop_datapath(struct efx_nic *efx);
+bool efx_ptp_use_mac_tx_timestamps(struct efx_nic *efx);
+ktime_t efx_ptp_nic_to_kernel_time(struct efx_tx_queue *tx_queue);
+#else
+static inline int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel)
+{
+	return -ENODEV;
+}
+static inline int efx_ptp_defer_probe_with_channel(struct efx_nic *efx)
+{
+	return 0;
+}
+static inline struct efx_channel *efx_ptp_channel(struct efx_nic *efx)
+{
+	return NULL;
+}
+static inline void efx_ptp_remove(struct efx_nic *efx) {}
+static inline int efx_ptp_set_ts_config(struct efx_nic *efx, struct ifreq *ifr)
+{
+	return -EOPNOTSUPP;
+}
+static inline int efx_ptp_get_ts_config(struct efx_nic *efx, struct ifreq *ifr)
+{
+	return -EOPNOTSUPP;
+}
+static inline void efx_ptp_get_ts_info(struct efx_nic *efx,
+				       struct ethtool_ts_info *ts_info)
+{
+}
+static inline int efx_ptp_get_attributes(struct efx_nic *efx)
+{
+	return 0;
+}
+static inline bool efx_ptp_uses_separate_channel(struct efx_nic *efx)
+{
+	return false;
+}
+static inline bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
+{
+	return false;
+}
+static inline int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
+{
+	return NETDEV_TX_OK;
+}
+static inline void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev) {}
+static inline
size_t efx_ptp_describe_stats(struct efx_nic *efx, u8 *strings) +{ + return 0; +} +static inline void efx_ptp_reset_stats(struct efx_nic *efx) {} +static inline size_t efx_ptp_update_stats(struct efx_nic *efx, u64 *stats) +{ + return 0; +} +static inline void efx_rx_skb_attach_timestamp(struct efx_channel *channel, + struct sk_buff *skb, + const u8 *prefix) {} +static inline void efx_time_sync_event(struct efx_channel *channel, + efx_qword_t *ev) {} +static inline void efx_ptp_start_datapath(struct efx_nic *efx) {} +static inline void efx_ptp_stop_datapath(struct efx_nic *efx) {} +static inline bool efx_ptp_use_mac_tx_timestamps(struct efx_nic *efx) +{ + return false; +} +static inline ktime_t efx_ptp_nic_to_kernel_time(struct efx_tx_queue *tx_queue) +{ + return (ktime_t) { 0 }; +} +#endif + +#if defined(EFX_NOT_UPSTREAM) +struct efx_ts_get_pps; +struct efx_ts_hw_pps; +#ifdef CONFIG_SFC_PTP +int efx_ptp_pps_get_event(struct efx_nic *efx, struct efx_ts_get_pps *data); +int efx_ptp_hw_pps_enable(struct efx_nic *efx, bool enable); +int efx_ptp_pps_reset(struct efx_nic *efx); +#else +static inline int efx_ptp_pps_get_event(struct efx_nic *efx, + struct efx_ts_get_pps *data) +{ + return -EOPNOTSUPP; +} +static inline int efx_ptp_hw_pps_enable(struct efx_nic *efx, bool enable) +{ + return -EOPNOTSUPP; +} +static inline int efx_ptp_pps_reset(struct efx_nic *efx) +{ + return -EOPNOTSUPP; +} +#endif +#endif +#endif /* EFX_PTP_H */ diff --git a/src/drivers/net/ethernet/sfc/rx.c b/src/drivers/net/ethernet/sfc/rx.c new file mode 100644 index 0000000..80b1339 --- /dev/null +++ b/src/drivers/net/ethernet/sfc/rx.c @@ -0,0 +1,1274 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2005-2017 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef EFX_NOT_UPSTREAM +#include +#endif +#include "net_driver.h" +#include "efx.h" +#include "rx_common.h" +#include "filter.h" +#include "nic.h" +#include "xdp.h" +#include "selftest.h" +#include "workarounds.h" +#ifdef CONFIG_SFC_TRACING +#include +#endif +#include "mcdi_pcol.h" + +#ifdef EFX_NOT_UPSTREAM +static bool underreport_skb_truesize; +module_param(underreport_skb_truesize, bool, 0444); +MODULE_PARM_DESC(underreport_skb_truesize, "Give false skb truesizes. " + "Debug option to restore previous driver behaviour."); +#endif + +/* Size of headers copied into skb linear data area */ +#define EFX_RX_CB_DEFAULT 192u +static unsigned int rx_cb_size = EFX_RX_CB_DEFAULT; + +#if !defined(EFX_NOT_UPSTREAM) || defined(EFX_RX_PAGE_SHARE) +static void efx_repost_rx_page(struct efx_rx_queue *rx_queue, + struct efx_rx_buffer *rx_buf) +{ + struct efx_nic *efx = rx_queue->efx; + struct page *page = rx_buf->page; + u16 flags; + unsigned int page_offset = 0; + unsigned int fill_level; + unsigned int nr_bufs; + unsigned int space; + + /* We only repost pages that have not been pushed up the stack. 
+ * These are signalled by a non-null rx_buf page field + */ + if (page == NULL) + return; + + /* This indicates broken logic in packet processing functions */ + EFX_WARN_ON_ONCE_PARANOID(rx_queue->rx_pkt_n_frags > 1); + /* Non-recycled page has ended up being marked for reposting. */ + EFX_WARN_ON_ONCE_PARANOID(!(rx_buf->flags & EFX_RX_PAGE_IN_RECYCLE_RING)); + + fill_level = rx_queue->added_count - rx_queue->removed_count; + + /* Note subtle fill_level condition check. By only releasing + * descriptors that are above the fast_fill_trigger threshold + * we avoid alternating between releasing descriptors and + * reposting them for series of small packets. + */ + if (efx->rx_bufs_per_page > 2 || + fill_level > rx_queue->fast_fill_trigger) { + put_page(page); + return; + } + + EFX_WARN_ON_ONCE_PARANOID(efx->rx_bufs_per_page < 1); + EFX_WARN_ON_ONCE_PARANOID(efx->rx_bufs_per_page > 2); + /* repost the first buffer, and the second if there are no refs to it */ + nr_bufs = 1; + if (efx->rx_bufs_per_page == 2) + nr_bufs += page_count(page) < efx->rx_bufs_per_page + 1; + + /* Clamp nr_bufs to the minimum of the number of buffers + * creatable from the page and the number of free slots on the + * RX descriptor ring. + */ + space = 1 + (rx_queue->max_fill - fill_level); + if (space < nr_bufs) + nr_bufs = space; + + /* Page offset calculation assumes maximum 2 buffers per page */ + if (rx_buf->page_offset >= efx->rx_page_buf_step) + page_offset = efx->rx_page_buf_step; + flags = EFX_RX_PAGE_IN_RECYCLE_RING; + flags |= rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE; + + do { + efx_init_rx_buffer(rx_queue, page, page_offset, flags); + rx_queue->page_repost_count++; + + flags ^= EFX_RX_BUF_LAST_IN_PAGE; + page_offset ^= efx->rx_page_buf_step; + + /* We need to bump up the reference count if we're reposting + * two buffers onto the ring + */ + if (nr_bufs > 1) + get_page(page); + } while (--nr_bufs > 0); +} +#endif + +static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue, + struct efx_rx_buffer *rx_buf, + int len) +{ + struct efx_nic *efx = rx_queue->efx; + unsigned int max_len = rx_buf->len - efx->type->rx_buffer_padding; + + if (likely(len <= max_len)) + return; + + /* The packet must be discarded, but this is only a fatal error + * if the caller indicated it was + */ + rx_buf->flags |= EFX_RX_PKT_DISCARD; + + if (net_ratelimit()) + netif_err(efx, rx_err, efx->net_dev, + " RX queue %d overlength RX event " + "(%#x > %#x)\n", + efx_rx_queue_index(rx_queue), len, max_len); + + rx_queue->n_rx_overlength++; +} + +/* Allocate and construct an SKB around page fragments + * + * Note that in the RX copybreak case the rx_buf page may be placed + * onto the RX recycle ring, and efx_init_rx_buffer() will be called. + * If so reset the callers rx_buf so that it is not reused. 
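+ * hdr_len is limited by the rx_copybreak module parameter (rx_cb_size,
+ * applied in efx_rx_deliver() below): at most that many bytes are copied
+ * into the skb linear area, and the remaining payload is attached as page
+ * fragments.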
+ */ +static struct sk_buff *efx_rx_mk_skb(struct efx_rx_queue *rx_queue, + struct efx_rx_buffer **_rx_buf, + unsigned int n_frags, + u8 **ehp, int hdr_len) +{ + struct efx_channel *channel = efx_rx_queue_channel(rx_queue); + struct efx_rx_buffer *rx_buf = *_rx_buf; + struct efx_nic *efx = rx_queue->efx; + struct sk_buff *skb = NULL; + unsigned int data_cp_len = efx->rx_prefix_size + hdr_len; + unsigned int alloc_len = efx->rx_ip_align + efx->rx_prefix_size + + hdr_len; + u8 *new_eh; + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_SOCK) + if (channel->zc) { + data_cp_len = rx_buf->len + efx->rx_prefix_size; + alloc_len = data_cp_len; + } +#endif + /* Allocate an SKB to store the headers */ + skb = netdev_alloc_skb(efx->net_dev, alloc_len); + if (unlikely(skb == NULL)) { + atomic_inc(&efx->n_rx_noskb_drops); + return NULL; + } + + EFX_WARN_ON_ONCE_PARANOID(rx_buf->len < hdr_len); + + memcpy(skb->data + efx->rx_ip_align, *ehp - efx->rx_prefix_size, + data_cp_len); + skb_reserve(skb, efx->rx_ip_align + efx->rx_prefix_size); +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_SOCK) + if (channel->zc) { + __skb_put(skb, rx_buf->len); + goto finalize_skb; + } +#endif + new_eh = skb->data; + __skb_put(skb, hdr_len); + + /* Append the remaining page(s) onto the frag list */ + if (rx_buf->len > hdr_len) { + rx_buf->page_offset += hdr_len; + rx_buf->len -= hdr_len; + + for (;;) { +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_SKB_FRAG_TRUESIZE) + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, + rx_buf->page, rx_buf->page_offset, + rx_buf->len, efx->rx_buffer_truesize); +#else + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, + rx_buf->page, rx_buf->page_offset, + rx_buf->len); +#endif + rx_buf->page = NULL; + if (skb_shinfo(skb)->nr_frags == n_frags) + break; + + rx_buf = efx_rx_buf_next(rx_queue, rx_buf); + } + } else { +#if !defined(EFX_NOT_UPSTREAM) || defined(EFX_RX_PAGE_SHARE) + if (!(rx_buf->flags & EFX_RX_PAGE_IN_RECYCLE_RING)) { +#endif + __free_pages(rx_buf->page, efx->rx_buffer_order); +#if !defined(EFX_NOT_UPSTREAM) || defined(EFX_RX_PAGE_SHARE) + } else { + efx_repost_rx_page(rx_queue, rx_buf); + *_rx_buf = NULL; + } +#endif + *ehp = new_eh; + rx_buf->page = NULL; + n_frags = 0; + } + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_SOCK) +finalize_skb: +#endif + /* Move past the ethernet header */ + skb->protocol = eth_type_trans(skb, efx->net_dev); + + skb_mark_napi_id(skb, &channel->napi_str); + + return skb; +} + +void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, + unsigned int n_frags, unsigned int len, u16 flags) +{ + struct efx_nic *efx = rx_queue->efx; + struct efx_channel *channel = efx_rx_queue_channel(rx_queue); + struct efx_rx_buffer *rx_buf; + + rx_queue->rx_packets++; + + rx_buf = efx_rx_buffer(rx_queue, index); + rx_buf->flags |= flags; + + /* Validate the number of fragments and completed length */ + if (n_frags == 1) { + if (!(flags & EFX_RX_PKT_PREFIX_LEN)) + efx_rx_packet__check_len(rx_queue, rx_buf, len); + } else if (unlikely(n_frags > EFX_RX_MAX_FRAGS) || + unlikely(len <= (n_frags - 1) * efx->rx_dma_len) || + unlikely(len > n_frags * efx->rx_dma_len) || + unlikely(!efx->rx_scatter)) { + /* If this isn't an explicit discard request, either + * the hardware or the driver is broken. 
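+		 * (A valid scattered completion must satisfy
+		 * (n_frags - 1) * rx_dma_len < len <= n_frags * rx_dma_len;
+		 * for example, with an rx_dma_len of 1824 bytes a 3-fragment
+		 * completion must carry a length between 3649 and 5472.)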
+ */ + WARN_ON(!(len == 0 && rx_buf->flags & EFX_RX_PKT_DISCARD)); + rx_buf->flags |= EFX_RX_PKT_DISCARD; + } + + netif_vdbg(efx, rx_status, efx->net_dev, + "RX queue %d received ids %x-%x len %d %s%s\n", + efx_rx_queue_index(rx_queue), index, + (index + n_frags - 1) & rx_queue->ptr_mask, len, + (rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "", + (rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : ""); + + /* Discard packet, if instructed to do so. Process the + * previous receive first. + */ + if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) { + efx_rx_flush_packet(rx_queue); + efx_discard_rx_packet(channel, rx_buf, n_frags); + return; + } + + if (n_frags == 1 && !(flags & EFX_RX_PKT_PREFIX_LEN)) + rx_buf->len = len; + + /* Release and/or sync the DMA mapping - assumes all RX buffers + * consumed in-order per RX queue. + */ +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_SOCK) + if (channel->zc) + dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, + len, DMA_BIDIRECTIONAL); + else +#endif + efx_sync_rx_buffer(efx, rx_buf, rx_buf->len); + + /* Prefetch nice and early so data will (hopefully) be in cache by + * the time we look at it. + */ + prefetch(efx_rx_buf_va(rx_buf)); + + rx_buf->page_offset += efx->rx_prefix_size; + rx_buf->len -= efx->rx_prefix_size; +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_SOCK) + /* XDP does not support more than 1 frag */ + if (channel->zc) + goto skip_recycle_pages; +#endif + + if (n_frags > 1) { + /* Release/sync DMA mapping for additional fragments. + * Fix length for last fragment. + */ + unsigned int tail_frags = n_frags - 1; + + for (;;) { + rx_buf = efx_rx_buf_next(rx_queue, rx_buf); + if (--tail_frags == 0) + break; + efx_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len); + } + rx_buf->len = len - (n_frags - 1) * efx->rx_dma_len; + efx_sync_rx_buffer(efx, rx_buf, rx_buf->len); + } + + /* All fragments have been DMA-synced, so recycle pages. */ + rx_buf = efx_rx_buffer(rx_queue, index); + efx_recycle_rx_pages(channel, rx_buf, n_frags); + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_SOCK) +skip_recycle_pages: +#endif + /* Pipeline receives so that we give time for packet headers to be + * prefetched into cache. 
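+	 * (efx_rx_flush_packet() completes the packet stashed by the
+	 * previous call; the current packet is only recorded in
+	 * rx_pkt_n_frags/rx_pkt_index here and is processed by a later
+	 * call to __efx_rx_packet().)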
+ */ + efx_rx_flush_packet(rx_queue); + rx_queue->rx_pkt_n_frags = n_frags; + rx_queue->rx_pkt_index = index; +} + +static void efx_rx_deliver(struct efx_rx_queue *rx_queue, u8 *eh, + struct efx_rx_buffer *rx_buf, + unsigned int n_frags) +{ + struct efx_channel *channel = efx_rx_queue_channel(rx_queue); + u16 hdr_len = min_t(u16, rx_buf->len, rx_cb_size); +#if IS_ENABLED(CONFIG_VLAN_8021Q) + u16 rx_buf_vlan_tci = rx_buf->vlan_tci; +#endif + struct efx_nic *efx = rx_queue->efx; + u16 rx_buf_flags = rx_buf->flags; + bool free_buf_on_fail = true; + struct sk_buff *skb; + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_SOCK) + if (channel->zc) + free_buf_on_fail = false; +#endif + skb = efx_rx_mk_skb(rx_queue, &rx_buf, n_frags, &eh, hdr_len); + + if (unlikely(skb == NULL)) { + if (free_buf_on_fail) + efx_free_rx_buffers(rx_queue, rx_buf, n_frags); + return; + } + skb_record_rx_queue(skb, rx_queue->core_index); + + /* Set the SKB flags */ + skb_checksum_none_assert(skb); + if (likely(rx_buf_flags & EFX_RX_PKT_CSUMMED)) { + skb->ip_summed = CHECKSUM_UNNECESSARY; +#if !defined(EFX_USE_KCOMPAT) || defined (EFX_HAVE_CSUM_LEVEL) + skb->csum_level = !!(rx_buf_flags & EFX_RX_PKT_CSUM_LEVEL); +#endif + } + + efx_rx_skb_attach_timestamp(channel, skb, + eh - efx->type->rx_prefix_size); + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_RXHASH_SUPPORT) + if (efx->net_dev->features & NETIF_F_RXHASH) + skb_set_hash(skb, efx_rx_buf_hash(efx, eh), + (rx_buf_flags & EFX_RX_PKT_TCP? PKT_HASH_TYPE_L4: + PKT_HASH_TYPE_L3)); +#endif + +#if IS_ENABLED(CONFIG_VLAN_8021Q) + if (rx_buf_flags & EFX_RX_BUF_VLAN_XTAG) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + rx_buf_vlan_tci); +#endif + + if (rx_queue->receive_skb) + if (rx_queue->receive_skb(rx_queue, skb)) + return; + + /* Pass the packet up */ +#ifdef CONFIG_SFC_TRACING + trace_sfc_receive(skb, false, rx_buf_flags & EFX_RX_BUF_VLAN_XTAG, + rx_buf_vlan_tci); +#endif +#if IS_ENABLED(CONFIG_VLAN_8021Q) && defined(EFX_USE_KCOMPAT) && \ + defined(EFX_HAVE_VLAN_RX_PATH) + if (rx_buf_flags & EFX_RX_BUF_VLAN_XTAG) { + vlan_hwaccel_receive_skb(skb, efx->vlan_group, + rx_buf_vlan_tci); + return; + } +#endif + if (channel->rx_list != NULL) + /* Add to list, will pass up later */ +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_SKB__LIST) + list_add_tail(&skb->list, channel->rx_list); +#else + __skb_queue_tail(channel->rx_list, skb); +#endif + else + /* No list, so pass it up now */ + netif_receive_skb(skb); +} + +/* Handle a received packet. Second half: Touches packet payload. */ +void __efx_rx_packet(struct efx_rx_queue *rx_queue) +{ +#if (defined(EFX_USE_KCOMPAT) && defined(EFX_WANT_DRIVER_BUSY_POLL) || !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_SOCK)) + struct efx_channel *channel = efx_get_rx_queue_channel(rx_queue); +#endif + struct efx_rx_buffer *rx_buf = efx_rx_buf_pipe(rx_queue); + struct efx_nic *efx = rx_queue->efx; + u8 *eh = efx_rx_buf_va(rx_buf); +#if defined(EFX_NOT_UPSTREAM) && defined(EFX_USE_FAKE_VLAN_RX_ACCEL) + struct vlan_ethhdr *veh; +#endif + bool rx_deliver = false; + int rc; + + /* Read length from the prefix if necessary. This already + * excludes the length of the prefix itself. + */ + if (rx_buf->flags & EFX_RX_PKT_PREFIX_LEN) { + rx_buf->len = le16_to_cpup((__le16 *) + (eh + efx->rx_packet_len_offset)); + /* A known issue may prevent this being filled in; + * if that happens, just drop the packet. + * Must do that in the driver since passing a zero-length + * packet up to the stack may cause a crash. 
+ */ + if (unlikely(!rx_buf->len)) { + efx_free_rx_buffers(rx_queue, rx_buf, + rx_queue->rx_pkt_n_frags); + rx_queue->n_rx_frm_trunc++; + goto out; + } + } + + /* If we're in loopback test, then pass the packet directly to the + * loopback layer, and free the rx_buf here + */ + if (unlikely(efx->loopback_selftest)) { + efx_loopback_rx_packet(efx, eh, rx_buf->len); + efx_free_rx_buffers(rx_queue, rx_buf, + rx_queue->rx_pkt_n_frags); + goto out; + } + + + rc = efx_xdp_rx(efx, rx_queue, rx_buf, &eh); +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_SOCK) + if (channel->zc) { + if (rc == XDP_REDIRECT || rc == XDP_TX) + goto free_buf; + else + efx_recycle_rx_bufs_zc(channel, rx_buf, + rx_queue->rx_pkt_n_frags); + if (rc != XDP_PASS) + goto free_buf; + } else if (rc != XDP_PASS) { + goto out; + } +#else +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP) + if (rc != XDP_PASS) + goto out; +#else + if (rc != -ENOTSUPP) + goto out; +#endif +#endif + +#if defined(EFX_NOT_UPSTREAM) && defined(EFX_USE_FAKE_VLAN_RX_ACCEL) + /* Fake VLAN tagging */ + veh = (struct vlan_ethhdr *) eh; +#if defined(EFX_HAVE_VLAN_RX_PATH) + if ((rx_buf->flags & EFX_RX_PKT_VLAN) && efx->vlan_group) { +#else + if ((rx_buf->flags & EFX_RX_PKT_VLAN) && + ((veh->h_vlan_proto == htons(ETH_P_8021Q)) || + (veh->h_vlan_proto == htons(ETH_P_QINQ1)) || + (veh->h_vlan_proto == htons(ETH_P_QINQ2)) || + (veh->h_vlan_proto == htons(ETH_P_QINQ3)) || + (veh->h_vlan_proto == htons(ETH_P_8021AD)))) { +#endif + rx_buf->vlan_tci = ntohs(veh->h_vlan_TCI); + memmove(eh - efx->rx_prefix_size + VLAN_HLEN, + eh - efx->rx_prefix_size, + 2 * ETH_ALEN + efx->rx_prefix_size); + eh += VLAN_HLEN; + rx_buf->page_offset += VLAN_HLEN; + rx_buf->len -= VLAN_HLEN; + rx_buf->flags |= EFX_RX_BUF_VLAN_XTAG; + } +#endif + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_SOCK) + if (rx_buf->flags & EFX_RX_BUF_ZC) { + rx_deliver = true; + goto deliver_now; + } +#endif +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_NDO_SET_FEATURES) || defined(EFX_HAVE_EXT_NDO_SET_FEATURES) + if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM))) +#else + if (unlikely(!efx->rx_checksum_enabled)) +#endif + rx_buf->flags &= ~EFX_RX_PKT_CSUMMED; + +#if defined(EFX_NOT_UPSTREAM) && defined(EFX_USE_SFC_LRO) + if ((rx_buf->flags & (EFX_RX_PKT_CSUMMED | EFX_RX_PKT_TCP)) == + (EFX_RX_PKT_CSUMMED | EFX_RX_PKT_TCP) && + efx_ssr_enabled(efx) && + likely(rx_queue->rx_pkt_n_frags == 1)) + efx_ssr(rx_queue, rx_buf, eh); + else + /* fall through */ +#endif +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_USE_GRO) + if ((rx_buf->flags & EFX_RX_PKT_TCP) && + !rx_queue->receive_skb +#if defined(EFX_USE_KCOMPAT) && defined(EFX_WANT_DRIVER_BUSY_POLL) + && !efx_channel_busy_polling(channel) +#endif + ) { + efx_rx_packet_gro(rx_queue, rx_buf, + rx_queue->rx_pkt_n_frags, + eh, 0); + } else { + rx_deliver = true; + goto deliver_now; + } +#endif +deliver_now: + if (rx_deliver) + efx_rx_deliver(rx_queue, eh, rx_buf, rx_queue->rx_pkt_n_frags); +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_SOCK) +free_buf: + if (channel->zc) + efx_free_rx_buffers(rx_queue, rx_buf, + rx_queue->rx_pkt_n_frags); +#endif +out: + rx_queue->rx_pkt_n_frags = 0; +} + +#if defined(EFX_NOT_UPSTREAM) +static int __init +#ifndef EFX_HAVE_NON_CONST_KERNEL_PARAM +efx_rx_alloc_method_set(const char *val, const struct kernel_param *kp) +#else +efx_rx_alloc_method_set(const char *val, struct kernel_param *kp) +#endif +{ + pr_warn("sfc: module parameter rx_alloc_method is obsolete\n"); + return 0; +} +#ifdef 
EFX_HAVE_KERNEL_PARAM_OPS +static const struct kernel_param_ops efx_rx_alloc_method_ops = { + .set = efx_rx_alloc_method_set +}; +module_param_cb(rx_alloc_method, &efx_rx_alloc_method_ops, NULL, 0); +#else +module_param_call(rx_alloc_method, efx_rx_alloc_method_set, NULL, NULL, 0); +#endif +#endif + +#if defined(EFX_NOT_UPSTREAM) +static int __init +#if !defined(EFX_USE_KCOMPAT) || !defined(EFX_HAVE_NON_CONST_KERNEL_PARAM) +rx_copybreak_set(const char *val, const struct kernel_param *kp) +#else +rx_copybreak_set(const char *val, struct kernel_param *kp) +#endif +{ + int rc = param_set_uint(val, kp); + + if (rc) + return rc; + + if (rx_cb_size == 0) { + /* Adjust so that ethernet headers are copied into the skb. */ + rx_cb_size = ETH_HLEN; + } else if (rx_cb_size < ETH_ZLEN) { + rx_cb_size = ETH_ZLEN; + pr_warn("sfc: Invalid rx_copybreak value. Clamping to %u.\n", + rx_cb_size); + } + + return 0; +} + +#if !defined(EFX_USE_KCOMPAT) || !defined(EFX_HAVE_NON_CONST_KERNEL_PARAM) +static int rx_copybreak_get(char *buffer, const struct kernel_param *kp) +#else +static int rx_copybreak_get(char *buffer, struct kernel_param *kp) +#endif +{ + int rc = param_get_uint(buffer, kp); + + if (!strcmp(buffer, "14")) + rc = scnprintf(buffer, PAGE_SIZE, "0"); + + return rc; +} + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_KERNEL_PARAM_OPS) +static const struct kernel_param_ops rx_copybreak_ops = { + .set = rx_copybreak_set, + .get = rx_copybreak_get, +}; +module_param_cb(rx_copybreak, &rx_copybreak_ops, &rx_cb_size, 0444); +#else +module_param_call(rx_copybreak, rx_copybreak_set, rx_copybreak_get, + &rx_cb_size, 0444); +#endif +MODULE_PARM_DESC(rx_copybreak, + "Size of headers copied into skb linear data area"); +#endif /* EFX_NOT_UPSTREAM */ + + +#if defined(EFX_NOT_UPSTREAM) && defined(EFX_USE_SFC_LRO) + +#define EFX_SSR_MAX_SKB_FRAGS MAX_SKB_FRAGS + +/* Size of the LRO hash table. Must be a power of 2. A larger table + * means we can accelerate a larger number of streams. + */ +static unsigned int lro_table_size = 128; +module_param(lro_table_size, uint, 0644); +MODULE_PARM_DESC(lro_table_size, + "Size of the LRO hash table. Must be a power of 2"); + +/* Maximum length of a hash chain. If chains get too long then the lookup + * time increases and may exceed the benefit of LRO. + */ +static unsigned int lro_chain_max = 20; +module_param(lro_chain_max, uint, 0644); +MODULE_PARM_DESC(lro_chain_max, + "Maximum length of chains in the LRO hash table"); + + +/* Maximum time (in jiffies) that a connection can be idle before it's LRO + * state is discarded. + */ +static unsigned int lro_idle_jiffies = HZ / 10 + 1; /* 100ms */ +module_param(lro_idle_jiffies, uint, 0644); +MODULE_PARM_DESC(lro_idle_jiffies, "Time (in jiffies) after which an" + " idle connection's LRO state is discarded"); + + +/* Number of packets with payload that must arrive in-order before a + * connection is eligible for LRO. The idea is we should avoid coalescing + * segments when the sender is in slow-start because reducing the ACK rate + * can damage performance. + */ +static int lro_slow_start_packets = 2000; +module_param(lro_slow_start_packets, uint, 0644); +MODULE_PARM_DESC(lro_slow_start_packets, "Number of packets that must " + "pass in-order before starting LRO."); + + +/* Number of packets with payload that must arrive in-order following loss + * before a connection is eligible for LRO. 
The idea is we should avoid + * coalescing segments when the sender is recovering from loss, because + * reducing the ACK rate can damage performance. + */ +static int lro_loss_packets = 20; +module_param(lro_loss_packets, uint, 0644); +MODULE_PARM_DESC(lro_loss_packets, "Number of packets that must " + "pass in-order following loss before restarting LRO."); + + +/* Flags for efx_ssr_conn::l2_id; must not collide with VLAN tag bits */ +#define EFX_SSR_L2_ID_VLAN 0x10000 +#define EFX_SSR_L2_ID_IPV6 0x20000 +#define EFX_SSR_CONN_IS_VLAN_ENCAP(c) ((c)->l2_id & EFX_SSR_L2_ID_VLAN) +#define EFX_SSR_CONN_IS_TCPIPV4(c) (!((c)->l2_id & EFX_SSR_L2_ID_IPV6)) +#define EFX_SSR_CONN_VLAN_TCI(c) ((c)->l2_id & 0xffff) + +int efx_ssr_init(struct efx_rx_queue *rx_queue, struct efx_nic *efx) +{ + struct efx_ssr_state *st = &rx_queue->ssr; + unsigned int i; + + st->conns_mask = lro_table_size - 1; + if (!is_power_of_2(lro_table_size)) { + netif_err(efx, drv, efx->net_dev, + "lro_table_size(=%u) must be a power of 2\n", + lro_table_size); + return -EINVAL; + } + st->efx = efx; + st->conns = kmalloc_array((st->conns_mask + 1), + sizeof(st->conns[0]), GFP_KERNEL); + if (st->conns == NULL) + return -ENOMEM; + st->conns_n = kmalloc_array((st->conns_mask + 1), + sizeof(st->conns_n[0]), GFP_KERNEL); + if (st->conns_n == NULL) { + kfree(st->conns); + st->conns = NULL; + return -ENOMEM; + } + for (i = 0; i <= st->conns_mask; ++i) { + INIT_LIST_HEAD(&st->conns[i]); + st->conns_n[i] = 0; + } + INIT_LIST_HEAD(&st->active_conns); + INIT_LIST_HEAD(&st->free_conns); + return 0; +} + +static inline bool efx_rx_buffer_is_full(struct efx_rx_buffer *rx_buf) +{ + return rx_buf->page != NULL; +} + +static inline void efx_rx_buffer_set_empty(struct efx_rx_buffer *rx_buf) +{ + rx_buf->page = NULL; +} + +/* Drop the given connection, and add it to the free list. */ +static void efx_ssr_drop(struct efx_rx_queue *rx_queue, struct efx_ssr_conn *c) +{ + unsigned int bucket; + + EFX_WARN_ON_ONCE_PARANOID(c->skb); + + if (efx_rx_buffer_is_full(&c->next_buf)) { + efx_rx_deliver(rx_queue, c->next_eh, &c->next_buf, 1); + list_del(&c->active_link); + } + + bucket = c->conn_hash & rx_queue->ssr.conns_mask; + EFX_WARN_ON_ONCE_PARANOID(rx_queue->ssr.conns_n[bucket] <= 0); + --rx_queue->ssr.conns_n[bucket]; + list_del(&c->link); + list_add(&c->link, &rx_queue->ssr.free_conns); +} + +void efx_ssr_fini(struct efx_rx_queue *rx_queue) +{ + struct efx_ssr_state *st = &rx_queue->ssr; + struct efx_ssr_conn *c; + unsigned int i; + + /* Return cleanly if efx_ssr_init() has not been called. */ + if (st->conns == NULL) + return; + + EFX_WARN_ON_ONCE_PARANOID(!list_empty(&st->active_conns)); + + for (i = 0; i <= st->conns_mask; ++i) { + while (!list_empty(&st->conns[i])) { + c = list_entry(st->conns[i].prev, + struct efx_ssr_conn, link); + efx_ssr_drop(rx_queue, c); + } + } + + while (!list_empty(&st->free_conns)) { + c = list_entry(st->free_conns.prev, struct efx_ssr_conn, link); + list_del(&c->link); + EFX_WARN_ON_ONCE_PARANOID(c->skb); + kfree(c); + } + + kfree(st->conns_n); + st->conns_n = NULL; + kfree(st->conns); + st->conns = NULL; +} + +static inline u8 * +efx_ssr_skb_iph(struct sk_buff *skb) +{ + return skb->data; +} + +/* Calc IP checksum and deliver to the OS */ +static void efx_ssr_deliver(struct efx_ssr_state *st, struct efx_ssr_conn *c) +{ + struct tcphdr *c_th; + + EFX_WARN_ON_ONCE_PARANOID(!c->skb); + + ++st->n_bursts; + + /* Finish off packet munging and recalculate IP header checksum. 
*/
+	if (EFX_SSR_CONN_IS_TCPIPV4(c)) {
+		struct iphdr *iph = (struct iphdr *)efx_ssr_skb_iph(c->skb);
+		iph->tot_len = htons(c->sum_len);
+		iph->check = 0;
+#if __GNUC__+0 == 4 && __GNUC_MINOR__+0 == 5 && __GNUC_PATCHLEVEL__+0 <= 1
+		/* Compiler may wrongly eliminate the preceding assignment */
+		barrier();
+#endif
+		iph->check = ip_fast_csum((u8 *) iph, iph->ihl);
+		c_th = (struct tcphdr *)(iph + 1);
+	} else {
+		struct ipv6hdr *iph = (struct ipv6hdr *)efx_ssr_skb_iph(c->skb);
+		iph->payload_len = htons(c->sum_len);
+		c_th = (struct tcphdr *)(iph + 1);
+	}
+
+#ifdef EFX_NOT_UPSTREAM
+	if (underreport_skb_truesize) {
+		struct ethhdr *c_eh = eth_hdr(c->skb);
+		int len = c->skb->len + ((u8 *)c->skb->data - (u8 *)c_eh);
+		c->skb->truesize = len + sizeof(struct sk_buff);
+	} else
+#endif
+		c->skb->truesize += c->skb->data_len;
+
+	c->skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+	c_th->window = c->th_last->window;
+	c_th->ack_seq = c->th_last->ack_seq;
+	if (c_th->doff == c->th_last->doff) {
+		/* Copy TCP options (take care to avoid going negative). */
+		int optlen = ((c_th->doff - 5) & 0xf) << 2u;
+		memcpy(c_th + 1, c->th_last + 1, optlen);
+	}
+
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
+#ifndef EFX_HAVE_VLAN_RX_PATH
+	if (EFX_SSR_CONN_IS_VLAN_ENCAP(c))
+		__vlan_hwaccel_put_tag(c->skb, htons(ETH_P_8021Q),
+				       EFX_SSR_CONN_VLAN_TCI(c));
+#endif
+#endif
+
+#ifdef CONFIG_SFC_TRACING
+	trace_sfc_receive(c->skb, false, EFX_SSR_CONN_IS_VLAN_ENCAP(c),
+			  EFX_SSR_CONN_VLAN_TCI(c));
+#endif
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
+#ifdef EFX_HAVE_VLAN_RX_PATH
+	if (EFX_SSR_CONN_IS_VLAN_ENCAP(c))
+		vlan_hwaccel_receive_skb(c->skb, st->efx->vlan_group,
+					 EFX_SSR_CONN_VLAN_TCI(c));
+	else
+		/* fall through */
+#endif
+#endif
+	netif_receive_skb(c->skb);
+
+	c->skb = NULL;
+	c->delivered = 1;
+}
+
+/* Stop tracking connections that have gone idle in order to keep hash
+ * chains short.
+ */
+static void efx_ssr_purge_idle(struct efx_rx_queue *rx_queue, unsigned int now)
+{
+	struct efx_ssr_conn *c;
+	unsigned int i;
+
+	EFX_WARN_ON_ONCE_PARANOID(!list_empty(&rx_queue->ssr.active_conns));
+
+	rx_queue->ssr.last_purge_jiffies = now;
+	for (i = 0; i <= rx_queue->ssr.conns_mask; ++i) {
+		if (list_empty(&rx_queue->ssr.conns[i]))
+			continue;
+
+		c = list_entry(rx_queue->ssr.conns[i].prev,
+			       struct efx_ssr_conn, link);
+		if (now - c->last_pkt_jiffies > lro_idle_jiffies) {
+			++rx_queue->ssr.n_drop_idle;
+			efx_ssr_drop(rx_queue, c);
+		}
+	}
+}
+
+/* Merge the payload of a newly arrived segment into the skb held for this
+ * connection, updating the accumulated length and TCP header state.
+ */
+static int
+efx_ssr_merge(struct efx_ssr_state *st, struct efx_ssr_conn *c,
+	      struct tcphdr *th, int data_length)
+{
+	struct tcphdr *c_th;
+
+	/* Increase lengths appropriately */
+	c->skb->len += data_length;
+	c->skb->data_len += data_length;
+
+	if (data_length > skb_shinfo(c->skb)->gso_size)
+		skb_shinfo(c->skb)->gso_size = data_length;
+
+	/* Update the connection state flags */
+	if (EFX_SSR_CONN_IS_TCPIPV4(c)) {
+		struct iphdr *iph = (struct iphdr *)efx_ssr_skb_iph(c->skb);
+		c_th = (struct tcphdr *)(iph + 1);
+	} else {
+		struct ipv6hdr *iph = (struct ipv6hdr *)efx_ssr_skb_iph(c->skb);
+		c_th = (struct tcphdr *)(iph + 1);
+	}
+	c->sum_len += data_length;
+	c_th->psh |= th->psh;
+	c->th_last = th;
+	++st->n_merges;
+
+	/* Pass packet up now if another segment could overflow the IP
+	 * length.
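+	 * (65536 is the maximum IP datagram length; backing off by 9200
+	 * bytes leaves room for one more jumbo-frame sized segment before
+	 * the 16-bit length field could overflow.)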
+ */ + return (c->skb->len > 65536 - 9200); +} + +static void +efx_ssr_start(struct efx_ssr_state *st, struct efx_ssr_conn *c, + struct tcphdr *th, int data_length) +{ + skb_shinfo(c->skb)->gso_size = data_length; + + if (EFX_SSR_CONN_IS_TCPIPV4(c)) { + struct iphdr *iph = (struct iphdr *)efx_ssr_skb_iph(c->skb); + c->sum_len = ntohs(iph->tot_len); + skb_shinfo(c->skb)->gso_type = SKB_GSO_TCPV4; + } else { + struct ipv6hdr *iph = (struct ipv6hdr *)efx_ssr_skb_iph(c->skb); + c->sum_len = ntohs(iph->payload_len); + skb_shinfo(c->skb)->gso_type = SKB_GSO_TCPV6; + } +} + +static int +efx_ssr_merge_page(struct efx_ssr_state *st, struct efx_ssr_conn *c, + struct tcphdr *th, int hdr_length, int data_length) +{ + struct efx_rx_buffer *rx_buf = &c->next_buf; + struct efx_rx_queue *rx_queue; + size_t rx_prefix_size; + u8 *eh = c->next_eh; + + if (likely(c->skb)) { + skb_fill_page_desc(c->skb, skb_shinfo(c->skb)->nr_frags, + rx_buf->page, + rx_buf->page_offset + hdr_length, + data_length); + rx_buf->page = NULL; + + if (efx_ssr_merge(st, c, th, data_length) || + (skb_shinfo(c->skb)->nr_frags == EFX_SSR_MAX_SKB_FRAGS)) + efx_ssr_deliver(st, c); + + return 1; + } else { + rx_queue = container_of(st, struct efx_rx_queue, ssr); + + c->skb = efx_rx_mk_skb(rx_queue, &rx_buf, 1, &eh, hdr_length); + if (unlikely(c->skb == NULL)) + return 0; + + rx_prefix_size = rx_queue->efx->type->rx_prefix_size; + + efx_rx_skb_attach_timestamp(efx_get_rx_queue_channel(rx_queue), + c->skb, eh - rx_prefix_size); + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_RXHASH_SUPPORT) + if (st->efx->net_dev->features & NETIF_F_RXHASH) + skb_set_hash(c->skb, c->conn_hash, PKT_HASH_TYPE_L4); +#endif + + if (EFX_SSR_CONN_IS_TCPIPV4(c)) { + struct iphdr *iph = + (struct iphdr *)efx_ssr_skb_iph(c->skb); + c->th_last = (struct tcphdr *)(iph + 1); + } else { + struct ipv6hdr *iph = + (struct ipv6hdr *)efx_ssr_skb_iph(c->skb); + c->th_last = (struct tcphdr *)(iph + 1); + } + efx_ssr_start(st, c, th, data_length); + + return 1; + } +} + +/* Try to merge or otherwise hold or deliver (as appropriate) the + * packet buffered for this connection (c->next_buf). Return a flag + * indicating whether the connection is still active for SSR purposes. + */ +static bool +efx_ssr_try_merge(struct efx_rx_queue *rx_queue, struct efx_ssr_conn *c) +{ + struct efx_rx_buffer *rx_buf = &c->next_buf; + u8 *eh = c->next_eh; + int data_length, hdr_length, dont_merge; + unsigned int th_seq, pkt_length; + struct tcphdr *th; + unsigned int now; + + now = jiffies; + if (now - c->last_pkt_jiffies > lro_idle_jiffies) { + ++rx_queue->ssr.n_drop_idle; + if (c->skb) + efx_ssr_deliver(&rx_queue->ssr, c); + efx_ssr_drop(rx_queue, c); + return false; + } + c->last_pkt_jiffies = jiffies; + + if (EFX_SSR_CONN_IS_TCPIPV4(c)) { + struct iphdr *iph = c->next_iph; + th = (struct tcphdr *)(iph + 1); + pkt_length = ntohs(iph->tot_len) + (u8 *) iph - (u8 *) eh; + } else { + struct ipv6hdr *iph = c->next_iph; + th = (struct tcphdr *)(iph + 1); + pkt_length = ntohs(iph->payload_len) + (u8 *) th - (u8 *) eh; + } + + hdr_length = (u8 *) th + th->doff * 4 - (u8 *) eh; + rx_buf->len = min_t(u16, pkt_length, rx_buf->len); + data_length = rx_buf->len - hdr_length; + th_seq = ntohl(th->seq); + dont_merge = ((data_length <= 0) + | th->urg | th->syn | th->rst | th->fin); + + /* Check for options other than aligned timestamp. 
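+	 * The only option block accepted is the common timestamp layout
+	 * NOP, NOP, TIMESTAMP (kind 8, length 10), i.e. the 32-bit word
+	 * 0x0101080a, giving doff = 8 (20 bytes of header plus 12 of
+	 * options).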
*/ + if (th->doff != 5) { + const __be32 *opt_ptr = (const __be32 *) (th + 1); + if (th->doff == 8 && + opt_ptr[0] == htonl((TCPOPT_NOP << 24) | + (TCPOPT_NOP << 16) | + (TCPOPT_TIMESTAMP << 8) | + TCPOLEN_TIMESTAMP)) { + /* timestamp option -- okay */ + } else { + dont_merge = 1; + } + } + + if (unlikely(th_seq - c->next_seq)) { + /* Out-of-order, so start counting again. */ + if (c->skb) + efx_ssr_deliver(&rx_queue->ssr, c); + c->n_in_order_pkts -= lro_loss_packets; + c->next_seq = th_seq + data_length; + ++rx_queue->ssr.n_misorder; + goto deliver_buf_out; + } + c->next_seq = th_seq + data_length; + + if (c->n_in_order_pkts < lro_slow_start_packets) { + /* May be in slow-start, so don't merge. */ + ++rx_queue->ssr.n_slow_start; + ++c->n_in_order_pkts; + goto deliver_buf_out; + } + + if (unlikely(dont_merge)) { + if (c->skb) + efx_ssr_deliver(&rx_queue->ssr, c); + if (th->fin || th->rst) { + ++rx_queue->ssr.n_drop_closed; + efx_ssr_drop(rx_queue, c); + return false; + } + goto deliver_buf_out; + } + + if (efx_ssr_merge_page(&rx_queue->ssr, c, th, + hdr_length, data_length) == 0) + goto deliver_buf_out; + + efx_get_rx_queue_channel(rx_queue)->irq_mod_score += 2; + return true; + + deliver_buf_out: + efx_rx_deliver(rx_queue, eh, rx_buf, 1); + return true; +} + +static void efx_ssr_new_conn(struct efx_ssr_state *st, u32 conn_hash, + u32 l2_id, struct tcphdr *th) +{ + unsigned int bucket = conn_hash & st->conns_mask; + struct efx_ssr_conn *c; + + if (st->conns_n[bucket] >= lro_chain_max) { + ++st->n_too_many; + return; + } + + if (!list_empty(&st->free_conns)) { + c = list_entry(st->free_conns.next, struct efx_ssr_conn, link); + list_del(&c->link); + } else { + c = kmalloc(sizeof(*c), GFP_ATOMIC); + if (c == NULL) + return; + c->skb = NULL; + efx_rx_buffer_set_empty(&c->next_buf); + } + + /* Create the connection tracking data */ + ++st->conns_n[bucket]; + list_add(&c->link, &st->conns[bucket]); + c->l2_id = l2_id; + c->conn_hash = conn_hash; + c->source = th->source; + c->dest = th->dest; + c->n_in_order_pkts = 0; + c->last_pkt_jiffies = jiffies; + c->delivered = 0; + ++st->n_new_stream; + /* NB. We don't initialise c->next_seq, and it doesn't matter what + * value it has. Most likely the next packet received for this + * connection will not match -- no harm done. + */ +} + +/* Process SKB and decide whether to dispatch it to the stack now or + * later. + */ +void efx_ssr(struct efx_rx_queue *rx_queue, struct efx_rx_buffer *rx_buf, + u8 *rx_data) +{ + struct efx_nic *efx = rx_queue->efx; + struct ethhdr *eh = (struct ethhdr *)rx_data; + struct efx_ssr_conn *c; + u32 l2_id = 0; + void *nh = eh + 1; + struct tcphdr *th; + u32 conn_hash; + unsigned int bucket; + + /* Get the hardware hash if available */ +#ifdef EFX_HAVE_RXHASH_SUPPORT + if (efx->net_dev->features & NETIF_F_RXHASH) +#else + if (efx->rx_prefix_size) +#endif + conn_hash = efx_rx_buf_hash(efx, rx_data); + else + conn_hash = 0; + +#if IS_ENABLED(CONFIG_VLAN_8021Q) + if (rx_buf->flags & EFX_RX_BUF_VLAN_XTAG) + l2_id = rx_buf->vlan_tci | EFX_SSR_L2_ID_VLAN; +#endif + + /* Check whether this is a suitable packet (unfragmented + * TCP/IPv4 or TCP/IPv6). If so, find the TCP header and + * length, and compute a hash if necessary. If not, return. 
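+	 *
+	 * The IPv4 test below is written branch-free by OR-ing the
+	 * mismatches together; an equivalent plain form, shown only for
+	 * illustration, would be:
+	 *
+	 *	if (iph->protocol != IPPROTO_TCP || iph->ihl != 5 ||
+	 *	    (iph->frag_off & htons(IP_MF | IP_OFFSET)))
+	 *		goto deliver_now;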
+ */ + if (eh->h_proto == htons(ETH_P_IP)) { + struct iphdr *iph = nh; + if ((iph->protocol - IPPROTO_TCP) | + (iph->ihl - (sizeof(*iph) >> 2u)) | + (__force u16)(iph->frag_off & htons(IP_MF | IP_OFFSET))) + goto deliver_now; + th = (struct tcphdr *)(iph + 1); + if (conn_hash == 0) + /* Can't use ip_fast_csum(,2) as architecture dependent + * implementations may assume min 5 for ihl + */ + conn_hash = hash_64(*((u64*)&iph->saddr), 32); + } else if (eh->h_proto == htons(ETH_P_IPV6)) { + struct ipv6hdr *iph = nh; + if (iph->nexthdr != NEXTHDR_TCP) + goto deliver_now; + l2_id |= EFX_SSR_L2_ID_IPV6; + th = (struct tcphdr *)(iph + 1); + if (conn_hash == 0) + conn_hash = ((__force u32)ip_fast_csum(&iph->saddr, 8) ^ + (__force u32)(th->source ^ th->dest)); + } else { + goto deliver_now; + } + + bucket = conn_hash & rx_queue->ssr.conns_mask; + + list_for_each_entry(c, &rx_queue->ssr.conns[bucket], link) { + if ((c->l2_id - l2_id) | (c->conn_hash - conn_hash)) + continue; + if ((c->source ^ th->source) | (c->dest ^ th->dest)) + continue; + if (c->skb) { + if (EFX_SSR_CONN_IS_TCPIPV4(c)) { + struct iphdr *c_iph, *iph = nh; + c_iph = (struct iphdr *)efx_ssr_skb_iph(c->skb); + if ((c_iph->saddr ^ iph->saddr) | + (c_iph->daddr ^ iph->daddr)) + continue; + } else { + struct ipv6hdr *c_iph, *iph = nh; + c_iph = (struct ipv6hdr *) + efx_ssr_skb_iph(c->skb); + if (ipv6_addr_cmp(&c_iph->saddr, &iph->saddr) | + ipv6_addr_cmp(&c_iph->daddr, &iph->daddr)) + continue; + } + } + + /* Re-insert at head of list to reduce lookup time. */ + list_del(&c->link); + list_add(&c->link, &rx_queue->ssr.conns[bucket]); + + if (efx_rx_buffer_is_full(&c->next_buf)) { + if (!efx_ssr_try_merge(rx_queue, c)) + goto deliver_now; + } else { + list_add(&c->active_link, &rx_queue->ssr.active_conns); + } + c->next_buf = *rx_buf; + c->next_eh = rx_data; + efx_rx_buffer_set_empty(rx_buf); + c->next_iph = nh; + return; + } + + efx_ssr_new_conn(&rx_queue->ssr, conn_hash, l2_id, th); + deliver_now: + efx_rx_deliver(rx_queue, rx_data, rx_buf, 1); +} + +/* Push held skbs down into network stack. + * Only called when active list is non-empty. + */ +void __efx_ssr_end_of_burst(struct efx_rx_queue *rx_queue) +{ + struct efx_ssr_state *st = &rx_queue->ssr; + struct efx_ssr_conn *c; + unsigned int j; + + EFX_WARN_ON_ONCE_PARANOID(list_empty(&st->active_conns)); + + do { + c = list_entry(st->active_conns.next, struct efx_ssr_conn, + active_link); + if (!c->delivered && c->skb) + efx_ssr_deliver(st, c); + if (efx_ssr_try_merge(rx_queue, c)) { + if (c->skb) + efx_ssr_deliver(st, c); + list_del(&c->active_link); + } + c->delivered = 0; + } while (!list_empty(&st->active_conns)); + + j = jiffies; + if (unlikely(j != st->last_purge_jiffies)) + efx_ssr_purge_idle(rx_queue, j); +} + + +#endif /* EFX_USE_SFC_LRO */ + diff --git a/src/drivers/net/ethernet/sfc/rx_common.c b/src/drivers/net/ethernet/sfc/rx_common.c new file mode 100644 index 0000000..299946a --- /dev/null +++ b/src/drivers/net/ethernet/sfc/rx_common.c @@ -0,0 +1,1982 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2018 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
+ */
+#include "net_driver.h"
+#include <linux/module.h>
+#include "efx.h"
+#include "nic.h"
+#include "rx_common.h"
+#ifdef CONFIG_SFC_TRACING
+#include <trace/events/sfc.h>
+#endif
+#include "mcdi_pcol.h"
+
+/* This is the percentage fill level below which new RX descriptors
+ * will be added to the RX descriptor ring.
+ */
+static unsigned int rx_refill_threshold;
+module_param(rx_refill_threshold, uint, 0444);
+MODULE_PARM_DESC(rx_refill_threshold,
+		 "RX descriptor ring refill threshold (%)");
+
+#if !defined(EFX_NOT_UPSTREAM) || defined(EFX_RX_PAGE_SHARE)
+/* By default use the NIC specific calculation. This can be set to 0
+ * to disable the RX recycle ring.
+ */
+static int rx_recycle_ring_size = -1;
+module_param(rx_recycle_ring_size, int, 0444);
+MODULE_PARM_DESC(rx_recycle_ring_size,
+		 "Maximum number of RX buffers to recycle pages for");
+#endif
+
+/*
+ * RX maximum head room required.
+ *
+ * This must be at least 1 to prevent overflow, plus one packet-worth
+ * to allow pipelined receives.
+ */
+#define EFX_RXD_HEAD_ROOM (1 + EFX_RX_MAX_FRAGS)
+
+/* Preferred number of descriptors to fill at once */
+#define EFX_RX_PREFERRED_BATCH 8U
+
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_SOCK)
+#if defined(CONFIG_XDP_SOCKETS)
+static void efx_reuse_rx_buffer_zc(struct efx_rx_queue *rx_queue,
+				   struct efx_rx_buffer *rx_buf_reuse);
+#endif
+#endif
+
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_SOCK)
+void efx_recycle_rx_bufs_zc(struct efx_channel *channel,
+			    struct efx_rx_buffer *rx_buf,
+			    unsigned int n_frags)
+{
+	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
+
+	while (n_frags) {
+		rx_buf->flags |= EFX_RX_BUF_XSK_REUSE;
+		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
+		--n_frags;
+	}
+}
+#endif
+
+static void efx_rx_slow_fill(struct work_struct *data);
+static void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
+static void efx_cancel_slow_fill(struct efx_rx_queue *rx_queue);
+
+#if !defined(EFX_NOT_UPSTREAM) || defined(EFX_RX_PAGE_SHARE)
+/* Check the RX page recycle ring for a page that can be reused. */
+static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
+{
+	struct page *page;
+	unsigned int index;
+
+	/* No page recycling when queue in ZC mode */
+	if (!rx_queue->page_ring)
+		return NULL;
+	index = rx_queue->page_remove & rx_queue->page_ptr_mask;
+	rx_queue->page_remove++;
+
+	page = rx_queue->page_ring[index];
+	if (!page)
+		return NULL;
+
+	/* If page_count is 1 we hold the only reference to this page. */
+	if (page_count(page) != 1) {
+		++rx_queue->page_recycle_failed;
+		return NULL;
+	}
+
+	++rx_queue->page_recycle_count;
+	return page;
+}
+
+/* Attempt to recycle the page if there is an RX recycle ring; the page can
+ * only be added if this is the final RX buffer, to prevent pages being used in
+ * the descriptor ring and appearing in the recycle ring simultaneously.
+ */
+static void efx_recycle_rx_page(struct efx_channel *channel,
+				struct efx_rx_buffer *rx_buf)
+{
+	struct page *page = rx_buf->page;
+	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
+	struct efx_nic *efx = rx_queue->efx;
+	unsigned int index;
+
+	/* Don't continue if page is already present in recycle ring.
+	 * Prevents the page being added to the ring twice
+	 */
+	if (rx_buf->flags & EFX_RX_PAGE_IN_RECYCLE_RING)
+		return;
+
+	/* Only recycle the page after processing the final buffer.
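+	 *
+	 * Worked through with assumed numbers: with 4K pages and two
+	 * buffers per page, buffer 0 always returns here and only
+	 * buffer 1, flagged EFX_RX_BUF_LAST_IN_PAGE at fill time, may
+	 * add the page, so a page cannot be queued in the ring twice.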
*/ + if (!(rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE)) + return; + + if (rx_queue->page_ring) { + index = rx_queue->page_add & rx_queue->page_ptr_mask; + if (rx_queue->page_ring[index] == NULL) { + rx_queue->page_ring[index] = page; + ++rx_queue->page_add; + return; + } + } + ++rx_queue->page_recycle_full; + efx_unmap_rx_buffer(efx, rx_buf); + put_page(rx_buf->page); +} + +/* Recycle the pages that are used by buffers that have just been received. */ +void efx_recycle_rx_pages(struct efx_channel *channel, + struct efx_rx_buffer *rx_buf, + unsigned int n_frags) +{ + struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel); + + do { + efx_recycle_rx_page(channel, rx_buf); + rx_buf = efx_rx_buf_next(rx_queue, rx_buf); + } while (--n_frags); +} + +static void efx_init_rx_recycle_ring(struct efx_rx_queue *rx_queue) +{ +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_SOCK) + struct efx_channel *channel = efx_get_rx_queue_channel(rx_queue); +#endif + unsigned int bufs_in_recycle_ring, page_ring_size; + struct efx_nic *efx = rx_queue->efx; + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_SOCK) + if (channel->zc) { + rx_queue->page_ring = NULL; + return; + } +#endif +#if !defined(EFX_NOT_UPSTREAM) || defined(EFX_RX_PAGE_SHARE) + if (!rx_recycle_ring_size) + return; + else if (rx_recycle_ring_size == -1) + bufs_in_recycle_ring = efx_rx_recycle_ring_size(efx); + else + bufs_in_recycle_ring = rx_recycle_ring_size; +#else + bufs_in_recycle_ring = efx_rx_recycle_ring_size(efx); +#endif + page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring / + efx->rx_bufs_per_page); + rx_queue->page_ring = kcalloc(page_ring_size, + sizeof(*rx_queue->page_ring), GFP_KERNEL); + if (!rx_queue->page_ring) + rx_queue->page_ptr_mask = 0; + else + rx_queue->page_ptr_mask = page_ring_size - 1; +} + +static void efx_fini_rx_recycle_ring(struct efx_rx_queue *rx_queue) +{ + struct efx_nic *efx = rx_queue->efx; + unsigned int i; + + if (unlikely(!rx_queue->page_ring)) + return; + + /* Unmap and release the pages in the recycle ring. Remove the ring. */ + for (i = 0; i <= rx_queue->page_ptr_mask; i++) { + struct page *page = rx_queue->page_ring[i]; + struct efx_rx_page_state *state; + + if (page == NULL) + continue; + + state = page_address(page); + dma_unmap_page(&efx->pci_dev->dev, state->dma_addr, + PAGE_SIZE << efx->rx_buffer_order, + DMA_FROM_DEVICE); + put_page(page); + } + kfree(rx_queue->page_ring); + rx_queue->page_ring = NULL; +} + +/* Recycle Rx buffer directly back into the rx_queue. + * If may be done on discard only when Rx buffers do not share page. + * There is always room to add this buffer, because pipeline is empty and + * we've just popped a buffer. + */ +static void efx_recycle_rx_buf(struct efx_rx_queue *rx_queue, + struct efx_rx_buffer *rx_buf) +{ + struct efx_rx_buffer *new_buf; + unsigned int index; + + index = rx_queue->added_count & rx_queue->ptr_mask; + new_buf = efx_rx_buffer(rx_queue, index); + + memcpy(new_buf, rx_buf, sizeof(*new_buf)); + rx_buf->page = NULL; + + /* Page is not shared, so it is always the last */ + new_buf->flags = rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE; + if (likely(rx_queue->page_ring)) { + new_buf->flags |= rx_buf->flags & EFX_RX_PAGE_IN_RECYCLE_RING; + ++rx_queue->recycle_count; + } + + /* Since removed_count is updated after packet processing the + * following can happen here: + * added_count > removed_count + rx_queue->ptr_mask + 1 + * efx_fast_push_rx_descriptors() asserts this is not true. 
+ * efx_fast_push_rx_descriptors() is only called at the end of + * a NAPI poll cycle, at which point removed_count has been updated. + */ + ++rx_queue->added_count; +} + +void efx_discard_rx_packet(struct efx_channel *channel, + struct efx_rx_buffer *rx_buf, + unsigned int n_frags) +{ + struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel); +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_SOCK) + struct efx_rx_buffer *_rx_buf = rx_buf; +#endif + + do { +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_SOCK) + if (rx_buf->flags & EFX_RX_BUF_ZC) + rx_buf->flags |= EFX_RX_BUF_XSK_REUSE; + else +#endif + efx_recycle_rx_buf(rx_queue, rx_buf); + rx_buf = efx_rx_buf_next(rx_queue, rx_buf); + } while (--n_frags); +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_SOCK) + if (channel->zc) + efx_free_rx_buffers(efx_channel_get_rx_queue(channel), _rx_buf, + n_frags); +#endif +} +#else +struct page *efx_reuse_page(struct efx_rx_queue *rx_queue) +{ + (void)rx_queue; + return NULL; +} + +void efx_recycle_rx_pages(struct efx_channel *channel, + struct efx_rx_buffer *rx_buf, + unsigned int n_frags) +{ + (void) channel; + (void) rx_buf; + (void) n_frags; +} + +void efx_discard_rx_packet(struct efx_channel *channel, + struct efx_rx_buffer *rx_buf, + unsigned int n_frags) +{ + struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel); + struct efx_rx_buffer *_rx_buf = rx_buf; + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_SOCK) + if (channel->zc) { + do { + rx_buf->flags |= EFX_RX_BUF_XSK_REUSE; + rx_buf = efx_rx_buf_next(rx_queue, rx_buf); + } while (--n_frags); + } +#endif + efx_free_rx_buffers(rx_queue, _rx_buf, n_frags); +} +#endif + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_SOCK) +#if defined(CONFIG_XDP_SOCKETS) +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_USE_XSK_BUFFER_ALLOC) +static void efx_free_xsk_buffers(struct efx_rx_queue *rx_queue, + struct efx_rx_buffer *rx_buf, + unsigned int num_bufs) +{ + + while (num_bufs) { + xsk_buff_free(rx_buf->xsk_buf); + rx_buf->xsk_buf = NULL; + rx_buf = efx_rx_buf_next(rx_queue, rx_buf); + num_bufs--; + } +} +#endif + +static void efx_fini_rx_buffer_zc(struct efx_rx_queue *rx_queue, + struct efx_rx_buffer *rx_buf) +{ +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_USE_XSK_BUFFER_ALLOC) + if (rx_buf->xsk_buf) + efx_free_xsk_buffers(rx_queue, rx_buf, 1); +#else + if (rx_buf->addr) + efx_free_rx_buffers(rx_queue, rx_buf, 1); +#endif +} +#endif /* CONFIG_XDP_SOCKETS */ +#endif + +static void efx_fini_rx_buffer_nzc(struct efx_rx_queue *rx_queue, + struct efx_rx_buffer *rx_buf) +{ +#if !defined(EFX_NOT_UPSTREAM) || defined(EFX_RX_PAGE_SHARE) + /* Release the page reference we hold for the buffer. */ + if (rx_buf->page) + put_page(rx_buf->page); +#endif + + /* If this is the last buffer in a page, unmap and free it. 
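+	 *
+	 * A sketch of the reference accounting assumed here (the
+	 * efx_init_rx_buffers_nzc() fill path): page_ref_add() took one
+	 * reference per buffer at fill time, the put_page() above drops
+	 * this buffer's share, and the buffer flagged as last in the
+	 * page also tears down the DMA mapping before the final
+	 * reference is released.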
*/ + if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) { + efx_unmap_rx_buffer(rx_queue->efx, rx_buf); +#if !defined(EFX_NOT_UPSTREAM) || defined(EFX_RX_PAGE_SHARE) + if (!(rx_buf->flags & EFX_RX_PAGE_IN_RECYCLE_RING)) +#endif + efx_free_rx_buffers(rx_queue, rx_buf, 1); + } + rx_buf->page = NULL; +} + +static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue, + struct efx_rx_buffer *rx_buf) +{ +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_SOCK) + struct efx_channel *channel = efx_get_rx_queue_channel(rx_queue); + + if (channel->zc) { +#if defined(CONFIG_XDP_SOCKETS) + efx_fini_rx_buffer_zc(rx_queue, rx_buf); +#endif + return; + } +#endif + efx_fini_rx_buffer_nzc(rx_queue, rx_buf); +} + +int efx_probe_rx_queue(struct efx_rx_queue *rx_queue) +{ + struct efx_nic *efx = rx_queue->efx; + unsigned int entries; + int rc; + + INIT_DELAYED_WORK(&rx_queue->slow_fill_work, efx_rx_slow_fill); + + /* Create the smallest power-of-two aligned ring */ + entries = max(roundup_pow_of_two(efx->rxq_entries), + efx_min_dmaq_size(efx)); + EFX_WARN_ON_PARANOID(entries > efx_max_dmaq_size(efx)); + rx_queue->ptr_mask = entries - 1; + + netif_dbg(efx, probe, efx->net_dev, + "creating RX queue %d size %#x mask %#x\n", + efx_rx_queue_index(rx_queue), entries, rx_queue->ptr_mask); + + /* Allocate RX buffers */ + rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer), + GFP_KERNEL); + if (!rx_queue->buffer) + return -ENOMEM; + + rc = efx_nic_probe_rx(rx_queue); + if (rc) { + kfree(rx_queue->buffer); + rx_queue->buffer = NULL; + } + + return rc; +} + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_SOCK) +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_USE_XSK_BUFFER_ALLOC) +#if defined(CONFIG_XDP_SOCKETS) +static void efx_zca_free(struct zero_copy_allocator *alloc, + unsigned long handle) +{ + struct efx_rx_queue *rx_queue = + container_of(alloc, struct efx_rx_queue, zca); + + xsk_umem_fq_reuse(rx_queue->umem, handle & rx_queue->umem->chunk_mask); +} +#endif +#endif +#endif + +int efx_init_rx_queue(struct efx_rx_queue *rx_queue) +{ +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_SOCK) +#if defined(CONFIG_XDP_SOCKETS) + struct efx_channel *channel = efx_get_rx_queue_channel(rx_queue); +#endif +#endif + struct efx_nic *efx = rx_queue->efx; + unsigned int max_fill, trigger, max_trigger; + int rc; + + netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, + "initialising RX queue %d\n", efx_rx_queue_index(rx_queue)); + + /* Initialise ptr fields */ + rx_queue->added_count = 0; + rx_queue->notified_count = 0; + rx_queue->granted_count = 0; + rx_queue->removed_count = 0; + rx_queue->min_fill = -1U; + rx_queue->failed_flush_count = 0; +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_SOCK) +#if defined(CONFIG_XDP_SOCKETS) +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XSK_POOL) + rx_queue->xsk_pool = NULL; + if (channel->zc) + rx_queue->xsk_pool = xsk_get_pool_from_qid(efx->net_dev, + rx_queue->core_index); +#else + rx_queue->umem = NULL; + if (channel->zc) + rx_queue->umem = xdp_get_umem_from_qid(efx->net_dev, + rx_queue->core_index); +#endif + if (channel->zc && +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_USE_XSK_BUFFER_ALLOC) +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XSK_POOL) + efx->rx_dma_len > xsk_pool_get_rx_frame_size(rx_queue->xsk_pool)) { +#else + efx->rx_dma_len > xsk_umem_get_rx_frame_size(rx_queue->umem)) { +#endif +#else + efx->rx_dma_len > (rx_queue->umem->chunk_mask + 1)) { +#endif + netif_err(rx_queue->efx, drv, rx_queue->efx->net_dev, + "MTU and UMEM/POOL frame 
size not in sync\n. Required min. UMEM frame size = %u", + efx->rx_dma_len); +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XSK_POOL) +#if defined(CONFIG_XDP_SOCKETS) + rx_queue->xsk_pool = NULL; +#else + rx_queue->umem = NULL; +#endif +#endif + return -EINVAL; + } +#endif /* CONFIG_XDP_SOCKETS */ +#endif /*EFX_HAVE_XDP_SOCK */ +#if !defined(EFX_NOT_UPSTREAM) || defined(EFX_RX_PAGE_SHARE) + efx_init_rx_recycle_ring(rx_queue); + + rx_queue->page_remove = 1; + rx_queue->page_add = 0; + rx_queue->page_recycle_count = 0; + rx_queue->page_recycle_failed = 0; + rx_queue->page_recycle_full = 0; + rx_queue->page_repost_count = 0; +#endif + + /* Initialise limit fields */ + max_fill = rx_queue->ptr_mask + 1 - EFX_RXD_HEAD_ROOM; + max_trigger = + max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page; + if (rx_refill_threshold != 0) { + trigger = max_fill * min(rx_refill_threshold, 100U) / 100U; + if (trigger > max_trigger) + trigger = max_trigger; + } else { + trigger = max_trigger; + } + + rx_queue->max_fill = max_fill; + rx_queue->fast_fill_trigger = trigger; + rx_queue->refill_enabled = false; + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XSK_POOL) +#if defined(CONFIG_XDP_SOCKETS) + if (channel->zc) + xsk_pool_set_rxq_info(rx_queue->xsk_pool, + &rx_queue->xdp_rxq_info); +#endif +#endif +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_RXQ_INFO) + /* Initialise XDP queue information */ + rc = xdp_rxq_info_reg(&rx_queue->xdp_rxq_info, efx->net_dev, + rx_queue->core_index, 0); +#endif +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_SOCK) +#if defined(CONFIG_XDP_SOCKETS) + if (!rc && channel->zc) { +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_USE_XSK_BUFFER_ALLOC) + rc = xdp_rxq_info_reg_mem_model(&rx_queue->xdp_rxq_info, + MEM_TYPE_XSK_BUFF_POOL, + NULL); +#else + rx_queue->zca.free = efx_zca_free; + rc = xdp_rxq_info_reg_mem_model(&rx_queue->xdp_rxq_info, + MEM_TYPE_ZERO_COPY, + &rx_queue->zca); +#endif + } +#endif + if (rc) + return rc; +#endif + + /* Set up RX descriptor ring */ + rc = efx_nic_init_rx(rx_queue); + +#if defined(EFX_NOT_UPSTREAM) && defined(EFX_USE_SFC_LRO) + if (!rc) + rc = efx_ssr_init(rx_queue, efx); +#endif +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_RXQ_INFO) + if (rc) + xdp_rxq_info_unreg(&rx_queue->xdp_rxq_info); +#endif + + return rc; +} + +void efx_fini_rx_queue(struct efx_rx_queue *rx_queue) +{ + int i; + struct efx_rx_buffer *rx_buf; + + netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, + "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue)); + +#if defined(EFX_NOT_UPSTREAM) && defined(EFX_USE_SFC_LRO) + efx_ssr_fini(rx_queue); +#endif + + efx_cancel_slow_fill(rx_queue); + if (rx_queue->grant_credits) + flush_work(&rx_queue->grant_work); + + /* Release RX buffers from the current read ptr to the write ptr */ + if (rx_queue->buffer) { + for (i = rx_queue->removed_count; i < rx_queue->added_count; + i++) { + unsigned int index = i & rx_queue->ptr_mask; + + rx_buf = efx_rx_buffer(rx_queue, index); + efx_fini_rx_buffer(rx_queue, rx_buf); + } + } + +#if !defined(EFX_NOT_UPSTREAM) || defined(EFX_RX_PAGE_SHARE) + efx_fini_rx_recycle_ring(rx_queue); +#endif + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_RXQ_INFO) + if (xdp_rxq_info_is_reg(&rx_queue->xdp_rxq_info)) + xdp_rxq_info_unreg(&rx_queue->xdp_rxq_info); +#endif +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_SOCK) +#if defined(CONFIG_XDP_SOCKETS) +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XSK_POOL) + rx_queue->xsk_pool = NULL; +#else + 
rx_queue->umem = NULL; +#endif +#endif +#endif +} + +void efx_remove_rx_queue(struct efx_rx_queue *rx_queue) +{ + netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, + "removing RX queue %d\n", efx_rx_queue_index(rx_queue)); + + efx_nic_remove_rx(rx_queue); +} + +void efx_destroy_rx_queue(struct efx_rx_queue *rx_queue) +{ + netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, + "destroying RX queue %d\n", efx_rx_queue_index(rx_queue)); + + kfree(rx_queue->buffer); + rx_queue->buffer = NULL; +} + +/* Unmap a DMA-mapped page. This function is only called for the final RX + * buffer in a page. + */ +void efx_unmap_rx_buffer(struct efx_nic *efx, + struct efx_rx_buffer *rx_buf) +{ + struct page *page = rx_buf->page; + +#if !defined(EFX_NOT_UPSTREAM) || defined(EFX_RX_PAGE_SHARE) + if (rx_buf->flags & EFX_RX_PAGE_IN_RECYCLE_RING) + return; +#endif + if (page) { + struct efx_rx_page_state *state = page_address(page); + + dma_unmap_page(&efx->pci_dev->dev, + state->dma_addr, + PAGE_SIZE << efx->rx_buffer_order, + DMA_FROM_DEVICE); + } +} + +void efx_free_rx_buffers(struct efx_rx_queue *rx_queue, + struct efx_rx_buffer *rx_buf, + unsigned int num_bufs) +{ + while (num_bufs) { +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_SOCK) + if (rx_buf->flags & EFX_RX_BUF_ZC) { +#if defined(CONFIG_XDP_SOCKETS) + if (rx_buf->flags & EFX_RX_BUF_XSK_REUSE) { + efx_reuse_rx_buffer_zc(rx_queue, rx_buf); + } else { +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_USE_XSK_BUFFER_ALLOC) + rx_buf->xsk_buf = NULL; +#else + rx_buf->addr = NULL; +#endif + rx_buf->flags = 0; + } +#endif + } else if (rx_buf->page) { +#else + if (rx_buf->page) { +#endif + put_page(rx_buf->page); + rx_buf->page = NULL; + } + rx_buf = efx_rx_buf_next(rx_queue, rx_buf); + --num_bufs; + } +} + +static void efx_rx_slow_fill(struct work_struct *data) +{ + struct efx_rx_queue *rx_queue = + container_of(data, struct efx_rx_queue, slow_fill_work.work); + + /* Post an event to cause NAPI to run and refill the queue */ + if (efx_nic_generate_fill_event(rx_queue) != 0) + efx_schedule_slow_fill(rx_queue); + ++rx_queue->slow_fill_count; +} + +static void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue) +{ + schedule_delayed_work(&rx_queue->slow_fill_work, + msecs_to_jiffies(1)); +} + +static void efx_cancel_slow_fill(struct efx_rx_queue *rx_queue) +{ + cancel_delayed_work_sync(&rx_queue->slow_fill_work); +} + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_SOCK) +#if defined(CONFIG_XDP_SOCKETS) +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_USE_XSK_BUFFER_ALLOC) +static void efx_xdp_umem_discard_addr(struct xdp_umem *umem, bool slow) +{ +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_UMEM_RELEASE_ADDR) + if (slow) + xsk_umem_release_addr_rq(umem); + else + xsk_umem_release_addr(umem); +#else + if (slow) + xsk_umem_discard_addr_rq(umem); + else + xsk_umem_discard_addr(umem); +#endif +} + +static bool efx_alloc_buffer_zc(struct efx_rx_queue *rx_queue, + struct efx_rx_buffer *rx_buf, bool slow) +{ + struct xdp_umem *umem = rx_queue->umem; + bool alloc_failed = true; + u64 handle = 0; + u64 hr; + + if (slow) { + if (!xsk_umem_peek_addr_rq(umem, &handle)) + goto alloc_fail; + } else { + if (!xsk_umem_peek_addr(umem, &handle)) + goto alloc_fail; + } + alloc_failed = false; + + handle &= umem->chunk_mask; + + hr = umem->headroom + XDP_PACKET_HEADROOM; + + rx_buf->dma_addr = xdp_umem_get_dma(umem, handle); + rx_buf->dma_addr += hr; + + rx_buf->addr = xdp_umem_get_data(umem, handle); + rx_buf->addr += hr; +#if defined(EFX_USE_KCOMPAT) && 
defined(EFX_HAVE_XDP_SOCK) && defined(EFX_HAVE_XSK_OFFSET_ADJUST)
+	rx_buf->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom);
+#endif
+	efx_xdp_umem_discard_addr(umem, slow);
+
+alloc_fail:
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XSK_NEED_WAKEUP)
+	if (xsk_umem_uses_need_wakeup(umem)) {
+		if (alloc_failed)
+			xsk_set_rx_need_wakeup(umem);
+		else
+			xsk_clear_rx_need_wakeup(umem);
+	}
+#endif
+	return alloc_failed;
+}
+#else
+static bool efx_alloc_buffer_zc(struct efx_rx_queue *rx_queue,
+				struct efx_rx_buffer *rx_buf, bool slow)
+{
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XSK_POOL)
+	struct xsk_buff_pool *buff_pool = rx_queue->xsk_pool;
+#else
+	struct xdp_umem *buff_pool = rx_queue->umem;
+#endif
+	bool alloc_failed = false;
+	struct xdp_buff *xsk_buf;
+
+	xsk_buf = xsk_buff_alloc(buff_pool);
+	if (!xsk_buf) {
+		alloc_failed = true;
+		goto alloc_fail;
+	}
+	rx_buf->dma_addr = xsk_buff_xdp_get_dma(xsk_buf);
+	xsk_buf->rxq = &rx_queue->xdp_rxq_info;
+	rx_buf->xsk_buf = xsk_buf;
+
+alloc_fail:
+	return alloc_failed;
+}
+#endif
+
+/**
+ * efx_reuse_rx_buffer_zc - reuse a single zc rx_buf structure
+ *
+ * @rx_queue: Efx RX queue
+ * @rx_buf_reuse: EFX RX buffer that can be reused
+ *
+ * This will reuse zc buffer dma addresses.
+ */
+static void efx_reuse_rx_buffer_zc(struct efx_rx_queue *rx_queue,
+				   struct efx_rx_buffer *rx_buf_reuse)
+{
+	struct efx_nic *efx = rx_queue->efx;
+	struct efx_rx_buffer *rx_buf;
+	unsigned int index;
+
+	index = rx_queue->added_count & rx_queue->ptr_mask;
+	rx_buf = efx_rx_buffer(rx_queue, index);
+	rx_buf->dma_addr = rx_buf_reuse->dma_addr;
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_USE_XSK_BUFFER_ALLOC)
+	rx_buf->xsk_buf = rx_buf_reuse->xsk_buf;
+#else
+	rx_buf->addr = rx_buf_reuse->addr;
+	rx_buf->handle = rx_buf_reuse->handle;
+#endif
+	rx_buf_reuse->flags = 0;
+	rx_buf->page_offset = 0;
+	rx_buf->len = efx->rx_dma_len;
+	rx_buf->flags = EFX_RX_BUF_ZC;
+	rx_buf->vlan_tci = 0;
+	++rx_queue->added_count;
+}
+
+/**
+ * efx_init_rx_buffer_zc - initialise a single zc rx_buf structure
+ * @rx_queue: Efx RX queue
+ * @flags: Flags field
+ *
+ * This will initialise a single rx_buf structure for use at the end of
+ * the rx_buf array.
+ *
+ * WARNING: The page_offset calculated here must match the value
+ * calculated by efx_rx_buffer_step().
+ *
+ * Return: a negative error code or 0 on success.
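+ *
+ * A minimal hypothetical caller, shown for illustration only (the real
+ * entry point is efx_init_rx_buffers_zc() below):
+ *
+ *	if (efx_init_rx_buffer_zc(rx_queue, EFX_RX_BUF_ZC))
+ *		return -ENOMEM;	/* pool empty; retried via slow fill */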
+ */
+static int efx_init_rx_buffer_zc(struct efx_rx_queue *rx_queue,
+				 u16 flags)
+{
+	struct efx_rx_buffer *rx_buf;
+	struct efx_nic *efx = rx_queue->efx;
+	unsigned int index;
+	bool slow = (rx_queue->added_count < rx_queue->ptr_mask);
+
+	index = rx_queue->added_count & rx_queue->ptr_mask;
+	rx_buf = efx_rx_buffer(rx_queue, index);
+	if (rx_buf->flags & EFX_RX_BUF_XSK_REUSE)
+		goto init_buf; /* can be reused at same index */
+	if (!efx_alloc_buffer_zc(rx_queue, rx_buf, slow)) {
+#if defined(EFX_USE_KCOMPAT) && !defined(EFX_USE_XSK_BUFFER_ALLOC)
+		dma_sync_single_range_for_device(&efx->net_dev->dev,
+						 rx_buf->dma_addr, 0,
+						 efx->rx_dma_len,
+						 DMA_BIDIRECTIONAL);
+#endif
+init_buf:
+		++rx_queue->added_count;
+		rx_buf->page_offset = 0;
+		rx_buf->len = efx->rx_dma_len;
+		rx_buf->flags = flags;
+		rx_buf->vlan_tci = 0;
+		return 0;
+	}
+	return -ENOMEM;
+}
+#endif /* CONFIG_XDP_SOCKETS */
+#endif
+
+/**
+ * efx_init_rx_buffer - initialise a single rx_buf structure
+ *
+ * @rx_queue: Efx RX queue
+ * @page: Page to reference
+ * @page_offset: Page offset, expressed in efx->rx_page_buf_step
+ *	increments
+ * @flags: Flags field
+ *
+ * This will initialise a single rx_buf structure for use at the end of
+ * the rx_buf array. It assumes the input page is initialised with the
+ * efx_rx_page_state metadata necessary to correctly calculate dma addresses.
+ *
+ * WARNING: The page_offset calculated here must match the value
+ * calculated by efx_rx_buffer_step().
+ */
+void efx_init_rx_buffer(struct efx_rx_queue *rx_queue,
+			struct page *page,
+			unsigned int page_offset,
+			u16 flags)
+{
+	struct efx_rx_buffer *rx_buf;
+	struct efx_nic *efx = rx_queue->efx;
+	struct efx_rx_page_state *state;
+	dma_addr_t dma_addr;
+	unsigned int index;
+
+	EFX_WARN_ON_ONCE_PARANOID(page_offset >
+				  PAGE_SIZE << efx->rx_buffer_order);
+
+	state = page_address(page);
+	dma_addr = state->dma_addr;
+
+	page_offset += sizeof(struct efx_rx_page_state);
+	page_offset += XDP_PACKET_HEADROOM;
+
+	index = rx_queue->added_count & rx_queue->ptr_mask;
+	rx_buf = efx_rx_buffer(rx_queue, index);
+	rx_buf->dma_addr = dma_addr + page_offset + efx->rx_ip_align;
+	rx_buf->page = page;
+	rx_buf->page_offset = ALIGN(page_offset + efx->rx_ip_align,
+				    EFX_RX_BUF_ALIGNMENT);
+	rx_buf->len = efx->rx_dma_len;
+	rx_buf->flags = flags;
+	rx_buf->vlan_tci = 0;
+	++rx_queue->added_count;
+
+	EFX_WARN_ON_PARANOID(rx_buf->page_offset + rx_buf->len >
+			     PAGE_SIZE << efx->rx_buffer_order);
+}
+
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_SOCK)
+#if defined(CONFIG_XDP_SOCKETS)
+/**
+ * efx_init_rx_buffers_zc - create xsk_pool/umem->fq based RX buffers
+ * @rx_queue: Efx RX queue
+ *
+ * This allocates buffers from xsk_pool/umem->fq using memory model
+ * alloc calls for zero-copy RX, and populates a struct efx_rx_buffer
+ * for each one.
+ *
+ * Return: a negative error code or 0 on success.
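+ *
+ * The fill-level guard in the body can be read as a worked inequality:
+ * the ring holds ptr_mask + 1 entries, and buffers are only added while
+ * added_count - removed_count < ptr_mask, so at least one slot always
+ * stays free; unsigned arithmetic keeps the difference valid across
+ * counter wrap.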
+ */ +static int efx_init_rx_buffers_zc(struct efx_rx_queue *rx_queue) +{ + u16 flags = 0; + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XSK_POOL) + if (unlikely(!rx_queue->xsk_pool)) +#else + if (unlikely(!rx_queue->umem)) +#endif + return -EINVAL; + if ((rx_queue->added_count - rx_queue->removed_count) < + rx_queue->ptr_mask) { + flags = EFX_RX_BUF_ZC; + return efx_init_rx_buffer_zc(rx_queue, flags); + } + + return 0; +} +#endif /* CONFIG_XDP_SOCKETS */ +#endif + +/** + * efx_init_rx_buffers_nzc - create EFX_RX_BATCH page-based RX buffers + * + * @rx_queue: Efx RX queue + * @atomic: Perform atomic allocations + * + * This allocates a batch of pages, maps them for DMA, and populates struct + * efx_rx_buffers for each one. If a single page can be used for multiple + * buffers, then the page will either be inserted fully, or not at all. + * + * Return: a negative error code or 0 on success. + */ +static int efx_init_rx_buffers_nzc(struct efx_rx_queue *rx_queue, bool atomic) +{ + struct efx_nic *efx = rx_queue->efx; + struct page *page; + struct efx_rx_page_state *state; + dma_addr_t dma_addr; + unsigned int count; + unsigned int i; + unsigned int page_offset; + u16 flags; + + count = 0; + do { + page_offset = 0; + flags = 0; + + page = efx_reuse_page(rx_queue); + + if (!page) { + /* GFP_ATOMIC may fail because of various reasons, + * and we re-schedule rx_fill from non-atomic + * context in such a case. So, use __GFP_NO_WARN + * in case of atomic. + */ +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_ALLOC_PAGES_NODE) + struct efx_channel *channel; + + channel = efx_rx_queue_channel(rx_queue); + page = alloc_pages_node(channel->irq_mem_node, + __GFP_COMP | +#else + page = alloc_pages(__GFP_COMP | +#endif + (atomic ? + (GFP_ATOMIC | __GFP_NOWARN) + : GFP_KERNEL), + efx->rx_buffer_order); + if (unlikely(!page)) + return -ENOMEM; + dma_addr = + dma_map_page(&efx->pci_dev->dev, page, 0, + PAGE_SIZE << efx->rx_buffer_order, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(&efx->pci_dev->dev, + dma_addr))) { + __free_pages(page, efx->rx_buffer_order); + return -EIO; + } + state = page_address(page); + state->dma_addr = dma_addr; +#if !defined(EFX_NOT_UPSTREAM) || defined(EFX_RX_PAGE_SHARE) + } else if (rx_queue->page_ring) { + flags |= EFX_RX_PAGE_IN_RECYCLE_RING; +#endif + } + + i = 0; + do { + if (i == efx->rx_bufs_per_page - 1) + flags |= EFX_RX_BUF_LAST_IN_PAGE; + efx_init_rx_buffer(rx_queue, page, page_offset, flags); + page_offset += efx->rx_page_buf_step; + } while (++i < efx->rx_bufs_per_page); +#if !defined(EFX_NOT_UPSTREAM) || defined(EFX_RX_PAGE_SHARE) + /* We hold the only reference so just set to required count */ + page_ref_add(page, efx->rx_bufs_per_page); +#endif + + } while (++count < efx->rx_pages_per_batch); + + return 0; +} + +static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic) +{ +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_SOCK) +#if defined(CONFIG_XDP_SOCKETS) + struct efx_channel *channel = efx_get_rx_queue_channel(rx_queue); + + if (channel->zc) + return efx_init_rx_buffers_zc(rx_queue); +#endif +#endif + return efx_init_rx_buffers_nzc(rx_queue, atomic); +} + +void efx_rx_config_page_split(struct efx_nic *efx) +{ + efx->rx_page_buf_step = efx_rx_buffer_step(efx); +#if !defined(EFX_NOT_UPSTREAM) || defined(EFX_RX_PAGE_SHARE) + efx->rx_bufs_per_page = efx->rx_buffer_order ? 
1 : + ((PAGE_SIZE - sizeof(struct efx_rx_page_state)) / + efx->rx_page_buf_step); +#else + efx->rx_bufs_per_page = 1; +#endif + efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) / + efx->rx_bufs_per_page; + efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH, + efx->rx_bufs_per_page); +} + +/** + * efx_fast_push_rx_descriptors - push new RX descriptors quickly + * @rx_queue: RX descriptor queue + * @atomic: Perform atomic allocations + * + * This will aim to fill the RX descriptor queue up to + * @rx_queue->@max_fill. If there is insufficient atomic + * memory to do so, a slow fill will be scheduled. + * + * The caller must provide serialisation (none is used here). In practise, + * this means this function must run from the NAPI handler, or be called + * when NAPI is disabled. + */ +void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic) +{ + struct efx_nic *efx = rx_queue->efx; + unsigned int fill_level, batch_size; + int space, rc = 0; + + if (!rx_queue->refill_enabled) + return; + + /* Calculate current fill level, and exit if we don't need to fill */ + fill_level = (rx_queue->added_count - rx_queue->removed_count); + EFX_WARN_ON_ONCE_PARANOID(fill_level > rx_queue->ptr_mask + 1); + if (fill_level >= rx_queue->fast_fill_trigger) + goto out; + + /* Record minimum fill level */ + if (unlikely(fill_level < rx_queue->min_fill)) { + if (fill_level) + rx_queue->min_fill = fill_level; + } + + batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page; + space = rx_queue->max_fill - fill_level; + EFX_WARN_ON_ONCE_PARANOID(space < batch_size); + + netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev, + "RX queue %d fast-filling descriptor ring from level %d to level %d\n", + efx_rx_queue_index(rx_queue), fill_level, + rx_queue->max_fill); + + while (space >= batch_size) { + rc = efx_init_rx_buffers(rx_queue, atomic); + if (unlikely(rc)) { + /* Ensure that we don't leave the rx queue empty */ + efx_schedule_slow_fill(rx_queue); + goto out; + } + space -= batch_size; + } + + netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev, + "RX queue %d fast-filled descriptor ring to level %d\n", + efx_rx_queue_index(rx_queue), + rx_queue->added_count - rx_queue->removed_count); + +out: + if (rx_queue->notified_count != rx_queue->added_count) + efx_nic_notify_rx_desc(rx_queue); +} + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_USE_GRO) + +/* Pass a received packet up through GRO. GRO can handle pages + * regardless of checksum state and skbs with a good checksum. + */ +void +efx_rx_packet_gro(struct efx_rx_queue *rx_queue, struct efx_rx_buffer *rx_buf, + unsigned int n_frags, u8 *eh, __wsum csum) +{ + struct efx_channel *channel = efx_rx_queue_channel(rx_queue); +#if IS_ENABLED(CONFIG_VLAN_8021Q) || defined(CONFIG_SFC_TRACING) + struct efx_rx_buffer *head_buf = rx_buf; +#endif + struct napi_struct *napi = &channel->napi_str; + struct efx_nic *efx = channel->efx; + struct sk_buff *skb; + + skb = napi_get_frags(napi); + if (unlikely(!skb)) { + efx_free_rx_buffers(rx_queue, rx_buf, n_frags); + return; + } + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_RXHASH_SUPPORT) + if (efx->net_dev->features & NETIF_F_RXHASH && + efx_rx_buf_hash_valid(efx, eh)) + skb_set_hash(skb, efx_rx_buf_hash(efx, eh), PKT_HASH_TYPE_L4); +#endif + if (csum) { + skb->csum = csum; + skb->ip_summed = CHECKSUM_COMPLETE; + } else { + skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ? 
+ CHECKSUM_UNNECESSARY : CHECKSUM_NONE); + } +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_CSUM_LEVEL) + skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL); +#endif + + for (;;) { + skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, + rx_buf->page, rx_buf->page_offset, + rx_buf->len); + rx_buf->page = NULL; + skb->len += rx_buf->len; + if (skb_shinfo(skb)->nr_frags == n_frags) + break; + + rx_buf = efx_rx_buf_next(rx_queue, rx_buf); + } + + skb->data_len = skb->len; + skb->truesize += n_frags * efx->rx_buffer_truesize; + + skb_record_rx_queue(skb, rx_queue->core_index); + + skb_mark_napi_id(skb, napi); + + efx_rx_skb_attach_timestamp(channel, skb, + eh - efx->type->rx_prefix_size); +#if IS_ENABLED(CONFIG_VLAN_8021Q) +#if !defined(EFX_USE_KCOMPAT) || !defined(EFX_HAVE_VLAN_RX_PATH) + if (head_buf->flags & EFX_RX_BUF_VLAN_XTAG) + __vlan_hwaccel_put_tag(napi->skb, htons(ETH_P_8021Q), + head_buf->vlan_tci); +#endif +#endif + +#ifdef CONFIG_SFC_TRACING + trace_sfc_receive(skb, true, head_buf->flags & EFX_RX_BUF_VLAN_XTAG, + head_buf->vlan_tci); +#endif +#if IS_ENABLED(CONFIG_VLAN_8021Q) +#if defined(EFX_USE_KCOMPAT) && defined(EFX_HAVE_VLAN_RX_PATH) + if (head_buf->flags & EFX_RX_BUF_VLAN_XTAG) + vlan_gro_frags(napi, efx->vlan_group, + head_buf->vlan_tci); + else + /* fall through */ +#endif +#endif + napi_gro_frags(napi); +} + +#endif /* EFX_USE_GRO */ + +/* RSS contexts. We're using linked lists and crappy O(n) algorithms, because + * (a) this is an infrequent control-plane operation and (b) n is small (max 64) + */ +struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx) +{ + struct list_head *head = &efx->rss_context.list; + struct efx_rss_context *ctx, *new; + u32 id = 1; /* Don't use zero, that refers to the master RSS context */ + + WARN_ON(!mutex_is_locked(&efx->rss_lock)); + + /* Search for first gap in the numbering */ + list_for_each_entry(ctx, head, list) { + if (ctx->user_id != id) + break; + id++; + /* Check for wrap. If this happens, we have nearly 2^32 + * allocated RSS contexts, which seems unlikely. + */ + if (WARN_ON_ONCE(!id)) + return NULL; + } + + /* Create the new entry */ + new = kmalloc(sizeof(struct efx_rss_context), GFP_KERNEL); + if (!new) + return NULL; + new->context_id = EFX_MCDI_RSS_CONTEXT_INVALID; + new->flags = RSS_CONTEXT_FLAGS_DEFAULT; +#ifdef EFX_NOT_UPSTREAM + new->num_queues = 0; +#endif + + /* Insert the new entry into the gap */ + new->user_id = id; + list_add_tail(&new->list, &ctx->list); + return new; +} + +struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id) +{ + struct list_head *head = &efx->rss_context.list; + struct efx_rss_context *ctx; + + WARN_ON(!mutex_is_locked(&efx->rss_lock)); + + list_for_each_entry(ctx, head, list) + if (ctx->user_id == id) + return ctx; + return NULL; +} + +void efx_free_rss_context_entry(struct efx_rss_context *ctx) +{ + list_del(&ctx->list); + kfree(ctx); +} + +void efx_set_default_rx_indir_table(struct efx_nic *efx, + struct efx_rss_context *ctx) +{ + size_t i; + + for (i = 0; i < ARRAY_SIZE(ctx->rx_indir_table); i++) + ctx->rx_indir_table[i] = + ethtool_rxfh_indir_default(i, efx->rss_spread); +} + +/** + * efx_filter_is_mc_recipient - test whether spec is a multicast recipient + * @spec: Specification to test + * + * Return: %true if the specification is a non-drop RX filter that + * matches a local MAC address I/G bit value of 1 or matches a local + * IPv4 or IPv6 address value in the respective multicast address + * range, or is IPv4 broadcast. 
Otherwise %false. + */ +bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec) +{ + if (!(spec->flags & EFX_FILTER_FLAG_RX) || + spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP) + return false; + + if (spec->match_flags & + (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG) && + is_multicast_ether_addr(spec->loc_mac)) + return true; + + if ((spec->match_flags & + (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) == + (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) { + if (spec->ether_type == htons(ETH_P_IP) && + (ipv4_is_multicast(spec->loc_host[0]) || + ipv4_is_lbcast(spec->loc_host[0]))) + return true; + if (spec->ether_type == htons(ETH_P_IPV6) && + ((const u8 *)spec->loc_host)[0] == 0xff) + return true; + } + + return false; +} + +bool efx_filter_spec_equal(const struct efx_filter_spec *left, + const struct efx_filter_spec *right) +{ + if ((left->match_flags ^ right->match_flags) | + ((left->flags ^ right->flags) & + (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX))) + return false; + + return memcmp(&left->match_key, &right->match_key, + sizeof(left->match_key)) == 0; +} + +u32 efx_filter_spec_hash(const struct efx_filter_spec *spec) +{ + BUILD_BUG_ON(offsetof(struct efx_filter_spec, match_key) & 3); + return jhash2((const u32 *)&spec->match_key, + sizeof(spec->match_key) / 4, + 0); +} + +#ifdef CONFIG_RFS_ACCEL +bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx, + bool *force) +{ + if (rule->filter_id == EFX_ARFS_FILTER_ID_PENDING) { + /* ARFS is currently updating this entry, leave it */ + return false; + } + if (rule->filter_id == EFX_ARFS_FILTER_ID_ERROR) { + /* ARFS tried and failed to update this, so it's probably out + * of date. Remove the filter and the ARFS rule entry. + */ + rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING; + *force = true; + return true; + } else if (WARN_ON(rule->filter_id != filter_idx)) { /* can't happen */ + /* ARFS has moved on, so old filter is not needed. Since we did + * not mark the rule with EFX_ARFS_FILTER_ID_REMOVING, it will + * not be removed by efx_rps_hash_del() subsequently. + */ + *force = true; + return true; + } + /* Remove it iff ARFS wants to. 
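+	 *
+	 * A minimal sketch of the expected caller pattern (hypothetical,
+	 * modelled on an expiry scan):
+	 *
+	 *	bool force = false;
+	 *
+	 *	if (efx_rps_check_rule(rule, filter_idx, &force))
+	 *		efx->type->filter_rfs_expire_one(efx, flow_id,
+	 *						 filter_idx);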
*/ + return true; +} + +static +struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx, + const struct efx_filter_spec *spec) +{ + u32 hash = efx_filter_spec_hash(spec); + + WARN_ON(!spin_is_locked(&efx->rps_hash_lock)); + if (!efx->rps_hash_table) + return NULL; + return &efx->rps_hash_table[hash % EFX_ARFS_HASH_TABLE_SIZE]; +} + +struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx, + const struct efx_filter_spec *spec) +{ + struct efx_arfs_rule *rule; + struct hlist_head *head; + struct hlist_node *node; + + head = efx_rps_hash_bucket(efx, spec); + if (!head) + return NULL; + hlist_for_each(node, head) { + rule = container_of(node, struct efx_arfs_rule, node); + if (efx_filter_spec_equal(spec, &rule->spec)) + return rule; + } + return NULL; +} + +struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx, + const struct efx_filter_spec *spec, + bool *new) +{ + struct efx_arfs_rule *rule; + struct hlist_head *head; + struct hlist_node *node; + + head = efx_rps_hash_bucket(efx, spec); + if (!head) + return NULL; + hlist_for_each(node, head) { + rule = container_of(node, struct efx_arfs_rule, node); + if (efx_filter_spec_equal(spec, &rule->spec)) { + *new = false; + return rule; + } + } + rule = kmalloc(sizeof(*rule), GFP_ATOMIC); + *new = true; + if (rule) { + memcpy(&rule->spec, spec, sizeof(rule->spec)); + hlist_add_head(&rule->node, head); + } + return rule; +} + +void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec) +{ + struct efx_arfs_rule *rule; + struct hlist_head *head; + struct hlist_node *node; + + head = efx_rps_hash_bucket(efx, spec); + if (WARN_ON(!head)) + return; + hlist_for_each(node, head) { + rule = container_of(node, struct efx_arfs_rule, node); + if (efx_filter_spec_equal(spec, &rule->spec)) { + /* Someone already reused the entry. We know that if + * this check doesn't fire (i.e. filter_id == REMOVING) + * then the REMOVING mark was put there by our caller, + * because caller is holding a lock on filter table and + * only holders of that lock set REMOVING. + */ + if (rule->filter_id != EFX_ARFS_FILTER_ID_REMOVING) + return; + hlist_del(node); + kfree(rule); + return; + } + } + /* We didn't find it. */ + WARN_ON(1); +} +#endif + +/* We're using linked lists and crappy O(n) algorithms, because + * this is an infrequent control-plane operation, n is likely to be + * small, and it gives good memory efficiency in the likely event n is + * very small. 
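+ *
+ * As a worked example of the first-gap ID allocation in
+ * efx_filter_ntuple_insert() below: if rules with user IDs 0, 1 and 3
+ * already exist, the walk matches IDs 0 and 1, stops at the rule with
+ * ID 3, and the new rule is inserted before it with ID 2.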
+ */ +static struct efx_ntuple_rule *efx_find_ntuple_rule(struct efx_nic *efx, u32 id) +{ + struct list_head *head = &efx->ntuple_list; + struct efx_ntuple_rule *rule; + + list_for_each_entry(rule, head, list) + if (rule->user_id == id) + return rule; + return NULL; +} + +size_t efx_filter_count_ntuple(struct efx_nic *efx) +{ + struct list_head *head = &efx->ntuple_list, *l; + size_t n = 0; + + list_for_each(l, head) + ++n; + + return n; +} + +void efx_filter_get_ntuple_ids(struct efx_nic *efx, u32 *buf, u32 size) +{ + struct list_head *head = &efx->ntuple_list; + struct efx_ntuple_rule *rule; + size_t n = 0; + + list_for_each_entry(rule, head, list) + if (n < size) + buf[n++] = rule->user_id; +} + +int efx_filter_ntuple_get(struct efx_nic *efx, u32 id, + struct efx_filter_spec *spec) +{ + struct efx_ntuple_rule *rule = efx_find_ntuple_rule(efx, id); + + if (!rule) + return -ENOENT; + + *spec = rule->spec; + + return 0; +} + +int efx_filter_ntuple_insert(struct efx_nic *efx, struct efx_filter_spec *spec) +{ + struct efx_ntuple_rule *gap = NULL, *rule, *new; + struct list_head *head = &efx->ntuple_list; + u32 id = 0; + + /* Search for first gap in the numbering */ + list_for_each_entry(rule, head, list) { + /* is this rule here already? */ + if (efx_filter_spec_equal(&rule->spec, spec)) + return rule->user_id; + + /* if we haven't found a gap yet, see if there's one here */ + if (!gap) { + if (rule->user_id != id) { + gap = rule; + continue; + } + + id++; + } + } + + if (id >= efx_filter_get_rx_id_limit(efx)) + return -ENOSPC; + + /* Create the new entry */ + new = kmalloc(sizeof(*new), GFP_KERNEL); + if (!new) + return -ENOMEM; + + if (efx_net_allocated(efx->state)) { + int rc = efx->type->filter_insert(efx, spec, false); + + if (rc < 0) { + kfree(new); + return rc; + } + + new->filter_id = rc; + } + + new->spec = *spec; + new->user_id = id; + + /* Insert the new entry into the gap (or tail if none) */ + list_add_tail(&new->list, gap ? 
&gap->list : head); + + return id; +} + +int efx_filter_ntuple_remove(struct efx_nic *efx, u32 id) +{ + struct efx_ntuple_rule *rule = efx_find_ntuple_rule(efx, id); + + if (!rule) + return -ENOENT; + + if (efx_net_allocated(efx->state)) + efx->type->filter_remove_safe(efx, EFX_FILTER_PRI_MANUAL, + rule->filter_id); + + list_del(&rule->list); + kfree(rule); + + return 0; +} + +void efx_filter_clear_ntuple(struct efx_nic *efx) +{ + struct list_head *head = &efx->ntuple_list; + /* this only clears the structure, it doesn't remove filters, + * so should only be done when the interface is down + */ + WARN_ON(efx_net_allocated(efx->state)); + + while (!list_empty(head)) { + struct efx_ntuple_rule *rule = + list_first_entry(head, struct efx_ntuple_rule, list); + + efx_filter_ntuple_remove(efx, rule->user_id); + } +} + +static void efx_filter_init_ntuple(struct efx_nic *efx) +{ + struct list_head *head = &efx->ntuple_list; + struct efx_ntuple_rule *rule, *tmp; + + list_for_each_entry_safe(rule, tmp, head, list) { + int rc = efx->type->filter_insert(efx, &rule->spec, false); + + if (rc >= 0) { + rule->filter_id = rc; + } else { + /* if we couldn't insert, delete the entry */ + netif_err(efx, drv, efx->net_dev, + "error inserting ntuple filter ID %u\n", + rule->user_id); + list_del(&rule->list); + kfree(rule); + } + } +} + +int efx_init_filters(struct efx_nic *efx) +{ + int rc = 0; + + mutex_lock(&efx->mac_lock); + down_write(&efx->filter_sem); + if (efx->type->filter_table_probe) + rc = efx->type->filter_table_probe(efx); + if (rc) + goto out_unlock; + +#ifdef CONFIG_RFS_ACCEL + if (efx->net_dev->features & NETIF_F_NTUPLE) { + struct efx_channel *channel; + int i, success = 1; + + efx_for_each_channel(channel, efx) { + channel->rps_flow_id = + kcalloc(efx->type->max_rx_ip_filters, + sizeof(*channel->rps_flow_id), + GFP_KERNEL); + if (!channel->rps_flow_id) + success = 0; + else + for (i = 0; + i < efx->type->max_rx_ip_filters; + ++i) + channel->rps_flow_id[i] = + RPS_FLOW_ID_INVALID; + channel->rfs_expire_index = 0; + channel->rfs_filter_count = 0; + } + + if (!success) { + efx_for_each_channel(channel, efx) { + kfree(channel->rps_flow_id); + channel->rps_flow_id = NULL; + } + if (efx->type->filter_table_remove) + efx->type->filter_table_remove(efx); + rc = -ENOMEM; + goto out_unlock; + } + } +#endif + +out_unlock: + up_write(&efx->filter_sem); + mutex_unlock(&efx->mac_lock); + + if (!rc) + efx_filter_init_ntuple(efx); + + return rc; +} + +void efx_fini_filters(struct efx_nic *efx) +{ +#ifdef CONFIG_RFS_ACCEL + struct efx_channel *channel; + + efx_for_each_channel(channel, efx) { + cancel_delayed_work_sync(&channel->filter_work); + kfree(channel->rps_flow_id); + channel->rps_flow_id = NULL; + } +#endif + if (efx->type->filter_table_remove) { + down_write(&efx->filter_sem); + efx->type->filter_table_remove(efx); + up_write(&efx->filter_sem); + } +} + +#ifdef CONFIG_RFS_ACCEL + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_NEW_FLOW_KEYS) +static int efx_rfs_filter_spec(struct efx_nic *efx, const struct sk_buff *skb, + struct efx_filter_spec *spec) +{ + struct flow_keys fk; + + if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) + return -EPROTONOSUPPORT; + if (fk.basic.n_proto != htons(ETH_P_IP) && + fk.basic.n_proto != htons(ETH_P_IPV6)) + return -EPROTONOSUPPORT; +#if !defined(EFX_USE_KCOMPAT) || defined(FLOW_DIS_IS_FRAGMENT) + if (fk.control.flags & FLOW_DIS_IS_FRAGMENT) + return -EPROTONOSUPPORT; +#endif + + spec->match_flags = EFX_FILTER_MATCH_FLAGS_RFS; + spec->ether_type = fk.basic.n_proto; + 
spec->ip_proto = fk.basic.ip_proto; + + if (fk.basic.n_proto == htons(ETH_P_IP)) { + spec->rem_host[0] = fk.addrs.v4addrs.src; + spec->loc_host[0] = fk.addrs.v4addrs.dst; + } else { + memcpy(spec->rem_host, &fk.addrs.v6addrs.src, + sizeof(struct in6_addr)); + memcpy(spec->loc_host, &fk.addrs.v6addrs.dst, + sizeof(struct in6_addr)); + } + + spec->rem_port = fk.ports.src; + spec->loc_port = fk.ports.dst; + return 0; +} + +#else /* !EFX_HAVE_NEW_FLOW_KEYS */ + +/* The kernel flow dissector isn't up to the job, so use our own. */ +static int efx_rfs_filter_spec(struct efx_nic *efx, const struct sk_buff *skb, + struct efx_filter_spec *spec) +{ + /* 60 octets is the maximum length of an IPv4 header (all IPv6 headers + * are 40 octets), and we pull 4 more to get the port numbers + */ + #define EFX_RFS_HEADER_LENGTH (sizeof(struct vlan_hdr) + 60 + 4) + unsigned char header[EFX_RFS_HEADER_LENGTH]; + int headlen = min_t(int, EFX_RFS_HEADER_LENGTH, skb->len); + #undef EFX_RFS_HEADER_LENGTH + void *hptr; + const __be16 *ports; + __be16 ether_type; + int nhoff; + + hptr = skb_header_pointer(skb, 0, headlen, header); + if (!hptr) + return -EINVAL; + + if (skb->protocol == htons(ETH_P_8021Q)) { + const struct vlan_hdr *vh = hptr; + + /* We can't filter on the IP 5-tuple and the vlan + * together, so just strip the vlan header and filter + * on the IP part. + */ + if (headlen < sizeof(*vh)) + return -EINVAL; + ether_type = vh->h_vlan_encapsulated_proto; + nhoff = sizeof(struct vlan_hdr); + } else { + ether_type = skb->protocol; + nhoff = 0; + } + + if (ether_type != htons(ETH_P_IP) && ether_type != htons(ETH_P_IPV6)) + return -EPROTONOSUPPORT; + + spec->match_flags = EFX_FILTER_MATCH_FLAGS_RFS; + spec->ether_type = ether_type; + + if (ether_type == htons(ETH_P_IP)) { + const struct iphdr *ip = hptr + nhoff; + + if (headlen < nhoff + sizeof(*ip)) + return -EINVAL; + if (ip_is_fragment(ip)) + return -EPROTONOSUPPORT; + spec->ip_proto = ip->protocol; + spec->rem_host[0] = ip->saddr; + spec->loc_host[0] = ip->daddr; + if (headlen < nhoff + 4 * ip->ihl + 4) + return -EINVAL; + ports = (const __be16 *)(hptr + nhoff + 4 * ip->ihl); + } else { + const struct ipv6hdr *ip6 = (hptr + nhoff); + + if (headlen < nhoff + sizeof(*ip6) + 4) + return -EINVAL; + spec->ip_proto = ip6->nexthdr; + memcpy(spec->rem_host, &ip6->saddr, sizeof(ip6->saddr)); + memcpy(spec->loc_host, &ip6->daddr, sizeof(ip6->daddr)); + ports = (const __be16 *)(ip6 + 1); + } + + spec->rem_port = ports[0]; + spec->loc_port = ports[1]; + return 0; +} +#endif /* EFX_HAVE_NEW_FLOW_KEYS */ + +static void efx_filter_rfs_work(struct work_struct *data) +{ + struct efx_async_filter_insertion *req = + container_of(data, struct efx_async_filter_insertion, + work); + struct efx_nic *efx = efx_netdev_priv(req->net_dev); + struct efx_channel *channel = efx_get_channel(efx, req->rxq_index); + int slot_idx = req - efx->rps_slot; + struct efx_arfs_rule *rule; + u16 arfs_id = 0; + int rc; + + rc = efx->type->filter_insert(efx, &req->spec, true); + if (rc >= 0) + /* Discard 'priority' part of EF10+ filter ID (mcdi_filters) */ + rc %= efx->type->max_rx_ip_filters; + if (efx->rps_hash_table) { + spin_lock_bh(&efx->rps_hash_lock); + rule = efx_rps_hash_find(efx, &req->spec); + /* The rule might have already gone, if someone else's request + * for the same spec was already worked and then expired before + * we got around to our work. In that case we have nothing + * tying us to an arfs_id, meaning that as soon as the filter + * is considered for expiry it will be removed. 
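+		 *
+		 * One possible interleaving, for illustration:
+		 *  1. an earlier request for this spec inserted a filter
+		 *     and its hash-table rule;
+		 *  2. that filter expired and the rule was deleted;
+		 *  3. this work item then finds no rule, so its filter is
+		 *     left with arfs_id 0 and may be expired immediately.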
+ */ + if (rule) { + if (rc < 0) + rule->filter_id = EFX_ARFS_FILTER_ID_ERROR; + else + rule->filter_id = rc; + arfs_id = rule->arfs_id; + } + spin_unlock_bh(&efx->rps_hash_lock); + } + if (rc >= 0) { + /* Remember this so we can check whether to expire the filter + * later. + */ + mutex_lock(&efx->rps_mutex); + if (channel->rps_flow_id[rc] == RPS_FLOW_ID_INVALID) + channel->rfs_filter_count++; + channel->rps_flow_id[rc] = req->flow_id; + mutex_unlock(&efx->rps_mutex); + + if (req->spec.ether_type == htons(ETH_P_IP)) + netif_info(efx, rx_status, efx->net_dev, + "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d id %u]\n", + (req->spec.ip_proto == IPPROTO_TCP) ? + "TCP" : "UDP", + req->spec.rem_host, + ntohs(req->spec.rem_port), + req->spec.loc_host, + ntohs(req->spec.loc_port), + req->rxq_index, req->flow_id, rc, arfs_id); + else + netif_info(efx, rx_status, efx->net_dev, + "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d id %u]\n", + (req->spec.ip_proto == IPPROTO_TCP) ? + "TCP" : "UDP", + req->spec.rem_host, + ntohs(req->spec.rem_port), + req->spec.loc_host, + ntohs(req->spec.loc_port), + req->rxq_index, req->flow_id, rc, arfs_id); + channel->n_rfs_succeeded++; + } else { + if (req->spec.ether_type == htons(ETH_P_IP)) + netif_dbg(efx, rx_status, efx->net_dev, + "failed to steer %s %pI4:%u:%pI4:%u to queue %u [flow %u rc %d id %u]\n", + (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", + req->spec.rem_host, ntohs(req->spec.rem_port), + req->spec.loc_host, ntohs(req->spec.loc_port), + req->rxq_index, req->flow_id, rc, arfs_id); + else + netif_dbg(efx, rx_status, efx->net_dev, + "failed to steer %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u rc %d id %u]\n", + (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", + req->spec.rem_host, ntohs(req->spec.rem_port), + req->spec.loc_host, ntohs(req->spec.loc_port), + req->rxq_index, req->flow_id, rc, arfs_id); + channel->n_rfs_failed++; + /* We're overloading the NIC's filter tables, so let's do a + * chunk of extra expiry work. + */ + __efx_filter_rfs_expire(channel, min(channel->rfs_filter_count, + 100u));} + + /* Release references */ + clear_bit(slot_idx, &efx->rps_slot_map); + dev_put(req->net_dev); + + return; +} + +int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, + u16 rxq_index, u32 flow_id) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + struct efx_async_filter_insertion *req; + struct efx_arfs_rule *rule; + int slot_idx; + bool new; + int rc; + + if (flow_id == RPS_FLOW_ID_INVALID) + return -EINVAL; + + /* find a free slot */ + for (slot_idx = 0; slot_idx < EFX_RPS_MAX_IN_FLIGHT; slot_idx++) + if (!test_and_set_bit(slot_idx, &efx->rps_slot_map)) + break; + if (slot_idx >= EFX_RPS_MAX_IN_FLIGHT) + return -EBUSY; + + req = efx->rps_slot + slot_idx; + efx_filter_init_rx(&req->spec, EFX_FILTER_PRI_HINT, + efx->rx_scatter ? 
EFX_FILTER_FLAG_RX_SCATTER : 0, + rxq_index); + rc = efx_rfs_filter_spec(efx, skb, &req->spec); + if (rc < 0) + goto out_clear; + + if (efx->rps_hash_table) { + /* Add it to ARFS hash table */ + spin_lock(&efx->rps_hash_lock); + rule = efx_rps_hash_add(efx, &req->spec, &new); + if (!rule) { + rc = -ENOMEM; + goto out_unlock; + } + if (new) + rule->arfs_id = efx->rps_next_id++ % RPS_NO_FILTER; + rc = rule->arfs_id; + /* Skip if existing or pending filter already does the right + * thing + */ + if (!new && rule->rxq_index == rxq_index && + rule->filter_id >= EFX_ARFS_FILTER_ID_PENDING) + goto out_unlock; + rule->rxq_index = rxq_index; + rule->filter_id = EFX_ARFS_FILTER_ID_PENDING; + spin_unlock(&efx->rps_hash_lock); + } else { + /* Without an ARFS hash table, we just use arfs_id 0 for all + * filters. This means if multiple flows hash to the same + * flow_id, all but the most recently touched will be eligible + * for expiry. + */ + rc = 0; + } + + /* Queue the request */ + dev_hold(req->net_dev = net_dev); + INIT_WORK(&req->work, efx_filter_rfs_work); + req->rxq_index = rxq_index; + req->flow_id = flow_id; + schedule_work(&req->work); + return rc; +out_unlock: + spin_unlock(&efx->rps_hash_lock); +out_clear: + clear_bit(slot_idx, &efx->rps_slot_map); + return rc; +} + +bool __efx_filter_rfs_expire(struct efx_channel *channel, unsigned int quota) +{ + bool (*expire_one)(struct efx_nic *efx, u32 flow_id, + unsigned int index); + struct efx_nic *efx = channel->efx; + unsigned int index, size, start; + u32 flow_id; + + if (!mutex_trylock(&efx->rps_mutex)) + return false; + expire_one = efx->type->filter_rfs_expire_one; + index = channel->rfs_expire_index; + start = index; + size = efx->type->max_rx_ip_filters; + while (quota) { + flow_id = channel->rps_flow_id[index]; + + if (flow_id != RPS_FLOW_ID_INVALID) { + quota--; + if (expire_one(efx, flow_id, index)) { + netif_info(efx, rx_status, efx->net_dev, + "expired filter %d [queue %u flow %u]\n", + index, channel->channel, flow_id); + channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID; + channel->rfs_filter_count--; + } + } + if (++index == size) + index = 0; + /* If we were called with a quota that exceeds the total number + * of filters in the table (which shouldn't happen, but could + * if two callers race), ensure that we don't loop forever - + * stop when we've examined every row of the table. + */ + if (index == start) + break; + } + channel->rfs_expire_index = index; + + mutex_unlock(&efx->rps_mutex); + return true; +} + +#endif /* CONFIG_RFS_ACCEL */ + diff --git a/src/drivers/net/ethernet/sfc/rx_common.h b/src/drivers/net/ethernet/sfc/rx_common.h new file mode 100644 index 0000000..d151a62 --- /dev/null +++ b/src/drivers/net/ethernet/sfc/rx_common.h @@ -0,0 +1,147 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2019 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#ifndef EFX_RX_COMMON_H +#define EFX_RX_COMMON_H +#include "mcdi_pcol.h" +#include "mcdi.h" +#include "net_driver.h" + +/* Number of RX buffers to recycle pages for. When creating the RX page recycle + * ring, this number is divided by the number of buffers per page to calculate + * the number of pages to store in the RX page recycle ring. 
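+ * For example, if each page holds two RX buffers, a 10G ring stores
+ * EFX_RECYCLE_RING_SIZE_10G / 2 = 128 recycled pages.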
+ */ +#define EFX_RECYCLE_RING_SIZE_10G 256 + +/* vDPA queues starts from 2nd VI or qid 1 */ +#define EFX_VDPA_BASE_RX_QID 1 + +void efx_rx_config_page_split(struct efx_nic *efx); + +int efx_probe_rx_queue(struct efx_rx_queue *rx_queue); +int efx_init_rx_queue(struct efx_rx_queue *rx_queue); +void efx_fini_rx_queue(struct efx_rx_queue *rx_queue); +void efx_remove_rx_queue(struct efx_rx_queue *rx_queue); +void efx_destroy_rx_queue(struct efx_rx_queue *rx_queue); + +void efx_init_rx_buffer(struct efx_rx_queue *rx_queue, + struct page *page, + unsigned int page_offset, + u16 flags); + +void efx_unmap_rx_buffer(struct efx_nic *efx, struct efx_rx_buffer *rx_buf); + +static inline void efx_sync_rx_buffer(struct efx_nic *efx, + struct efx_rx_buffer *rx_buf, + unsigned int len) +{ +#if !defined(EFX_NOT_UPSTREAM) || defined(EFX_RX_PAGE_SHARE) + dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, len, + DMA_FROM_DEVICE); +#else + efx_unmap_rx_buffer(efx, rx_buf); +#endif +} + +void efx_free_rx_buffers(struct efx_rx_queue *rx_queue, + struct efx_rx_buffer *rx_buf, + unsigned int num_bufs); + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_SOCK) +void efx_recycle_rx_bufs_zc(struct efx_channel *channel, + struct efx_rx_buffer *rx_buf, + unsigned int n_frags); +#endif + +void efx_recycle_rx_pages(struct efx_channel *channel, + struct efx_rx_buffer *rx_buf, + unsigned int n_frags); + +void efx_discard_rx_packet(struct efx_channel *channel, + struct efx_rx_buffer *rx_buf, + unsigned int n_frags); + +static inline u8 *efx_rx_buf_va(struct efx_rx_buffer *buf) +{ +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_SOCK) +#if defined(CONFIG_XDP_SOCKETS) + if (buf->flags & EFX_RX_BUF_ZC) +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_USE_XSK_BUFFER_ALLOC) + return ((u8 *)buf->xsk_buf->data + buf->page_offset); +#else + return ((u8 *)buf->addr + buf->page_offset); +#endif +#endif /* CONFIG_XDP_SOCKETS */ +#endif + return page_address(buf->page) + buf->page_offset; +} + +static inline u32 efx_rx_buf_hash(struct efx_nic *efx, const u8 *eh) +{ +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) + return __le32_to_cpup((const __le32 *) + (eh + efx->rx_packet_hash_offset)); +#else + const u8 *data = eh + efx->rx_packet_hash_offset; + return (u32)data[0] | + (u32)data[1] << 8 | + (u32)data[2] << 16 | + (u32)data[3] << 24; +#endif +} + +void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic); + +void +efx_rx_packet_gro(struct efx_rx_queue *rx_queue, struct efx_rx_buffer *rx_buf, + unsigned int n_frags, u8 *eh, __wsum csum); + +bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec); +bool efx_filter_spec_equal(const struct efx_filter_spec *left, + const struct efx_filter_spec *right); +u32 efx_filter_spec_hash(const struct efx_filter_spec *spec); + +#ifdef CONFIG_RFS_ACCEL +bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx, + bool *force); + +struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx, + const struct efx_filter_spec *spec); + +/* @new is written to indicate if entry was newly added (true) or if an old + * entry was found and returned (false). 
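+ * Callers must hold efx->rps_hash_lock, as efx_filter_rfs() does.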
+ */ +struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx, + const struct efx_filter_spec *spec, + bool *new); + +void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec); +#endif + +void efx_filter_clear_ntuple(struct efx_nic *efx); +int efx_filter_ntuple_get(struct efx_nic *efx, u32 id, + struct efx_filter_spec *spec); +int efx_filter_ntuple_insert(struct efx_nic *efx, struct efx_filter_spec *spec); +int efx_filter_ntuple_remove(struct efx_nic *efx, u32 id); +size_t efx_filter_count_ntuple(struct efx_nic *efx); +void efx_filter_get_ntuple_ids(struct efx_nic *efx, u32 *buf, u32 size); + +int efx_init_filters(struct efx_nic *efx); +void efx_fini_filters(struct efx_nic *efx); + +static inline bool efx_tx_vi_spreading(struct efx_nic *efx) +{ + return efx->mcdi->fn_flags & + (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TX_ONLY_VI_SPREADING_ENABLED); +} +int efx_rx_queue_id_internal(struct efx_nic *efx, int rxq_id); + + +#endif + diff --git a/src/drivers/net/ethernet/sfc/selftest.c b/src/drivers/net/ethernet/sfc/selftest.c new file mode 100644 index 0000000..b4ec195 --- /dev/null +++ b/src/drivers/net/ethernet/sfc/selftest.c @@ -0,0 +1,930 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2006-2017 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "net_driver.h" +#include "efx.h" +#include "nic.h" +#include "efx_common.h" +#include "efx_channels.h" +#include "tx_common.h" +#include "selftest.h" +#include "workarounds.h" +#include "mcdi_port_common.h" + +/* IRQ latency can be enormous because: + * - All IRQs may be disabled on a CPU for a *long* time by e.g. a + * slow serial console or an old IDE driver doing error recovery + * - The PREEMPT_RT patches mostly deal with this, but also allow a + * tasklet or normal task to be given higher priority than our IRQ + * threads + * Try to avoid blaming the hardware for this. + */ +#define IRQ_TIMEOUT HZ + +/* + * Loopback test packet structure + * + * The self-test should stress every RSS vector. + */ +struct efx_loopback_payload { + struct ethhdr header; + struct iphdr ip; + struct udphdr udp; + __be16 iteration; + char msg[64]; +} __packed; + +/* Loopback test source MAC address */ +static const u8 payload_source[ETH_ALEN] __aligned(2) = { + 0x00, 0x0f, 0x53, 0x1b, 0x1b, 0x1b, +}; +/* Loopback test dest MAC address */ +static const u8 payload_dest[ETH_ALEN] __aligned(2) = { + 0x00, 0x0f, 0x53, 0x1c, 0x1c, 0x1c, +}; + +static const char payload_msg[] = + "Hello world! This is an Efx loopback test in progress!"; + +/** + * struct efx_loopback_state - Persistent state during a loopback selftest. 
+ * @flush: Drop all packets in efx_loopback_rx_packet + * @packet_count: Number of packets being used in this test + * @skbs: An array of skbs transmitted + * @offload_csum: Checksums are being offloaded + * @rx_good: RX good packet count + * @rx_bad: RX bad packet count + * @payload: Payload used in tests + */ +struct efx_loopback_state { + bool flush; + int packet_count; + struct sk_buff **skbs; + bool offload_csum; + atomic_t rx_good; + atomic_t rx_bad; + struct efx_loopback_payload payload; +}; + +/* How long to wait for all the packets to arrive (in ms) */ +#define LOOPBACK_TIMEOUT_MS 1000 + +/************************************************************************** + * + * MII, NVRAM and register tests + * + **************************************************************************/ + +static int efx_test_phy_alive(struct efx_nic *efx, struct efx_self_tests *tests) +{ + int rc = 0; + + rc = efx_mcdi_phy_test_alive(efx); + netif_dbg(efx, drv, efx->net_dev, "%s PHY liveness selftest\n", + rc ? "Failed" : "Passed"); + tests->phy_alive = rc ? -1 : 1; + + return rc; +} + +static int efx_test_nvram(struct efx_nic *efx, struct efx_self_tests *tests) +{ + int rc = 0; + + if (efx->type->test_nvram) { + rc = efx->type->test_nvram(efx); + if (rc == -EPERM) + rc = 0; + else + tests->nvram = rc ? -1 : 1; + } + + return rc; +} + +static void memtest_simple(unsigned int id, efx_qword_t *reg, int a, int b) +{ + EFX_POPULATE_QWORD_2(*reg, EFX_DWORD_0, a, EFX_DWORD_1, b); +} + +static void memtest_changing_bytes(unsigned int id, efx_qword_t *reg, + int a, int b) +{ + int i; + u8 *byte; + for (i = 0; i < sizeof(efx_qword_t); i++) { + unsigned int addr = id * sizeof(efx_qword_t) + i; + byte = (u8 *)reg + i; + if ((addr >> 8) == 0) + *byte = (u8) (addr % 257); + else + *byte = (u8) ~((addr >> 8) % 257); + + if (addr & 0x40) + *byte = ~*byte; + } +} + +static void memtest_bit_sweep(unsigned int id, efx_qword_t *reg, + int a, int b) +{ + int bit = id % 64; + int xor = (id & 64) ? ~0 : 0; + + EFX_POPULATE_QWORD_2(*reg, + EFX_DWORD_0, (bit < 32 ? bit : 0) ^ xor, + EFX_DWORD_1, (bit >= 32 ? bit - 32 : 0) ^ xor); +} + +struct memtest { + void (*pattern)(unsigned int id, efx_qword_t *reg, int a, int b); + int a; + int b; +}; + +static struct memtest memtests[] = { + {memtest_simple, 0x55AA55AA, 0x55AA55AA}, + {memtest_simple, 0xAA55AA55, 0xAA55AA55}, + {memtest_changing_bytes, 0, 0}, + {memtest_bit_sweep, 0, 0}, +}; + +int efx_test_memory(struct efx_nic *efx) +{ + int rc, i; + + /* Test SRAM and register tables */ + for (i = 0; i < ARRAY_SIZE(memtests); ++i) { + rc = efx->type->test_memory(efx, memtests[i].pattern, + memtests[i].a, memtests[i].b); + if (rc) + break; + } + return rc; +} + +/************************************************************************** + * + * Interrupt and event queue testing + * + **************************************************************************/ + +/* Test generation and receipt of interrupts */ +static int efx_test_interrupts(struct efx_nic *efx, + struct efx_self_tests *tests) +{ + unsigned long timeout, wait; + int cpu; + int rc; + + netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n"); + tests->interrupt = -1; + + rc = efx_nic_irq_test_start(efx); + if (rc == -EOPNOTSUPP) { + netif_dbg(efx, drv, efx->net_dev, + "direct interrupt testing not supported\n"); + tests->interrupt = 0; + return 0; + } + + timeout = jiffies + IRQ_TIMEOUT; + wait = 1; + + /* Wait for arrival of test interrupt. 
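The wait interval doubles on each pass (1, 2, 4, ... jiffies), so a prompt interrupt is seen quickly while a slow one still gets the full IRQ_TIMEOUT.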
*/ + netif_dbg(efx, drv, efx->net_dev, "waiting for test interrupt\n"); + do { + schedule_timeout_uninterruptible(wait); + cpu = efx_nic_irq_test_irq_cpu(efx); + if (cpu >= 0) + goto success; + wait *= 2; + } while (time_before(jiffies, timeout)); + + netif_err(efx, drv, efx->net_dev, "timed out waiting for interrupt\n"); + return -ETIMEDOUT; + + success: + netif_dbg(efx, drv, efx->net_dev, "%s test interrupt seen on CPU%d\n", + INT_MODE(efx), cpu); + tests->interrupt = 1; + return 0; +} + +/* Test generation and receipt of interrupting events */ +static int efx_test_eventq_irq(struct efx_nic *efx, + struct efx_self_tests *tests) +{ + unsigned long *napi_ran, *dma_pend, *int_pend; + int dma_pending_count, int_pending_count; + unsigned long timeout, wait; + struct efx_channel *channel; + unsigned int *read_ptr; + int bitmap_size; + int rc; + + read_ptr = kcalloc(efx_channels(efx), sizeof(*read_ptr), GFP_KERNEL); + + bitmap_size = DIV_ROUND_UP(efx_channels(efx), BITS_PER_LONG); + + napi_ran = kcalloc(bitmap_size, sizeof(unsigned long), GFP_KERNEL); + dma_pend = kcalloc(bitmap_size, sizeof(unsigned long), GFP_KERNEL); + int_pend = kcalloc(bitmap_size, sizeof(unsigned long), GFP_KERNEL); + + if (!read_ptr || !napi_ran || !dma_pend || !int_pend) { + rc = -ENOMEM; + goto out_free; + } + + dma_pending_count = 0; + int_pending_count = 0; + + efx_for_each_channel(channel, efx) { + read_ptr[channel->channel] = channel->eventq_read_ptr; + set_bit(channel->channel, dma_pend); + set_bit(channel->channel, int_pend); + efx_nic_event_test_start(channel); + dma_pending_count++; + int_pending_count++; + } + + timeout = jiffies + IRQ_TIMEOUT; + wait = 1; + + /* Wait for arrival of interrupts. NAPI processing may or may + * not complete in time, but we can cope in any case. + */ + do { + schedule_timeout_uninterruptible(wait); + + efx_for_each_channel(channel, efx) { + efx_stop_eventq(channel); + if (channel->eventq_read_ptr != + read_ptr[channel->channel]) { + set_bit(channel->channel, napi_ran); + clear_bit(channel->channel, dma_pend); + clear_bit(channel->channel, int_pend); + dma_pending_count--; + int_pending_count--; + } else { + if (efx_nic_event_present(channel)) { + clear_bit(channel->channel, dma_pend); + dma_pending_count--; + } + if (efx_nic_event_test_irq_cpu(channel) >= 0) { + clear_bit(channel->channel, int_pend); + int_pending_count--; + } + } + efx_start_eventq(channel); + } + + wait *= 2; + } while ((dma_pending_count || int_pending_count) && + time_before(jiffies, timeout)); + + efx_for_each_channel(channel, efx) { + bool dma_seen = !test_bit(channel->channel, dma_pend); + bool int_seen = !test_bit(channel->channel, int_pend); + + tests->eventq_dma[channel->channel] = dma_seen ? 1 : -1; + tests->eventq_int[channel->channel] = int_seen ? 1 : -1; + + if (dma_seen && int_seen) { + netif_dbg(efx, drv, efx->net_dev, + "channel %d event queue passed (with%s NAPI)\n", + channel->channel, + test_bit(channel->channel, napi_ran) ? + "" : "out"); + } else { + /* Report failure and whether either interrupt or DMA + * worked + */ + netif_err(efx, drv, efx->net_dev, + "channel %d timed out waiting for event queue\n", + channel->channel); + if (int_seen) + netif_err(efx, drv, efx->net_dev, + "channel %d saw interrupt " + "during event queue test\n", + channel->channel); + if (dma_seen) + netif_err(efx, drv, efx->net_dev, + "channel %d event was generated, but " + "failed to trigger an interrupt\n", + channel->channel); + } + } + + rc = (dma_pending_count || int_pending_count) ? 
-ETIMEDOUT : 0; + +out_free: + kfree(int_pend); + kfree(dma_pend); + kfree(napi_ran); + kfree(read_ptr); + + return rc; +} + +static int efx_test_phy(struct efx_nic *efx, struct efx_self_tests *tests, + unsigned int flags) +{ + int rc; + + mutex_lock(&efx->mac_lock); + rc = efx_mcdi_phy_run_tests(efx, tests->phy_ext, flags); + mutex_unlock(&efx->mac_lock); + if (rc == -EPERM) + rc = 0; + else + netif_info(efx, drv, efx->net_dev, + "%s phy selftest\n", rc ? "Failed" : "Passed"); + return rc; +} + +/************************************************************************** + * + * Loopback testing + * NB Only one loopback test can be executing concurrently. + * + **************************************************************************/ + +/* Loopback test RX callback + * This is called for each received packet during loopback testing. + */ +void efx_loopback_rx_packet(struct efx_nic *efx, + const char *buf_ptr, int pkt_len) +{ + struct efx_loopback_state *state = efx->loopback_selftest; + struct efx_loopback_payload *received; + struct efx_loopback_payload *payload; + + BUG_ON(!buf_ptr); + + /* If we are just flushing, then drop the packet */ + if ((state == NULL) || state->flush) + return; + + payload = &state->payload; + + received = (struct efx_loopback_payload *) buf_ptr; + received->ip.saddr = payload->ip.saddr; + if (state->offload_csum) + received->ip.check = payload->ip.check; + + /* Check that header exists */ + if (pkt_len < sizeof(received->header)) { + netif_err(efx, drv, efx->net_dev, + "saw runt RX packet (length %d) in %s loopback " + "test\n", pkt_len, LOOPBACK_MODE(efx)); + goto err; + } + + /* Check that the ethernet header exists */ + if (memcmp(&received->header, &payload->header, ETH_HLEN) != 0) { + netif_err(efx, drv, efx->net_dev, + "saw non-loopback RX packet in %s loopback test\n", + LOOPBACK_MODE(efx)); + goto err; + } + + /* Check packet length */ + if (pkt_len != sizeof(*payload)) { + netif_err(efx, drv, efx->net_dev, + "saw incorrect RX packet length %d (wanted %d) in " + "%s loopback test\n", pkt_len, (int)sizeof(*payload), + LOOPBACK_MODE(efx)); + goto err; + } + + /* Check that IP header matches */ + if (memcmp(&received->ip, &payload->ip, sizeof(payload->ip)) != 0) { + netif_err(efx, drv, efx->net_dev, + "saw corrupted IP header in %s loopback test\n", + LOOPBACK_MODE(efx)); + goto err; + } + + /* Check that msg and padding matches */ + if (memcmp(&received->msg, &payload->msg, sizeof(received->msg)) != 0) { + netif_err(efx, drv, efx->net_dev, + "saw corrupted RX packet in %s loopback test\n", + LOOPBACK_MODE(efx)); + goto err; + } + + /* Check that iteration matches */ + if (received->iteration != payload->iteration) { + netif_err(efx, drv, efx->net_dev, + "saw RX packet from iteration %d (wanted %d) in " + "%s loopback test\n", ntohs(received->iteration), + ntohs(payload->iteration), LOOPBACK_MODE(efx)); + goto err; + } + + /* Increase correct RX count */ + netif_vdbg(efx, drv, efx->net_dev, + "got loopback RX in %s loopback test\n", LOOPBACK_MODE(efx)); + + atomic_inc(&state->rx_good); + return; + + err: +#ifdef DEBUG + if (atomic_read(&state->rx_bad) == 0) { + netif_err(efx, drv, efx->net_dev, "received packet:\n"); + print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1, + buf_ptr, pkt_len, 0); + netif_err(efx, drv, efx->net_dev, "expected packet:\n"); + print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1, + &state->payload, sizeof(state->payload), 0); + } +#endif + atomic_inc(&state->rx_bad); +} + +/* Initialise an efx_selftest_state for a new 
iteration */ +static void efx_iterate_state(struct efx_nic *efx) +{ + struct efx_loopback_state *state = efx->loopback_selftest; + struct efx_loopback_payload *payload = &state->payload; + + /* Initialise the layerII header */ + ether_addr_copy((u8 *)&payload->header.h_dest, payload_dest); + ether_addr_copy((u8 *)&payload->header.h_source, payload_source); + payload->header.h_proto = htons(ETH_P_IP); + + /* saddr set later and used as incrementing count */ + payload->ip.daddr = htonl(INADDR_LOOPBACK); + payload->ip.ihl = 5; + payload->ip.check = (__force __sum16) htons(0xdead); + payload->ip.tot_len = htons(sizeof(*payload) - sizeof(struct ethhdr)); + payload->ip.version = IPVERSION; + payload->ip.protocol = IPPROTO_UDP; + + /* Initialise udp header */ + payload->udp.source = 0; + payload->udp.len = htons(sizeof(*payload) - sizeof(struct ethhdr) - + sizeof(struct iphdr)); + payload->udp.check = 0; /* checksum ignored */ + + /* Fill out payload */ + payload->iteration = htons(ntohs(payload->iteration) + 1); + memcpy(&payload->msg, payload_msg, sizeof(payload_msg)); + + /* Fill out remaining state members */ + atomic_set(&state->rx_good, 0); + atomic_set(&state->rx_bad, 0); + smp_wmb(); +} + +static int efx_begin_loopback(struct efx_tx_queue *tx_queue) +{ + struct efx_nic *efx = tx_queue->efx; + struct efx_loopback_state *state = efx->loopback_selftest; + struct efx_loopback_payload *payload; + struct sk_buff *skb; + int i; + int rc; + + /* Transmit N copies of buffer */ + for (i = 0; i < state->packet_count; i++) { + /* Allocate an skb, holding an extra reference for + * transmit completion counting */ + skb = alloc_skb(sizeof(state->payload), GFP_KERNEL); + if (!skb) + return -ENOMEM; + state->skbs[i] = skb; + skb_get(skb); + + /* Copy the payload in, incrementing the source address to + * exercise the rss vectors */ + payload = skb_put(skb, sizeof(state->payload)); + memcpy(payload, &state->payload, sizeof(state->payload)); + payload->ip.saddr = htonl(INADDR_LOOPBACK | (i << 2)); + + /* Ensure everything we've written is visible to the + * interrupt handler. */ + smp_wmb(); + + netif_tx_lock_bh(efx->net_dev); + rc = efx_enqueue_skb(tx_queue, skb); + netif_tx_unlock_bh(efx->net_dev); + + if (rc) { + netif_err(efx, drv, efx->net_dev, + "TX queue %d could not transmit packet %d of " + "%d in %s loopback test\n", tx_queue->queue, + i + 1, state->packet_count, + LOOPBACK_MODE(efx)); + + /* Defer cleaning up the other skbs for the caller */ + kfree_skb(skb); + return -EPIPE; + } + } + + return 0; +} + +static int efx_poll_loopback(struct efx_nic *efx) +{ + struct efx_loopback_state *state = efx->loopback_selftest; + + return atomic_read(&state->rx_good) == state->packet_count; +} + +static int efx_end_loopback(struct efx_tx_queue *tx_queue, + struct efx_loopback_self_tests *lb_tests) +{ + struct efx_nic *efx = tx_queue->efx; + struct efx_loopback_state *state = efx->loopback_selftest; + struct sk_buff *skb; + int tx_done = 0, rx_good, rx_bad; + int i, rc = 0; + + netif_tx_lock_bh(efx->net_dev); + + /* Count the number of tx completions, and decrement the refcnt. 
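An skb that is no longer shared must have been freed once by TX completion, since skb_get() took an extra reference before transmit.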
Any + * skbs not already completed will be free'd when the queue is flushed */ + for (i = 0; i < state->packet_count; i++) { + skb = state->skbs[i]; + if (skb && !skb_shared(skb)) + ++tx_done; + dev_kfree_skb(skb); + } + + netif_tx_unlock_bh(efx->net_dev); + + /* Check TX completion and received packet counts */ + rx_good = atomic_read(&state->rx_good); + rx_bad = atomic_read(&state->rx_bad); + if (tx_done != state->packet_count) { + netif_err(efx, drv, efx->net_dev, + "TX queue %d saw only %d out of an expected %d " + "TX completion events in %s loopback test\n", + tx_queue->queue, tx_done, state->packet_count, + LOOPBACK_MODE(efx)); + rc = -ETIMEDOUT; + efx_purge_tx_queue(tx_queue); + /* Allow to fall through so we see the RX errors as well */ + } + + /* We may always be up to a flush away from our desired packet total */ + if (rx_good != state->packet_count) { + netif_dbg(efx, drv, efx->net_dev, + "TX queue %d saw only %d out of an expected %d " + "received packets in %s loopback test\n", + tx_queue->queue, rx_good, state->packet_count, + LOOPBACK_MODE(efx)); + rc = -ETIMEDOUT; + /* Fall through */ + } + + /* Update loopback test structure */ + lb_tests->tx_sent[tx_queue->queue] += state->packet_count; + lb_tests->tx_done[tx_queue->queue] += tx_done; + lb_tests->rx_good += rx_good; + lb_tests->rx_bad += rx_bad; + + return rc; +} + +static int +efx_test_loopback(struct efx_tx_queue *tx_queue, + struct efx_loopback_self_tests *lb_tests) +{ + struct efx_nic *efx = tx_queue->efx; + struct efx_loopback_state *state = efx->loopback_selftest; + int i, begin_rc, end_rc; + + for (i = 0; i < 3; i++) { + /* Determine how many packets to send */ + state->packet_count = efx->txq_entries / 3; + state->packet_count = min(1 << (i << 2), state->packet_count); + state->skbs = kcalloc(state->packet_count, + sizeof(state->skbs[0]), GFP_KERNEL); + if (!state->skbs) + return -ENOMEM; + state->flush = false; + + netif_dbg(efx, drv, efx->net_dev, + "TX queue %d testing %s loopback with %d packets\n", + tx_queue->queue, LOOPBACK_MODE(efx), + state->packet_count); + + efx_iterate_state(efx); + begin_rc = efx_begin_loopback(tx_queue); + + /* This will normally complete very quickly, but be + * prepared to wait much longer. */ + msleep(1); + if (!efx_poll_loopback(efx)) { + msleep(LOOPBACK_TIMEOUT_MS); + efx_poll_loopback(efx); + } + + end_rc = efx_end_loopback(tx_queue, lb_tests); + kfree(state->skbs); + + if (begin_rc || end_rc) { + /* Wait a while to ensure there are no packets + * floating around after a failure. */ + schedule_timeout_uninterruptible(HZ / 10); + return begin_rc ? 
begin_rc : end_rc; + } + } + + netif_dbg(efx, drv, efx->net_dev, + "TX queue %d passed %s loopback test with a burst length " + "of %d packets\n", tx_queue->queue, LOOPBACK_MODE(efx), + state->packet_count); + + return 0; +} + +static int efx_wait_for_link(struct efx_nic *efx) +{ + struct efx_link_state *link_state = &efx->link_state; + int count, link_up_count = 0; + bool link_up; + + for (count = 0; count < 40; count++) { + schedule_timeout_uninterruptible(HZ / 10); + + if (efx->type->monitor != NULL) { + mutex_lock(&efx->mac_lock); + efx->type->monitor(efx); + mutex_unlock(&efx->mac_lock); + } + + mutex_lock(&efx->mac_lock); + link_up = link_state->up; + if (link_up) + link_up = !efx->type->check_mac_fault(efx); + mutex_unlock(&efx->mac_lock); + + if (link_up) { + if (++link_up_count == 2) + return 0; + } else { + link_up_count = 0; + } + } + + return -ETIMEDOUT; +} + +static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests, + unsigned int loopback_modes) +{ + enum efx_loopback_mode mode; + struct efx_loopback_state *state; + struct efx_channel *channel = + efx_get_channel(efx, efx->tx_channel_offset); + struct efx_tx_queue *tx_queue; + int rc = 0; + s32 temp_filter_id = -1; + + /* Set the port loopback_selftest member. From this point on + * all received packets will be dropped. Mark the state as + * "flushing" so all inflight packets are dropped */ + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (state == NULL) + return -ENOMEM; + BUG_ON(efx->loopback_selftest); + + if (loopback_modes) { + struct efx_filter_spec spec; + + efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED, + EFX_FILTER_FLAG_RX_RSS | EFX_FILTER_FLAG_LOOPBACK, + 0); + efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, + payload_dest); + + temp_filter_id = efx_filter_insert_filter(efx, &spec, false); + /* RSS filters are not supported in some firmware variants */ + if (temp_filter_id == -EOPNOTSUPP) + return 0; + else if (temp_filter_id < 0) + return temp_filter_id; + } + + state->flush = true; + efx->loopback_selftest = state; + + /* Test all supported loopback modes */ + for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) { + if (!(loopback_modes & (1 << mode))) + continue; + + /* Move the port into the specified loopback mode. */ + state->flush = true; + mutex_lock(&efx->mac_lock); + efx->loopback_mode = mode; + rc = __efx_reconfigure_port(efx); + mutex_unlock(&efx->mac_lock); + if (rc) { + netif_err(efx, drv, efx->net_dev, + "unable to move into %s loopback\n", + LOOPBACK_MODE(efx)); + goto out; + } + + rc = efx_wait_for_link(efx); + if (rc) { + netif_err(efx, drv, efx->net_dev, + "loopback %s never came up\n", + LOOPBACK_MODE(efx)); + goto out; + } + + /* Test both types of TX queue */ + efx_for_each_channel_tx_queue(tx_queue, channel) { + state->offload_csum = + tx_queue->csum_offload == + EFX_TXQ_TYPE_CSUM_OFFLOAD; + rc = efx_test_loopback(tx_queue, + &tests->loopback[mode]); + if (rc) + goto out; + } + } + + out: + if (temp_filter_id >= 0) + efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, + temp_filter_id); + + /* Remove the flush. 
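(state->flush makes efx_loopback_rx_packet() discard anything still in flight while we tear down.)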
The caller will remove the loopback setting */ + state->flush = true; + efx->loopback_selftest = NULL; + wmb(); + kfree(state); + + if (rc == -EPERM) + rc = 0; + + return rc; +} + +/************************************************************************** + * + * Entry point + * + *************************************************************************/ + +int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests, + unsigned int flags) +{ + enum efx_loopback_mode loopback_mode = efx->loopback_mode; + int phy_mode = efx->phy_mode; + int rc_test = 0, rc_reset, rc; + + efx_selftest_async_cancel(efx); + + /* Online (i.e. non-disruptive) testing + * This checks interrupt generation, event delivery and PHY presence. */ + + rc = efx_test_phy_alive(efx, tests); + if (rc && !rc_test) + rc_test = rc; + + rc = efx_test_nvram(efx, tests); + if (rc && !rc_test) + rc_test = rc; + + rc = efx_test_interrupts(efx, tests); + if (rc && !rc_test) + rc_test = rc; + + rc = efx_test_eventq_irq(efx, tests); + if (rc && !rc_test) + rc_test = rc; + + if (rc_test) + return rc_test; + + if (!(flags & ETH_TEST_FL_OFFLINE)) + return efx_test_phy(efx, tests, flags); + + /* Offline (i.e. disruptive) testing + * This checks MAC and PHY loopback on the specified port. */ + + /* Detach the device so the kernel doesn't transmit during the + * loopback test and the watchdog timeout doesn't fire. + */ + efx_device_detach_sync(efx); + + if (efx->type->test_chip) { +#ifdef EFX_NOT_UPSTREAM +#if IS_MODULE(CONFIG_SFC_DRIVERLINK) + efx_dl_reset_suspend(&efx->dl_nic); +#endif +#endif + rc_reset = efx->type->test_chip(efx, tests); +#ifdef EFX_NOT_UPSTREAM +#if IS_MODULE(CONFIG_SFC_DRIVERLINK) + efx_dl_reset_resume(&efx->dl_nic, rc_reset == 0); +#endif +#endif + + if (rc_reset) { + netif_err(efx, hw, efx->net_dev, + "Unable to recover from chip test\n"); + efx_schedule_reset(efx, RESET_TYPE_DISABLE); + return rc_reset; + } + + if ((tests->memory < 0 || tests->registers < 0) && !rc_test) + rc_test = -EIO; + } + + /* Ensure that the phy is powered and out of loopback + * for the bist and loopback tests */ + mutex_lock(&efx->mac_lock); + efx->phy_mode &= ~PHY_MODE_LOW_POWER; + efx->loopback_mode = LOOPBACK_NONE; + __efx_reconfigure_port(efx); + mutex_unlock(&efx->mac_lock); + + rc = efx_test_phy(efx, tests, flags); + if (rc && !rc_test) + rc_test = rc; + + rc = efx_test_loopbacks(efx, tests, efx->loopback_modes); + if (rc && !rc_test) + rc_test = rc; + + efx_device_attach_if_not_resetting(efx); + + /* restore the PHY to the previous state */ + mutex_lock(&efx->mac_lock); + efx->phy_mode = phy_mode; + efx->loopback_mode = loopback_mode; + __efx_reconfigure_port(efx); + (void)efx_mac_reconfigure(efx, false); + mutex_unlock(&efx->mac_lock); + + return rc_test; +} + +void efx_selftest_async_start(struct efx_nic *efx) +{ + struct efx_channel *channel; + + efx_for_each_channel(channel, efx) + efx_nic_event_test_start(channel); + schedule_delayed_work(&efx->selftest_work, IRQ_TIMEOUT); +} + +void efx_selftest_async_cancel(struct efx_nic *efx) +{ + cancel_delayed_work_sync(&efx->selftest_work); +} + +static void efx_selftest_async_work(struct work_struct *data) +{ + struct efx_nic *efx = container_of(data, struct efx_nic, + selftest_work.work); + struct efx_channel *channel; + int cpu; + + efx_for_each_channel(channel, efx) { + cpu = efx_nic_event_test_irq_cpu(channel); + if (cpu < 0) + netif_err(efx, ifup, efx->net_dev, + "channel %d failed to trigger an interrupt\n", + channel->channel); + else + netif_dbg(efx, ifup, efx->net_dev, + 
"channel %d triggered interrupt on CPU %d\n", + channel->channel, cpu); + } +} + +void efx_selftest_async_init(struct efx_nic *efx) +{ + INIT_DELAYED_WORK(&efx->selftest_work, efx_selftest_async_work); +} + diff --git a/src/drivers/net/ethernet/sfc/selftest.h b/src/drivers/net/ethernet/sfc/selftest.h new file mode 100644 index 0000000..a496e82 --- /dev/null +++ b/src/drivers/net/ethernet/sfc/selftest.h @@ -0,0 +1,56 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2006-2017 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#ifndef EFX_SELFTEST_H +#define EFX_SELFTEST_H + +#include "net_driver.h" + +/* + * Self tests + */ + +struct efx_loopback_self_tests { + int tx_sent[EFX_TXQ_TYPES]; + int tx_done[EFX_TXQ_TYPES]; + int rx_good; + int rx_bad; +}; + +#define EFX_MAX_PHY_TESTS 20 + +/* Efx self test results + * For fields which are not counters, 1 indicates success and -1 + * indicates failure; 0 indicates test could not be run. + */ +struct efx_self_tests { + /* online tests */ + int phy_alive; + int nvram; + int interrupt; + int *eventq_dma; + int *eventq_int; + /* offline tests */ + int memory; + int registers; + int phy_ext[EFX_MAX_PHY_TESTS]; + struct efx_loopback_self_tests loopback[LOOPBACK_TEST_MAX + 1]; +}; + +void efx_loopback_rx_packet(struct efx_nic *efx, const char *buf_ptr, + int pkt_len); +int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests, + unsigned int flags); +void efx_selftest_async_init(struct efx_nic *efx); +void efx_selftest_async_start(struct efx_nic *efx); +void efx_selftest_async_cancel(struct efx_nic *efx); +int efx_test_memory(struct efx_nic *efx); + +#endif /* EFX_SELFTEST_H */ diff --git a/src/drivers/net/ethernet/sfc/sfctool.c b/src/drivers/net/ethernet/sfc/sfctool.c new file mode 100644 index 0000000..8c15a97 --- /dev/null +++ b/src/drivers/net/ethernet/sfc/sfctool.c @@ -0,0 +1,389 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2019 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
+ */ + +#include "sfctool.h" +#include "net_driver.h" +#include "ethtool_common.h" +#include "efx_ethtool.h" +#include "efx_ioctl.h" +#include "ioctl_common.h" + +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_ETHTOOL_FECPARAM) +int efx_sfctool_get_fecparam(struct efx_nic *efx, + struct ethtool_fecparam *fecparam); +int efx_sfctool_set_fecparam(struct efx_nic *efx, + struct ethtool_fecparam *fecparam); + +static int sfctool_get_fecparam(struct efx_nic *efx, void __user *useraddr) +{ + struct ethtool_fecparam fecparam = { ETHTOOL_GFECPARAM }; + int rc; + + rc = efx_ethtool_get_fecparam(efx->net_dev, &fecparam); + if (rc) + return rc; + + if (copy_to_user(useraddr, &fecparam, sizeof(fecparam))) + return -EFAULT; + return 0; +} + +static int sfctool_set_fecparam(struct efx_nic *efx, void __user *useraddr) +{ + struct ethtool_fecparam fecparam; + + if (copy_from_user(&fecparam, useraddr, sizeof(fecparam))) + return -EFAULT; + + return efx_ethtool_set_fecparam(efx->net_dev, &fecparam); +} +#endif + +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_ETHTOOL_RXFH_CONTEXT) +#if !defined(EFX_HAVE_ETHTOOL_GET_RXFH) && !defined(EFX_HAVE_ETHTOOL_GET_RXFH_INDIR) && defined(EFX_HAVE_ETHTOOL_RXFH_INDIR) +int efx_sfctool_get_rxfh(struct efx_nic *efx, u32 *indir, u8 *key, u8 *hfunc); +int efx_sfctool_set_rxfh(struct efx_nic *efx, + const u32 *indir, const u8 *key, const u8 hfunc); +#endif + +static noinline_for_stack int sfctool_get_rxfh(struct efx_nic *efx, + void __user *useraddr) +{ + int ret; + u32 user_indir_size, user_key_size; + u32 dev_indir_size = 0, dev_key_size = 0; + struct sfctool_rxfh rxfh; + u32 total_size; + u32 indir_bytes; + u32 *indir = NULL; + u8 dev_hfunc = 0; + u8 *hkey = NULL; + u8 *rss_config; + + dev_indir_size = efx_sfctool_get_rxfh_indir_size(efx); + dev_key_size = efx_sfctool_get_rxfh_key_size(efx); + + if (copy_from_user(&rxfh, useraddr, sizeof(rxfh))) + return -EFAULT; + user_indir_size = rxfh.indir_size; + user_key_size = rxfh.key_size; + + /* Check that reserved fields are 0 for now */ + if (rxfh.rsvd8[0] || rxfh.rsvd8[1] || rxfh.rsvd8[2] || rxfh.rsvd32) + return -EINVAL; + + rxfh.indir_size = dev_indir_size; + rxfh.key_size = dev_key_size; + /* upstream writes back to user here. But we delay so as not to corrupt + * user's cmd buffer if we encounter errors, as that would prevent them + * re-using it for standard ethtool. 
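+ * (See the WARNING comment above efx_sfctool() for this contract.)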
+ */ + + if ((user_indir_size && (user_indir_size != dev_indir_size)) || + (user_key_size && (user_key_size != dev_key_size))) + return -EINVAL; + + indir_bytes = user_indir_size * sizeof(indir[0]); + total_size = indir_bytes + user_key_size; + rss_config = kzalloc(total_size, GFP_USER); + if (!rss_config) + return -ENOMEM; + + if (user_indir_size) + indir = (u32 *)rss_config; + + if (user_key_size) + hkey = rss_config + indir_bytes; + + if (rxfh.rss_context) + ret = efx_sfctool_get_rxfh_context(efx, indir, hkey, + &dev_hfunc, + rxfh.rss_context); + else +#if defined(EFX_HAVE_ETHTOOL_GET_RXFH) || defined(EFX_HAVE_ETHTOOL_GET_RXFH_INDIR) || !defined(EFX_HAVE_ETHTOOL_RXFH_INDIR) + return -EOPNOTSUPP; /* use ethtool instead */ +#else + ret = efx_sfctool_get_rxfh(efx, indir, hkey, &dev_hfunc); +#endif + if (ret) + goto out; + + if (copy_to_user(useraddr, &rxfh, sizeof(rxfh))) { + ret = -EFAULT; + } else if (copy_to_user(useraddr + offsetof(struct sfctool_rxfh, hfunc), + &dev_hfunc, sizeof(rxfh.hfunc))) { + ret = -EFAULT; + } else if (copy_to_user(useraddr + + offsetof(struct sfctool_rxfh, rss_config[0]), + rss_config, total_size)) { + ret = -EFAULT; + } +out: + kfree(rss_config); + + return ret; +} + +static int ethtool_copy_validate_indir(u32 *indir, void __user *useraddr, + struct efx_ethtool_rxnfc *rx_rings, + u32 size) +{ + int i; + + if (copy_from_user(indir, useraddr, size * sizeof(indir[0]))) + return -EFAULT; + + /* Validate ring indices */ + for (i = 0; i < size; i++) + if (indir[i] >= rx_rings->data) + return -EINVAL; + + return 0; +} + +static noinline_for_stack int sfctool_set_rxfh(struct efx_nic *efx, + void __user *useraddr) +{ + int ret; + struct efx_ethtool_rxnfc rx_rings; + struct sfctool_rxfh rxfh; + u32 dev_indir_size = 0, dev_key_size = 0, i; + u32 *indir = NULL, indir_bytes = 0; + u8 *hkey = NULL; + u8 *rss_config; + u32 rss_cfg_offset = offsetof(struct sfctool_rxfh, rss_config[0]); + bool delete = false; + + dev_indir_size = efx_sfctool_get_rxfh_indir_size(efx); + dev_key_size = efx_sfctool_get_rxfh_key_size(efx); + + if (copy_from_user(&rxfh, useraddr, sizeof(rxfh))) + return -EFAULT; + + /* Check that reserved fields are 0 for now */ + if (rxfh.rsvd8[0] || rxfh.rsvd8[1] || rxfh.rsvd8[2] || rxfh.rsvd32) + return -EINVAL; + + /* If either indir, hash key or function is valid, proceed further. + * Must request at least one change: indir size, hash key or function. + */ + if ((rxfh.indir_size && + rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE && + rxfh.indir_size != dev_indir_size) || + (rxfh.key_size && (rxfh.key_size != dev_key_size)) || + (rxfh.indir_size == ETH_RXFH_INDIR_NO_CHANGE && + rxfh.key_size == 0 && rxfh.hfunc == ETH_RSS_HASH_NO_CHANGE)) + return -EINVAL; + + if (rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE) + indir_bytes = dev_indir_size * sizeof(indir[0]); + + rss_config = kzalloc(indir_bytes + rxfh.key_size, GFP_USER); + if (!rss_config) + return -ENOMEM; + + rx_rings.cmd = ETHTOOL_GRXRINGS; + ret = efx_sfctool_get_rxnfc(efx, &rx_rings, NULL); + if (ret) + goto out; + + /* rxfh.indir_size == 0 means reset the indir table to default (master + * context) or delete the context (other RSS contexts). + * rxfh.indir_size == ETH_RXFH_INDIR_NO_CHANGE means leave it unchanged. 
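+ * For example, rxfh.rss_context != 0 with rxfh.indir_size == 0 deletes
+ * that context, while ETH_RXFH_INDIR_NO_CHANGE plus a non-zero key_size
+ * replaces only the hash key.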
+ */ + if (rxfh.indir_size && + rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE) { + indir = (u32 *)rss_config; + ret = ethtool_copy_validate_indir(indir, + useraddr + rss_cfg_offset, + &rx_rings, + rxfh.indir_size); + if (ret) + goto out; + } else if (rxfh.indir_size == 0) { + if (rxfh.rss_context == 0) { + indir = (u32 *)rss_config; + for (i = 0; i < dev_indir_size; i++) + indir[i] = ethtool_rxfh_indir_default(i, rx_rings.data); + } else { + delete = true; + } + } + + if (rxfh.key_size) { + hkey = rss_config + indir_bytes; + if (copy_from_user(hkey, + useraddr + rss_cfg_offset + indir_bytes, + rxfh.key_size)) { + ret = -EFAULT; + goto out; + } + } + + if (rxfh.rss_context) + ret = efx_sfctool_set_rxfh_context(efx, indir, hkey, rxfh.hfunc, + &rxfh.rss_context, delete); + else +#if defined(EFX_HAVE_ETHTOOL_GET_RXFH) || defined(EFX_HAVE_ETHTOOL_GET_RXFH_INDIR) || !defined(EFX_HAVE_ETHTOOL_RXFH_INDIR) + return -EOPNOTSUPP; /* use ethtool instead */ +#else + ret = efx_sfctool_set_rxfh(efx, indir, hkey, rxfh.hfunc); +#endif + if (ret) + goto out; + + if (copy_to_user(useraddr + offsetof(struct sfctool_rxfh, rss_context), + &rxfh.rss_context, sizeof(rxfh.rss_context))) + ret = -EFAULT; + +#ifdef IFF_RXFH_CONFIGURED + if (!rxfh.rss_context) { + /* indicate whether rxfh was set to default */ + if (rxfh.indir_size == 0) + efx->net_dev->priv_flags &= ~IFF_RXFH_CONFIGURED; + else if (rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE) + efx->net_dev->priv_flags |= IFF_RXFH_CONFIGURED; + } +#endif + +out: + kfree(rss_config); + return ret; +} +#endif + +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_ETHTOOL_GMODULEEEPROM) +static int sfctool_get_any_eeprom(struct net_device *dev, void __user *useraddr, + int (*getter)(struct net_device *, + struct ethtool_eeprom *, u8 *), + u32 total_len) +{ + struct ethtool_eeprom eeprom; + void __user *userbuf = useraddr + sizeof(eeprom); + u32 bytes_remaining; + u8 *data; + int ret = 0; + + if (copy_from_user(&eeprom, useraddr, sizeof(eeprom))) + return -EFAULT; + + /* Check for wrap and zero */ + if (eeprom.offset + eeprom.len <= eeprom.offset) + return -EINVAL; + + /* Check for exceeding total eeprom len */ + if (eeprom.offset + eeprom.len > total_len) + return -EINVAL; + + data = kmalloc(PAGE_SIZE, GFP_USER); + if (!data) + return -ENOMEM; + + bytes_remaining = eeprom.len; + while (bytes_remaining > 0) { + eeprom.len = min(bytes_remaining, (u32)PAGE_SIZE); + + ret = getter(dev, &eeprom, data); + if (ret) + break; + if (copy_to_user(userbuf, data, eeprom.len)) { + ret = -EFAULT; + break; + } + userbuf += eeprom.len; + eeprom.offset += eeprom.len; + bytes_remaining -= eeprom.len; + } + + eeprom.len = userbuf - (useraddr + sizeof(eeprom)); + eeprom.offset -= eeprom.len; + if (copy_to_user(useraddr, &eeprom, sizeof(eeprom))) + ret = -EFAULT; + + kfree(data); + return ret; +} + +static int sfctool_get_module_info(struct efx_nic *efx, + void __user *useraddr) +{ + int ret; + struct ethtool_modinfo modinfo; + + if (copy_from_user(&modinfo, useraddr, sizeof(modinfo))) + return -EFAULT; + + ret = efx_ethtool_get_module_info(efx->net_dev, &modinfo); + if (ret) + return ret; + + if (copy_to_user(useraddr, &modinfo, sizeof(modinfo))) + return -EFAULT; + + return 0; +} + +static int sfctool_get_module_eeprom(struct efx_nic *efx, + void __user *useraddr) +{ + int ret; + struct ethtool_modinfo modinfo; + + ret = efx_ethtool_get_module_info(efx->net_dev, &modinfo); + if (ret) + return ret; + + return sfctool_get_any_eeprom(efx->net_dev, useraddr, + efx_ethtool_get_module_eeprom, + 
modinfo.eeprom_len); +} +#endif + +/* WARNING! For any return other than success or -EFAULT, methods called here + * must NOT have written to *data, as the userland tool expects to be able to + * re-use its command buffer to call the regular ethtool ioctl. + */ +int efx_sfctool(struct efx_nic *efx, u32 cmd, void __user *data) +{ + switch (cmd) { +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_ETHTOOL_FECPARAM) + case ETHTOOL_GFECPARAM: + return sfctool_get_fecparam(efx, data); + case ETHTOOL_SFECPARAM: + return sfctool_set_fecparam(efx, data); +#endif +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_ETHTOOL_RXFH_CONTEXT) + case ETHTOOL_GRSSH: + return sfctool_get_rxfh(efx, data); + case ETHTOOL_SRSSH: + return sfctool_set_rxfh(efx, data); +#endif +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_ETHTOOL_RXNFC_CONTEXT) + case ETHTOOL_GRXCLSRLALL: + case ETHTOOL_GRXFH: + case ETHTOOL_GRXRINGS: + case ETHTOOL_GRXCLSRLCNT: + case ETHTOOL_GRXCLSRULE: + case ETHTOOL_SRXCLSRLINS: + case ETHTOOL_SRXCLSRLDEL: + /* Use the old sfctool1 implementation; it deals with all the + * horrid 32-bit compat mess. + */ + return efx_ioctl_rxnfc(efx, data); +#endif +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_ETHTOOL_GMODULEEEPROM) + case ETHTOOL_GMODULEEEPROM: + return sfctool_get_module_eeprom(efx, data); + case ETHTOOL_GMODULEINFO: + return sfctool_get_module_info(efx, data); +#endif + default: + return -EOPNOTSUPP; + } +} diff --git a/src/drivers/net/ethernet/sfc/sfctool.h b/src/drivers/net/ethernet/sfc/sfctool.h new file mode 100644 index 0000000..0dd7605 --- /dev/null +++ b/src/drivers/net/ethernet/sfc/sfctool.h @@ -0,0 +1,133 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2019 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#ifndef EFX_SFCTOOL_H +#define EFX_SFCTOOL_H + +#ifdef EFX_USE_KCOMPAT +/* Must come before other headers */ +#include "kernel_compat.h" +#endif + +/* Forward declaration */ +struct efx_nic; + +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_ETHTOOL_FECPARAM) +/** + * struct ethtool_fecparam - Ethernet forward error correction (FEC) parameters + * @cmd: Command number = %ETHTOOL_GFECPARAM or %ETHTOOL_SFECPARAM + * @active_fec: FEC mode which is active on the port + * @fec: Bitmask of supported/configured FEC modes + * @reserved: Reserved for future extensions, e.g. a FEC bypass feature. + * + * Drivers should reject a non-zero setting of @autoneg when + * autonegotiation is disabled (or not supported) for the link.
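+ * The modes themselves are expressed with the ETHTOOL_FEC_* bitmask
+ * values defined below, e.g. ETHTOOL_FEC_AUTO | ETHTOOL_FEC_RS.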
+ * + */ +struct ethtool_fecparam { + __u32 cmd; + /* bitmask of FEC modes */ + __u32 active_fec; + __u32 fec; + __u32 reserved; +}; + +/** + * enum ethtool_fec_config_bits - flags definition of ethtool_fec_configuration + * @ETHTOOL_FEC_NONE_BIT: FEC mode configuration is not supported + * @ETHTOOL_FEC_AUTO_BIT: Default/Best FEC mode provided by driver + * @ETHTOOL_FEC_OFF_BIT: No FEC Mode + * @ETHTOOL_FEC_RS_BIT: Reed-Solomon Forward Error Detection mode + * @ETHTOOL_FEC_BASER_BIT: Base-R/Reed-Solomon Forward Error Detection mode + */ +enum ethtool_fec_config_bits { + ETHTOOL_FEC_NONE_BIT, + ETHTOOL_FEC_AUTO_BIT, + ETHTOOL_FEC_OFF_BIT, + ETHTOOL_FEC_RS_BIT, + ETHTOOL_FEC_BASER_BIT, +}; + +#define ETHTOOL_FEC_NONE (1 << ETHTOOL_FEC_NONE_BIT) +#define ETHTOOL_FEC_AUTO (1 << ETHTOOL_FEC_AUTO_BIT) +#define ETHTOOL_FEC_OFF (1 << ETHTOOL_FEC_OFF_BIT) +#define ETHTOOL_FEC_RS (1 << ETHTOOL_FEC_RS_BIT) +#define ETHTOOL_FEC_BASER (1 << ETHTOOL_FEC_BASER_BIT) + +#define ETHTOOL_GFECPARAM 0x00000050 /* Get FEC settings */ +#define ETHTOOL_SFECPARAM 0x00000051 /* Set FEC settings */ +#endif /* !EFX_HAVE_ETHTOOL_FECPARAM */ + +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_ETHTOOL_RXFH_CONTEXT) +/** + * struct sfctool_rxfh - command to get/set RX flow hash indir or/and hash key. + * @cmd: Specific command number - %ETHTOOL_GRSSH or %ETHTOOL_SRSSH + * @rss_context: RSS context identifier. Context 0 is the default for normal + * traffic; other contexts can be referenced as the destination for RX flow + * classification rules. %ETH_RXFH_CONTEXT_ALLOC is used with command + * %ETHTOOL_SRSSH to allocate a new RSS context; on return this field will + * contain the ID of the newly allocated context. + * @indir_size: On entry, the array size of the user buffer for the + * indirection table, which may be zero, or (for %ETHTOOL_SRSSH), + * %ETH_RXFH_INDIR_NO_CHANGE. On return from %ETHTOOL_GRSSH, + * the array size of the hardware indirection table. + * @key_size: On entry, the array size of the user buffer for the hash key, + * which may be zero. On return from %ETHTOOL_GRSSH, the size of the + * hardware hash key. + * @hfunc: Defines the current RSS hash function used by HW (or to be set to). + * Valid values are one of the %ETH_RSS_HASH_*. + * @rsvd8: Reserved for future extensions. + * @rsvd32: Reserved for future extensions. + * @rss_config: RX ring/queue index for each hash value i.e., indirection table + * of @indir_size __u32 elements, followed by hash key of @key_size + * bytes. + * + * For %ETHTOOL_GRSSH, a @indir_size and key_size of zero means that only the + * size should be returned. For %ETHTOOL_SRSSH, an @indir_size of + * %ETH_RXFH_INDIR_NO_CHANGE means that indir table setting is not requested + * and a @indir_size of zero means the indir table should be reset to default + * values (if @rss_context == 0) or that the RSS context should be deleted. + * An hfunc of zero means that hash function setting is not requested. 
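+ * For example, with @indir_size 128 and @key_size 40, @rss_config
+ * carries 128 __u32 ring indices followed by a 40-byte hash key.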
+ */ +struct sfctool_rxfh { + __u32 cmd; + __u32 rss_context; + __u32 indir_size; + __u32 key_size; + __u8 hfunc; + __u8 rsvd8[3]; + __u32 rsvd32; + __u32 rss_config[0]; +}; +#define ETH_RXFH_CONTEXT_ALLOC 0xffffffff +#ifndef ETH_RXFH_INDIR_NO_CHANGE +#define ETH_RXFH_INDIR_NO_CHANGE 0xffffffff +#endif + +#ifndef ETHTOOL_GRSSH +#define ETHTOOL_GRSSH 0x00000046 /* Get RX flow hash configuration */ +#define ETHTOOL_SRSSH 0x00000047 /* Set RX flow hash configuration */ +#endif +#endif /* !EFX_HAVE_ETHTOOL_RXFH_CONTEXT */ + +int efx_sfctool(struct efx_nic *efx, u32 cmd, void __user *data); + +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_ETHTOOL_RXFH_CONTEXT) +u32 efx_sfctool_get_rxfh_indir_size(struct efx_nic *efx); +u32 efx_sfctool_get_rxfh_key_size(struct efx_nic *efx); +int efx_sfctool_get_rxnfc(struct efx_nic *efx, + struct efx_ethtool_rxnfc *info, u32 *rule_locs); +int efx_sfctool_get_rxfh_context(struct efx_nic *efx, u32 *indir, + u8 *key, u8 *hfunc, u32 rss_context); +int efx_sfctool_set_rxfh_context(struct efx_nic *efx, + const u32 *indir, const u8 *key, + const u8 hfunc, u32 *rss_context, + bool delete); +#endif +#endif /* EFX_SFCTOOL_H */ diff --git a/src/drivers/net/ethernet/sfc/sriov.c b/src/drivers/net/ethernet/sfc/sriov.c new file mode 100644 index 0000000..48f9836 --- /dev/null +++ b/src/drivers/net/ethernet/sfc/sriov.c @@ -0,0 +1,138 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2014-2017 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ +#include +#include "net_driver.h" +#include "nic.h" +#include "sriov.h" + +#define EFX_MAX_PFS 16 + +#define MAX_VFS_ENABLE_ALL INT_MAX +#define MAX_VFS_DEF 0 + +static int max_vfs[EFX_MAX_PFS]; +static unsigned int max_vfs_count; +module_param_array(max_vfs, int, &max_vfs_count, 0444); +MODULE_PARM_DESC(max_vfs, + "Specify the number of VFs initialized by the driver"); + +#ifdef EFX_NOT_UPSTREAM +/* Original name for max_vfs */ +module_param_array_named(vf_count, max_vfs, int, &max_vfs_count, 0); +MODULE_PARM_DESC(vf_count, "Duplicate of the max_vfs parameter"); +#endif + +#ifdef CONFIG_SFC_SRIOV + + +void efx_sriov_init_max_vfs(struct efx_nic *efx, unsigned int pf_index) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + unsigned int idx; + + if (!efx->type->sriov_init) + return; + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_PCI_NUM_VF) + /* If there are already VFs, don't initialise more */ + if (pci_num_vf(efx->pci_dev)) + return; +#endif + + idx = pf_index; + + if (max_vfs_count == 0) + nic_data->max_vfs = MAX_VFS_DEF; + else if (max_vfs_count == 1) + /* If there is only one entry in max_vfs array, + * use it for all NICs for backward compatibility. 
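+ * With more entries, max_vfs[n] applies to the PF with index n, and
+ * PFs beyond the end of the array get no VFs.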
+ */ + nic_data->max_vfs = max_vfs[0]; + else if (idx >= max_vfs_count) + nic_data->max_vfs = 0; + else + nic_data->max_vfs = max_vfs[idx]; + + if (nic_data->max_vfs < 0) + nic_data->max_vfs = MAX_VFS_ENABLE_ALL; +} + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_NDO_SET_VF_MAC) +int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i, struct ifla_vf_info *ivi) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + + if (efx->type->sriov_get_vf_config) + return efx->type->sriov_get_vf_config(efx, vf_i, ivi); + else + return -EOPNOTSUPP; +} +#endif + +int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + bool reset; + + if (efx->type->sriov_set_vf_mac) + return efx->type->sriov_set_vf_mac(efx, vf_i, mac, &reset); + else + return -EOPNOTSUPP; +} + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_NDO_SET_VF_VLAN_PROTO) || defined(EFX_HAVE_NDO_EXT_SET_VF_VLAN_PROTO) +int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i, u16 vlan, + u8 qos, __be16 vlan_proto) +#else +int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i, u16 vlan, + u8 qos) +#endif +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + + if (efx->type->sriov_set_vf_vlan) { + if ((vlan & ~VLAN_VID_MASK) || + (qos & ~(VLAN_PRIO_MASK >> VLAN_PRIO_SHIFT))) + return -EINVAL; + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_NDO_SET_VF_VLAN_PROTO) || defined(EFX_HAVE_NDO_EXT_SET_VF_VLAN_PROTO) + if (vlan_proto != htons(ETH_P_8021Q)) + return -EPROTONOSUPPORT; +#endif + + return efx->type->sriov_set_vf_vlan(efx, vf_i, vlan, qos); + } else { + return -EOPNOTSUPP; + } +} + +int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i, + bool spoofchk) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + + if (efx->type->sriov_set_vf_spoofchk) + return efx->type->sriov_set_vf_spoofchk(efx, vf_i, spoofchk); + else + return -EOPNOTSUPP; +} + +int efx_sriov_set_vf_link_state(struct net_device *net_dev, int vf_i, + int link_state) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + + if (efx->type->sriov_set_vf_link_state) + return efx->type->sriov_set_vf_link_state(efx, vf_i, link_state); + else + return -EOPNOTSUPP; +} + + +#endif diff --git a/src/drivers/net/ethernet/sfc/sriov.h b/src/drivers/net/ethernet/sfc/sriov.h new file mode 100644 index 0000000..820e19e --- /dev/null +++ b/src/drivers/net/ethernet/sfc/sriov.h @@ -0,0 +1,43 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2014-2017 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
+ */ + +#ifndef EFX_SRIOV_H +#define EFX_SRIOV_H + +#include "net_driver.h" + +#ifdef CONFIG_SFC_SRIOV + + +void efx_sriov_init_max_vfs(struct efx_nic *efx, unsigned int pf_index); + +int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac); +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_NDO_SET_VF_VLAN_PROTO) || defined(EFX_HAVE_NDO_EXT_SET_VF_VLAN_PROTO) +int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i, u16 vlan, + u8 qos, __be16 vlan_proto); +#else +int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i, u16 vlan, + u8 qos); +#endif +int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i, + bool spoofchk); +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_NDO_SET_VF_MAC) +int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i, + struct ifla_vf_info *ivi); +#endif +int efx_sriov_set_vf_link_state(struct net_device *net_dev, int vf_i, + int link_state); +#endif /* CONFIG_SFC_SRIOV */ + +static inline bool efx_sriov_wanted(struct efx_nic *efx) +{ + return efx->type->sriov_wanted && efx->type->sriov_wanted(efx); +} + +#endif /* EFX_SRIOV_H */ diff --git a/src/drivers/net/ethernet/sfc/tc.c b/src/drivers/net/ethernet/sfc/tc.c new file mode 100644 index 0000000..5fd2d19 --- /dev/null +++ b/src/drivers/net/ethernet/sfc/tc.c @@ -0,0 +1,3661 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2019 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#ifdef EFX_USE_KCOMPAT +/* Must come before other headers */ +#include "kernel_compat.h" +#endif + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_TC_OFFLOAD) +#include +#include +#include +#include +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_CONNTRACK_OFFLOAD) +#include +#endif +#include +#include +#include "tc.h" +#include "tc_bindings.h" +#include "tc_encap_actions.h" +#include "tc_conntrack.h" +#include "tc_debugfs.h" +#include "mae.h" +#include "ef100_rep.h" +#include "nic.h" + +enum efx_encap_type efx_tc_indr_netdev_type(struct net_device *net_dev) +{ + if (netif_is_vxlan(net_dev)) + return EFX_ENCAP_TYPE_VXLAN; + if (netif_is_geneve(net_dev)) + return EFX_ENCAP_TYPE_GENEVE; + + return EFX_ENCAP_TYPE_NONE; +} + +#define EFX_EFV_PF NULL +/* Look up the representor information (efv) for a device. + * May return NULL for the PF (us), or an error pointer for a device that + * isn't supported as a TC offload endpoint + */ +struct efx_rep *efx_tc_flower_lookup_efv(struct efx_nic *efx, + struct net_device *dev) +{ + struct efx_rep *efv; + + if (!dev) + return ERR_PTR(-EOPNOTSUPP); + /* Is it us (the PF)? */ + if (dev == efx->net_dev) + return EFX_EFV_PF; + /* Is it an efx vfrep at all? */ + if (dev->netdev_ops != &efx_ef100_rep_netdev_ops) + return ERR_PTR(-EOPNOTSUPP); + /* Is it ours? We don't support TC rules that include another + * EF100's netdevices (not even on another port of the same NIC). 
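+ * The parent check below enforces this by comparing the representor's
+ * owning efx instance with our own.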
+ */ + efv = netdev_priv(dev); + if (efv->parent != efx) + return ERR_PTR(-EOPNOTSUPP); + return efv; +} + +/* Convert a driver-internal vport ID into an internal device (PF or VF) */ +static s64 efx_tc_flower_internal_mport(struct efx_nic *efx, struct efx_rep *efv) +{ + u32 mport; + + if (IS_ERR(efv)) + return PTR_ERR(efv); + if (!efv) /* device is PF (us) */ + efx_mae_mport_uplink(efx, &mport); + else /* device is repr */ + efx_mae_mport_mport(efx, efv->mport, &mport); + return mport; +} + +/* Convert a driver-internal vport ID into an external device (wire or VF) */ +s64 efx_tc_flower_external_mport(struct efx_nic *efx, struct efx_rep *efv) +{ + u32 mport; + + if (IS_ERR(efv)) + return PTR_ERR(efv); + if (!efv) /* device is PF (us) */ + efx_mae_mport_wire(efx, &mport); + else /* device is repr */ + efx_mae_mport_mport(efx, efv->mport, &mport); + return mport; +} + +static const struct rhashtable_params efx_tc_mac_ht_params = { + .key_len = offsetofend(struct efx_tc_mac_pedit_action, h_addr), + .key_offset = 0, + .head_offset = offsetof(struct efx_tc_mac_pedit_action, linkage), +}; + +static const struct rhashtable_params efx_tc_encap_match_ht_params = { + .key_len = offsetof(struct efx_tc_encap_match, linkage), + .key_offset = 0, + .head_offset = offsetof(struct efx_tc_encap_match, linkage), +}; + +static const struct rhashtable_params efx_tc_match_action_ht_params = { + .key_len = sizeof(unsigned long), + .key_offset = offsetof(struct efx_tc_flow_rule, cookie), + .head_offset = offsetof(struct efx_tc_flow_rule, linkage), +}; + +static const struct rhashtable_params efx_tc_lhs_rule_ht_params = { + .key_len = sizeof(unsigned long), + .key_offset = offsetof(struct efx_tc_lhs_rule, cookie), + .head_offset = offsetof(struct efx_tc_lhs_rule, linkage), +}; + +static const struct rhashtable_params efx_tc_recirc_ht_params = { + .key_len = offsetof(struct efx_tc_recirc_id, linkage), + .key_offset = 0, + .head_offset = offsetof(struct efx_tc_recirc_id, linkage), +}; + +static struct efx_tc_mac_pedit_action *efx_tc_flower_get_mac( + struct efx_nic *efx, unsigned char h_addr[ETH_ALEN], + struct netlink_ext_ack *extack) +{ + struct efx_tc_mac_pedit_action *ped, *old; + int rc; + + ped = kzalloc(sizeof(*ped), GFP_USER); + if (!ped) + return ERR_PTR(-ENOMEM); + memcpy(ped->h_addr, h_addr, ETH_ALEN); + old = rhashtable_lookup_get_insert_fast(&efx->tc->mac_ht, + &ped->linkage, + efx_tc_mac_ht_params); + if (old) { + /* don't need our new entry */ + kfree(ped); + if (!refcount_inc_not_zero(&old->ref)) + return ERR_PTR(-EAGAIN); + /* existing entry found, ref taken */ + return old; + } + + rc = efx_mae_allocate_pedit_mac(efx, ped); + if (rc < 0) { + EFX_TC_ERR_MSG(efx, extack, "Failed to store pedit MAC address in hw"); + goto out_remove; + } + + /* ref and return */ + refcount_set(&ped->ref, 1); + return ped; +out_remove: + rhashtable_remove_fast(&efx->tc->mac_ht, &ped->linkage, + efx_tc_mac_ht_params); + kfree(ped); + return ERR_PTR(rc); +} + +static void efx_tc_flower_put_mac(struct efx_nic *efx, + struct efx_tc_mac_pedit_action *ped) +{ + if (!refcount_dec_and_test(&ped->ref)) + return; /* still in use */ + rhashtable_remove_fast(&efx->tc->mac_ht, &ped->linkage, + efx_tc_mac_ht_params); + efx_mae_free_pedit_mac(efx, ped); + kfree(ped); +} + +static void efx_tc_free_action_set(struct efx_nic *efx, + struct efx_tc_action_set *act, bool in_hw) +{ + /* Failure paths calling this on the 'running action' set in_hw=false, + * because if the alloc had succeeded we'd've put it in acts.list and + * not still 
have it in act. + */ + if (in_hw) { + efx_mae_free_action_set(efx, act->fw_id); + /* in_hw is true iff we are on an acts.list; make sure to + * remove ourselves from that list before we are freed. + */ + list_del(&act->list); + } + if (act->count) { + spin_lock_bh(&act->count->cnt->lock); + if (!list_empty(&act->count_user)) + list_del(&act->count_user); + spin_unlock_bh(&act->count->cnt->lock); + efx_tc_flower_put_counter_index(efx, act->count); + } + if (act->encap_md) { + list_del(&act->encap_user); + efx_tc_flower_release_encap_md(efx, act->encap_md); + } + if (act->src_mac) + efx_tc_flower_put_mac(efx, act->src_mac); + if (act->dst_mac) + efx_tc_flower_put_mac(efx, act->dst_mac); + kfree(act); +} + +static void efx_tc_free_action_set_list(struct efx_nic *efx, + struct efx_tc_action_set_list *acts, + bool in_hw) +{ + struct efx_tc_action_set *act, *next; + + /* Failure paths set in_hw=false, because usually the acts didn't get + * to efx_mae_alloc_action_set_list(); if they did, the failure tree + * has a separate efx_mae_free_action_set_list() before calling us. + */ + if (in_hw) + efx_mae_free_action_set_list(efx, acts); + /* Any act that's on the list will be in_hw even if the list isn't */ + list_for_each_entry_safe(act, next, &acts->list, list) + efx_tc_free_action_set(efx, act, true); + /* Don't kfree, as acts is embedded inside a struct efx_tc_flow_rule */ +} + +static void efx_tc_flower_release_encap_match(struct efx_nic *efx, + struct efx_tc_encap_match *encap); + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_CONNTRACK_OFFLOAD) + +#endif + +static void efx_tc_lhs_free(void *ptr, void *arg) +{ + struct efx_tc_lhs_rule *rule = ptr; + struct efx_nic *efx = arg; + + netif_err(efx, drv, efx->net_dev, + "tc lhs_rule %lx still present at teardown, removing\n", + rule->cookie); + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_CONNTRACK_OFFLOAD) + if (rule->lhs_act.zone) + efx_tc_ct_unregister_zone(efx, rule->lhs_act.zone); +#endif + if (rule->lhs_act.count) + efx_tc_flower_put_counter_index(efx, rule->lhs_act.count); + efx_mae_remove_lhs_rule(efx, rule); + + kfree(rule); +} + +static void efx_tc_flow_free(void *ptr, void *arg) +{ + struct efx_tc_flow_rule *rule = ptr; + struct efx_nic *efx = arg; + + netif_err(efx, drv, efx->net_dev, + "tc rule %lx still present at teardown, removing\n", + rule->cookie); + + efx_mae_delete_rule(efx, rule->fw_id); + + /* Release entries in subsidiary tables */ + efx_tc_free_action_set_list(efx, &rule->acts, true); + if (rule->match.encap) + efx_tc_flower_release_encap_match(efx, rule->match.encap); + + kfree(rule); +} + +/* At teardown time, all TC filter rules (and thus all resources they created) + * should already have been removed. If we find any in our hashtables, make a + * cursory attempt to clean up the software side. 
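/* Editor's sketch (not from the patch): efx_tc_flower_get_mac() above uses
 * the insert-first interning idiom that recurs throughout this file. The
 * skeleton, with hypothetical names, assuming <linux/rhashtable.h>,
 * <linux/refcount.h> and <linux/err.h>:
 */
struct example_entry {
	u32 key;			/* hashed: bytes [0, linkage) */
	struct rhash_head linkage;
	refcount_t ref;
};

static struct example_entry *example_get(struct rhashtable *ht,
					 const struct rhashtable_params params,
					 u32 key)
{
	struct example_entry *ent, *old;

	ent = kzalloc(sizeof(*ent), GFP_USER);
	if (!ent)
		return ERR_PTR(-ENOMEM);
	ent->key = key;
	old = rhashtable_lookup_get_insert_fast(ht, &ent->linkage, params);
	if (old) {
		/* lost the race or found a match; ours is redundant */
		kfree(ent);
		if (IS_ERR(old))	/* insertion itself failed */
			return ERR_CAST(old);
		if (!refcount_inc_not_zero(&old->ref))
			return ERR_PTR(-EAGAIN); /* dying entry; retry */
		return old;
	}
	/* we inserted: any hardware programming happens here, then */
	refcount_set(&ent->ref, 1);
	return ent;
}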
+ */ +static void efx_tc_mac_free(void *ptr, void *__unused) +{ + struct efx_tc_mac_pedit_action *ped = ptr; + + WARN_ON(refcount_read(&ped->ref)); + kfree(ped); +} + +static void efx_tc_encap_match_free(void *ptr, void *__unused) +{ + struct efx_tc_encap_match *encap = ptr; + + WARN_ON(refcount_read(&encap->ref)); + kfree(encap); +} + +static struct efx_tc_recirc_id *efx_tc_get_recirc_id(struct efx_nic *efx, + u32 chain_index, + struct net_device *net_dev) +{ + struct efx_tc_recirc_id *rid, *old; + int rc; + + rid = kzalloc(sizeof(*rid), GFP_USER); + if (!rid) + return ERR_PTR(-ENOMEM); + rid->chain_index = chain_index; + /* We don't take a reference here, because it's implied - if there's + * a rule on the net_dev that's been offloaded to us, then the net_dev + * can't go away until the rule has been deoffloaded. + */ + rid->net_dev = net_dev; + old = rhashtable_lookup_get_insert_fast(&efx->tc->recirc_ht, + &rid->linkage, + efx_tc_recirc_ht_params); + if (old) { + /* don't need our new entry */ + kfree(rid); + if (!refcount_inc_not_zero(&old->ref)) + return ERR_PTR(-EAGAIN); + /* existing entry found */ + rid = old; + } else { + rc = ida_alloc_range(&efx->tc->recirc_ida, 1, U8_MAX, GFP_USER); + if (rc < 0) { + rhashtable_remove_fast(&efx->tc->recirc_ht, + &rid->linkage, + efx_tc_recirc_ht_params); + kfree(rid); + return ERR_PTR(rc); + } + rid->fw_id = rc; + refcount_set(&rid->ref, 1); + } + return rid; +} + +static void efx_tc_put_recirc_id(struct efx_nic *efx, struct efx_tc_recirc_id *rid) +{ + if (!refcount_dec_and_test(&rid->ref)) + return; /* still in use */ + rhashtable_remove_fast(&efx->tc->recirc_ht, &rid->linkage, + efx_tc_recirc_ht_params); + ida_free(&efx->tc->recirc_ida, rid->fw_id); + kfree(rid); +} + +static void efx_tc_recirc_free(void *ptr, void *arg) +{ + struct efx_tc_recirc_id *rid = ptr; + struct efx_nic *efx = arg; + + WARN_ON(refcount_read(&rid->ref)); + ida_free(&efx->tc->recirc_ida, rid->fw_id); + kfree(rid); +} + +int efx_init_struct_tc(struct efx_nic *efx) +{ + int rc; + + if (efx->type->is_vf || efx->tc) + return 0; + + efx->tc = kzalloc(sizeof(*efx->tc), GFP_KERNEL); + if (!efx->tc) + return -ENOMEM; + efx->tc->caps = kzalloc(sizeof(struct mae_caps), GFP_KERNEL); + if (!efx->tc->caps) { + rc = -ENOMEM; + goto fail_alloc_caps; + } + INIT_LIST_HEAD(&efx->tc->block_list); + + mutex_init(&efx->tc->mutex); + init_waitqueue_head(&efx->tc->flush_wq); + rc = efx_tc_init_encap_actions(efx); + if (rc < 0) + goto fail_encap_actions; + rc = efx_tc_init_counters(efx); + if (rc < 0) + goto fail_counters; + rc = rhashtable_init(&efx->tc->mac_ht, &efx_tc_mac_ht_params); + if (rc < 0) + goto fail_mac_ht; + rc = rhashtable_init(&efx->tc->encap_match_ht, &efx_tc_encap_match_ht_params); + if(rc < 0) + goto fail_encap_match_ht; + rc = rhashtable_init(&efx->tc->match_action_ht, &efx_tc_match_action_ht_params); + if (rc < 0) + goto fail_match_action_ht; + rc = rhashtable_init(&efx->tc->lhs_rule_ht, &efx_tc_lhs_rule_ht_params); + if (rc < 0) + goto fail_lhs_rule_ht; +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_CONNTRACK_OFFLOAD) + rc = efx_tc_init_conntrack(efx); + if (rc < 0) + goto fail_conntrack; +#endif + rc = rhashtable_init(&efx->tc->recirc_ht, &efx_tc_recirc_ht_params); + if (rc < 0) + goto fail_recirc_ht; + ida_init(&efx->tc->recirc_ida); + efx->tc->reps_filter_uc = -1; + efx->tc->reps_filter_mc = -1; + INIT_LIST_HEAD(&efx->tc->dflt.pf.acts.list); + efx->tc->dflt.pf.fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL; + efx->tc->dflt.pf.cookie = 0; + 
INIT_LIST_HEAD(&efx->tc->dflt.wire.acts.list); + efx->tc->dflt.wire.fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL; + efx->tc->dflt.wire.cookie = 1; + INIT_LIST_HEAD(&efx->tc->facts.pf.list); + efx->tc->facts.pf.fw_id = MC_CMD_MAE_ACTION_SET_ALLOC_OUT_ACTION_SET_ID_NULL; + INIT_LIST_HEAD(&efx->tc->facts.reps.list); + efx->tc->facts.reps.fw_id = MC_CMD_MAE_ACTION_SET_ALLOC_OUT_ACTION_SET_ID_NULL; + efx->extra_channel_type[EFX_EXTRA_CHANNEL_TC] = &efx_tc_channel_type; + return 0; +fail_recirc_ht: +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_CONNTRACK_OFFLOAD) + efx_tc_destroy_conntrack(efx); +fail_conntrack: +#endif + rhashtable_destroy(&efx->tc->lhs_rule_ht); +fail_lhs_rule_ht: + rhashtable_destroy(&efx->tc->match_action_ht); +fail_match_action_ht: + rhashtable_destroy(&efx->tc->encap_match_ht); +fail_encap_match_ht: + rhashtable_destroy(&efx->tc->mac_ht); +fail_mac_ht: + efx_tc_destroy_counters(efx); +fail_counters: + efx_tc_destroy_encap_actions(efx); +fail_encap_actions: + kfree(efx->tc->caps); + mutex_destroy(&efx->tc->mutex); +fail_alloc_caps: + kfree(efx->tc); + efx->tc = NULL; + return rc; +} + +void efx_fini_struct_tc(struct efx_nic *efx) +{ + if (efx->type->is_vf) + return; + + if (!efx->tc) + return; + + mutex_lock(&efx->tc->mutex); + EFX_WARN_ON_PARANOID(efx->tc->dflt.pf.fw_id != + MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL); + EFX_WARN_ON_PARANOID(efx->tc->dflt.wire.fw_id != + MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL); + EFX_WARN_ON_PARANOID(efx->tc->facts.pf.fw_id != + MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_ACTION_SET_LIST_ID_NULL); + EFX_WARN_ON_PARANOID(efx->tc->facts.reps.fw_id != + MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_ACTION_SET_LIST_ID_NULL); + rhashtable_free_and_destroy(&efx->tc->lhs_rule_ht, efx_tc_lhs_free, efx); + rhashtable_free_and_destroy(&efx->tc->match_action_ht, efx_tc_flow_free, + efx); +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_CONNTRACK_OFFLOAD) + efx_tc_fini_conntrack(efx); +#endif + rhashtable_free_and_destroy(&efx->tc->recirc_ht, efx_tc_recirc_free, efx); + WARN_ON(!ida_is_empty(&efx->tc->recirc_ida)); + ida_destroy(&efx->tc->recirc_ida); + rhashtable_free_and_destroy(&efx->tc->encap_match_ht, efx_tc_encap_match_free, NULL); + rhashtable_free_and_destroy(&efx->tc->mac_ht, efx_tc_mac_free, NULL); + efx_tc_fini_counters(efx); + efx_tc_fini_encap_actions(efx); + mutex_unlock(&efx->tc->mutex); + mutex_destroy(&efx->tc->mutex); + kfree(efx->tc->caps); + kfree(efx->tc); + efx->tc = NULL; +} + +/* Boilerplate for the simple 'copy a field' cases */ +#define _MAP_KEY_AND_MASK(_name, _type, _tcget, _tcfield, _field) \ +if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_##_name)) { \ + struct flow_match_##_type fm; \ + \ + flow_rule_match_##_tcget(rule, &fm); \ + match->value._field = fm.key->_tcfield; \ + match->mask._field = fm.mask->_tcfield; \ +} +#define MAP_KEY_AND_MASK(_name, _type, _tcfield, _field) \ + _MAP_KEY_AND_MASK(_name, _type, _type, _tcfield, _field) +#define MAP_ENC_KEY_AND_MASK(_name, _type, _tcget, _tcfield, _field) \ + _MAP_KEY_AND_MASK(ENC_##_name, _type, _tcget, _tcfield, _field) + +static int efx_tc_flower_parse_match(struct efx_nic *efx, + struct flow_rule *rule, + struct efx_tc_match *match, + struct netlink_ext_ack *extack) +{ + struct flow_dissector *dissector = rule->match.dissector; + unsigned char ipv = 0; + + /* Owing to internal TC infelicities, the IPV6_ADDRS key might be set + * even on IPv4 filters; so rather than relying on dissector->used_keys + * we check the addr_type in the CONTROL key. 
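/* Worked expansion (editor's illustration): with the boilerplate macros
 * above, MAP_KEY_AND_MASK(BASIC, basic, n_proto, eth_proto) used below
 * expands to exactly:
 */
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
	struct flow_match_basic fm;

	flow_rule_match_basic(rule, &fm);
	match->value.eth_proto = fm.key->n_proto;
	match->mask.eth_proto = fm.mask->n_proto;
}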
If we don't find it (or + * it's masked, which should never happen), we treat both IPV4_ADDRS + * and IPV6_ADDRS as absent. + */ + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { + struct flow_match_control fm; + + flow_rule_match_control(rule, &fm); + if (IS_ALL_ONES(fm.mask->addr_type)) + switch (fm.key->addr_type) { + case FLOW_DISSECTOR_KEY_IPV4_ADDRS: + ipv = 4; + break; + case FLOW_DISSECTOR_KEY_IPV6_ADDRS: + ipv = 6; + break; + default: + break; + } + + if (fm.mask->flags & FLOW_DIS_IS_FRAGMENT) { + match->value.ip_frag = fm.key->flags & FLOW_DIS_IS_FRAGMENT; + match->mask.ip_frag = true; + } + if (fm.mask->flags & FLOW_DIS_FIRST_FRAG) { + match->value.ip_firstfrag = fm.key->flags & FLOW_DIS_FIRST_FRAG; + match->mask.ip_firstfrag = true; + } + if (fm.mask->flags & ~(FLOW_DIS_IS_FRAGMENT | FLOW_DIS_FIRST_FRAG)) { + efx_tc_err(efx, "Unsupported match on control.flags %#x\n", + fm.mask->flags); + NL_SET_ERR_MSG_MOD(extack, "Unsupported match on control.flags"); + return -EOPNOTSUPP; + } + } + + if (dissector->used_keys & + ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | + BIT(FLOW_DISSECTOR_KEY_BASIC) | + BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_VLAN) | +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_FLOW_DISSECTOR_KEY_CVLAN) + BIT(FLOW_DISSECTOR_KEY_CVLAN) | +#endif + BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_PORTS) | + BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | + BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | + BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_CONNTRACK_OFFLOAD) + BIT(FLOW_DISSECTOR_KEY_CT) | +#endif + BIT(FLOW_DISSECTOR_KEY_TCP) | +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_FLOW_DISSECTOR_KEY_ENC_IP) + BIT(FLOW_DISSECTOR_KEY_ENC_IP) | +#endif + BIT(FLOW_DISSECTOR_KEY_IP))) { +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_FLOW_DISSECTOR_64BIT_USED_KEYS) + efx_tc_err(efx, "Unsupported flower keys %#llx\n", dissector->used_keys); +#else + efx_tc_err(efx, "Unsupported flower keys %#x\n", dissector->used_keys); +#endif + NL_SET_ERR_MSG_MOD(extack, "Unsupported flower keys encountered"); + return -EOPNOTSUPP; + } + + MAP_KEY_AND_MASK(BASIC, basic, n_proto, eth_proto); + /* Make sure we're IP if any L3/L4 keys used. 
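/* Worked example (editor's illustration, hypothetical command): a filter
 * such as
 *   tc filter add dev $PF ingress protocol ip flower dst_ip 10.1.1.1 \
 *           ip_proto tcp dst_port 80 action drop
 * arrives with used_keys covering CONTROL, BASIC, IPV4_ADDRS and PORTS.
 * The CONTROL key carries an exact-masked addr_type of
 * FLOW_DISSECTOR_KEY_IPV4_ADDRS, so ipv becomes 4 and the IPV4_ADDRS key
 * is consumed below, while BASIC supplies n_proto/ip_proto and PORTS the
 * destination port.
 */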
*/ + if (!IS_ALL_ONES(match->mask.eth_proto) || + !(match->value.eth_proto == htons(ETH_P_IP) || + match->value.eth_proto == htons(ETH_P_IPV6))) + if (dissector->used_keys & + (BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_PORTS) | + BIT(FLOW_DISSECTOR_KEY_IP) | + BIT(FLOW_DISSECTOR_KEY_TCP))) { +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_FLOW_DISSECTOR_64BIT_USED_KEYS) + efx_tc_err(efx, "Flower keys %#llx require protocol ipv[46]\n", + dissector->used_keys); +#else + efx_tc_err(efx, "Flower keys %#x require protocol ipv[46]\n", + dissector->used_keys); +#endif + NL_SET_ERR_MSG_MOD(extack, "L3/L4 keys without L2 protocol IPv4/6"); + return -EINVAL; + } + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { + struct flow_match_vlan fm; + + flow_rule_match_vlan(rule, &fm); +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_FLOW_DISSECTOR_VLAN_TPID) + if (fm.mask->vlan_id || fm.mask->vlan_priority || fm.mask->vlan_tpid) { + if (fm.mask->vlan_tpid && !IS_ALL_ONES(fm.mask->vlan_tpid)) { + efx_tc_err(efx, "Unsupported masking (%#x) of VLAN ethertype\n", + fm.mask->vlan_tpid); + NL_SET_ERR_MSG_MOD(extack, "VLAN ethertype masking not supported"); + return -EOPNOTSUPP; + } + match->value.vlan_proto[0] = fm.key->vlan_tpid; + match->mask.vlan_proto[0] = fm.mask->vlan_tpid; +#else + if (fm.mask->vlan_id || fm.mask->vlan_priority) { +#endif + match->value.vlan_tci[0] = cpu_to_be16(fm.key->vlan_priority << 13 | + fm.key->vlan_id); + match->mask.vlan_tci[0] = cpu_to_be16(fm.mask->vlan_priority << 13 | + fm.mask->vlan_id); + } + } +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_FLOW_DISSECTOR_KEY_CVLAN) + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) { + struct flow_match_vlan fm; + + flow_rule_match_cvlan(rule, &fm); + if (fm.mask->vlan_id || fm.mask->vlan_priority || fm.mask->vlan_tpid) { + if (fm.mask->vlan_tpid && !IS_ALL_ONES(fm.mask->vlan_tpid)) { + efx_tc_err(efx, "Unsupported masking (%#x) of CVLAN ethertype\n", + fm.mask->vlan_tpid); + NL_SET_ERR_MSG_MOD(extack, "CVLAN ethertype masking not supported"); + return -EOPNOTSUPP; + } + match->value.vlan_proto[1] = fm.key->vlan_tpid; + match->mask.vlan_proto[1] = fm.mask->vlan_tpid; + match->value.vlan_tci[1] = cpu_to_be16(fm.key->vlan_priority << 13 | + fm.key->vlan_id); + match->mask.vlan_tci[1] = cpu_to_be16(fm.mask->vlan_priority << 13 | + fm.mask->vlan_id); + } + } +#endif + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + struct flow_match_eth_addrs fm; + + flow_rule_match_eth_addrs(rule, &fm); + ether_addr_copy(match->value.eth_saddr, fm.key->src); + ether_addr_copy(match->value.eth_daddr, fm.key->dst); + ether_addr_copy(match->mask.eth_saddr, fm.mask->src); + ether_addr_copy(match->mask.eth_daddr, fm.mask->dst); + } + MAP_KEY_AND_MASK(BASIC, basic, ip_proto, ip_proto); + /* Make sure we're TCP/UDP if any L4 keys used. 
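/* Worked arithmetic (editor's illustration): the TCI reassembly in the
 * VLAN block above packs PCP and VID back into the on-wire 16-bit field.
 * For vlan_priority 3 and vlan_id 100:
 *   (3 << 13) | 100 = 0x6000 | 0x0064 = 0x6064
 * stored big-endian via cpu_to_be16(), with the DEI bit (bit 12)
 * implicitly zero.
 */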
*/ + if ((match->value.ip_proto != IPPROTO_UDP && + match->value.ip_proto != IPPROTO_TCP) || !IS_ALL_ONES(match->mask.ip_proto)) + if (dissector->used_keys & + (BIT(FLOW_DISSECTOR_KEY_PORTS) | + BIT(FLOW_DISSECTOR_KEY_TCP))) { +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_FLOW_DISSECTOR_64BIT_USED_KEYS) + efx_tc_err(efx, "Flower keys %#llx require ipproto udp or tcp\n", + dissector->used_keys); +#else + efx_tc_err(efx, "Flower keys %#x require ipproto udp or tcp\n", + dissector->used_keys); +#endif + NL_SET_ERR_MSG_MOD(extack, "L4 keys without ipproto udp/tcp"); + return -EINVAL; + } + MAP_KEY_AND_MASK(IP, ip, tos, ip_tos); + MAP_KEY_AND_MASK(IP, ip, ttl, ip_ttl); + if (ipv == 4) { + MAP_KEY_AND_MASK(IPV4_ADDRS, ipv4_addrs, src, src_ip); + MAP_KEY_AND_MASK(IPV4_ADDRS, ipv4_addrs, dst, dst_ip); + } +#ifdef CONFIG_IPV6 + else if (ipv == 6) { + MAP_KEY_AND_MASK(IPV6_ADDRS, ipv6_addrs, src, src_ip6); + MAP_KEY_AND_MASK(IPV6_ADDRS, ipv6_addrs, dst, dst_ip6); + } +#endif + MAP_KEY_AND_MASK(PORTS, ports, src, l4_sport); + MAP_KEY_AND_MASK(PORTS, ports, dst, l4_dport); + MAP_KEY_AND_MASK(TCP, tcp, flags, tcp_flags); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) { + struct flow_match_control fm; + + flow_rule_match_enc_control(rule, &fm); + if (fm.mask->flags) { + efx_tc_err(efx, "Unsupported match on enc_control.flags %#x\n", + fm.mask->flags); + NL_SET_ERR_MSG_MOD(extack, "Unsupported match on enc_control.flags"); + return -EOPNOTSUPP; + } + if (!IS_ALL_ONES(fm.mask->addr_type)) { + efx_tc_err(efx, "Unsupported enc addr_type mask %u (key %u).\n", + fm.mask->addr_type, fm.key->addr_type); + NL_SET_ERR_MSG_MOD(extack, "Masked enc addr_type"); + return -EOPNOTSUPP; + } + switch (fm.key->addr_type) { + case FLOW_DISSECTOR_KEY_IPV4_ADDRS: + MAP_ENC_KEY_AND_MASK(IPV4_ADDRS, ipv4_addrs, enc_ipv4_addrs, + src, enc_src_ip); + MAP_ENC_KEY_AND_MASK(IPV4_ADDRS, ipv4_addrs, enc_ipv4_addrs, + dst, enc_dst_ip); + break; +#ifdef CONFIG_IPV6 + case FLOW_DISSECTOR_KEY_IPV6_ADDRS: + MAP_ENC_KEY_AND_MASK(IPV6_ADDRS, ipv6_addrs, enc_ipv6_addrs, + src, enc_src_ip6); + MAP_ENC_KEY_AND_MASK(IPV6_ADDRS, ipv6_addrs, enc_ipv6_addrs, + dst, enc_dst_ip6); + break; +#endif + default: + efx_tc_err(efx, "Unsupported enc addr_type %u\n", + fm.key->addr_type); + NL_SET_ERR_MSG_MOD(extack, "Unsupported enc addr_type (supported are IPv4, IPv6)"); + return -EOPNOTSUPP; + } +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_FLOW_DISSECTOR_KEY_ENC_IP) + MAP_ENC_KEY_AND_MASK(IP, ip, enc_ip, tos, enc_ip_tos); + MAP_ENC_KEY_AND_MASK(IP, ip, enc_ip, ttl, enc_ip_ttl); +#endif + MAP_ENC_KEY_AND_MASK(PORTS, ports, enc_ports, src, enc_sport); + MAP_ENC_KEY_AND_MASK(PORTS, ports, enc_ports, dst, enc_dport); + MAP_ENC_KEY_AND_MASK(KEYID, enc_keyid, enc_keyid, keyid, enc_keyid); + } else if (dissector->used_keys & + (BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | + BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_FLOW_DISSECTOR_KEY_ENC_IP) + BIT(FLOW_DISSECTOR_KEY_ENC_IP) | +#endif + BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))) { +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_FLOW_DISSECTOR_64BIT_USED_KEYS) + efx_tc_err(efx, "Flower enc keys require enc_control (keys: %#llx)\n", + dissector->used_keys); +#else + efx_tc_err(efx, "Flower enc keys require enc_control (keys: %#x)\n", + dissector->used_keys); +#endif + NL_SET_ERR_MSG_MOD(extack, "Flower enc keys without enc_control"); + return -EOPNOTSUPP; + } +#if !defined(EFX_USE_KCOMPAT) || 
defined(EFX_CONNTRACK_OFFLOAD) + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CT)) { + struct flow_match_ct fm; + + flow_rule_match_ct(rule, &fm); +#define MAP_CT_STATE(_bit, _NAME) do { \ + match->value.ct_state_##_bit = !!(fm.key->ct_state & TCA_FLOWER_KEY_CT_FLAGS_##_NAME);\ + match->mask.ct_state_##_bit = !!(fm.mask->ct_state & TCA_FLOWER_KEY_CT_FLAGS_##_NAME);\ +} while(0) + MAP_CT_STATE(new, NEW); + MAP_CT_STATE(est, ESTABLISHED); + MAP_CT_STATE(rel, RELATED); + MAP_CT_STATE(trk, TRACKED); +#undef MAP_CT_STATE + match->value.ct_mark = fm.key->ct_mark; + match->mask.ct_mark = fm.mask->ct_mark; + match->value.ct_zone = fm.key->ct_zone; + match->mask.ct_zone = fm.mask->ct_zone; + + if (memchr_inv(fm.mask->ct_labels, 0, sizeof(fm.mask->ct_labels))) { + EFX_TC_ERR_MSG(efx, extack, "Matching on ct_label not supported"); + return -EOPNOTSUPP; + } + } +#endif + return 0; +} +#undef MAP_KEY_AND_MASK + +static void efx_tc_describe_encap_match(const struct efx_tc_encap_match *encap, + bool ipv6, char *outbuf, size_t outlen) +{ + char buf[128], tbuf[48] = ""; + +#ifdef CONFIG_IPV6 + if (ipv6) + snprintf(buf, sizeof(buf), "%pI6c->%pI6c", + &encap->src_ip6, &encap->dst_ip6); + else +#endif + snprintf(buf, sizeof(buf), "%pI4->%pI4", + &encap->src_ip, &encap->dst_ip); + + if (encap->ip_tos_mask) + snprintf(tbuf, sizeof(tbuf), " ToS %#x/%#04x", + encap->ip_tos, encap->ip_tos_mask); + + switch (encap->type) { + case EFX_TC_EM_DIRECT: + snprintf(outbuf, outlen, "encap match %s:%u%s", + buf, ntohs(encap->udp_dport), tbuf); + break; + case EFX_TC_EM_PSEUDO_OR: + snprintf(outbuf, outlen, "pseudo(OR) encap match %s:%u%s", + buf, ntohs(encap->udp_dport), tbuf); + break; + case EFX_TC_EM_PSEUDO_TOS: + /* tbuf should be empty, but let's include it in case */ + snprintf(outbuf, outlen, "pseudo(TOS & %#04x) encap match %s:%u%s", + encap->child_ip_tos_mask, buf, ntohs(encap->udp_dport), + tbuf); + break; + default: + snprintf(outbuf, outlen, "pseudo(%d) encap match %s:%u%s", + encap->type, buf, ntohs(encap->udp_dport), tbuf); + break; + } +} + +static int efx_tc_flower_record_encap_match(struct efx_nic *efx, + struct efx_tc_match *match, + enum efx_encap_type type, + enum efx_tc_em_pseudo_type em_type, + u8 child_ip_tos_mask) +{ + struct efx_tc_encap_match *encap, *old, *pseudo = NULL; + bool ipv6 = false; + int rc; + + /* We require that the socket-defining fields (IP addrs and UDP dest + * port) are present and exact-match. Other fields are currently not + * allowed. This meets what OVS will ask for, and means that we don't + * need to handle difficult checks for overlapping matches as could + * come up if we allowed masks or varying sets of match fields. 
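/* Worked example (editor's illustration) of the overlap problem this
 * exactness rule avoids: if one encap match could use dst 10.0.0.0/24 and
 * another dst 10.0.0.1/32 with different tunnel types, a packet addressed
 * to 10.0.0.1 would be ambiguous in the outer-rule lookup. With exact
 * matches on the socket-defining fields, two EMs either coincide exactly
 * (and are shared via the refcount below) or cannot overlap at all.
 */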
+ */ + if (match->mask.enc_dst_ip | match->mask.enc_src_ip) { + if (!IS_ALL_ONES(match->mask.enc_dst_ip)) { + efx_tc_err(efx, "Egress encap match is not exact on dst IP address\n"); + return -EOPNOTSUPP; + } + if (!IS_ALL_ONES(match->mask.enc_src_ip)) { + efx_tc_err(efx, "Egress encap match is not exact on src IP address\n"); + return -EOPNOTSUPP; + } +#ifdef CONFIG_IPV6 + if (!ipv6_addr_any(&match->mask.enc_dst_ip6) || + !ipv6_addr_any(&match->mask.enc_src_ip6)) { + efx_tc_err(efx, "Egress encap match on both IPv4 and IPv6, don't understand\n"); + return -EOPNOTSUPP; + } + } else { + ipv6 = true; + if (!efx_ipv6_addr_all_ones(&match->mask.enc_dst_ip6)) { + efx_tc_err(efx, "Egress encap match is not exact on dst IP address\n"); + return -EOPNOTSUPP; + } + if (!efx_ipv6_addr_all_ones(&match->mask.enc_src_ip6)) { + efx_tc_err(efx, "Egress encap match is not exact on src IP address\n"); + return -EOPNOTSUPP; + } +#endif + } + if (!IS_ALL_ONES(match->mask.enc_dport)) { + efx_tc_err(efx, "Egress encap match is not exact on dst UDP port\n"); + return -EOPNOTSUPP; + } + if (match->mask.enc_sport) { + efx_tc_err(efx, "Egress encap match on src UDP port not supported\n"); + return -EOPNOTSUPP; + } + if (match->mask.enc_ip_tos) { + struct efx_tc_match pmatch = *match; + + if (em_type == EFX_TC_EM_PSEUDO_TOS) { /* can't happen */ + efx_tc_err(efx, "Bad recursion in egress encap match handler\n"); + return -EOPNOTSUPP; + } + pmatch.value.enc_ip_tos = 0; + pmatch.mask.enc_ip_tos = 0; + rc = efx_tc_flower_record_encap_match(efx, &pmatch, type, + EFX_TC_EM_PSEUDO_TOS, + match->mask.enc_ip_tos); + if (rc) + return rc; + pseudo = pmatch.encap; + } + if (match->mask.enc_ip_ttl) { + efx_tc_err(efx, "Egress encap match on IP TTL not supported\n"); + rc = -EOPNOTSUPP; + goto fail_pseudo; + } + + rc = efx_mae_check_encap_match_caps(efx, ipv6, match->mask.enc_ip_tos); + if (rc) { + efx_tc_err(efx, "MAE hw reports no support for IPv%d encap matches\n", + ipv6 ? 6 : 4); + goto fail_pseudo; + } + + encap = kzalloc(sizeof(*encap), GFP_USER); + if (!encap) { + rc = -ENOMEM; + goto fail_pseudo; + } + encap->src_ip = match->value.enc_src_ip; + encap->dst_ip = match->value.enc_dst_ip; +#ifdef CONFIG_IPV6 + encap->src_ip6 = match->value.enc_src_ip6; + encap->dst_ip6 = match->value.enc_dst_ip6; +#endif + encap->udp_dport = match->value.enc_dport; + encap->tun_type = type; + encap->ip_tos = match->value.enc_ip_tos; + encap->ip_tos_mask = match->mask.enc_ip_tos; + encap->child_ip_tos_mask = child_ip_tos_mask; + encap->type = em_type; + encap->pseudo = pseudo; + old = rhashtable_lookup_get_insert_fast(&efx->tc->encap_match_ht, + &encap->linkage, + efx_tc_encap_match_ht_params); + if (old) { + /* don't need our new entry */ + kfree(encap); + if (pseudo) /* don't need our new pseudo either */ + efx_tc_flower_release_encap_match(efx, pseudo); + switch (old->type) { + case EFX_TC_EM_DIRECT: + /* old EM is in hardware, so mustn't overlap with a + * pseudo, but may be shared with another direct EM + */ + if (em_type == EFX_TC_EM_DIRECT) + break; + netif_err(efx, drv, efx->net_dev, + "Pseudo encap match conflicts with existing direct entry\n"); + return -EEXIST; + case EFX_TC_EM_PSEUDO_OR: + /* old EM corresponds to an OR that has to be unique + * (it must not overlap with any other OR, whether + * direct-EM or pseudo). + */ + netif_err(efx, drv, efx->net_dev, + "%s encap match conflicts with existing pseudo(OR) entry\n", + em_type ? 
"Pseudo" : "Direct"); + return -EEXIST; + case EFX_TC_EM_PSEUDO_TOS: + /* old EM is protecting a ToS-qualified filter, so may + * only be shared with another pseudo for the same + * ToS mask. + */ + if (em_type != EFX_TC_EM_PSEUDO_TOS) { + netif_err(efx, drv, efx->net_dev, + "%s encap match conflicts with existing pseudo(TOS) entry\n", + em_type ? "Pseudo" : "Direct"); + return -EEXIST; + } + if (child_ip_tos_mask != old->child_ip_tos_mask) { + netif_err(efx, drv, efx->net_dev, + "Pseudo encap match for TOS mask %#04x conflicts with existing pseudo(TOS) entry for mask %#04x\n", + child_ip_tos_mask, + old->child_ip_tos_mask); + return -EEXIST; + } + break; + default: /* Unrecognised pseudo-type. Just say no */ + netif_err(efx, drv, efx->net_dev, + "%s encap match conflicts with existing pseudo(%d) entry\n", + em_type ? "Pseudo" : "Direct", old->type); + return -EEXIST; + } + if (old->tun_type != type) { + netif_err(efx, drv, efx->net_dev, "Egress encap match with conflicting tun_type\n"); + return -EEXIST; + } + if (!refcount_inc_not_zero(&old->ref)) + return -EAGAIN; + /* existing entry found */ + encap = old; + } else { + char buf[192]; + + efx_tc_describe_encap_match(encap, ipv6, buf, sizeof(buf)); + + if (em_type == EFX_TC_EM_DIRECT) { + rc = efx_mae_register_encap_match(efx, encap); + if (rc) { + netif_err(efx, drv, efx->net_dev, + "Failed to record %s, rc %d\n", + buf, rc); + goto fail; + } + } + netif_dbg(efx, drv, efx->net_dev, "Recorded %s\n", buf); + refcount_set(&encap->ref, 1); + } + match->encap = encap; + return 0; +fail: + rhashtable_remove_fast(&efx->tc->encap_match_ht, &encap->linkage, + efx_tc_encap_match_ht_params); + kfree(encap); +fail_pseudo: + if (pseudo) + efx_tc_flower_release_encap_match(efx, pseudo); + return rc; +} + +static void efx_tc_flower_release_encap_match(struct efx_nic *efx, + struct efx_tc_encap_match *encap) +{ + bool ipv6 = !(encap->src_ip | encap->dst_ip); + char buf[192]; + int rc; + + if (!refcount_dec_and_test(&encap->ref)) + return; /* still in use */ + + efx_tc_describe_encap_match(encap, ipv6, buf, sizeof(buf)); + + if (encap->type == EFX_TC_EM_DIRECT) { + rc = efx_mae_unregister_encap_match(efx, encap); + if (rc) + /* Display message but carry on and remove entry from our + * SW tables, because there's not much we can do about it. 
+ */ + netif_err(efx, drv, efx->net_dev, + "Failed to release %s, rc %d\n", buf, rc); + else + netif_dbg(efx, drv, efx->net_dev, "Released %s\n", buf); + } else { + netif_dbg(efx, drv, efx->net_dev, "Released %s\n", buf); + } + rhashtable_remove_fast(&efx->tc->encap_match_ht, &encap->linkage, + efx_tc_encap_match_ht_params); + if (encap->pseudo) + efx_tc_flower_release_encap_match(efx, encap->pseudo); + kfree(encap); +} + +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_FLOW_INDR_BLOCK_CB_REGISTER) && !defined(EFX_HAVE_FLOW_INDR_DEV_REGISTER) +static enum efx_encap_type efx_tc_egdev_udp_type(struct efx_nic *efx, + struct efx_tc_match *match) +{ + __be16 udp_dport; + + if (!IS_ALL_ONES(match->mask.enc_dport)) + return EFX_ENCAP_TYPE_NONE; + udp_dport = match->value.enc_dport; + if (efx->type->udp_tnl_lookup_port2) + return efx->type->udp_tnl_lookup_port2(efx, udp_dport); + + return EFX_ENCAP_TYPE_NONE; +} +#endif + +const char *efx_tc_encap_type_name(enum efx_encap_type typ, char *buf, size_t n) +{ + switch (typ) { + case EFX_ENCAP_TYPE_NONE: + return "none"; + case EFX_ENCAP_TYPE_VXLAN: + return "vxlan"; + case EFX_ENCAP_TYPE_NVGRE: + return "nvgre"; + case EFX_ENCAP_TYPE_GENEVE: + return "geneve"; + default: + snprintf(buf, n, "type %u\n", typ); + return buf; + } +} + +/* For details of action order constraints refer to SF-123102-TC-1§12.6.1 */ +enum efx_tc_action_order { + EFX_TC_AO_DECAP, + EFX_TC_AO_DEC_TTL, + EFX_TC_AO_PEDIT_MAC_ADDRS, + EFX_TC_AO_VLAN1_POP, + EFX_TC_AO_VLAN0_POP, + EFX_TC_AO_VLAN0_PUSH, + EFX_TC_AO_VLAN1_PUSH, + EFX_TC_AO_COUNT, + EFX_TC_AO_ENCAP, + EFX_TC_AO_DELIVER +}; +/* Determine whether we can add @new action without violating order */ +static bool efx_tc_flower_action_order_ok(const struct efx_tc_action_set *act, + enum efx_tc_action_order new) +{ + switch (new) { + case EFX_TC_AO_DECAP: + if (act->decap) + return false; + /* PEDIT_MAC_ADDRS must not happen before DECAP, though it + * can wait until much later + */ + if (act->src_mac || act->dst_mac) + return false; + + /* Decrementing ttl must not happen before DECAP */ + if (act->do_ttl_dec) + return false; + fallthrough; + case EFX_TC_AO_VLAN0_POP: + if (act->vlan_pop & 1) + return false; + fallthrough; + case EFX_TC_AO_VLAN1_POP: + if (act->vlan_pop & 2) + return false; + fallthrough; + case EFX_TC_AO_VLAN0_PUSH: + if (act->vlan_push & 1) + return false; + fallthrough; + case EFX_TC_AO_VLAN1_PUSH: + if (act->vlan_push & 2) + return false; + fallthrough; + case EFX_TC_AO_COUNT: + if (act->count) + return false; + fallthrough; + case EFX_TC_AO_PEDIT_MAC_ADDRS: + case EFX_TC_AO_ENCAP: + if (act->encap_md) + return false; + fallthrough; + case EFX_TC_AO_DELIVER: + return !act->deliver; + case EFX_TC_AO_DEC_TTL: + if (act->encap_md) + return false; + return !act->do_ttl_dec; + default: + /* Bad caller. Whatever they wanted to do, say they can't. */ + WARN_ON_ONCE(1); + return false; + } +} + +static bool efx_tc_rule_is_lhs_rule(struct flow_rule *fr, + struct efx_tc_match *match) +{ + const struct flow_action_entry *fa; + int i; + + flow_action_for_each(i, fa, &fr->action) { + switch (fa->id) { + case FLOW_ACTION_GOTO: + return true; +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_CONNTRACK_OFFLOAD) + case FLOW_ACTION_CT: + /* If rule is -trk, or doesn't mention trk at all, then + * a CT action implies a conntrack lookup (hence it's an + * LHS rule). If rule is +trk, then a CT action could + * just be ct(nat) or even ct(commit) (though the latter + * can't be offloaded). 
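/* Worked example (editor's illustration) of the ordering check above: if
 * an action set already contains a VLAN push (act->vlan_push & 1), then
 * efx_tc_flower_action_order_ok(act, EFX_TC_AO_VLAN0_POP) falls through
 * the VLAN1_POP and VLAN0_PUSH cases, hits the set vlan_push bit and
 * returns false: the MAE engine pops before it pushes, so "push then pop"
 * cannot be expressed in a single action set.
 */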
+ */ + if (!match->mask.ct_state_trk || !match->value.ct_state_trk) + return true; +#endif + default: + break; + } + } + return false; +} + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_CONNTRACK_OFFLOAD) +#endif + +/* A foreign LHS rule has matches on enc_ keys at the TC layer (including an + * implied match on enc_ip_proto UDP). Translate these into non-enc_ keys, + * so that we can use the same MAE machinery as local LHS rules (and so that + * debugfs lhs_rules dumps have uniform semantics). It may seem odd to do it + * this way round, given that the corresponding fields in the MAE MCDIs are + * all ENC_, but (a) we don't have enc_L2 or enc_ip_proto in struct + * efx_tc_match_fields and (b) semantically an LHS rule doesn't have inner + * fields so it's just matching on *the* header rather than the outer header. + * Make sure that the non-enc_ keys were not already being matched on, as that + * would imply a rule that needed a triple lookup. (Hardware can do that, + * with OR-AR-CT-AR, but it halves packet rate so we avoid it where possible; + * see efx_tc_flower_flhs_needs_ar().) + */ +static int efx_tc_flower_translate_flhs_match(struct efx_tc_match *match) +{ +#define COPY_MASK_AND_VALUE(_key, _ekey) do { \ + if (match->mask._key) return -EOPNOTSUPP; \ + match->mask._key = match->mask._ekey; \ + match->mask._ekey = 0; \ + match->value._key = match->value._ekey; \ + match->value._ekey = 0; \ +} while (0) +#define COPY_FROM_ENC(_key) COPY_MASK_AND_VALUE(_key, enc_##_key) + if (match->mask.ip_proto) return -EOPNOTSUPP; + match->mask.ip_proto = ~0; + match->value.ip_proto = IPPROTO_UDP; + COPY_FROM_ENC(src_ip); + COPY_FROM_ENC(dst_ip); +#ifdef CONFIG_IPV6 + if (!ipv6_addr_any(&match->mask.src_ip6)) return -EOPNOTSUPP; + match->mask.src_ip6 = match->mask.enc_src_ip6; + memset(&match->mask.enc_src_ip6, 0, sizeof(struct in6_addr)); + if (!ipv6_addr_any(&match->mask.dst_ip6)) return -EOPNOTSUPP; + match->mask.dst_ip6 = match->mask.enc_dst_ip6; + memset(&match->mask.enc_dst_ip6, 0, sizeof(struct in6_addr)); +#endif + COPY_FROM_ENC(ip_tos); + COPY_FROM_ENC(ip_ttl); + /* TODO should really copy enc_ip_frag but we don't have that in + * parse_match yet + */ + COPY_MASK_AND_VALUE(l4_sport, enc_sport); + COPY_MASK_AND_VALUE(l4_dport, enc_dport); + return 0; +#undef COPY_FROM_ENC +#undef COPY_MASK_AND_VALUE +} + +/* If a foreign LHS rule wants to match on keys that are only available after + * encap header identification and parsing, then it can't be done in the Outer + * Rule lookup, because that lookup determines the encap type used to parse + * beyond the outer headers. Thus, such rules must use the OR-AR-CT-AR lookup + * sequence, with an EM (struct efx_tc_encap_match) in the OR step. + * Return true iff the passed match requires this. 
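/* Worked expansion (editor's illustration): in the translation helper
 * above, COPY_FROM_ENC(src_ip) becomes
 *   if (match->mask.src_ip) return -EOPNOTSUPP;
 *   match->mask.src_ip = match->mask.enc_src_ip;
 *   match->mask.enc_src_ip = 0;
 *   match->value.src_ip = match->value.enc_src_ip;
 *   match->value.enc_src_ip = 0;
 * i.e. the outer-header match moves wholesale from the enc_ field to the
 * plain field, refusing any rule that already matched on the inner one.
 */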
+ */ +static bool efx_tc_flower_flhs_needs_ar(struct efx_tc_match *match) +{ + /* matches on inner-header keys can't be done in OR */ + if (match->mask.eth_proto) return true; + if (match->mask.vlan_tci[0] || match->mask.vlan_tci[1]) return true; + if (match->mask.vlan_proto[0] || match->mask.vlan_proto[1]) return true; + if (memchr_inv(match->mask.eth_saddr, 0, ETH_ALEN)) return true; + if (memchr_inv(match->mask.eth_daddr, 0, ETH_ALEN)) return true; + if (match->mask.ip_proto) return true; + if (match->mask.ip_tos || match->mask.ip_ttl) return true; + if (match->mask.src_ip || match->mask.dst_ip) return true; +#ifdef CONFIG_IPV6 + if (!ipv6_addr_any(&match->mask.src_ip6)) return true; + if (!ipv6_addr_any(&match->mask.dst_ip6)) return true; +#endif + if (match->mask.ip_frag || match->mask.ip_firstfrag) return true; + if (match->mask.l4_sport || match->mask.l4_dport) return true; + if (match->mask.tcp_flags) return true; + /* nor can VNI */ + return match->mask.enc_keyid; +} + +static int efx_tc_flower_handle_lhs_actions(struct efx_nic *efx, + struct flow_cls_offload *tc, + struct flow_rule *fr, + struct net_device *net_dev, + struct efx_tc_lhs_rule *rule) + +{ +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_TC_FLOW_OFFLOAD) || defined(EFX_HAVE_TCF_EXTACK) + struct netlink_ext_ack *extack = tc->common.extack; +#else + struct netlink_ext_ack *extack = NULL; +#endif + struct efx_tc_lhs_action *act = &rule->lhs_act; + const struct flow_action_entry *fa; + enum efx_tc_counter_type ctype; + bool pipe = true; + int i; + + ctype = rule->is_ar ? EFX_TC_COUNTER_TYPE_AR : EFX_TC_COUNTER_TYPE_OR; + + flow_action_for_each(i, fa, &fr->action) { + struct efx_tc_counter_index *cnt; +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_CONNTRACK_OFFLOAD) + struct efx_tc_ct_zone *ct_zone; +#endif + struct efx_tc_recirc_id *rid; + + if (!pipe) { + /* more actions after a non-pipe action */ + EFX_TC_ERR_MSG(efx, extack, "Action follows non-pipe action"); + return -EINVAL; + } + switch (fa->id) { + case FLOW_ACTION_GOTO: + if (!fa->chain_index) { + EFX_TC_ERR_MSG(efx, extack, "Can't goto chain 0, no looping in hw"); + return -EOPNOTSUPP; + } + rid = efx_tc_get_recirc_id(efx, fa->chain_index, + net_dev); + if (IS_ERR(rid)) { + EFX_TC_ERR_MSG(efx, extack, "Failed to allocate a hardware recirculation ID for this chain_index"); + return PTR_ERR(rid); + } + act->rid = rid; + /* TODO check fa->hw_stats */ + cnt = efx_tc_flower_get_counter_index(efx, +#if defined(EFX_USE_KCOMPAT) && defined(EFX_HAVE_TC_ACTION_COOKIE) + fa->cookie, +#else + tc->cookie, +#endif + ctype); + if (IS_ERR(cnt)) { + EFX_TC_ERR_MSG(efx, extack, "Failed to obtain a counter"); + return PTR_ERR(cnt); + } + WARN_ON(act->count); + act->count = cnt; +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_TC_FLOW_OFFLOAD) + act->count_action_idx = i; +#endif + pipe = false; + break; +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_CONNTRACK_OFFLOAD) + case FLOW_ACTION_CT: + if (act->zone) { + EFX_TC_ERR_MSG(efx, extack, "Can't offload multiple ct actions"); + return -EOPNOTSUPP; + } +#if !defined(EFX_USE_KCOMPAT) || defined(TCA_CT_ACT_COMMIT) + if (fa->ct.action & (TCA_CT_ACT_COMMIT | + TCA_CT_ACT_FORCE)) { + EFX_TC_ERR_MSG(efx, extack, "Can't offload ct commit/force"); + return -EOPNOTSUPP; + } + if (fa->ct.action & TCA_CT_ACT_CLEAR) { + EFX_TC_ERR_MSG(efx, extack, "Can't clear ct in LHS rule"); + return -EOPNOTSUPP; + } + if (fa->ct.action & (TCA_CT_ACT_NAT | + TCA_CT_ACT_NAT_SRC | + TCA_CT_ACT_NAT_DST)) { + EFX_TC_ERR_MSG(efx, extack, "Can't perform NAT in 
LHS rule - packet isn't conntracked yet"); + return -EOPNOTSUPP; + } +#endif + if (fa->ct.action) { + efx_tc_err(efx, "Unhandled ct.action %u for LHS rule\n", fa->ct.action); + NL_SET_ERR_MSG_MOD(extack, "Unrecognised ct.action flag"); + return -EOPNOTSUPP; + } + ct_zone = efx_tc_ct_register_zone(efx, fa->ct.zone, + MAE_CT_VNI_MODE_ZERO, + fa->ct.flow_table); + if (IS_ERR(ct_zone)) { + EFX_TC_ERR_MSG(efx, extack, "Failed to register for CT updates"); + return PTR_ERR(ct_zone); + } + act->zone = ct_zone; + break; +#endif + default: + efx_tc_err(efx, "Unhandled action %u for LHS rule\n", fa->id); + NL_SET_ERR_MSG_MOD(extack, "Unsupported action for LHS rule"); + return -EOPNOTSUPP; + } + } + + if (pipe) { + /* TODO we might actually want to allow this */ + EFX_TC_ERR_MSG(efx, extack, "Missing goto chain in LHS rule"); + return -EOPNOTSUPP; + } + return 0; +} + +static void efx_tc_flower_release_lhs_actions(struct efx_nic *efx, + struct efx_tc_lhs_action *act) +{ + if (act->rid) + efx_tc_put_recirc_id(efx, act->rid); +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_CONNTRACK_OFFLOAD) + if (act->zone) + efx_tc_ct_unregister_zone(efx, act->zone); +#endif + if (act->count) + efx_tc_flower_put_counter_index(efx, act->count); +} + +static int efx_tc_flower_replace_foreign_lhs_ar(struct efx_nic *efx, + struct flow_cls_offload *tc, + struct flow_rule *fr, + struct efx_tc_match *match, + struct net_device *net_dev) +{ +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_TC_FLOW_OFFLOAD) || defined(EFX_HAVE_TCF_EXTACK) + struct netlink_ext_ack *extack = tc->common.extack; +#else + struct netlink_ext_ack *extack = NULL; +#endif + struct efx_tc_lhs_rule *rule, *old; + enum efx_encap_type type; + char errbuf[16]; + int rc; + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_FLOW_INDR_BLOCK_CB_REGISTER) || defined(EFX_HAVE_FLOW_INDR_DEV_REGISTER) + type = efx_tc_indr_netdev_type(net_dev); +#else + /* This is deeply unsatisfactory: we're using the UDP port to + * determine the tunnel type but then still inserting a full + * match (with IP addresses) into the encap match table in hw. + */ + type = efx_tc_egdev_udp_type(efx, match); +#endif + if (type == EFX_ENCAP_TYPE_NONE) { + efx_tc_err(efx, "Egress encap match on unsupported tunnel device\n"); + return -EOPNOTSUPP; + } + + rc = efx_mae_check_encap_type_supported(efx, type); + if (rc) { + efx_tc_err(efx, "Firmware reports no support for %s encap match\n", + efx_tc_encap_type_name(type, errbuf, sizeof(errbuf))); + return rc; + } + rc = efx_tc_flower_record_encap_match(efx, match, type, + EFX_TC_EM_DIRECT, 0); + if (rc) + return rc; + + match->mask.recirc_id = 0xff; + if (match->mask.ct_state_trk && match->value.ct_state_trk) { + EFX_TC_ERR_MSG(efx, extack, "LHS rule can never match +trk"); + rc = -EOPNOTSUPP; + goto release_encap_match; + } + /* LHS rules are always -trk, so we don't need to match on that */ + match->mask.ct_state_trk = 0; + match->value.ct_state_trk = 0; + /* We must inhibit match on TCP SYN/FIN/RST, so that SW can see + * the packet and update the conntrack table. + * Outer Rules will do that with CT_TCP_FLAGS_INHIBIT, but Action + * Rules don't have that; instead they support matching on + * TCP_SYN_FIN_RST (aka TCP_INTERESTING_FLAGS), so use that. + * This is only strictly needed if there will be a DO_CT action, + * which we don't know yet, but typically there will be and it's + * simpler not to bother checking here. 
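/* Usage sketch (editor's illustration, hypothetical devices): the LHS/RHS
 * split handled here maps onto the usual two-chain conntrack ruleset:
 *   tc filter add dev $PF ingress chain 0 protocol ip flower \
 *           ct_state -trk action ct zone 1 pipe action goto chain 1
 *   tc filter add dev $PF ingress chain 1 protocol ip flower \
 *           ct_state +trk+est action mirred egress redirect dev $VFREP
 * Chain 0 becomes the LHS rule (conntrack lookup plus goto); chain 1
 * becomes an ordinary action rule keyed on the recirc_id that the goto
 * allocated.
 */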
+ */ + match->mask.tcp_syn_fin_rst = true; + + rc = efx_mae_match_check_caps(efx, &match->mask, extack); + if (rc) + goto release_encap_match; + + rule = kzalloc(sizeof(*rule), GFP_USER); + if (!rule) { + rc = -ENOMEM; + goto release_encap_match; + } + rule->cookie = tc->cookie; + rule->is_ar = true; + old = rhashtable_lookup_get_insert_fast(&efx->tc->lhs_rule_ht, + &rule->linkage, + efx_tc_lhs_rule_ht_params); + if (old) { + netif_dbg(efx, drv, efx->net_dev, + "Already offloaded rule (cookie %lx)\n", tc->cookie); + rc = -EEXIST; + NL_SET_ERR_MSG_MOD(extack, "Rule already offloaded"); + goto release; + } + + /* Parse actions */ + rc = efx_tc_flower_handle_lhs_actions(efx, tc, fr, net_dev, rule); + if (rc) + goto release; + + rule->match = *match; + rule->lhs_act.tun_type = type; + + rc = efx_mae_insert_lhs_rule(efx, rule, EFX_TC_PRIO_TC); + if (rc) { + EFX_TC_ERR_MSG(efx, extack, "Failed to insert rule in hw"); + goto release; + } + netif_dbg(efx, drv, efx->net_dev, + "Successfully parsed lhs rule (cookie %lx)\n", + tc->cookie); + return 0; + +release: + efx_tc_flower_release_lhs_actions(efx, &rule->lhs_act); + if (!old) + rhashtable_remove_fast(&efx->tc->lhs_rule_ht, &rule->linkage, + efx_tc_lhs_rule_ht_params); + kfree(rule); +release_encap_match: + if (match->encap) + efx_tc_flower_release_encap_match(efx, match->encap); + return rc; +} + +static int efx_tc_flower_replace_foreign_lhs(struct efx_nic *efx, + struct flow_cls_offload *tc, + struct flow_rule *fr, + struct efx_tc_match *match, + struct net_device *net_dev) +{ +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_TC_FLOW_OFFLOAD) || defined(EFX_HAVE_TCF_EXTACK) + struct netlink_ext_ack *extack = tc->common.extack; +#else + struct netlink_ext_ack *extack = NULL; +#endif + struct efx_tc_lhs_rule *rule, *old; + enum efx_encap_type type; + char errbuf[16]; + int rc; + + if (tc->common.chain_index) { + EFX_TC_ERR_MSG(efx, extack, "LHS rule only allowed in chain 0"); + return -EOPNOTSUPP; + } + + if (!efx_tc_match_is_encap(&match->mask)) { + /* This is not a tunnel decap rule, ignore it */ + netif_dbg(efx, drv, efx->net_dev, "Ignoring foreign LHS filter without encap match\n"); + return -EOPNOTSUPP; + } + + if (efx_tc_flower_flhs_needs_ar(match)) + return efx_tc_flower_replace_foreign_lhs_ar(efx, tc, fr, match, + net_dev); + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_FLOW_INDR_BLOCK_CB_REGISTER) || defined(EFX_HAVE_FLOW_INDR_DEV_REGISTER) + type = efx_tc_indr_netdev_type(net_dev); +#else + /* This is deeply unsatisfactory: we're using the UDP port to + * determine the tunnel type but then still inserting a full + * match (with IP addresses) into the encap match table in hw. 
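/* Usage sketch (editor's illustration, hypothetical devices/addresses):
 * rules reach the foreign-rule handlers when flower binds them on a
 * tunnel netdev rather than one of ours, e.g.
 *   tc filter add dev vxlan0 ingress protocol ip flower \
 *           enc_src_ip 10.0.0.2 enc_dst_ip 10.0.0.1 \
 *           enc_dst_port 4789 enc_key_id 1000 \
 *           action tunnel_key unset \
 *           action mirred egress redirect dev $VFREP
 * The enc_ keys become the recorded encap match, and "tunnel_key unset"
 * becomes a DECAP action in efx_tc_flower_replace_foreign() below.
 */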
+ */ + type = efx_tc_egdev_udp_type(efx, match); +#endif + if (type == EFX_ENCAP_TYPE_NONE) { + efx_tc_err(efx, "Egress encap match on unsupported tunnel device\n"); + return -EOPNOTSUPP; + } + + rc = efx_mae_check_encap_type_supported(efx, type); + if (rc) { + efx_tc_err(efx, "Firmware reports no support for %s encap match\n", + efx_tc_encap_type_name(type, errbuf, sizeof(errbuf))); + return rc; + } + rc = efx_tc_flower_record_encap_match(efx, match, type, + EFX_TC_EM_PSEUDO_OR, 0); + if (rc) + return rc; + + if (match->mask.ct_state_trk && match->value.ct_state_trk) { + EFX_TC_ERR_MSG(efx, extack, "LHS rule can never match +trk"); + rc = -EOPNOTSUPP; + goto release_encap_match; + } + /* LHS rules are always -trk, so we don't need to match on that */ + match->mask.ct_state_trk = 0; + match->value.ct_state_trk = 0; + + rc = efx_tc_flower_translate_flhs_match(match); + if (rc) { + EFX_TC_ERR_MSG(efx, extack, "LHS rule cannot match on inner fields"); + goto release_encap_match; + } + + rc = efx_mae_match_check_caps_lhs(efx, &match->mask, extack); + if (rc) + goto release_encap_match; + + rule = kzalloc(sizeof(*rule), GFP_USER); + if (!rule) { + rc = -ENOMEM; + goto release_encap_match; + } + rule->cookie = tc->cookie; + old = rhashtable_lookup_get_insert_fast(&efx->tc->lhs_rule_ht, + &rule->linkage, + efx_tc_lhs_rule_ht_params); + if (old) { + netif_dbg(efx, drv, efx->net_dev, + "Already offloaded rule (cookie %lx)\n", tc->cookie); + rc = -EEXIST; + NL_SET_ERR_MSG_MOD(extack, "Rule already offloaded"); + goto release; + } + + /* Parse actions */ + rc = efx_tc_flower_handle_lhs_actions(efx, tc, fr, net_dev, rule); + if (rc) + goto release; + + rule->match = *match; + rule->lhs_act.tun_type = type; + + rc = efx_mae_insert_lhs_rule(efx, rule, EFX_TC_PRIO_TC); + if (rc) { + EFX_TC_ERR_MSG(efx, extack, "Failed to insert rule in hw"); + goto release; + } + netif_dbg(efx, drv, efx->net_dev, + "Successfully parsed lhs rule (cookie %lx)\n", + tc->cookie); + return 0; + +release: + efx_tc_flower_release_lhs_actions(efx, &rule->lhs_act); + if (!old) + rhashtable_remove_fast(&efx->tc->lhs_rule_ht, &rule->linkage, + efx_tc_lhs_rule_ht_params); + kfree(rule); +release_encap_match: + if (match->encap) + efx_tc_flower_release_encap_match(efx, match->encap); + return rc; +} + +static int efx_tc_flower_replace_foreign(struct efx_nic *efx, + struct net_device *net_dev, + struct flow_cls_offload *tc) +{ +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_TC_FLOW_OFFLOAD) + struct flow_rule *fr = flow_cls_offload_flow_rule(tc); +#else + struct flow_rule *fr; +#endif + struct efx_tc_flow_rule *rule = NULL, *old = NULL; + struct efx_tc_action_set *act = NULL; + bool found = false, uplinked = false; + const struct flow_action_entry *fa; + struct efx_tc_recirc_id *rid; + struct efx_tc_match match; + struct efx_rep *to_efv; + s64 rc; + int i; + +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_TC_FLOW_OFFLOAD) + fr = efx_compat_flow_rule_build(tc); + if (IS_ERR(fr)) { + netif_err(efx, drv, efx->net_dev, + "Failed to convert tc cls to a flow_rule (rc %ld)\n", + PTR_ERR(fr)); + return PTR_ERR(fr); + } +#endif + + /* Parse match */ + memset(&match, 0, sizeof(match)); + rc = efx_tc_flower_parse_match(efx, fr, &match, NULL); +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_TC_FLOW_OFFLOAD) + if (rc) + return rc; +#else + if (rc) { + kfree(fr); + return rc; + } +#endif + /* The rule as given to us doesn't specify a source netdevice. 
+ * But, determining whether packets from a VF should match it is + * complicated, so leave those to the software slowpath: qualify + * the filter with source m-port == wire. + */ +#ifdef EFX_NOT_UPSTREAM + /* See SWNETLINUX-4321 for the gruesome details. */ +#endif + rc = efx_tc_flower_external_mport(efx, EFX_EFV_PF); + if (rc < 0) { + efx_tc_err(efx, "Failed to identify ingress m-port for foreign filter"); +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_TC_FLOW_OFFLOAD) + kfree(fr); +#endif + return rc; + } + match.value.ingress_port = rc; + match.mask.ingress_port = ~0; + + if (efx_tc_rule_is_lhs_rule(fr, &match)) { + rc = efx_tc_flower_replace_foreign_lhs(efx, tc, fr, &match, net_dev); +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_TC_FLOW_OFFLOAD) + kfree(fr); +#endif + return rc; + } + + if (tc->common.chain_index) { + rid = efx_tc_get_recirc_id(efx, tc->common.chain_index, net_dev); + if (IS_ERR(rid)) { + efx_tc_err(efx, "Failed to allocate a hardware recirculation ID for this chain_index"); + rc = PTR_ERR(rid); + goto release; + } + match.rid = rid; + match.value.recirc_id = rid->fw_id; + } + match.mask.recirc_id = 0xff; + + flow_action_for_each(i, fa, &fr->action) { + switch (fa->id) { + case FLOW_ACTION_REDIRECT: + case FLOW_ACTION_MIRRED: /* mirred means mirror here */ + to_efv = efx_tc_flower_lookup_efv(efx, fa->dev); + if (IS_ERR(to_efv)) + continue; + found = true; + break; + default: + break; + } + } + if (!found) { /* We don't care. */ + netif_dbg(efx, drv, efx->net_dev, "Ignoring foreign filter that doesn't egdev us\n"); + rc = -EOPNOTSUPP; + goto release; + } + + /* AR table can't match on DO_CT (+trk). But a commonly used pattern is + * +trk+est, which is strictly implied by +est, so rewrite it to that. + */ + if (match.mask.ct_state_trk && match.value.ct_state_trk && + match.mask.ct_state_est && match.value.ct_state_est) + match.mask.ct_state_trk = 0; + /* Thanks to CT_TCP_FLAGS_INHIBIT, packets with interesting flags could + * match +trk-est (CT_HIT=0) despite being on an established connection. + * So make -est imply -tcp_syn_fin_rst match to ensure these packets + * still hit the software path. + */ + if (match.mask.ct_state_est && !match.value.ct_state_est) { + if (match.value.tcp_syn_fin_rst) { + /* Can't offload this combination */ + rc = -EOPNOTSUPP; + goto release; + } + match.mask.tcp_syn_fin_rst = true; + } + + rc = efx_mae_match_check_caps(efx, &match.mask, NULL); + if (rc) + goto release; + + if (efx_tc_match_is_encap(&match.mask)) { + enum efx_encap_type type; + char errbuf[16]; + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_FLOW_INDR_BLOCK_CB_REGISTER) || defined(EFX_HAVE_FLOW_INDR_DEV_REGISTER) + type = efx_tc_indr_netdev_type(net_dev); +#else + /* This is deeply unsatisfactory: we're using the UDP port to + * determine the tunnel type but then still inserting a full + * match (with IP addresses) into the encap match table in hw. 
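/* Worked example (editor's illustration) of the ct_state fixups earlier
 * in this function: a filter matching "ct_state +trk+est" arrives with
 * both bits exact-matched; since CT_HIT (est) already implies the packet
 * was tracked, mask.ct_state_trk is cleared and the hardware match
 * safely degrades to "+est". Conversely "-est" gains an implicit
 * "-tcp_syn_fin_rst", so packets carrying SYN/FIN/RST, whose conntrack
 * lookup CT_TCP_FLAGS_INHIBIT suppresses (CT_HIT = 0), cannot wrongly
 * match the -est rule in hardware and instead reach the software path.
 */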
+ */ + type = efx_tc_egdev_udp_type(efx, &match); +#endif + if (type == EFX_ENCAP_TYPE_NONE) { + efx_tc_err(efx, "Egress encap match on unsupported tunnel device\n"); + rc = -EOPNOTSUPP; + goto release; + } + + rc = efx_mae_check_encap_type_supported(efx, type); + if (rc) { + efx_tc_err(efx, "Firmware reports no support for %s encap match\n", + efx_tc_encap_type_name(type, errbuf, + sizeof(errbuf))); + goto release; + } + + rc = efx_tc_flower_record_encap_match(efx, &match, type, + EFX_TC_EM_DIRECT, 0); + if (rc) + goto release; + } else if (!tc->common.chain_index) { + /* This is not a tunnel decap rule, ignore it */ + netif_dbg(efx, drv, efx->net_dev, "Ignoring foreign filter without encap match\n"); + rc = -EOPNOTSUPP; + goto release; + } + + rule = kzalloc(sizeof(*rule), GFP_USER); + if (!rule) { + rc = -ENOMEM; + goto release; + } + INIT_LIST_HEAD(&rule->acts.list); + rule->cookie = tc->cookie; + old = rhashtable_lookup_get_insert_fast(&efx->tc->match_action_ht, + &rule->linkage, + efx_tc_match_action_ht_params); + if (old) { + netif_dbg(efx, drv, efx->net_dev, + "Ignoring already-offloaded rule (cookie %lx)\n", + tc->cookie); + rc = -EEXIST; + goto release; + } + + /* Parse actions */ + act = kzalloc(sizeof(*act), GFP_USER); + if (!act) { + rc = -ENOMEM; + goto release; + } + + /* Parse actions. For foreign rules we only support decap & redirect */ + flow_action_for_each(i, fa, &fr->action) { + struct efx_tc_action_set save; + + switch (fa->id) { + case FLOW_ACTION_REDIRECT: + case FLOW_ACTION_MIRRED: + /* See corresponding code in efx_tc_flower_replace() for + * long explanations of what's going on here. + */ + save = *act; +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_HW_STATS_TYPE) + if (fa->hw_stats) { + if (!(fa->hw_stats & FLOW_ACTION_HW_STATS_DELAYED)) { + efx_tc_err(efx, "hw_stats_type %u not supported (only 'delayed')\n", + fa->hw_stats); + rc = -EOPNOTSUPP; + goto release; + } +#endif + if (!efx_tc_flower_action_order_ok(act, EFX_TC_AO_COUNT)) { + rc = -EOPNOTSUPP; + goto release; + } else { + struct efx_tc_counter_index *ctr; + + ctr = efx_tc_flower_get_counter_index(efx, +#if defined(EFX_USE_KCOMPAT) && defined(EFX_HAVE_TC_ACTION_COOKIE) + fa->cookie, +#else + tc->cookie, +#endif + EFX_TC_COUNTER_TYPE_AR); + if (IS_ERR(ctr)) { + rc = PTR_ERR(ctr); + goto release; + } + act->count = ctr; +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_TC_FLOW_OFFLOAD) + act->count_action_idx = i; +#endif + INIT_LIST_HEAD(&act->count_user); + } +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_HW_STATS_TYPE) + } +#endif + + if (!efx_tc_flower_action_order_ok(act, EFX_TC_AO_DELIVER)) { + /* can't happen */ + rc = -EOPNOTSUPP; + goto release; + } + to_efv = efx_tc_flower_lookup_efv(efx, fa->dev); + /* PF implies egdev is us, in which case we really + * want to deliver to the uplink (because this is an + * ingress filter). If we don't recognise the egdev + * at all, then we'd better trap so SW can handle it. 
+ */ + if (IS_ERR(to_efv)) + to_efv = EFX_EFV_PF; + if (to_efv == EFX_EFV_PF) { + if (uplinked) + break; + uplinked = true; + } + rc = efx_tc_flower_internal_mport(efx, to_efv); + if (rc < 0) + goto release; + act->dest_mport = rc; + act->deliver = 1; + rc = efx_mae_alloc_action_set(efx, act); + if (rc) + goto release; + list_add_tail(&act->list, &rule->acts.list); + act = NULL; + if (fa->id == FLOW_ACTION_REDIRECT) + break; /* end of the line */ + /* Mirror, so continue on with saved act */ + act = kzalloc(sizeof(*act), GFP_USER); + if (!act) { + rc = -ENOMEM; + goto release; + } + *act = save; + break; + case FLOW_ACTION_TUNNEL_DECAP: + if (!efx_tc_flower_action_order_ok(act, EFX_TC_AO_DECAP)) { + rc = -EINVAL; + goto release; + } + act->decap = 1; + /* If we previously delivered/trapped to uplink, now + * that we've decapped we'll want another copy if we + * try to deliver/trap to uplink again. + */ + uplinked = false; + break; + default: + efx_tc_err(efx, "Unhandled action %u\n", fa->id); + rc = -EOPNOTSUPP; + goto release; + } + } + + if (act) { + if (!uplinked) { + /* Not shot/redirected, so deliver to default dest (which is + * the uplink, as this is an ingress filter) + */ + efx_mae_mport_uplink(efx, &act->dest_mport); + act->deliver = 1; + } + rc = efx_mae_alloc_action_set(efx, act); + if (rc) + goto release; + list_add_tail(&act->list, &rule->acts.list); + act = NULL; /* Prevent double-free in error path */ + } + + rule->match = match; + + netif_dbg(efx, drv, efx->net_dev, + "Successfully parsed foreign filter (cookie %lx)\n", + tc->cookie); + + if (!efx_tc_check_ready(efx, rule)) { + /* can't happen, as foreign filters don't support encap actions */ + netif_err(efx, drv, efx->net_dev, "action not ready for hw\n"); + rc = -EOPNOTSUPP; + goto release; + } + rc = efx_mae_alloc_action_set_list(efx, &rule->acts); + if (rc) + goto release; + rc = efx_mae_insert_rule(efx, &rule->match, EFX_TC_PRIO_TC, + rule->acts.fw_id, &rule->fw_id); + if (rc) + goto release_act; +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_TC_FLOW_OFFLOAD) + kfree(fr); +#endif + return 0; + +release_act: + efx_mae_free_action_set_list(efx, &rule->acts); +release: + /* We failed to insert the rule, so free up any entries we created in + * subsidiary tables. 
+ */ + if (match.rid) + efx_tc_put_recirc_id(efx, match.rid); + if (act) + efx_tc_free_action_set(efx, act, false); + if (rule) { + if (!old) + rhashtable_remove_fast(&efx->tc->match_action_ht, + &rule->linkage, + efx_tc_match_action_ht_params); + efx_tc_free_action_set_list(efx, &rule->acts, false); + } + kfree(rule); + if (match.encap) + efx_tc_flower_release_encap_match(efx, match.encap); +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_TC_FLOW_OFFLOAD) + kfree(fr); +#endif + return rc; +} + +static int efx_tc_flower_replace_lhs(struct efx_nic *efx, + struct flow_cls_offload *tc, + struct flow_rule *fr, + struct efx_tc_match *match, + struct efx_rep *efv, + struct net_device *net_dev) +{ +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_TC_FLOW_OFFLOAD) || defined(EFX_HAVE_TCF_EXTACK) + struct netlink_ext_ack *extack = tc->common.extack; +#else + struct netlink_ext_ack *extack = NULL; +#endif + struct efx_tc_lhs_rule *rule, *old; + int rc; + + if (tc->common.chain_index) { + EFX_TC_ERR_MSG(efx, extack, "LHS rule only allowed in chain 0"); + return -EOPNOTSUPP; + } + + if (match->mask.ct_state_trk && match->value.ct_state_trk) { + EFX_TC_ERR_MSG(efx, extack, "LHS rule can never match +trk"); + return -EOPNOTSUPP; + } + /* LHS rules are always -trk, so we don't need to match on that */ + match->mask.ct_state_trk = 0; + match->value.ct_state_trk = 0; + + rc = efx_mae_match_check_caps_lhs(efx, &match->mask, extack); + if (rc) + return rc; + + rule = kzalloc(sizeof(*rule), GFP_USER); + if (!rule) + return -ENOMEM; + rule->cookie = tc->cookie; + old = rhashtable_lookup_get_insert_fast(&efx->tc->lhs_rule_ht, + &rule->linkage, + efx_tc_lhs_rule_ht_params); + if (old) { + netif_dbg(efx, drv, efx->net_dev, + "Already offloaded rule (cookie %lx)\n", tc->cookie); + rc = -EEXIST; + NL_SET_ERR_MSG_MOD(extack, "Rule already offloaded"); + goto release; + } + + /* Parse actions */ + /* Note regarding passed net_dev (used for efx_tc_get_recirc_id()): + * VFrep and PF can share the same chain namespace, as they have + * distinct ingress_mports. So we don't need to burn an extra + * recirc_id if both use the same chain_index. + * (Strictly speaking, we could give each VFrep its own recirc_id + * namespace that doesn't take IDs away from the PF, but that would + * require a bunch of additional IDAs - one for each representor - + * and that's not likely to be the main cause of recirc_id + * exhaustion anyway.) 
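+ * For example, if both the PF and a VFrep offload a rule with
+ * "action goto chain 3", efx_tc_get_recirc_id() can return the same
+ * chain_index 3 -> recirc_id mapping for both (the lookup is keyed
+ * on efx->net_dev here), rather than burning two IDs from
+ * recirc_ida.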
+ */ + rc = efx_tc_flower_handle_lhs_actions(efx, tc, fr, efx->net_dev, rule); + if (rc) + goto release; + + rule->match = *match; + + rc = efx_mae_insert_lhs_rule(efx, rule, EFX_TC_PRIO_TC); + if (rc) { + EFX_TC_ERR_MSG(efx, extack, "Failed to insert rule in hw"); + goto release; + } + netif_dbg(efx, drv, efx->net_dev, + "Successfully parsed lhs rule (cookie %lx)\n", + tc->cookie); + return 0; + +release: + efx_tc_flower_release_lhs_actions(efx, &rule->lhs_act); + if (!old) + rhashtable_remove_fast(&efx->tc->lhs_rule_ht, &rule->linkage, + efx_tc_lhs_rule_ht_params); + kfree(rule); + return rc; +} + +struct efx_tc_mangler_state { + u8 dst_mac_32:1; /* eth->h_dest[0:3] */ + u8 dst_mac_16:1; /* eth->h_dest[4:5] */ + u8 src_mac_16:1; /* eth->h_source[0:1] */ + u8 src_mac_32:1; /* eth->h_source[2:5] */ + unsigned char dst_mac[ETH_ALEN]; + unsigned char src_mac[ETH_ALEN]; +}; + +static int efx_tc_complete_mac_mangle(struct efx_nic *efx, + struct efx_tc_action_set *act, + struct efx_tc_mangler_state *mung, + struct netlink_ext_ack *extack) +{ + struct efx_tc_mac_pedit_action *ped; + + if (mung->dst_mac_32 && mung->dst_mac_16) { + ped = efx_tc_flower_get_mac(efx, mung->dst_mac, extack); + if (IS_ERR(ped)) + return PTR_ERR(ped); + act->dst_mac = ped; + /* consume the incomplete state */ + mung->dst_mac_32 = 0; + mung->dst_mac_16 = 0; + } + if (mung->src_mac_16 && mung->src_mac_32) { + ped = efx_tc_flower_get_mac(efx, mung->src_mac, extack); + if (IS_ERR(ped)) + return PTR_ERR(ped); + act->src_mac = ped; + /* consume the incomplete state */ + mung->src_mac_32 = 0; + mung->src_mac_16 = 0; + } + return 0; +} + +static int efx_tc_pedit_add(struct efx_nic *efx, struct efx_tc_action_set *act, + const struct flow_action_entry *fa, + struct netlink_ext_ack *extack) +{ + switch (fa->mangle.htype) { + case FLOW_ACT_MANGLE_HDR_TYPE_IP4: + switch (fa->mangle.offset) { + case offsetof(struct iphdr, ttl): + /* check that pedit applies to ttl only */ + if (fa->mangle.mask != 0xffffff00) + break; + + /* Adding 0xff is equivalent to decrementing the ttl. + * Other added values are not supported. + */ + if ((fa->mangle.val & 0x000000ff) != 0xff) + break; + + /* check that we do not decrement ttl twice */ + if (!efx_tc_flower_action_order_ok(act, + EFX_TC_AO_DEC_TTL)) { + EFX_TC_ERR_MSG(efx, extack, + "Unsupported: multiple dec ttl"); + return -EOPNOTSUPP; + } + act->do_ttl_dec = 1; + return 0; + default: + break; + } + break; + case FLOW_ACT_MANGLE_HDR_TYPE_IP6: + switch (fa->mangle.offset) { + case round_down(offsetof(struct ipv6hdr, hop_limit), 4): + /* check that pedit applies to hoplimit only */ + if (fa->mangle.mask != 0x00ffffff) + break; + + /* Adding 0xff is equivalent to decrementing the hoplimit. + * Other added values are not supported. 
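+ * Worked example: pedit "add" is modular u8 arithmetic, so for
+ * hop_limit = 64, (64 + 0xff) & 0xff = 63, i.e. a decrement; the
+ * hop_limit byte occupies bits 31:24 of this 32-bit word, hence
+ * the (val >> 24) check below.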
+ */ + if ((fa->mangle.val >> 24) != 0xff) + break; + + /* check that we do not decrement hoplimit twice */ + if (!efx_tc_flower_action_order_ok(act, + EFX_TC_AO_DEC_TTL)) { + EFX_TC_ERR_MSG(efx, extack, + "Unsupported: multiple dec ttl"); + return -EOPNOTSUPP; + } + act->do_ttl_dec = 1; + return 0; + default: + break; + } + break; + default: + break; + } + + NL_SET_ERR_MSG_MOD(extack, "Unsupported: ttl add pedit must be a pure decrement"); + efx_tc_err(efx, "Unsupported: ttl add action type %x %x %x/%x\n", + fa->mangle.htype, fa->mangle.offset, fa->mangle.val, + fa->mangle.mask); + return -EOPNOTSUPP; +} + +static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act, + const struct flow_action_entry *fa, + struct efx_tc_mangler_state *mung, + struct netlink_ext_ack *extack, + struct efx_tc_match *match) +{ + __le32 mac32; + __le16 mac16; + u8 tr_ttl; + + switch (fa->mangle.htype) { + case FLOW_ACT_MANGLE_HDR_TYPE_ETH: + BUILD_BUG_ON(offsetof(struct ethhdr, h_dest) != 0); + BUILD_BUG_ON(offsetof(struct ethhdr, h_source) != 6); + switch (fa->mangle.offset) { + case 0: + if (fa->mangle.mask) { + NL_SET_ERR_MSG_MOD(extack, "Unsupported mask for eth.h_dest mangle"); + efx_tc_err(efx, "Unsupported: mask (%#x) of eth.dst32 mangle\n", + fa->mangle.mask); + return -EOPNOTSUPP; + } + /* Ethernet address is little-endian */ + mac32 = cpu_to_le32(fa->mangle.val); + memcpy(mung->dst_mac, &mac32, sizeof(mac32)); + mung->dst_mac_32 = 1; + return efx_tc_complete_mac_mangle(efx, act, mung, extack); + case 4: + if (fa->mangle.mask == 0xffff) { + mac16 = cpu_to_le16(fa->mangle.val >> 16); + memcpy(mung->src_mac, &mac16, sizeof(mac16)); + mung->src_mac_16 = 1; + } else if (fa->mangle.mask == 0xffff0000) { + mac16 = cpu_to_le16((u16)fa->mangle.val); + memcpy(mung->dst_mac + 4, &mac16, sizeof(mac16)); + mung->dst_mac_16 = 1; + } else { + NL_SET_ERR_MSG_MOD(extack, "Unsupported mask for eth mangle"); + efx_tc_err(efx, "Unsupported: mask (%#x) of eth+4 mangle is not high or low 16b\n", + fa->mangle.mask); + return -EOPNOTSUPP; + } + return efx_tc_complete_mac_mangle(efx, act, mung, extack); + case 8: + if (fa->mangle.mask) { + NL_SET_ERR_MSG_MOD(extack, "Unsupported mask for eth.h_source mangle"); + efx_tc_err(efx, "Unsupported: mask (%#x) of eth.src32 mangle\n", + fa->mangle.mask); + return -EOPNOTSUPP; + } + mac32 = cpu_to_le32(fa->mangle.val); + memcpy(mung->src_mac + 2, &mac32, sizeof(mac32)); + mung->src_mac_32 = 1; + return efx_tc_complete_mac_mangle(efx, act, mung, extack); + default: + NL_SET_ERR_MSG_MOD(extack, "Unsupported offset for ethhdr mangle"); + efx_tc_err(efx, "Unsupported: mangle eth+%u %x/%x\n", + fa->mangle.offset, fa->mangle.val, fa->mangle.mask); + return -EOPNOTSUPP; + } + break; + case FLOW_ACT_MANGLE_HDR_TYPE_IP4: + switch (fa->mangle.offset) { + case offsetof(struct iphdr, ttl): + /* we currently only support pedit IP4 when it applies + * to TTL and then only when it can be achieved with a + * decrement ttl action + */ + + /* check that pedit applies to ttl only */ + if (fa->mangle.mask != 0xffffff00) + return -EOPNOTSUPP; + + /* we can only convert to a dec ttl when we have an + * exact match on the ttl field + */ + if (match->mask.ip_ttl != 0xff) + return -EOPNOTSUPP; + + /* check that we don't try to decrement 0, which equates + * to setting the ttl to 0xff + */ + if (match->value.ip_ttl == 0) + return -EOPNOTSUPP; + + /* check that we do not decrement ttl twice */ + if (!efx_tc_flower_action_order_ok(act, + EFX_TC_AO_DEC_TTL)) { + EFX_TC_ERR_MSG(efx, extack, + 
"Unsupported: multiple dec ttl"); + return -EOPNOTSUPP; + } + + /* check pedit can be achieved with decrement action */ + tr_ttl = match->value.ip_ttl - 1; + if ((fa->mangle.val & 0x000000ff) == tr_ttl) { + act->do_ttl_dec = 1; + if (efx->tc_match_ignore_ttl) { + match->mask.ip_ttl = 0; + match->value.ip_ttl = 0; + } + return 0; + } + + fallthrough; + default: + return -EOPNOTSUPP; + } + break; + case FLOW_ACT_MANGLE_HDR_TYPE_IP6: + switch (fa->mangle.offset) { + case round_down(offsetof(struct ipv6hdr, hop_limit), 4): + /* we currently only support pedit IP6 when it applies + * to the hoplimit and then only when it can be achieved + * with a decrement hoplimit action + */ + + /* check that pedit applies to ttl only */ + if (fa->mangle.mask != 0x00ffffff) + return -EOPNOTSUPP; + + /* we can only convert to a dec ttl when we have an + * exact match on the ttl field + */ + if (match->mask.ip_ttl != 0xff) + return -EOPNOTSUPP; + + /* check that we don't try to decrement 0, which equates + * to setting the ttl to 0xff + */ + if (match->value.ip_ttl == 0) + return -EOPNOTSUPP; + + /* check that we do not decrement hoplimit twice */ + if (!efx_tc_flower_action_order_ok(act, + EFX_TC_AO_DEC_TTL)) { + EFX_TC_ERR_MSG(efx, extack, + "Unsupported: multiple dec ttl"); + return -EOPNOTSUPP; + } + + /* check pedit can be achieved with decrement action */ + tr_ttl = match->value.ip_ttl - 1; + if ((fa->mangle.val >> 24) == tr_ttl) { + act->do_ttl_dec = 1; + if (efx->tc_match_ignore_ttl) { + match->mask.ip_ttl = 0; + match->value.ip_ttl = 0; + } + return 0; + } + + fallthrough; + default: + return -EOPNOTSUPP; + } + default: + NL_SET_ERR_MSG_MOD(extack, "Unsupported header type for mangle"); + efx_tc_err(efx, "Unhandled mangle htype %u for action rule\n", + fa->mangle.htype); + return -EOPNOTSUPP; + } + return 0; +} + +static int efx_tc_incomplete_mangle(struct efx_nic *efx, + struct efx_tc_mangler_state *mung, + struct netlink_ext_ack *extack) +{ + if (mung->dst_mac_32 || mung->dst_mac_16) { + EFX_TC_ERR_MSG(efx, extack, "Incomplete pedit of dest MAC address"); + return -EOPNOTSUPP; + } + if (mung->src_mac_16 || mung->src_mac_32) { + EFX_TC_ERR_MSG(efx, extack, "Incomplete pedit of source MAC address"); + return -EOPNOTSUPP; + } + return 0; +} + +static int efx_tc_late_ttl_match_check(struct efx_nic *efx, + struct efx_tc_match *match, + struct netlink_ext_ack *extack) +{ + /* Either ttl matches are ignored, or the ttl match value must be 1. + * Current hardware only supports matching ttl 1 or wildcard ttl. 
+ */ + if (match->mask.ip_ttl == 0) + return 0; + + if (match->mask.ip_ttl == 0xff && match->value.ip_ttl == 1) + return 0; + + efx_tc_err(efx, "Unsupported TTL or Hop Limit match %#x/%#x\n", + match->value.ip_ttl, match->mask.ip_ttl); + NL_SET_ERR_MSG_MOD(extack, "ip ttl match not supported"); + return -EOPNOTSUPP; +} + +static int efx_tc_flower_replace(struct efx_nic *efx, + struct net_device *net_dev, + struct flow_cls_offload *tc, + struct efx_rep *efv) +{ +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_TC_FLOW_OFFLOAD) + struct flow_rule *fr = flow_cls_offload_flow_rule(tc); +#else + struct flow_rule *fr; +#endif +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_TC_FLOW_OFFLOAD) || defined(EFX_HAVE_TCF_EXTACK) + struct netlink_ext_ack *extack = tc->common.extack; +#else + struct netlink_ext_ack *extack = NULL; +#endif + struct efx_tc_flow_rule *rule = NULL, *old = NULL; + const struct ip_tunnel_info *encap_info = NULL; + struct efx_tc_mangler_state mung = {}; + struct efx_tc_action_set *act = NULL; + const struct flow_action_entry *fa; + struct efx_rep *from_efv, *to_efv; + struct efx_tc_recirc_id *rid; + struct efx_tc_match match; + u32 acts_id; + s64 rc; + int i; + + if (!tc_can_offload_extack(efx->net_dev, extack)) + return -EOPNOTSUPP; + if (WARN_ON(!efx->tc)) + return -ENETDOWN; + if (WARN_ON(!efx->tc->up)) + return -ENETDOWN; + + from_efv = efx_tc_flower_lookup_efv(efx, net_dev); + if (IS_ERR(from_efv)) { + netif_dbg(efx, drv, efx->net_dev, "Got notification for otherdev\n"); + return efx_tc_flower_replace_foreign(efx, net_dev, tc); + } + + if (efv != from_efv) { + /* can't happen */ + efx_tc_err(efx, "for %s efv is %snull but from_efv is %snull\n", + netdev_name(net_dev), efv ? "non-" : "", + from_efv ? "non-" : ""); + if (efv) + NL_SET_ERR_MSG_MOD(extack, "vfrep filter has PF net_dev (can't happen)"); + else + NL_SET_ERR_MSG_MOD(extack, "PF filter has vfrep net_dev (can't happen)"); + return -EINVAL; + } + + /* Parse match */ + memset(&match, 0, sizeof(match)); + rc = efx_tc_flower_external_mport(efx, from_efv); + if (rc < 0) { + EFX_TC_ERR_MSG(efx, extack, "Failed to identify ingress m-port"); + return rc; + } +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_TC_FLOW_OFFLOAD) + fr = efx_compat_flow_rule_build(tc); + if (IS_ERR(fr)) { + EFX_TC_ERR_MSG(efx, extack, "Failed to convert tc cls to a flow_rule"); + return PTR_ERR(fr); + } +#endif + match.value.ingress_port = rc; + match.mask.ingress_port = ~0; + rc = efx_tc_flower_parse_match(efx, fr, &match, extack); +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_TC_FLOW_OFFLOAD) + if (rc) + return rc; +#else + if (rc) { + kfree(fr); + return rc; + } +#endif + if (efx_tc_match_is_encap(&match.mask)) { + EFX_TC_ERR_MSG(efx, extack, "Ingress enc_key matches not supported"); + rc = -EOPNOTSUPP; + goto release; + } + + if (efx_tc_rule_is_lhs_rule(fr, &match)) { + rc = efx_tc_flower_replace_lhs(efx, tc, fr, &match, efv, net_dev); + if (rc) + goto release; +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_TC_FLOW_OFFLOAD) + kfree(fr); +#endif + return 0; + } + + /* chain_index 0 is always recirc_id 0 (and does not appear in recirc_ht). + * Conveniently, match.rid == NULL and match.value.recirc_id == 0 owing + * to the initial memset(), so we don't need to do anything in that case. + */ + if (tc->common.chain_index) { + /* VFreps and PF can share chain namespace, see comment in + * efx_tc_flower_replace_lhs(). 
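+ * E.g. "goto chain 1" offloaded from both the PF and a VFrep can
+ * map to a single recirc_id for chain_index 1 instead of two.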
+ */ + rid = efx_tc_get_recirc_id(efx, tc->common.chain_index, + efx->net_dev); + if (IS_ERR(rid)) { + EFX_TC_ERR_MSG(efx, extack, "Failed to allocate a hardware recirculation ID for this chain_index"); + rc = PTR_ERR(rid); + goto release; + } + match.rid = rid; + match.value.recirc_id = rid->fw_id; + } + match.mask.recirc_id = 0xff; + + /* AR table can't match on DO_CT (+trk). But a commonly used pattern is + * +trk+est, which is strictly implied by +est, so rewrite it to that. + */ + if (match.mask.ct_state_trk && match.value.ct_state_trk && + match.mask.ct_state_est && match.value.ct_state_est) + match.mask.ct_state_trk = 0; + /* Thanks to CT_TCP_FLAGS_INHIBIT, packets with interesting flags could + * match +trk-est (CT_HIT=0) despite being on an established connection. + * So make -est imply -tcp_syn_fin_rst match to ensure these packets + * still hit the software path. + */ + if (match.mask.ct_state_est && !match.value.ct_state_est) { + if (match.value.tcp_syn_fin_rst) { + /* Can't offload this combination */ + rc = -EOPNOTSUPP; + goto release; + } + match.mask.tcp_syn_fin_rst = true; + } + + rc = efx_mae_match_check_caps(efx, &match.mask, extack); + if (rc) + goto release; + + rule = kzalloc(sizeof(*rule), GFP_USER); + if (!rule) { + rc = -ENOMEM; + goto release; + } + INIT_LIST_HEAD(&rule->acts.list); + rule->cookie = tc->cookie; + old = rhashtable_lookup_get_insert_fast(&efx->tc->match_action_ht, + &rule->linkage, + efx_tc_match_action_ht_params); + if (old) { + netif_dbg(efx, drv, efx->net_dev, + "Already offloaded rule (cookie %lx)\n", tc->cookie); + rc = -EEXIST; + NL_SET_ERR_MSG_MOD(extack, "Rule already offloaded"); + goto release; + } + + /* Parse actions */ + act = kzalloc(sizeof(*act), GFP_USER); + if (!act) { + rc = -ENOMEM; + goto release; + } + + flow_action_for_each(i, fa, &fr->action) { + struct efx_tc_action_set save; + int depth; + u16 tci; + + if (!act) { + /* more actions after a non-pipe action */ + EFX_TC_ERR_MSG(efx, extack, "Action follows non-pipe action"); + rc = -EINVAL; + goto release; + } + /* If encap_info is set, then we need a counter even if the + * user doesn't want stats, because we have to prod + * neighbouring periodically if the rule is in use. + */ + if ((fa->id == FLOW_ACTION_REDIRECT || + fa->id == FLOW_ACTION_MIRRED || + fa->id == FLOW_ACTION_DROP) && +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_FLOW_STATS_TYPE) + (fa->hw_stats || encap_info)) { +#else + true) { +#endif + struct efx_tc_counter_index *ctr; + + /* Currently the only actions that want stats are + * mirred and gact (ok, shot, trap, goto-chain), which + * means we want stats just before delivery. Also, + * note that tunnel_key set shouldn't change the length + * — it's only the subsequent mirred that does that, + * and the stats are taken _before_ the mirred action + * happens. + */ + if (!efx_tc_flower_action_order_ok(act, EFX_TC_AO_COUNT)) { + /* All supported actions that count either steal + * (gact shot, mirred redirect) or clone act + * (mirred mirror), so we should never get two + * count actions on one action_set. 
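+ * That is, a counting action either ends this action set (steal)
+ * or is followed by a fresh kzalloc'd act (clone), so a second
+ * count here would indicate a driver bug rather than merely an
+ * unsupported filter.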
+ */ + EFX_TC_ERR_MSG(efx, extack, "Count-action conflict (can't happen)"); + rc = -EOPNOTSUPP; + goto release; + } + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_FLOW_STATS_TYPE) + if (!(fa->hw_stats & FLOW_ACTION_HW_STATS_DELAYED)) { + NL_SET_ERR_MSG_MOD(extack, "Only hw_stats_type delayed is supported"); + efx_tc_err(efx, "hw_stats_type %u not supported (only 'delayed')\n", + fa->hw_stats); + rc = -EOPNOTSUPP; + goto release; + } +#endif + + ctr = efx_tc_flower_get_counter_index(efx, +#if defined(EFX_USE_KCOMPAT) && defined(EFX_HAVE_TC_ACTION_COOKIE) + fa->cookie, +#else + tc->cookie, +#endif + EFX_TC_COUNTER_TYPE_AR); + if (IS_ERR(ctr)) { + rc = PTR_ERR(ctr); + EFX_TC_ERR_MSG(efx, extack, "Failed to obtain a counter"); + goto release; + } + act->count = ctr; +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_TC_FLOW_OFFLOAD) + act->count_action_idx = i; +#endif + INIT_LIST_HEAD(&act->count_user); + } + + switch (fa->id) { + case FLOW_ACTION_DROP: + rc = efx_mae_alloc_action_set(efx, act); + if (rc) { + EFX_TC_ERR_MSG(efx, extack, "Failed to write action set to hw (drop)"); + goto release; + } + list_add_tail(&act->list, &rule->acts.list); + act = NULL; /* end of the line */ + break; + case FLOW_ACTION_REDIRECT: + case FLOW_ACTION_MIRRED: + save = *act; + + if (encap_info) { + struct efx_tc_encap_action *encap; + + if (!efx_tc_flower_action_order_ok(act, + EFX_TC_AO_ENCAP)) { + rc = -EOPNOTSUPP; + EFX_TC_ERR_MSG(efx, extack, "Encap action violates action order"); + goto release; + } + encap = efx_tc_flower_create_encap_md( + efx, encap_info, fa->dev, extack); + if (IS_ERR_OR_NULL(encap)) { + rc = PTR_ERR(encap); + if (!rc) + rc = -EIO; /* arbitrary */ + goto release; + } + act->encap_md = encap; + list_add_tail(&act->encap_user, &encap->users); + act->dest_mport = encap->dest_mport; + act->deliver = 1; + if (act->count && !WARN_ON(!act->count->cnt)) { + /* This counter is used by an encap + * action, which needs a reference back + * so it can prod neighbouring whenever + * traffic is seen. 
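+ * Concretely, the back-reference is the count_user linkage added
+ * to cnt->users under cnt->lock just below, which lets the counter
+ * update path find every encap action whose neighbour entry needs
+ * keeping fresh.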
+ */ + spin_lock_bh(&act->count->cnt->lock); + list_add_tail(&act->count_user, + &act->count->cnt->users); + spin_unlock_bh(&act->count->cnt->lock); + } + rc = efx_mae_alloc_action_set(efx, act); + if (rc) { + EFX_TC_ERR_MSG(efx, extack, "Failed to write action set to hw (encap)"); + goto release; + } + list_add_tail(&act->list, &rule->acts.list); + act->user = &rule->acts; + act = NULL; + if (fa->id == FLOW_ACTION_REDIRECT) + break; /* end of the line */ + /* Mirror, so continue on with saved act */ + save.count = NULL; + act = kzalloc(sizeof(*act), GFP_USER); + if (!act) { + rc = -ENOMEM; + goto release; + } + *act = save; + break; + } + + if (!efx_tc_flower_action_order_ok(act, EFX_TC_AO_DELIVER)) { + /* can't happen */ + rc = -EOPNOTSUPP; + EFX_TC_ERR_MSG(efx, extack, "Deliver action violates action order (can't happen)"); + goto release; + } + to_efv = efx_tc_flower_lookup_efv(efx, fa->dev); + if (IS_ERR(to_efv)) { + EFX_TC_ERR_MSG(efx, extack, "Mirred egress device not on switch"); + rc = PTR_ERR(to_efv); + goto release; + } + rc = efx_tc_flower_external_mport(efx, to_efv); + if (rc < 0) { + EFX_TC_ERR_MSG(efx, extack, "Failed to identify egress m-port"); + goto release; + } + act->dest_mport = rc; + act->deliver = 1; + rc = efx_mae_alloc_action_set(efx, act); + if (rc) { + EFX_TC_ERR_MSG(efx, extack, "Failed to write action set to hw (mirred)"); + goto release; + } + list_add_tail(&act->list, &rule->acts.list); + act = NULL; + if (fa->id == FLOW_ACTION_REDIRECT) + break; /* end of the line */ + /* Mirror, so continue on with saved act */ + save.count = NULL; + act = kzalloc(sizeof(*act), GFP_USER); + if (!act) { + rc = -ENOMEM; + goto release; + } + *act = save; + break; + case FLOW_ACTION_VLAN_POP: + if (act->vlan_push & 2) { + act->vlan_push &= ~2; + } else if (act->vlan_push & 1) { + act->vlan_push &= ~1; + } else if (efx_tc_flower_action_order_ok(act, EFX_TC_AO_VLAN0_POP)) { + act->vlan_pop |= 1; + } else if (efx_tc_flower_action_order_ok(act, EFX_TC_AO_VLAN1_POP)) { + act->vlan_pop |= 2; + } else { + EFX_TC_ERR_MSG(efx, extack, "More than two VLAN pops, or action order violated"); + rc = -EINVAL; + goto release; + } + break; + case FLOW_ACTION_VLAN_PUSH: + if (efx_tc_flower_action_order_ok(act, EFX_TC_AO_VLAN0_PUSH)) { + depth = 0; + } else if (efx_tc_flower_action_order_ok(act, EFX_TC_AO_VLAN1_PUSH)) { + depth = 1; + } else { + /* TODO special case when we have an ENCAP rule + * and can stick a vlan on its encap_hdr? + * But we can't do the reverse, so why bother? + */ + rc = -EINVAL; + EFX_TC_ERR_MSG(efx, extack, "More than two VLAN pushes, or action order violated"); + goto release; + } + tci = fa->vlan.vid & 0x0fff; + tci |= fa->vlan.prio << 13; + act->vlan_push |= (1 << depth); + act->vlan_tci[depth] = cpu_to_be16(tci); + act->vlan_proto[depth] = fa->vlan.proto; + break; + case FLOW_ACTION_ADD: + rc = efx_tc_pedit_add(efx, act, fa, extack); + if (rc < 0) + goto release; + break; + case FLOW_ACTION_MANGLE: + if (!efx_tc_flower_action_order_ok(act, EFX_TC_AO_PEDIT_MAC_ADDRS)) { + rc = -EOPNOTSUPP; + EFX_TC_ERR_MSG(efx, extack, "Pedit action violates action order"); + goto release; + } + rc = efx_tc_mangle(efx, act, fa, &mung, extack, &match); + if (rc < 0) + goto release; + break; + case FLOW_ACTION_TUNNEL_ENCAP: + if (encap_info) { + /* Can't specify encap multiple times. + * XXX possibly we should allow this, and just + * overwrite the existing encap_info? But you + * can do it with a tcf_tunnel_release anyway. 
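+ * The supported shape is a single tunnel_key set followed by a
+ * mirred to the tunnel netdev, e.g. (values illustrative only)
+ *   action tunnel_key set src_ip 10.0.0.1 dst_ip 10.0.0.2 \
+ *     id 1000 dst_port 4789 \
+ *   action mirred egress redirect dev vxlan0
+ * The encap_info saved here is consumed by the REDIRECT/MIRRED
+ * case above.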
+ */ + rc = -EINVAL; + EFX_TC_ERR_MSG(efx, extack, "Tunnel key set when already set"); + goto release; + } + if (!fa->tunnel) { + rc = -EOPNOTSUPP; + EFX_TC_ERR_MSG(efx, extack, "Tunnel key set is missing key"); + goto release; + } + encap_info = fa->tunnel; + break; + case FLOW_ACTION_TUNNEL_DECAP: + if (encap_info) { + encap_info = NULL; + break; + } + /* Actually decap it. Since we don't support enc_key + * matches on ingress (and if we did there'd be no + * tunnel-device to give us a type), the only way for + * this to work is if there was a foreign filter that + * set up an encap_match rule that covers us. + * XXX if not, what will the HW/FW do with this? + */ + if (!efx_tc_flower_action_order_ok(act, EFX_TC_AO_DECAP)) { + rc = -EINVAL; + EFX_TC_ERR_MSG(efx, extack, "Multiple decaps, or action order violated"); + goto release; + } + act->decap = 1; + break; +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_CONNTRACK_OFFLOAD) + case FLOW_ACTION_CT: + if (fa->ct.action != TCA_CT_ACT_NAT) { + rc = -EOPNOTSUPP; + efx_tc_err(efx, "Unhandled ct action=%d\n", fa->ct.action); + NL_SET_ERR_MSG_MOD(extack, "Can only offload CT 'nat' action in RHS rules"); + goto release; + } + act->do_nat = 1; + break; +#endif + case FLOW_ACTION_GOTO: + rc = -EOPNOTSUPP; + efx_tc_err(efx, "goto chain_index=%u\n", fa->chain_index); + NL_SET_ERR_MSG_MOD(extack, "Can't offload goto chain in RHS rules"); + goto release; + case FLOW_ACTION_CSUM: + /* HW will automatically update the checksum by applying + * a delta when executing a dec ttl action, therefore, + * the checksum update action can be ignored. + */ + if (!act->do_ttl_dec) { + rc = -EOPNOTSUPP; + EFX_TC_ERR_MSG(efx, extack, + "Csum action unsupported"); + goto release; + } + break; + default: + efx_tc_err(efx, "Unhandled action %u\n", fa->id); + rc = -EOPNOTSUPP; + NL_SET_ERR_MSG_MOD(extack, "Unsupported action"); + goto release; + } + } + rc = efx_tc_incomplete_mangle(efx, &mung, extack); + if (rc < 0) + goto release; + if (act) { + /* Not shot/redirected, so deliver to default dest */ + if (from_efv == EFX_EFV_PF) + /* Rule applies to traffic from the wire, + * and default dest is thus the PF + */ + efx_mae_mport_uplink(efx, &act->dest_mport); + else + /* Representor, so rule applies to traffic from + * representee, and default dest is thus the rep. 
+ * All reps use the same mport for delivery + */ + efx_mae_mport_mport(efx, efx->tc->reps_mport_id, + &act->dest_mport); + act->deliver = 1; + rc = efx_mae_alloc_action_set(efx, act); + if (rc) { + EFX_TC_ERR_MSG(efx, extack, "Failed to write action set to hw (deliver)"); + goto release; + } + list_add_tail(&act->list, &rule->acts.list); + act = NULL; /* Prevent double-free in error path */ + } + + netif_dbg(efx, drv, efx->net_dev, + "Successfully parsed filter (cookie %lx)\n", + tc->cookie); + + rule->match = match; + /* Perform a late check on the ttl match field in order to take into + * account the changes that a pedit dec ttl action could have had on it + */ + rc = efx_tc_late_ttl_match_check(efx, &match, extack); + if (rc) + goto release; + + rc = efx_mae_alloc_action_set_list(efx, &rule->acts); + if (rc) { + EFX_TC_ERR_MSG(efx, extack, "Failed to write action set list to hw"); + goto release; + } + if (from_efv == EFX_EFV_PF) + /* PF netdev, so rule applies to traffic from wire */ + rule->fallback = &efx->tc->facts.pf; + else + /* repdev, so rule applies to traffic from representee */ + rule->fallback = &efx->tc->facts.reps; + if (!efx_tc_check_ready(efx, rule)) { + netif_dbg(efx, drv, efx->net_dev, "action not ready for hw\n"); + acts_id = rule->fallback->fw_id; + } else { + netif_dbg(efx, drv, efx->net_dev, "ready for hw\n"); + acts_id = rule->acts.fw_id; + } + rc = efx_mae_insert_rule(efx, &rule->match, EFX_TC_PRIO_TC, + acts_id, &rule->fw_id); + if (rc) { + EFX_TC_ERR_MSG(efx, extack, "Failed to insert rule in hw"); + goto release_acts; + } +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_TC_FLOW_OFFLOAD) + kfree(fr); +#endif + return 0; + +release_acts: + efx_mae_free_action_set_list(efx, &rule->acts); +release: + /* We failed to insert the rule, so free up any entries we created in + * subsidiary tables. 
+ */ + if (match.rid) + efx_tc_put_recirc_id(efx, match.rid); + if (act) + efx_tc_free_action_set(efx, act, false); + if (rule) { + if (!old) + rhashtable_remove_fast(&efx->tc->match_action_ht, + &rule->linkage, + efx_tc_match_action_ht_params); + efx_tc_free_action_set_list(efx, &rule->acts, false); + } + kfree(rule); +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_TC_FLOW_OFFLOAD) + kfree(fr); +#endif + return rc; +} + +static void efx_tc_delete_rule(struct efx_nic *efx, struct efx_tc_flow_rule *rule) +{ + efx_mae_delete_rule(efx, rule->fw_id); + + /* Release entries in subsidiary tables */ + efx_tc_free_action_set_list(efx, &rule->acts, true); + if (rule->match.rid) + efx_tc_put_recirc_id(efx, rule->match.rid); + if (rule->match.encap) + efx_tc_flower_release_encap_match(efx, rule->match.encap); + rule->fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL; +} + +static int efx_tc_flower_destroy(struct efx_nic *efx, + struct net_device *net_dev, + struct flow_cls_offload *tc) +{ +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_TC_FLOW_OFFLOAD) || defined(EFX_HAVE_TCF_EXTACK) + struct netlink_ext_ack *extack = tc->common.extack; +#else + struct netlink_ext_ack *extack = NULL; +#endif + struct efx_tc_lhs_rule *lhs_rule; + struct efx_tc_flow_rule *rule; + + lhs_rule = rhashtable_lookup_fast(&efx->tc->lhs_rule_ht, &tc->cookie, + efx_tc_lhs_rule_ht_params); + if (lhs_rule) { + /* Remove it from HW */ + efx_mae_remove_lhs_rule(efx, lhs_rule); + /* Delete it from SW */ + efx_tc_flower_release_lhs_actions(efx, &lhs_rule->lhs_act); + rhashtable_remove_fast(&efx->tc->lhs_rule_ht, &lhs_rule->linkage, + efx_tc_lhs_rule_ht_params); + if (lhs_rule->match.encap) + efx_tc_flower_release_encap_match(efx, lhs_rule->match.encap); + netif_dbg(efx, drv, efx->net_dev, "Removed (lhs) filter %lx\n", + lhs_rule->cookie); + kfree(lhs_rule); + return 0; + } + + rule = rhashtable_lookup_fast(&efx->tc->match_action_ht, &tc->cookie, + efx_tc_match_action_ht_params); + if (!rule) { + /* Only log a message if we're the ingress device. Otherwise + * it's a foreign filter and we might just not have been + * interested (e.g. we might not have been the egress device + * either). 
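+ * TC sends a destroy to every driver that saw the replace, so a
+ * miss in both hashtables is routine for filters we declined to
+ * offload in the first place.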
+ */
+ if (!IS_ERR(efx_tc_flower_lookup_efv(efx, net_dev)))
+ netif_warn(efx, drv, efx->net_dev,
+ "Filter %lx not found to remove\n", tc->cookie);
+ NL_SET_ERR_MSG_MOD(extack, "Flow cookie not found in offloaded rules");
+ return -ENOENT;
+ }
+
+ /* Remove it from HW */
+ efx_tc_delete_rule(efx, rule);
+ /* Delete it from SW */
+ rhashtable_remove_fast(&efx->tc->match_action_ht, &rule->linkage,
+ efx_tc_match_action_ht_params);
+ netif_dbg(efx, drv, efx->net_dev, "Removed filter %lx\n", rule->cookie);
+ kfree(rule);
+ return 0;
+}
+
+#if defined(EFX_USE_KCOMPAT) && defined(EFX_HAVE_TC_ACTION_COOKIE)
+static int efx_tc_action_stats(struct efx_nic *efx,
+ struct tc_action_offload *tca)
+{
+ struct efx_tc_counter_index *ctr;
+ struct efx_tc_counter *cnt;
+ u64 packets, bytes;
+
+ ctr = efx_tc_flower_find_counter_index(efx, tca->cookie);
+ if (!ctr) {
+ /* See comment in efx_tc_flower_destroy() */
+ netif_warn(efx, drv, efx->net_dev,
+ "Action %lx not found for stats\n", tca->cookie);
+ return -ENOENT;
+ }
+ if (WARN_ON(!ctr->cnt)) /* can't happen */
+ return -EIO;
+ cnt = ctr->cnt;
+ spin_lock_bh(&cnt->lock);
+ /* Report only new pkts/bytes since last time TC asked */
+ packets = cnt->packets;
+ bytes = cnt->bytes;
+ flow_stats_update(&tca->stats, bytes - cnt->old_bytes,
+ packets - cnt->old_packets, cnt->touched,
+ FLOW_ACTION_HW_STATS_DELAYED);
+ cnt->old_packets = packets;
+ cnt->old_bytes = bytes;
+ spin_unlock_bh(&cnt->lock);
+ return 0;
+}
+#elif defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_TC_FLOW_OFFLOAD)
+static int efx_tc_flower_stats(struct efx_nic *efx, struct net_device *net_dev,
+ struct tc_cls_flower_offload *tc)
+{
+#ifdef EFX_HAVE_TCF_EXTACK
+ struct netlink_ext_ack *extack = tc->common.extack;
+#else
+ struct netlink_ext_ack *extack = NULL;
+#endif
+ struct efx_tc_lhs_rule *lhs_rule;
+ struct efx_tc_flow_rule *rule;
+ struct efx_tc_action_set *act;
+ struct efx_tc_counter *cnt;
+ u64 packets, bytes;
+
+ lhs_rule = rhashtable_lookup_fast(&efx->tc->lhs_rule_ht, &tc->cookie,
+ efx_tc_lhs_rule_ht_params);
+ if (lhs_rule) {
+ if (lhs_rule->lhs_act.count) {
+ struct tc_action *a;
+
+ cnt = lhs_rule->lhs_act.count->cnt;
+ /* Report only new pkts/bytes since last time TC asked */
+ packets = cnt->packets;
+ bytes = cnt->bytes;
+ a = tc->exts->actions[lhs_rule->lhs_act.count_action_idx];
+ tcf_action_stats_update(a, bytes - cnt->old_bytes,
+ packets - cnt->old_packets,
+ cnt->touched
+#ifndef EFX_HAVE_OLD_TCF_ACTION_STATS_UPDATE
+ , true
+#endif
+ );
+ cnt->old_packets = packets;
+ cnt->old_bytes = bytes;
+ }
+ return 0;
+ }
+ rule = rhashtable_lookup_fast(&efx->tc->match_action_ht, &tc->cookie,
+ efx_tc_match_action_ht_params);
+ if (!rule) {
+ /* See comment in efx_tc_flower_destroy() */
+ if (!IS_ERR(efx_tc_flower_lookup_efv(efx, net_dev)))
+ if (net_ratelimit())
+ netif_warn(efx, drv, efx->net_dev,
+ "Filter %lx not found for stats\n",
+ tc->cookie);
+ NL_SET_ERR_MSG_MOD(extack, "Flow cookie not found in offloaded rules");
+ return -ENOENT;
+ }
+
+ /* For each COUNT action in the action-set list, update the
+ * corresponding (count_action_idx) tc action's stats
+ */
+ list_for_each_entry(act, &rule->acts.list, list)
+ if (act->count) {
+ struct tc_action *a;
+
+ if (WARN_ON(!act->count->cnt)) /* can't happen */
+ continue;
+ cnt = act->count->cnt;
+ spin_lock_bh(&cnt->lock);
+ /* Report only new
pkts/bytes since last time TC asked */ + packets = cnt->packets; + bytes = cnt->bytes; + a = tc->exts->actions[act->count_action_idx]; + tcf_action_stats_update(a, bytes - cnt->old_bytes, + packets - cnt->old_packets, + cnt->touched +#ifndef EFX_HAVE_OLD_TCF_ACTION_STATS_UPDATE + , true +#endif + ); + cnt->old_packets = packets; + cnt->old_bytes = bytes; + spin_unlock_bh(&cnt->lock); + } + return 0; +} +#else +static int efx_tc_flower_stats(struct efx_nic *efx, struct net_device *net_dev, + struct flow_cls_offload *tc) +{ + struct netlink_ext_ack *extack = tc->common.extack; + struct efx_tc_counter_index *ctr; + struct efx_tc_counter *cnt; + u64 packets, bytes; + + ctr = efx_tc_flower_find_counter_index(efx, tc->cookie); + if (!ctr) { + /* See comment in efx_tc_flower_destroy() */ + if (!IS_ERR(efx_tc_flower_lookup_efv(efx, net_dev))) + if (net_ratelimit()) + netif_warn(efx, drv, efx->net_dev, + "Filter %lx not found for stats\n", + tc->cookie); + NL_SET_ERR_MSG_MOD(extack, "Flow cookie not found in offloaded rules"); + return -ENOENT; + } + if (WARN_ON(!ctr->cnt)) /* can't happen */ + return -EIO; + cnt = ctr->cnt; + + spin_lock_bh(&cnt->lock); + /* Report only new pkts/bytes since last time TC asked */ + packets = cnt->packets; + bytes = cnt->bytes; + flow_stats_update(&tc->stats, bytes - cnt->old_bytes, + packets - cnt->old_packets, +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_FLOW_STATS_DROPS) + 0, +#endif +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_FLOW_STATS_TYPE) + cnt->touched, FLOW_ACTION_HW_STATS_DELAYED); +#else + cnt->touched); +#endif + cnt->old_packets = packets; + cnt->old_bytes = bytes; + spin_unlock_bh(&cnt->lock); + return 0; +} +#endif + +int efx_tc_flower(struct efx_nic *efx, struct net_device *net_dev, + struct flow_cls_offload *tc, struct efx_rep *efv) +{ + int rc; + + if (!efx->tc) + return -EOPNOTSUPP; + + mutex_lock(&efx->tc->mutex); + switch (tc->command) { + case FLOW_CLS_REPLACE: + rc = efx_tc_flower_replace(efx, net_dev, tc, efv); + break; + case FLOW_CLS_DESTROY: + rc = efx_tc_flower_destroy(efx, net_dev, tc); + break; +#if !defined(EFX_USE_KCOMPAT) || !defined(EFX_HAVE_TC_ACTION_COOKIE) + case FLOW_CLS_STATS: + rc = efx_tc_flower_stats(efx, net_dev, tc); + break; +#endif + default: + rc = -EOPNOTSUPP; + break; + } + mutex_unlock(&efx->tc->mutex); + return rc; +} + +#if defined(EFX_USE_KCOMPAT) && defined(EFX_HAVE_TC_ACTION_COOKIE) +int efx_tc_setup_action(struct efx_nic *efx, struct tc_action_offload *tca) +{ + int rc; + + mutex_lock(&efx->tc->mutex); + switch (tca->command) { + case TC_ACTION_STATS: + rc = efx_tc_action_stats(efx, tca); + break; + default: + rc = -EOPNOTSUPP; + break; + } + mutex_unlock(&efx->tc->mutex); + return rc; +} +#endif + +static int efx_tc_configure_default_rule(struct efx_nic *efx, u32 ing_port, + u32 eg_port, struct efx_tc_flow_rule *rule) +{ + struct efx_tc_action_set_list *acts = &rule->acts; + struct efx_tc_match *match = &rule->match; + struct efx_tc_action_set *act; + int rc; + + match->value.ingress_port = ing_port; + match->mask.ingress_port = ~0; + act = kzalloc(sizeof(*act), GFP_KERNEL); + if (!act) + return -ENOMEM; + act->deliver = 1; + act->dest_mport = eg_port; + rc = efx_mae_alloc_action_set(efx, act); + if (rc) + goto fail1; + EFX_WARN_ON_PARANOID(!list_empty(&acts->list)); + list_add_tail(&act->list, &acts->list); + rc = efx_mae_alloc_action_set_list(efx, acts); + if (rc) + goto fail2; + rc = efx_mae_insert_rule(efx, match, EFX_TC_PRIO_DFLT, + acts->fw_id, &rule->fw_id); + if (rc) + goto fail3; + 
return 0; +fail3: + efx_mae_free_action_set_list(efx, acts); +fail2: + list_del(&act->list); + efx_mae_free_action_set(efx, act->fw_id); +fail1: + kfree(act); + return rc; +} + +static int efx_tc_configure_default_rule_pf(struct efx_nic *efx) +{ + struct efx_tc_flow_rule *rule = &efx->tc->dflt.pf; + u32 ing_port, eg_port; + + efx_mae_mport_uplink(efx, &ing_port); + efx_mae_mport_wire(efx, &eg_port); + return efx_tc_configure_default_rule(efx, ing_port, eg_port, rule); +} + +static int efx_tc_configure_default_rule_wire(struct efx_nic *efx) +{ + struct efx_tc_flow_rule *rule = &efx->tc->dflt.wire; + u32 ing_port, eg_port; + + efx_mae_mport_wire(efx, &ing_port); + efx_mae_mport_uplink(efx, &eg_port); + return efx_tc_configure_default_rule(efx, ing_port, eg_port, rule); +} + +int efx_tc_configure_default_rule_rep(struct efx_rep *efv) +{ + struct efx_tc_flow_rule *rule = &efv->dflt; + struct efx_nic *efx = efv->parent; + u32 ing_port, eg_port; + + efx_mae_mport_mport(efx, efv->mport, &ing_port); + efx_mae_mport_mport(efx, efx->tc->reps_mport_id, &eg_port); + return efx_tc_configure_default_rule(efx, ing_port, eg_port, rule); +} + +void efx_tc_deconfigure_default_rule(struct efx_nic *efx, + struct efx_tc_flow_rule *rule) +{ + if (rule->fw_id != MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL) + efx_tc_delete_rule(efx, rule); + rule->fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL; +} + +static int efx_tc_configure_fallback_acts(struct efx_nic *efx, u32 eg_port, + struct efx_tc_action_set_list *acts) +{ + struct efx_tc_action_set *act; + int rc; + + act = kzalloc(sizeof(*act), GFP_KERNEL); + if (!act) + return -ENOMEM; + act->deliver = 1; + act->dest_mport = eg_port; + rc = efx_mae_alloc_action_set(efx, act); + if (rc) + goto fail1; + EFX_WARN_ON_PARANOID(!list_empty(&acts->list)); + list_add_tail(&act->list, &acts->list); + rc = efx_mae_alloc_action_set_list(efx, acts); + if (rc) + goto fail2; + return 0; +fail2: + list_del(&act->list); + efx_mae_free_action_set(efx, act->fw_id); +fail1: + kfree(act); + return rc; +} + +static int efx_tc_configure_fallback_acts_pf(struct efx_nic *efx) +{ + struct efx_tc_action_set_list *acts = &efx->tc->facts.pf; + u32 eg_port; + + efx_mae_mport_uplink(efx, &eg_port); + return efx_tc_configure_fallback_acts(efx, eg_port, acts); +} + +static int efx_tc_configure_fallback_acts_reps(struct efx_nic *efx) +{ + struct efx_tc_action_set_list *acts = &efx->tc->facts.reps; + u32 eg_port; + + efx_mae_mport_mport(efx, efx->tc->reps_mport_id, &eg_port); + return efx_tc_configure_fallback_acts(efx, eg_port, acts); +} + +static void efx_tc_deconfigure_fallback_acts(struct efx_nic *efx, + struct efx_tc_action_set_list *acts) +{ + efx_tc_free_action_set_list(efx, acts, true); +} + +static int efx_tc_configure_rep_mport(struct efx_nic *efx) +{ + struct efx_vport *rep_vport; + u32 rep_mport_label; + int rc; + + rc = efx_mae_allocate_mport(efx, &efx->tc->reps_mport_id, &rep_mport_label); + if (rc) + return rc; + pci_dbg(efx->pci_dev, "created rep mport 0x%08x (0x%04x)\n", + efx->tc->reps_mport_id, rep_mport_label); + /* Fake up a vport ID mapping for filters */ + mutex_lock(&efx->vport_lock); + rep_vport = efx_alloc_vport_entry(efx); + if (rep_vport) + /* Use mport *selector* as vport ID */ + efx_mae_mport_mport(efx, efx->tc->reps_mport_id, &rep_vport->vport_id); + else + rc = -ENOMEM; + mutex_unlock(&efx->vport_lock); + if (rc) + return rc; + efx->tc->reps_mport_vport_id = rep_vport->user_id; + pci_dbg(efx->pci_dev, "allocated rep vport 0x%04x\n", + 
efx->tc->reps_mport_vport_id); + return 0; +} + +static void efx_tc_deconfigure_rep_mport(struct efx_nic *efx) +{ + struct efx_vport *rep_vport; + + mutex_lock(&efx->vport_lock); + rep_vport = efx_find_vport_entry(efx, efx->tc->reps_mport_vport_id); + if (!rep_vport) + goto out_unlock; + efx_free_vport_entry(rep_vport); + efx->tc->reps_mport_vport_id = 0; +out_unlock: + mutex_unlock(&efx->vport_lock); + efx_mae_free_mport(efx, efx->tc->reps_mport_id); + efx->tc->reps_mport_id = MAE_MPORT_SELECTOR_NULL; +} + +int efx_tc_insert_rep_filters(struct efx_nic *efx) +{ + struct efx_filter_spec promisc, allmulti; + int rc; + + if (efx->type->is_vf) + return 0; + if (!efx->tc) + return 0; + if (!efx->tc->up) + return 0; + efx_filter_init_rx(&promisc, EFX_FILTER_PRI_REQUIRED, 0, 0); + efx_filter_set_uc_def(&promisc); + efx_filter_set_vport_id(&promisc, efx->tc->reps_mport_vport_id); + rc = efx_filter_insert_filter(efx, &promisc, false); + if (rc < 0) + return rc; + efx->tc->reps_filter_uc = rc; + efx_filter_init_rx(&allmulti, EFX_FILTER_PRI_REQUIRED, 0, 0); + efx_filter_set_mc_def(&allmulti); + efx_filter_set_vport_id(&allmulti, efx->tc->reps_mport_vport_id); + rc = efx_filter_insert_filter(efx, &allmulti, false); + if (rc < 0) + return rc; + efx->tc->reps_filter_mc = rc; + return 0; +} + +void efx_tc_remove_rep_filters(struct efx_nic *efx) +{ + if (efx->type->is_vf) + return; + if (!efx->tc) + return; + if (efx->tc->reps_filter_mc != (u32)-1) + efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, efx->tc->reps_filter_mc); + efx->tc->reps_filter_mc = -1; + if (efx->tc->reps_filter_uc != (u32)-1) + efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, efx->tc->reps_filter_uc); + efx->tc->reps_filter_uc = -1; +} + +int efx_init_tc(struct efx_nic *efx) +{ + int rc; + + rc = efx_mae_get_caps(efx, efx->tc->caps); + if (rc) + return rc; + if (efx->tc->caps->match_field_count > MAE_NUM_FIELDS) + /* Firmware supports some match fields the driver doesn't know + * about. Not fatal, unless any of those fields are required + * (MAE_FIELD_SUPPORTED_MATCH_ALWAYS) but if so we don't know. 
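+ * (Equality means driver and firmware agree on the field list; a
+ * smaller count just means older firmware, which is handled
+ * per-field by the match_check_caps calls at rule insertion time.)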
+ */ + netif_warn(efx, probe, efx->net_dev, + "FW reports additional match fields %u\n", + efx->tc->caps->match_field_count); + if (efx->tc->caps->action_prios < EFX_TC_PRIO__NUM) { + netif_err(efx, probe, efx->net_dev, + "Too few action prios supported (have %u, need %u)\n", + efx->tc->caps->action_prios, EFX_TC_PRIO__NUM); + return -EIO; + } + mutex_lock(&efx->tc->mutex); + rc = efx_mae_get_tables(efx); + if (rc) + goto out_free; + rc = efx_tc_configure_rep_mport(efx); + if (rc) + goto out_free; + rc = efx_tc_configure_default_rule_pf(efx); + if (rc) + goto out_free; + rc = efx_tc_configure_default_rule_wire(efx); + if (rc) + goto out_free; + rc = efx_tc_configure_fallback_acts_pf(efx); + if (rc) + goto out_free; + rc = efx_tc_configure_fallback_acts_reps(efx); + if (rc) + goto out_free; + + efx_extend_debugfs_port(efx, efx, 0, efx_tc_debugfs); + efx->tc->up = true; +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_FLOW_INDR_DEV_REGISTER) + rc = flow_indr_dev_register(efx_tc_indr_setup_cb, efx); +#endif + +out_free: + if (rc) + efx_mae_free_tables(efx); + mutex_unlock(&efx->tc->mutex); + return rc; +} + +void efx_fini_tc(struct efx_nic *efx) +{ + struct mae_mport_desc *mport; + struct rhashtable_iter walk; + struct net_device *rep_dev; + struct efx_rep *efv; + int i; + + /* We can get called even if efx_init_struct_tc() failed */ + if (!efx->tc) + return; + + efx_trim_debugfs_port(efx, efx_tc_debugfs); + mutex_lock(&efx->tc->mutex); +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_FLOW_INDR_DEV_REGISTER) + if (efx->tc->up) + flow_indr_dev_unregister(efx_tc_indr_setup_cb, efx, efx_tc_block_unbind); +#endif + efx_tc_deconfigure_default_rule(efx, &efx->tc->dflt.pf); + efx_tc_deconfigure_default_rule(efx, &efx->tc->dflt.wire); + for (i = 0; i < EFX_TC_VF_MAX; i++) { + rep_dev = efx_get_vf_rep(efx, i); + if (IS_ERR_OR_NULL(rep_dev)) + continue; + efv = netdev_priv(rep_dev); + efx_tc_deconfigure_default_rule(efx, &efv->dflt); + } + if (efx->mae) { + rhashtable_walk_enter(&efx->mae->mports_ht, &walk); + rhashtable_walk_start(&walk); + while ((mport = rhashtable_walk_next(&walk)) != NULL) { + efv = mport->efv; + if (!efv) + continue; + if (!refcount_inc_not_zero(&mport->ref)) + continue; + rhashtable_walk_stop(&walk); + efx_tc_deconfigure_default_rule(efx, &efv->dflt); + efx_mae_put_mport(efx, mport); + rhashtable_walk_start(&walk); + } + rhashtable_walk_stop(&walk); + rhashtable_walk_exit(&walk); + } + efx_tc_deconfigure_fallback_acts(efx, &efx->tc->facts.pf); + efx_tc_deconfigure_fallback_acts(efx, &efx->tc->facts.reps); + efx_tc_deconfigure_rep_mport(efx); + efx->tc->up = false; + efx_mae_free_tables(efx); + mutex_unlock(&efx->tc->mutex); +} + +#else /* EFX_TC_OFFLOAD */ +#include "tc.h" +#include "mae.h" + +static int efx_tc_configure_default_rule(struct efx_nic *efx, u32 ing_port, + u32 eg_port, struct efx_tc_flow_rule *rule) +{ + struct efx_tc_action_set_list *acts = &rule->acts; + struct efx_tc_match *match = &rule->match; + struct efx_tc_action_set *act; + int rc; + + match->value.ingress_port = ing_port; + match->mask.ingress_port = ~0; + act = kzalloc(sizeof(*act), GFP_KERNEL); + if (!act) + return -ENOMEM; + act->deliver = 1; + act->dest_mport = eg_port; + rc = efx_mae_alloc_action_set(efx, act); + if (rc) + goto fail1; + list_add_tail(&act->list, &acts->list); + rc = efx_mae_alloc_action_set_list(efx, acts); + if (rc) + goto fail2; + rc = efx_mae_insert_rule(efx, match, EFX_TC_PRIO_DFLT, + acts->fw_id, &rule->fw_id); + if (rc) + goto fail3; + return 0; +fail3: + 
efx_mae_free_action_set_list(efx, acts); +fail2: + list_del(&act->list); + efx_mae_free_action_set(efx, act->fw_id); +fail1: + kfree(act); + return rc; +} + +static int efx_tc_configure_default_rule_pf(struct efx_nic *efx) +{ + struct efx_tc_flow_rule *rule = &efx->tc->dflt.pf; + u32 ing_port, eg_port; + + efx_mae_mport_uplink(efx, &ing_port); + efx_mae_mport_wire(efx, &eg_port); + return efx_tc_configure_default_rule(efx, ing_port, eg_port, rule); +} + +static int efx_tc_configure_default_rule_wire(struct efx_nic *efx) +{ + struct efx_tc_flow_rule *rule = &efx->tc->dflt.wire; + u32 ing_port, eg_port; + + efx_mae_mport_wire(efx, &ing_port); + efx_mae_mport_uplink(efx, &eg_port); + return efx_tc_configure_default_rule(efx, ing_port, eg_port, rule); +} + +static void efx_tc_delete_rule(struct efx_nic *efx, struct efx_tc_flow_rule *rule) +{ + struct efx_tc_action_set *act, *next; + + efx_mae_delete_rule(efx, rule->fw_id); + + /* Release entries in subsidiary tables */ + efx_mae_free_action_set_list(efx, &rule->acts); + list_for_each_entry_safe(act, next, &rule->acts.list, list) { + efx_mae_free_action_set(efx, act->fw_id); + kfree(act); + } + rule->fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL; +} + +static void efx_tc_deconfigure_default_rule(struct efx_nic *efx, + struct efx_tc_flow_rule *rule) +{ + if (rule->fw_id != MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL) + efx_tc_delete_rule(efx, rule); + rule->fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL; +} + +int efx_tc_insert_rep_filters(struct efx_nic *efx) +{ + return 0; +} + +void efx_tc_remove_rep_filters(struct efx_nic *efx) +{ +} + +int efx_init_tc(struct efx_nic *efx) +{ + int rc; + + rc = efx_mae_get_caps(efx, efx->tc->caps); + if (rc) + return rc; + if (efx->tc->caps->match_field_count > MAE_NUM_FIELDS) + /* Firmware supports some match fields the driver doesn't know + * about. Not fatal, unless any of those fields are required + * (MAE_FIELD_SUPPORTED_MATCH_ALWAYS) but if so we don't know. 
+ */ + netif_warn(efx, probe, efx->net_dev, + "FW reports additional match fields %u\n", + efx->tc->caps->match_field_count); + if (efx->tc->caps->action_prios < EFX_TC_PRIO__NUM) { + netif_err(efx, probe, efx->net_dev, + "Too few action prios supported (have %u, need %u)\n", + efx->tc->caps->action_prios, EFX_TC_PRIO__NUM); + return -EIO; + } + rc = efx_tc_configure_default_rule_pf(efx); + if (rc) + return rc; + return efx_tc_configure_default_rule_wire(efx); +} + +void efx_fini_tc(struct efx_nic *efx) +{ + /* We can get called even if efx_init_struct_tc() failed */ + if (!efx->tc) + return; + efx_tc_deconfigure_default_rule(efx, &efx->tc->dflt.pf); + efx_tc_deconfigure_default_rule(efx, &efx->tc->dflt.wire); +} + +int efx_init_struct_tc(struct efx_nic *efx) +{ + int rc; + + if (efx->type->is_vf) + return 0; + + efx->tc = kzalloc(sizeof(*efx->tc), GFP_KERNEL); + if (!efx->tc) + return -ENOMEM; + efx->tc->caps = kzalloc(sizeof(struct mae_caps), GFP_KERNEL); + if (!efx->tc->caps) { + rc = -ENOMEM; + goto fail1; + } + + INIT_LIST_HEAD(&efx->tc->dflt.pf.acts.list); + efx->tc->dflt.pf.fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL; + INIT_LIST_HEAD(&efx->tc->dflt.wire.acts.list); + efx->tc->dflt.wire.fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL; + return 0; +fail1: + kfree(efx->tc); + efx->tc = NULL; + return rc; +} + +void efx_fini_struct_tc(struct efx_nic *efx) +{ + if (!efx->tc) + return; + + EFX_WARN_ON_PARANOID(efx->tc->dflt.pf.fw_id != + MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL); + EFX_WARN_ON_PARANOID(efx->tc->dflt.wire.fw_id != + MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL); + kfree(efx->tc->caps); + kfree(efx->tc); + efx->tc = NULL; +} + +#endif /* EFX_TC_OFFLOAD */ diff --git a/src/drivers/net/ethernet/sfc/tc.h b/src/drivers/net/ethernet/sfc/tc.h new file mode 100644 index 0000000..10c81cd --- /dev/null +++ b/src/drivers/net/ethernet/sfc/tc.h @@ -0,0 +1,442 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2019 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#ifndef EFX_TC_H +#define EFX_TC_H +#include "net_driver.h" + +#include "tc_counters.h" + +/* Error reporting: convenience macros. For indicating why a given filter + * insertion is not supported; errors in internal operation or in the + * hardware should be netif_err()s instead. + */ +/* Used when error message is constant, to ensure we get a message out even + * if extack isn't available. + */ +#define EFX_TC_ERR_MSG(efx, extack, message) do { \ + if (extack) \ + NL_SET_ERR_MSG_MOD(extack, message); \ + if (efx->log_tc_errs || !extack) \ + netif_info(efx, drv, efx->net_dev, "%s\n", message); \ +} while (0) +/* Used when error message is not constant; caller should also supply a + * constant extack message with NL_SET_ERR_MSG_MOD(). + */ +#define efx_tc_err(efx, fmt, args...) 
do { \
+ if (efx->log_tc_errs) \
+ netif_info(efx, drv, efx->net_dev, fmt, ##args);\
+} while (0)
+
+#define IS_ALL_ONES(v) (!(typeof (v))~(v))
+
+#ifdef CONFIG_IPV6
+static inline bool efx_ipv6_addr_all_ones(struct in6_addr *addr)
+{
+ return !memchr_inv(addr, 0xff, sizeof(*addr));
+}
+#endif
+
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_TC_OFFLOAD)
+#include <net/flow_offload.h>
+#include <net/ip_tunnels.h>
+#include <linux/rhashtable.h>
+#include <linux/idr.h>
+
+/* MAC address edits are indirected through a table in the hardware */
+struct efx_tc_mac_pedit_action {
+ u8 h_addr[ETH_ALEN];
+ struct rhash_head linkage;
+ refcount_t ref;
+ u32 fw_id; /* index of this entry in firmware MAC address table */
+};
+
+/* In principle up to 255 VFs are possible; the last one is #254 */
+#define EFX_TC_VF_MAX (255)
+
+struct efx_tc_encap_action; /* see tc_encap_actions.h */
+
+struct efx_tc_action_set {
+ u16 vlan_push:2;
+ u16 vlan_pop:2;
+ u16 decap:1;
+ u16 do_nat:1;
+ u16 deliver:1;
+ u16 do_ttl_dec:1;
+ __be16 vlan_tci[2]; /* TCIs for vlan_push */
+ __be16 vlan_proto[2]; /* Ethertypes for vlan_push */
+ struct efx_tc_counter_index *count;
+#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_TC_FLOW_OFFLOAD)
+ int count_action_idx;
+#endif
+ u32 dest_mport;
+ struct efx_tc_encap_action *encap_md; /* entry in tc_encap_ht table */
+ struct efx_tc_mac_pedit_action *src_mac; /* entry in tc_mac_ht table */
+ struct efx_tc_mac_pedit_action *dst_mac; /* entry in tc_mac_ht table */
+ struct list_head encap_user; /* entry on encap_md->users list */
+ struct list_head count_user; /* entry on counter->users list, if encap */
+ struct efx_tc_action_set_list *user; /* Only populated if encap_md */
+ u32 fw_id; /* index of this entry in firmware actions table */
+ struct list_head list;
+};
+
+struct efx_tc_match_fields {
+ /* L1, I guess? */
+ u32 ingress_port;
+ /* L2 (inner when encap) */
+ __be16 eth_proto;
+ __be16 vlan_tci[2], vlan_proto[2];
+ u8 eth_saddr[ETH_ALEN], eth_daddr[ETH_ALEN];
+ /* L3 (when IP) */
+ u8 ip_proto, ip_tos, ip_ttl;
+ __be32 src_ip, dst_ip;
+#ifdef CONFIG_IPV6
+ struct in6_addr src_ip6, dst_ip6;
+#endif
+ bool ip_frag, ip_firstfrag;
+ /* L4 */
+ __be16 l4_sport, l4_dport; /* Ports (UDP, TCP) */
+ __be16 tcp_flags;
+ bool tcp_syn_fin_rst; /* true if ANY of SYN/FIN/RST are set */
+ /* Encap. The following are *outer* fields. Note that there are no
+ * outer eth (L2) fields; this is because TC doesn't have them.
+ */
+ __be32 enc_src_ip, enc_dst_ip;
+ struct in6_addr enc_src_ip6, enc_dst_ip6;
+ u8 enc_ip_tos, enc_ip_ttl;
+ __be16 enc_sport, enc_dport;
+ __be32 enc_keyid; /* e.g. VNI, VSID */
+ /* L... I don't even know any more. Conntrack. */
+ u16 ct_state_trk:1,
+ ct_state_est:1,
+ ct_state_rel:1,
+ ct_state_new:1; /* only these bits are defined in TC uapi so far */
+ u32 ct_mark; /* For now we ignore ct_label, and don't indirect */
+ u16 ct_zone; /* also referred to as CT domain */
+ u8 recirc_id; /* mapped from (u32) TC chain_index to smaller space */
+};
+
+static inline bool efx_tc_match_is_encap(const struct efx_tc_match_fields *mask)
+{
+ return mask->enc_src_ip || mask->enc_dst_ip ||
+ !ipv6_addr_any(&mask->enc_src_ip6) ||
+ !ipv6_addr_any(&mask->enc_dst_ip6) || mask->enc_ip_tos ||
+ mask->enc_ip_ttl || mask->enc_sport || mask->enc_dport;
+}
+
+/**
+ * enum efx_tc_em_pseudo_type - &struct efx_tc_encap_match pseudo type
+ *
+ * These are used to classify "pseudo" encap matches, which don't refer
+ * to an entry in hardware but rather indicate that a section of the
+ * match space is in use by another Outer Rule.
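+ * For instance, several encap matches that differ only in ip_tos
+ * value can share one EFX_TC_EM_PSEUDO_TOS entry, provided they all
+ * use the same ip_tos_mask (recorded in child_ip_tos_mask).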
+ * + * @EFX_TC_EM_DIRECT: real HW entry in Outer Rule table; not a pseudo. + * Hardware index in &struct efx_tc_encap_match.fw_id is valid. + * @EFX_TC_EM_PSEUDO_OR: registered by an fLHS rule that fits in the OR + * table. The &struct efx_tc_lhs_rule already holds the HW OR entry. + * Only one reference to this encap match may exist. + * @EFX_TC_EM_PSEUDO_TOS: registered by an encap match which includes an + * ip_tos match, to prevent an overlapping encap match _without_ + * ip_tos. The pseudo encap match may be referenced again by + * an encap match with a different ip_tos value, but all ip_tos_mask + * must match the first (stored in our child_ip_tos_mask). + */ +enum efx_tc_em_pseudo_type { + EFX_TC_EM_DIRECT, + EFX_TC_EM_PSEUDO_OR, + EFX_TC_EM_PSEUDO_TOS, +}; + +struct efx_tc_encap_match { + __be32 src_ip, dst_ip; + struct in6_addr src_ip6, dst_ip6; + __be16 udp_dport; + u8 ip_tos, ip_tos_mask; + struct rhash_head linkage; + u16 tun_type; /* enum efx_encap_type */ + u8 child_ip_tos_mask; + refcount_t ref; + enum efx_tc_em_pseudo_type type; + u32 fw_id; /* index of this entry in firmware encap match table */ + struct efx_tc_encap_match *pseudo; /* Referenced pseudo EM if needed */ +}; + +const char *efx_tc_encap_type_name(enum efx_encap_type typ, char *buf, size_t n); + +struct efx_tc_recirc_id { + u32 chain_index; + struct net_device *net_dev; + struct rhash_head linkage; + refcount_t ref; + u8 fw_id; /* index allocated for use in the MAE */ +}; + +struct efx_tc_match { + struct efx_tc_match_fields value; + struct efx_tc_match_fields mask; + struct efx_tc_encap_match *encap; + struct efx_tc_recirc_id *rid; +}; + +struct efx_tc_action_set_list { + struct list_head list; + u32 fw_id; +}; + +struct efx_tc_ct_zone; /* see tc_conntrack.h */ + +struct efx_tc_lhs_action { + u16 tun_type; /* enum efx_encap_type */ + struct efx_tc_recirc_id *rid; +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_CONNTRACK_OFFLOAD) + struct efx_tc_ct_zone *zone; +#endif + struct efx_tc_counter_index *count; +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_TC_FLOW_OFFLOAD) + int count_action_idx; +#endif +}; + +struct efx_tc_flow_rule { + unsigned long cookie; + struct rhash_head linkage; + struct efx_tc_match match; + struct efx_tc_action_set_list acts; + struct efx_tc_action_set_list *fallback; /* what to use when unready? 
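+ * (an action set list is "unready" while e.g. an encap
+ * action's destination MAC still awaits neighbour
+ * resolution; see @facts in &struct efx_tc_state)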
*/ + u32 fw_id; +}; + +struct efx_tc_lhs_rule { + unsigned long cookie; + struct efx_tc_match match; + struct efx_tc_lhs_action lhs_act; + struct rhash_head linkage; + u32 fw_id; + bool is_ar; /* Action Rule (for OR-AR-CT-AR sequence) */ +}; + +enum efx_tc_rule_prios { + EFX_TC_PRIO_TC, /* Rule inserted by TC */ + EFX_TC_PRIO_DFLT, /* Default switch rule; one of efx_tc_default_rules */ + EFX_TC_PRIO__NUM +}; + +struct efx_tc_table_field_fmt { + u16 field_id; + u16 lbn; + u16 width; + u8 masking; + u8 scheme; +}; + +struct efx_tc_table_desc { + u16 type; + u16 key_width; + u16 resp_width; + u16 n_keys; + u16 n_resps; + u16 n_prios; + u8 flags; + u8 scheme; + struct efx_tc_table_field_fmt *keys; + struct efx_tc_table_field_fmt *resps; +}; + +struct efx_tc_table_ct { /* TABLE_ID_CONNTRACK_TABLE */ + struct efx_tc_table_desc desc; + bool hooked; + struct { /* indices of named fields within @desc.keys */ + u8 eth_proto_idx; + u8 ip_proto_idx; + u8 src_ip_idx; /* either v4 or v6 */ + u8 dst_ip_idx; + u8 l4_sport_idx; + u8 l4_dport_idx; + u8 zone_idx; /* for TABLE_FIELD_ID_DOMAIN */ + } keys; + struct { /* indices of named fields within @desc.resps */ + u8 dnat_idx; + u8 nat_ip_idx; + u8 l4_natport_idx; + u8 mark_idx; + u8 counter_id_idx; + } resps; +}; + +/** + * struct efx_tc_state - control plane data for TC offload + * + * @caps: MAE capabilities reported by MCDI + * @block_list: List of &struct efx_tc_block_binding + * @mutex: Used to serialise operations on TC hashtables + * @counter_ht: Hashtable of TC counters (FW IDs and counter values) + * @counter_id_ht: Hashtable mapping TC counter cookies to counters + * @encap_ht: Hashtable of TC encap actions + * @mac_ht: Hashtable of MAC address entries (for pedits) + * @encap_match_ht: Hashtable of TC encap matches + * @match_action_ht: Hashtable of TC match-action rules + * @lhs_rule_ht: Hashtable of TC left-hand (act ct & goto chain) rules + * @ct_zone_ht: Hashtable of TC conntrack flowtable bindings + * @ct_ht: Hashtable of TC conntrack flow entries + * @neigh_ht: Hashtable of neighbour watches (&struct efx_neigh_binder) + * @recirc_ht: Hashtable of recirculation ID mappings (&struct efx_tc_recirc_id) + * @recirc_ida: Recirculation ID allocator + * @domain_ida: CT domain (zone + vni_mode) ID allocator + * @meta_ct: MAE table layout for conntrack table + * @reps_mport_id: MAE port allocated for representor RX + * @reps_filter_uc: VNIC filter for representor unicast RX (promisc) + * @reps_filter_mc: VNIC filter for representor multicast RX (allmulti) + * @reps_mport_vport_id: vport user_id for representor RX filters + * @flush_counters: counters have been stopped, waiting for drain + * @flush_gen: final generation count per type array as reported by + * MC_CMD_MAE_COUNTERS_STREAM_STOP + * @seen_gen: most recent generation count per type as seen by efx_tc_rx() + * @flush_wq: wait queue used by efx_mae_stop_counters() to wait for + * MAE counters RXQ to finish draining + * @dflt: Match-action rules for default switching; at priority + * %EFX_TC_PRIO_DFLT. Named by *ingress* port + * @dflt.pf: rule for traffic ingressing from PF (egresses to wire) + * @dflt.wire: rule for traffic ingressing from wire (egresses to PF) + * @facts: Fallback action-set-lists for unready rules. 
Named by *egress* port + * @facts.pf: action-set-list for unready rules on PF netdev, hence applying to + * traffic from wire, and egressing to PF + * @facts.reps: action-set-list for unready rules on representors, hence + * applying to traffic from representees, and egressing to the reps mport + * @up: have TC datastructures been set up? + */ +struct efx_tc_state { + struct mae_caps *caps; + struct list_head block_list; + struct mutex mutex; + struct rhashtable counter_ht; + struct rhashtable counter_id_ht; + struct rhashtable encap_ht; + struct rhashtable mac_ht; + struct rhashtable encap_match_ht; + struct rhashtable match_action_ht; + struct rhashtable lhs_rule_ht; +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_CONNTRACK_OFFLOAD) + struct rhashtable ct_zone_ht; + struct rhashtable ct_ht; +#endif + struct rhashtable neigh_ht; + struct rhashtable recirc_ht; + struct ida recirc_ida; +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_CONNTRACK_OFFLOAD) + struct ida domain_ida; +#endif + struct efx_tc_table_ct meta_ct; + u32 reps_mport_id; + u32 reps_filter_uc, reps_filter_mc; + u16 reps_mport_vport_id; + bool flush_counters; + u32 flush_gen[EFX_TC_COUNTER_TYPE_MAX]; + u32 seen_gen[EFX_TC_COUNTER_TYPE_MAX]; + wait_queue_head_t flush_wq; + struct { + struct efx_tc_flow_rule pf; + struct efx_tc_flow_rule wire; + } dflt; + struct { + struct efx_tc_action_set_list pf; + struct efx_tc_action_set_list reps; + } facts; + bool up; +}; + +struct efx_rep; + +enum efx_encap_type efx_tc_indr_netdev_type(struct net_device *net_dev); +struct efx_rep *efx_tc_flower_lookup_efv(struct efx_nic *efx, + struct net_device *dev); +s64 efx_tc_flower_external_mport(struct efx_nic *efx, struct efx_rep *efv); +int efx_tc_configure_default_rule_rep(struct efx_rep *efv); +void efx_tc_deconfigure_default_rule(struct efx_nic *efx, + struct efx_tc_flow_rule *rule); +int efx_tc_flower(struct efx_nic *efx, struct net_device *net_dev, + struct flow_cls_offload *tc, struct efx_rep *efv); +#if defined(EFX_USE_KCOMPAT) && defined(EFX_HAVE_TC_ACTION_COOKIE) +int efx_tc_setup_action(struct efx_nic *efx, struct tc_action_offload *tca); +#endif + +#else /* EFX_TC_OFFLOAD */ + +struct efx_tc_action_set { + u16 vlan_push:2; + u16 vlan_pop:2; + u16 decap:1; + u16 do_nat:1; + u16 deliver:1; + u16 do_ttl_dec:1; + __be16 vlan_tci[2]; /* TCIs for vlan_push */ + __be16 vlan_proto[2]; /* Ethertypes for vlan_push */ + u32 dest_mport; + u32 fw_id; /* index of this entry in firmware actions table */ + struct list_head list; +}; + +struct efx_tc_match_fields { + /* L1, I guess? */ + u32 ingress_port; +}; + +struct efx_tc_encap_match {}; +struct efx_tc_encap_action {}; + +struct efx_tc_match { + struct efx_tc_match_fields value; + struct efx_tc_match_fields mask; + struct efx_tc_encap_match *encap; +}; + +struct efx_tc_action_set_list { + struct list_head list; + u32 fw_id; +}; + +enum efx_tc_default_rules { /* named by ingress port */ + EFX_TC_DFLT_PF, + EFX_TC_DFLT_WIRE, + /* No rules for VFs and vfreps; if we don't have TC offload then we + * just don't create them. 
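+ * Only the PF and wire default rules above exist in this
+ * configuration.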
+ */ + EFX_TC_DFLT__MAX +}; + +struct efx_tc_flow_rule { + struct efx_tc_match match; + struct efx_tc_action_set_list acts; + u32 fw_id; +}; + +enum efx_tc_rule_prios { + EFX_TC_PRIO_DFLT, /* Default switch rule; one of efx_tc_default_rules */ + EFX_TC_PRIO__NUM +}; + +struct efx_tc_state { + struct mae_caps *caps; + struct { + struct efx_tc_flow_rule pf; + struct efx_tc_flow_rule wire; + } dflt; +}; + +#endif /* EFX_TC_OFFLOAD */ + +int efx_tc_insert_rep_filters(struct efx_nic *efx); +void efx_tc_remove_rep_filters(struct efx_nic *efx); +int efx_init_tc(struct efx_nic *efx); +void efx_fini_tc(struct efx_nic *efx); + +int efx_init_struct_tc(struct efx_nic *efx); +void efx_fini_struct_tc(struct efx_nic *efx); + +#endif /* EFX_TC_H */ diff --git a/src/drivers/net/ethernet/sfc/tc_bindings.c b/src/drivers/net/ethernet/sfc/tc_bindings.c new file mode 100644 index 0000000..85f9bd4 --- /dev/null +++ b/src/drivers/net/ethernet/sfc/tc_bindings.c @@ -0,0 +1,376 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2022 Xilinx Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#ifdef EFX_USE_KCOMPAT +/* Must come before other headers */ +#include "kernel_compat.h" +#endif + +#include "tc_bindings.h" +#include "tc.h" +#include "tc_encap_actions.h" + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_TC_OFFLOAD) +struct efx_tc_block_binding { + struct list_head list; + struct efx_nic *efx; + struct efx_rep *efv; + struct net_device *otherdev; /* may actually be us */ + struct flow_block *block; +}; + +static struct efx_tc_block_binding *efx_tc_find_binding(struct efx_nic *efx, + struct net_device *otherdev) +{ + struct efx_tc_block_binding *binding; + + ASSERT_RTNL(); + list_for_each_entry(binding, &efx->tc->block_list, list) + if (binding->otherdev == otherdev) + return binding; + return NULL; +} + +static int efx_tc_block_cb(enum tc_setup_type type, void *type_data, + void *cb_priv) +{ + struct efx_tc_block_binding *binding = cb_priv; + struct flow_cls_offload *tcf = type_data; +#if defined(EFX_USE_KCOMPAT) && defined(EFX_HAVE_TC_ACTION_COOKIE) + struct tc_action_offload *tca = type_data; +#endif + + switch (type) { + case TC_SETUP_CLSFLOWER: + return efx_tc_flower(binding->efx, binding->otherdev, + tcf, binding->efv); +#if defined(EFX_USE_KCOMPAT) && defined(EFX_HAVE_TC_ACTION_COOKIE) + case TC_SETUP_ACTION: + return efx_tc_setup_action(binding->efx, tca); +#endif + default: + return -EOPNOTSUPP; + } +} + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_FLOW_BLOCK_OFFLOAD) +void efx_tc_block_unbind(void *cb_priv) +{ + struct efx_tc_block_binding *binding = cb_priv; + + list_del(&binding->list); + kfree(binding); +} + +static struct efx_tc_block_binding *efx_tc_create_binding( + struct efx_nic *efx, struct efx_rep *efv, + struct net_device *otherdev, struct flow_block *block) +{ + struct efx_tc_block_binding *binding = kmalloc(sizeof(*binding), GFP_KERNEL); + + if (!binding) + return ERR_PTR(-ENOMEM); + binding->efx = efx; + binding->efv = efv; + binding->otherdev = otherdev; + binding->block = block; + list_add(&binding->list, &efx->tc->block_list); + return binding; +} + +int efx_tc_setup_block(struct net_device *net_dev, struct efx_nic *efx, + struct flow_block_offload *tcb, struct efx_rep *efv) +{ + struct 
efx_tc_block_binding *binding; + struct flow_block_cb *block_cb; + int rc; + + if (tcb->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) + return -EOPNOTSUPP; + + if (WARN_ON(!efx->tc)) + return -ENETDOWN; + + switch (tcb->command) { + case FLOW_BLOCK_BIND: + binding = efx_tc_create_binding(efx, efv, net_dev, tcb->block); + if (IS_ERR(binding)) + return PTR_ERR(binding); + block_cb = flow_block_cb_alloc(efx_tc_block_cb, binding, + binding, efx_tc_block_unbind); + rc = PTR_ERR_OR_ZERO(block_cb); + netif_dbg(efx, drv, efx->net_dev, + "bind %sdirect block for device %s, rc %d\n", + net_dev == efx->net_dev ? "" : + efv ? "semi" : "in", + net_dev ? net_dev->name : NULL, rc); + if (rc) { + list_del(&binding->list); + kfree(binding); + } else { + flow_block_cb_add(block_cb, tcb); + } + return rc; + case FLOW_BLOCK_UNBIND: + binding = efx_tc_find_binding(efx, net_dev); + if (binding) { + block_cb = flow_block_cb_lookup(tcb->block, + efx_tc_block_cb, + binding); + if (block_cb) { + flow_block_cb_remove(block_cb, tcb); + netif_dbg(efx, drv, efx->net_dev, + "unbound %sdirect block for device %s\n", + net_dev == efx->net_dev ? "" : + binding->efv ? "semi" : "in", + net_dev ? net_dev->name : NULL); + return 0; + } + } + /* If we're in driver teardown, then we expect to have + * already unbound all our blocks (we did it early while + * we still had MCDI to remove the filters), so getting + * unbind callbacks now isn't a problem. + */ + netif_cond_dbg(efx, drv, efx->net_dev, + !efx->tc->up, warn, + "%sdirect block unbind for device %s, was never bound\n", + net_dev == efx->net_dev ? "" : "in", + net_dev ? net_dev->name : NULL); + return -ENOENT; + default: + return -EOPNOTSUPP; + } +} +#else /* EFX_HAVE_FLOW_BLOCK_OFFLOAD */ +int efx_tc_setup_block(struct net_device *net_dev, struct efx_nic *efx, + struct flow_block_offload *tcb, struct efx_rep *efv) +{ + struct efx_tc_block_binding *binding; + int rc; + + if (tcb->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) + return -EOPNOTSUPP; + + if (WARN_ON(!efx->tc)) + return -ENETDOWN; + + switch (tcb->command) { + case TC_BLOCK_BIND: + binding = kmalloc(sizeof(*binding), GFP_KERNEL); + if (!binding) + return -ENOMEM; + binding->efx = efx; + binding->efv = efv; + binding->otherdev = net_dev; + binding->block = tcb->block; + list_add(&binding->list, &efx->tc->block_list); + rc = tcf_block_cb_register(tcb->block, efx_tc_block_cb, + binding, binding +#ifdef EFX_HAVE_TCB_EXTACK + , tcb->extack +#endif + ); + netif_dbg(efx, drv, efx->net_dev, + "bind %sdirect block for device %s, rc %d\n", + net_dev == efx->net_dev ? "" : + efv ? "semi" : "in", + net_dev ? net_dev->name : NULL, rc); + if (rc) { + list_del(&binding->list); + kfree(binding); + } + return rc; + case TC_BLOCK_UNBIND: + binding = efx_tc_find_binding(efx, net_dev); + if (binding) { + tcf_block_cb_unregister(binding->block, efx_tc_block_cb, + binding); + netif_dbg(efx, drv, efx->net_dev, + "unbound %sdirect block for device %s\n", + net_dev == efx->net_dev ? "" : + binding->efv ? "semi" : "in", + net_dev ? net_dev->name : NULL); + list_del(&binding->list); + kfree(binding); + } else { + /* If we're in driver teardown, then we expect to have + * already unbound all our blocks (we did it early while + * we still had MCDI to remove the filters), so getting + * unbind callbacks now isn't a problem. + */ + netif_cond_dbg(efx, drv, efx->net_dev, + !efx->tc->up, warn, + "%sdirect block unbind for device %s, was never bound\n", + net_dev == efx->net_dev ? "" : "in", + net_dev ? 
net_dev->name : NULL); + } + return 0; + default: + return -EOPNOTSUPP; + } +} +#endif /* EFX_HAVE_FLOW_BLOCK_OFFLOAD */ + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_FLOW_INDR_DEV_REGISTER) +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_FLOW_INDR_QDISC) +int efx_tc_indr_setup_cb(struct net_device *net_dev, struct Qdisc *sch, +#else +int efx_tc_indr_setup_cb(struct net_device *net_dev, +#endif + void *cb_priv, enum tc_setup_type type, + void *type_data, void *data, + void (*cleanup)(struct flow_block_cb *block_cb)) +{ + struct flow_block_offload *tcb = type_data; + struct efx_tc_block_binding *binding; + struct flow_block_cb *block_cb; + struct efx_nic *efx = cb_priv; + bool is_ovs_int_port; + int rc; + + if (!net_dev) + return -EOPNOTSUPP; + + if (tcb->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS && + tcb->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) + return -EOPNOTSUPP; + + is_ovs_int_port = netif_is_ovs_master(net_dev); + if (tcb->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS && + !is_ovs_int_port) + return -EOPNOTSUPP; + + if (is_ovs_int_port) + return -EOPNOTSUPP; + + switch (type) { + case TC_SETUP_BLOCK: + switch (tcb->command) { + case FLOW_BLOCK_BIND: + binding = efx_tc_create_binding(efx, NULL, net_dev, tcb->block); + if (IS_ERR(binding)) + return PTR_ERR(binding); + block_cb = flow_indr_block_cb_alloc(efx_tc_block_cb, binding, + binding, efx_tc_block_unbind, +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_FLOW_INDR_QDISC) + tcb, net_dev, sch, data, binding, +#else + tcb, net_dev, data, binding, +#endif + cleanup); + rc = PTR_ERR_OR_ZERO(block_cb); + netif_dbg(efx, drv, efx->net_dev, + "bind indr block for device %s, rc %d\n", + net_dev ? net_dev->name : NULL, rc); + if (rc) { + list_del(&binding->list); + kfree(binding); + } else { + flow_block_cb_add(block_cb, tcb); + } + return rc; + case FLOW_BLOCK_UNBIND: + binding = efx_tc_find_binding(efx, net_dev); + if (!binding) + return -ENOENT; + block_cb = flow_block_cb_lookup(tcb->block, + efx_tc_block_cb, + binding); + if (!block_cb) + return -ENOENT; + flow_indr_block_cb_remove(block_cb, tcb); + netif_dbg(efx, drv, efx->net_dev, + "unbind indr block for device %s\n", + net_dev ? net_dev->name : NULL); + return 0; + default: + return -EOPNOTSUPP; + } + default: + return -EOPNOTSUPP; + } +} +#elif defined(EFX_HAVE_FLOW_INDR_BLOCK_CB_REGISTER) +int efx_tc_indr_setup_cb(struct net_device *net_dev, void *cb_priv, + enum tc_setup_type type, void *type_data) +{ + switch (type) { + case TC_SETUP_BLOCK: + return efx_tc_setup_block(net_dev, cb_priv, type_data, NULL); + default: + return -EOPNOTSUPP; + } +} +#endif + +/* .ndo_setup_tc implementation + * Entry point for flower block and filter management. 
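+ * Requests on VFs, or made before the TC state has been allocated,
+ * are rejected with -EOPNOTSUPP; CLSFLOWER and BLOCK setups are
+ * dispatched to efx_tc_flower() and efx_tc_setup_block() respectively.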
+ */ +int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type, + void *type_data) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + + if (efx->type->is_vf) + return -EOPNOTSUPP; + if (!efx->tc) + return -EOPNOTSUPP; + + if (type == TC_SETUP_CLSFLOWER) + return efx_tc_flower(efx, net_dev, type_data, NULL); + if (type == TC_SETUP_BLOCK) + return efx_tc_setup_block(net_dev, efx, type_data, NULL); + + return -EOPNOTSUPP; +} + +int efx_tc_netdev_event(struct efx_nic *efx, unsigned long event, + struct net_device *net_dev) +{ +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_FLOW_INDR_BLOCK_CB_REGISTER) || defined(EFX_HAVE_FLOW_INDR_DEV_REGISTER) +#if defined(EFX_USE_KCOMPAT) && defined(EFX_HAVE_FLOW_INDR_BLOCK_CB_REGISTER) + enum efx_encap_type etyp = efx_tc_indr_netdev_type(net_dev); + int rc; +#endif /* EFX_HAVE_FLOW_INDR_BLOCK_CB_REGISTER */ + + if (efx->type->is_vf) + return NOTIFY_DONE; + + if (event == NETDEV_UNREGISTER) + efx_tc_unregister_egdev(efx, net_dev); + +#if defined(EFX_USE_KCOMPAT) && defined(EFX_HAVE_FLOW_INDR_BLOCK_CB_REGISTER) + if (etyp == EFX_ENCAP_TYPE_NONE) + return NOTIFY_OK; + if (event == NETDEV_REGISTER) { + rc = __flow_indr_block_cb_register(net_dev, efx, + efx_tc_indr_setup_cb, efx); + if (rc) + netif_warn(efx, drv, efx->net_dev, + "Indirect block reg failed %d for %s\n", rc, + net_dev->name); + else + netif_dbg(efx, drv, efx->net_dev, + "reg indirect block for device %s\n", + net_dev ? net_dev->name : NULL); + } else if (event == NETDEV_UNREGISTER) { + __flow_indr_block_cb_unregister(net_dev, efx_tc_indr_setup_cb, + efx); + netif_dbg(efx, drv, efx->net_dev, + "unreg indirect block for device %s\n", + net_dev ? net_dev->name : NULL); + } +#endif /* EFX_HAVE_FLOW_INDR_BLOCK_CB_REGISTER */ +#endif + + return NOTIFY_OK; +} + +#endif /* EFX_TC_OFFLOAD */ diff --git a/src/drivers/net/ethernet/sfc/tc_bindings.h b/src/drivers/net/ethernet/sfc/tc_bindings.h new file mode 100644 index 0000000..b22e285 --- /dev/null +++ b/src/drivers/net/ethernet/sfc/tc_bindings.h @@ -0,0 +1,55 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2022 Xilinx Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
+ */ + +#ifndef EFX_TC_BINDINGS_H +#define EFX_TC_BINDINGS_H +#include "net_driver.h" + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_TC_OFFLOAD) +#include +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_FLOW_BLOCK_OFFLOAD) +#include /* for struct tc_block_offload */ +#endif + +struct efx_rep; + +int efx_tc_setup_block(struct net_device *net_dev, struct efx_nic *efx, + struct flow_block_offload *tcb, struct efx_rep *efv); +int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type, + void *type_data); + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_FLOW_BLOCK_OFFLOAD) +void efx_tc_block_unbind(void *cb_priv); +#endif + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_FLOW_INDR_DEV_REGISTER) +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_FLOW_INDR_QDISC) +int efx_tc_indr_setup_cb(struct net_device *net_dev, struct Qdisc *sch, +#else +int efx_tc_indr_setup_cb(struct net_device *net_dev, +#endif + void *cb_priv, enum tc_setup_type type, + void *type_data, void *data, + void (*cleanup)(struct flow_block_cb *block_cb)); +#elif defined(EFX_HAVE_FLOW_INDR_BLOCK_CB_REGISTER) +int efx_tc_indr_setup_cb(struct net_device *net_dev, void *cb_priv, + enum tc_setup_type type, void *type_data); +#endif + +int efx_tc_netdev_event(struct efx_nic *efx, unsigned long event, + struct net_device *net_dev); +#else /* EFX_TC_OFFLOAD */ +static inline int efx_tc_netdev_event(struct efx_nic *efx, unsigned long event, + struct net_device *net_dev) +{ + return NOTIFY_OK; +} +#endif /* EFX_TC_OFFLOAD */ + +#endif /* EFX_TC_BINDINGS_H */ diff --git a/src/drivers/net/ethernet/sfc/tc_conntrack.c b/src/drivers/net/ethernet/sfc/tc_conntrack.c new file mode 100644 index 0000000..0152411 --- /dev/null +++ b/src/drivers/net/ethernet/sfc/tc_conntrack.c @@ -0,0 +1,678 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2022 Xilinx Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
+ */ + +#ifdef EFX_USE_KCOMPAT +/* Must come before other headers */ +#include "kernel_compat.h" +#endif + +#include "tc_conntrack.h" +#include "tc.h" +#include "mae.h" + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_CONNTRACK_OFFLOAD) +#if IS_ENABLED(CONFIG_NF_FLOW_TABLE) +static int efx_tc_flow_block(enum tc_setup_type type, void *type_data, + void *cb_priv); +#endif + +static const struct rhashtable_params efx_tc_ct_zone_ht_params = { + .key_len = offsetof(struct efx_tc_ct_zone, linkage), + .key_offset = 0, + .head_offset = offsetof(struct efx_tc_ct_zone, linkage), +}; + +static const struct rhashtable_params efx_tc_ct_ht_params = { + .key_len = offsetof(struct efx_tc_ct_entry, linkage), + .key_offset = 0, + .head_offset = offsetof(struct efx_tc_ct_entry, linkage), +}; + +static void efx_tc_ct_zone_free(void *ptr, void *arg) +{ + struct efx_tc_ct_zone *zone = ptr; + struct efx_nic *efx = zone->efx; + + netif_err(efx, drv, efx->net_dev, + "tc ct_zone %u still present at teardown, removing\n", + zone->zone); + + ida_free(&efx->tc->domain_ida, zone->domain); +#if IS_ENABLED(CONFIG_NF_FLOW_TABLE) + nf_flow_table_offload_del_cb(zone->nf_ft, efx_tc_flow_block, zone); +#endif + kfree(zone); +} + +static void efx_tc_ct_free(void *ptr, void *arg) +{ + struct efx_tc_ct_entry *conn = ptr; + struct efx_nic *efx = arg; + + netif_err(efx, drv, efx->net_dev, + "tc ct_entry %lx still present at teardown\n", + conn->cookie); + + /* We can release the counter, but we can't remove the CT itself + * from hardware because the table meta is already gone. + */ + efx_tc_flower_release_counter(efx, conn->cnt); + kfree(conn); +} + +int efx_tc_init_conntrack(struct efx_nic *efx) +{ + int rc; + + rc = rhashtable_init(&efx->tc->ct_zone_ht, &efx_tc_ct_zone_ht_params); + if (rc < 0) + goto fail_ct_zone_ht; + rc = rhashtable_init(&efx->tc->ct_ht, &efx_tc_ct_ht_params); + if (rc < 0) + goto fail_ct_ht; + ida_init(&efx->tc->domain_ida); + return 0; +fail_ct_ht: + rhashtable_destroy(&efx->tc->ct_zone_ht); +fail_ct_zone_ht: + return rc; +} + +/* Only call this in init failure teardown. + * Normal exit should fini instead as there may be entries in the table. 
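+ * (Entries still present would be leaked by rhashtable_destroy();
+ * efx_tc_fini_conntrack() below frees them first via
+ * rhashtable_free_and_destroy().)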
+ */ +void efx_tc_destroy_conntrack(struct efx_nic *efx) +{ + ida_destroy(&efx->tc->domain_ida); + rhashtable_destroy(&efx->tc->ct_ht); + rhashtable_destroy(&efx->tc->ct_zone_ht); +} + +void efx_tc_fini_conntrack(struct efx_nic *efx) +{ + rhashtable_free_and_destroy(&efx->tc->ct_zone_ht, efx_tc_ct_zone_free, NULL); + rhashtable_free_and_destroy(&efx->tc->ct_ht, efx_tc_ct_free, efx); + WARN_ON(!ida_is_empty(&efx->tc->domain_ida)); + ida_destroy(&efx->tc->domain_ida); +} + +#if IS_ENABLED(CONFIG_NF_FLOW_TABLE) +#define EFX_NF_TCP_FLAG(flg) cpu_to_be16(be32_to_cpu(TCP_FLAG_##flg) >> 16) + +static int efx_tc_ct_parse_match(struct efx_nic *efx, struct flow_rule *fr, + struct efx_tc_ct_entry *conn) +{ + struct flow_dissector *dissector = fr->match.dissector; + unsigned char ipv = 0; + bool tcp = false; + + if (flow_rule_match_key(fr, FLOW_DISSECTOR_KEY_CONTROL)) { + struct flow_match_control fm; + + flow_rule_match_control(fr, &fm); + if (IS_ALL_ONES(fm.mask->addr_type)) + switch (fm.key->addr_type) { + case FLOW_DISSECTOR_KEY_IPV4_ADDRS: + ipv = 4; + break; + case FLOW_DISSECTOR_KEY_IPV6_ADDRS: + ipv = 6; + break; + default: + break; + } + } + + if (!ipv) { + efx_tc_err(efx, "Conntrack missing ipv specification\n"); + return -EOPNOTSUPP; + } + + if (dissector->used_keys & + ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | + BIT(FLOW_DISSECTOR_KEY_BASIC) | + BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_PORTS) | + BIT(FLOW_DISSECTOR_KEY_TCP) | + BIT(FLOW_DISSECTOR_KEY_META))) { +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_FLOW_DISSECTOR_64BIT_USED_KEYS) + efx_tc_err(efx, "Unsupported conntrack keys %#llx\n", dissector->used_keys); +#else + efx_tc_err(efx, "Unsupported conntrack keys %#x\n", dissector->used_keys); +#endif + return -EOPNOTSUPP; + } + + if (flow_rule_match_key(fr, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_match_basic fm; + + flow_rule_match_basic(fr, &fm); + if (!IS_ALL_ONES(fm.mask->n_proto)) { + efx_tc_err(efx, "Conntrack eth_proto is not exact-match; mask %04x\n", + ntohs(fm.mask->n_proto)); + return -EOPNOTSUPP; + } + conn->eth_proto = fm.key->n_proto; + if (conn->eth_proto != (ipv == 4 ? 
htons(ETH_P_IP) + : htons(ETH_P_IPV6))) { + efx_tc_err(efx, "Conntrack eth_proto is not IPv%hhu, is %04x\n", + ipv, ntohs(conn->eth_proto)); + return -EOPNOTSUPP; + } + if (!IS_ALL_ONES(fm.mask->ip_proto)) { + efx_tc_err(efx, "Conntrack ip_proto is not exact-match; mask %02x\n", + fm.mask->ip_proto); + return -EOPNOTSUPP; + } + conn->ip_proto = fm.key->ip_proto; + switch (conn->ip_proto) { + case IPPROTO_TCP: + tcp = true; + break; + case IPPROTO_UDP: + break; + default: + efx_tc_err(efx, "Conntrack ip_proto not TCP or UDP, is %02x\n", + conn->ip_proto); + return -EOPNOTSUPP; + } + } else { + efx_tc_err(efx, "Conntrack missing eth_proto, ip_proto\n"); + return -EOPNOTSUPP; + } + + if (ipv == 4 && flow_rule_match_key(fr, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) { + struct flow_match_ipv4_addrs fm; + + flow_rule_match_ipv4_addrs(fr, &fm); + if (!IS_ALL_ONES(fm.mask->src)) { + efx_tc_err(efx, "Conntrack ipv4.src is not exact-match; mask %08x\n", + ntohl(fm.mask->src)); + return -EOPNOTSUPP; + } + conn->src_ip = fm.key->src; + if (!IS_ALL_ONES(fm.mask->dst)) { + efx_tc_err(efx, "Conntrack ipv4.dst is not exact-match; mask %08x\n", + ntohl(fm.mask->dst)); + return -EOPNOTSUPP; + } + conn->dst_ip = fm.key->dst; + } +#ifdef CONFIG_IPV6 + else if (ipv == 6 && flow_rule_match_key(fr, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) { + struct flow_match_ipv6_addrs fm; + + flow_rule_match_ipv6_addrs(fr, &fm); + if (!efx_ipv6_addr_all_ones(&fm.mask->src)) { + efx_tc_err(efx, "Conntrack ipv6.src is not exact-match; mask %pI6\n", + &fm.mask->src); + return -EOPNOTSUPP; + } + conn->src_ip6 = fm.key->src; + if (!efx_ipv6_addr_all_ones(&fm.mask->dst)) { + efx_tc_err(efx, "Conntrack ipv6.dst is not exact-match; mask %pI6\n", + &fm.mask->dst); + return -EOPNOTSUPP; + } + conn->dst_ip6 = fm.key->dst; + } +#endif + else { + efx_tc_err(efx, "Conntrack missing IPv%hhu addrs\n", ipv); + return -EOPNOTSUPP; + } + + if (flow_rule_match_key(fr, FLOW_DISSECTOR_KEY_PORTS)) { + struct flow_match_ports fm; + + flow_rule_match_ports(fr, &fm); + if (!IS_ALL_ONES(fm.mask->src)) { + efx_tc_err(efx, "Conntrack ports.src is not exact-match; mask %04x\n", + ntohs(fm.mask->src)); + return -EOPNOTSUPP; + } + conn->l4_sport = fm.key->src; + if (!IS_ALL_ONES(fm.mask->dst)) { + efx_tc_err(efx, "Conntrack ports.dst is not exact-match; mask %04x\n", + ntohs(fm.mask->dst)); + return -EOPNOTSUPP; + } + conn->l4_dport = fm.key->dst; + } + /* TODO reject if ports not specified? */ + + if (flow_rule_match_key(fr, FLOW_DISSECTOR_KEY_TCP)) { + __be16 tcp_interesting_flags; + struct flow_match_tcp fm; + + if (!tcp) { + efx_tc_err(efx, "Conntrack matching on TCP keys but ipproto is not tcp\n"); + return -EOPNOTSUPP; + } + flow_rule_match_tcp(fr, &fm); + tcp_interesting_flags = EFX_NF_TCP_FLAG(SYN) | + EFX_NF_TCP_FLAG(RST) | + EFX_NF_TCP_FLAG(FIN); + /* If any of the tcp_interesting_flags is set, we always + * inhibit CT lookup in LHS (so SW can update CT table). 
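+ * Rejecting the offload here keeps such flag-carrying segments on the
+ * software path, where the kernel's conntrack state machine can see
+ * them.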
+ */ + if (fm.key->flags & tcp_interesting_flags) { + efx_tc_err(efx, "Unsupported conntrack tcp.flags %04x/%04x\n", + ntohs(fm.key->flags), ntohs(fm.mask->flags)); + return -EOPNOTSUPP; + } + /* Other TCP flags cannot be filtered at CT */ + if (fm.mask->flags & ~tcp_interesting_flags) { + efx_tc_err(efx, "Unsupported conntrack tcp.flags %04x/%04x\n", + ntohs(fm.key->flags), ntohs(fm.mask->flags)); + return -EOPNOTSUPP; + } + } + + return 0; +} + +struct efx_tc_ct_mangler_state { + u8 ipv4:1; + u8 tcpudp:1; + u8 first:1; +}; + +static int efx_tc_ct_mangle(struct efx_nic *efx, struct efx_tc_ct_entry *conn, + const struct flow_action_entry *fa, + struct efx_tc_ct_mangler_state *mung) +{ + bool dnat = false; + + switch (fa->mangle.htype) { + case FLOW_ACT_MANGLE_HDR_TYPE_ETH: + efx_tc_err(efx, "Unsupported: mangle eth+%u %x/%x\n", + fa->mangle.offset, fa->mangle.val, fa->mangle.mask); + return -EOPNOTSUPP; + case FLOW_ACT_MANGLE_HDR_TYPE_IP4: + switch (fa->mangle.offset) { + case offsetof(struct iphdr, daddr): + dnat = true; + fallthrough; + case offsetof(struct iphdr, saddr): + if (fa->mangle.mask) { + efx_tc_err(efx, "Unsupported: mask (%#x) of ipv4.%s mangle\n", + fa->mangle.mask, dnat ? "dst" : "src"); + return -EOPNOTSUPP; + } + conn->nat_ip = htonl(fa->mangle.val); + mung->ipv4 = 1; + break; + default: + efx_tc_err(efx, "Unsupported: mangle ipv4+%u %x/%x\n", + fa->mangle.offset, fa->mangle.val, + fa->mangle.mask); + return -EOPNOTSUPP; + } + break; + case FLOW_ACT_MANGLE_HDR_TYPE_TCP: + case FLOW_ACT_MANGLE_HDR_TYPE_UDP: + /* Both struct tcphdr and struct udphdr start with + * __be16 source; + * __be16 dest; + * so we can use the same code for both. + */ + switch (fa->mangle.offset) { + case offsetof(struct tcphdr, dest): + BUILD_BUG_ON(offsetof(struct tcphdr, dest) != + offsetof(struct udphdr, dest)); + dnat = true; + fallthrough; + case offsetof(struct tcphdr, source): + BUILD_BUG_ON(offsetof(struct tcphdr, source) != + offsetof(struct udphdr, source)); + if (~fa->mangle.mask != 0xffff) { + efx_tc_err(efx, "Unsupported: mask (%#x) of l4+%u mangle (%x)\n", + fa->mangle.mask, fa->mangle.offset, + fa->mangle.val); + return -EOPNOTSUPP; + } + conn->l4_natport = htons(fa->mangle.val); + mung->tcpudp = 1; + break; + default: + efx_tc_err(efx, "Unsupported: mangle l4+%u (%x/%x)\n", + fa->mangle.offset, fa->mangle.val, + fa->mangle.mask); + return -EOPNOTSUPP; + } + break; + default: + efx_tc_err(efx, "Unhandled mangle htype %u for conntrack\n", + fa->mangle.htype); + return -EOPNOTSUPP; + } + /* first mangle tells us whether this is SNAT or DNAT + * subsequent mangles must match that + */ + if (mung->first) + conn->dnat = dnat; + mung->first = false; + if (conn->dnat != dnat) { + efx_tc_err(efx, "Mixed src and dst NAT for conntrack\n"); + return -EOPNOTSUPP; + } + return 0; +} + +static int efx_tc_ct_replace(struct efx_tc_ct_zone *ct_zone, + struct flow_cls_offload *tc) +{ + struct flow_rule *fr = flow_cls_offload_flow_rule(tc); + struct efx_tc_ct_mangler_state mung = { .first = true }; + struct efx_tc_ct_entry *conn, *old; + struct efx_nic *efx = ct_zone->efx; + const struct flow_action_entry *fa; + struct efx_tc_counter *cnt; + int rc, i; + + if (WARN_ON(!efx->tc)) + return -ENETDOWN; + if (WARN_ON(!efx->tc->up)) + return -ENETDOWN; + + conn = kzalloc(sizeof(*conn), GFP_USER); + if (!conn) + return -ENOMEM; + conn->cookie = tc->cookie; + old = rhashtable_lookup_get_insert_fast(&efx->tc->ct_ht, + &conn->linkage, + efx_tc_ct_ht_params); + if (old) { + netif_dbg(efx, drv, efx->net_dev, + 
"Already offloaded conntrack (cookie %lx)\n", tc->cookie); + rc = -EEXIST; + goto release; + } + + /* Parse match */ + conn->domain = ct_zone->domain; + rc = efx_tc_ct_parse_match(efx, fr, conn); + if (rc) + goto release; + + /* Parse actions */ + flow_action_for_each(i, fa, &fr->action) { + /* TODO check fa->hw_stats, figure out CT stats generally */ + switch (fa->id) { + case FLOW_ACTION_CT_METADATA: + conn->mark = fa->ct_metadata.mark; + if (memchr_inv(fa->ct_metadata.labels, 0, sizeof(fa->ct_metadata.labels))) { + efx_tc_err(efx, "Setting CT label not supported\n"); + rc = -EOPNOTSUPP; + goto release; + } + break; + case FLOW_ACTION_MANGLE: + if (conn->eth_proto != htons(ETH_P_IP)) { + efx_tc_err(efx, "NAT only supported for IPv4\n"); + rc = -EOPNOTSUPP; + goto release; + } + rc = efx_tc_ct_mangle(efx, conn, fa, &mung); + if (rc) + goto release; + break; + default: + efx_tc_err(efx, "Unhandled action %u for conntrack\n", fa->id); + rc = -EOPNOTSUPP; + goto release; + } + } + + /* fill in defaults for unmangled values */ + if (!mung.ipv4) + conn->nat_ip = conn->dnat ? conn->dst_ip : conn->src_ip; + if (!mung.tcpudp) + conn->l4_natport = conn->dnat ? conn->l4_dport : conn->l4_sport; + + cnt = efx_tc_flower_allocate_counter(efx, EFX_TC_COUNTER_TYPE_CT); + if (IS_ERR(cnt)) { + rc = PTR_ERR(cnt); + goto release; + } + conn->cnt = cnt; + + rc = efx_mae_insert_ct(efx, conn); + if (rc) { + efx_tc_err(efx, "Failed to insert conntrack, %d\n", rc); + goto release; + } + down_write(&ct_zone->rwsem); + list_add_tail(&conn->list, &ct_zone->cts); + up_write(&ct_zone->rwsem); + return 0; +release: + if (conn->cnt) + efx_tc_flower_release_counter(efx, conn->cnt); + if (!old) + rhashtable_remove_fast(&efx->tc->ct_ht, &conn->linkage, + efx_tc_ct_ht_params); + kfree(conn); + return rc; +} + +/* Caller must follow with efx_tc_ct_remove_finish() after RCU grace period! */ +static void efx_tc_ct_remove(struct efx_nic *efx, struct efx_tc_ct_entry *conn) +{ + int rc; + + /* Remove it from HW */ + rc = efx_mae_remove_ct(efx, conn); + /* Delete it from SW */ + rhashtable_remove_fast(&efx->tc->ct_ht, &conn->linkage, + efx_tc_ct_ht_params); + if (rc) { + netif_err(efx, drv, efx->net_dev, + "Failed to remove conntrack %lx from hw, rc %d\n", + conn->cookie, rc); + } else { + netif_dbg(efx, drv, efx->net_dev, "Removed conntrack %lx\n", + conn->cookie); + } +} + +static void efx_tc_ct_remove_finish(struct efx_nic *efx, struct efx_tc_ct_entry *conn) +{ + /* Remove related CT counter. This is delayed after the conn object we + * are working with has been succesfully removed. This is specifically + * for properly protecting the counter from being used inside + * efx_tc_ct_stats: + * + * 1) no conn, no counter reachable. + * 2) Conn exists, the previous synchronize_rcu precludes us from + * removing the counter here until efx_tc_ct_stats is done. + * + * Note releasing the counter through the next function call is fine + * with concurrent uses of the counter since that is all done through + * the rhastable API for the counter_ht rhashtable which takes care of + * the safe counter removal. 
+ */
+ efx_tc_flower_release_counter(efx, conn->cnt);
+ kfree(conn);
+}
+
+static int efx_tc_ct_destroy(struct efx_tc_ct_zone *ct_zone,
+ struct flow_cls_offload *tc)
+{
+ struct efx_nic *efx = ct_zone->efx;
+ struct efx_tc_ct_entry *conn;
+
+ conn = rhashtable_lookup_fast(&efx->tc->ct_ht, &tc->cookie,
+ efx_tc_ct_ht_params);
+ if (!conn) {
+ netif_warn(efx, drv, efx->net_dev,
+ "Conntrack %lx not found to remove\n", tc->cookie);
+ return -ENOENT;
+ }
+
+ down_write(&ct_zone->rwsem);
+ list_del(&conn->list);
+ efx_tc_ct_remove(efx, conn);
+ up_write(&ct_zone->rwsem);
+ synchronize_rcu();
+ efx_tc_ct_remove_finish(efx, conn);
+ return 0;
+}
+
+static int efx_tc_ct_stats(struct efx_tc_ct_zone *ct_zone,
+ struct flow_cls_offload *tc)
+{
+ struct efx_nic *efx = ct_zone->efx;
+ struct efx_tc_ct_entry *conn;
+ struct efx_tc_counter *cnt;
+
+ rcu_read_lock();
+ conn = rhashtable_lookup_fast(&efx->tc->ct_ht, &tc->cookie,
+ efx_tc_ct_ht_params);
+ if (!conn) {
+ netif_warn(efx, drv, efx->net_dev,
+ "Conntrack %lx not found for stats\n", tc->cookie);
+ rcu_read_unlock();
+ return -ENOENT;
+ }
+
+ cnt = conn->cnt;
+ spin_lock_bh(&cnt->lock);
+ /* Report only last use */
+ flow_stats_update(&tc->stats, 0, 0,
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_FLOW_STATS_DROPS)
+ 0,
+#endif
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_FLOW_STATS_TYPE)
+ cnt->touched, FLOW_ACTION_HW_STATS_DELAYED);
+#else
+ cnt->touched);
+#endif
+ spin_unlock_bh(&cnt->lock);
+ rcu_read_unlock();
+
+ return 0;
+}
+
+static int efx_tc_flow_block(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ struct flow_cls_offload *tcb = type_data;
+ struct efx_tc_ct_zone *ct_zone = cb_priv;
+
+ if (type != TC_SETUP_CLSFLOWER)
+ return -EOPNOTSUPP;
+
+ switch (tcb->command) {
+ case FLOW_CLS_REPLACE:
+ return efx_tc_ct_replace(ct_zone, tcb);
+ case FLOW_CLS_DESTROY:
+ return efx_tc_ct_destroy(ct_zone, tcb);
+ case FLOW_CLS_STATS:
+ return efx_tc_ct_stats(ct_zone, tcb);
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+struct efx_tc_ct_zone *efx_tc_ct_register_zone(struct efx_nic *efx, u16 zone,
+ u8 vni_mode,
+ struct nf_flowtable *ct_ft)
+{
+ struct efx_tc_ct_zone *ct_zone, *old;
+ int rc;
+
+ ct_zone = kzalloc(sizeof(*ct_zone), GFP_USER);
+ if (!ct_zone)
+ return ERR_PTR(-ENOMEM);
+ ct_zone->zone = zone;
+ ct_zone->vni_mode = vni_mode;
+ old = rhashtable_lookup_get_insert_fast(&efx->tc->ct_zone_ht,
+ &ct_zone->linkage,
+ efx_tc_ct_zone_ht_params);
+ if (old) {
+ /* don't need our new entry */
+ kfree(ct_zone);
+ if (!refcount_inc_not_zero(&old->ref))
+ return ERR_PTR(-EAGAIN);
+ /* existing entry found */
+ WARN_ON_ONCE(old->nf_ft != ct_ft);
+ netif_dbg(efx, drv, efx->net_dev,
+ "Found existing ct_zone for %u@%u\n", zone, vni_mode);
+ return old;
+ }
+ ct_zone->nf_ft = ct_ft;
+ ct_zone->efx = efx;
+ INIT_LIST_HEAD(&ct_zone->cts);
+ init_rwsem(&ct_zone->rwsem);
+ rc = ida_alloc_range(&efx->tc->domain_ida, 0, U16_MAX, GFP_USER);
+ if (rc < 0) {
+ if (net_ratelimit())
+ netif_warn(efx, drv, efx->net_dev,
+ "Failed to allocate a domain (rc %d) for %u@%u\n",
+ rc, zone, vni_mode);
+ goto fail1;
+ }
+ ct_zone->domain = rc;
+ rc = nf_flow_table_offload_add_cb(ct_ft, efx_tc_flow_block, ct_zone);
+ netif_dbg(efx, drv, efx->net_dev, "Adding new ct_zone for %u@%u, rc %d\n",
+ zone, vni_mode, rc);
+ if (rc < 0)
+ goto fail2;
+ refcount_set(&ct_zone->ref, 1);
+ return ct_zone;
+fail2:
+ ida_free(&efx->tc->domain_ida, ct_zone->domain);
+fail1:
+ rhashtable_remove_fast(&efx->tc->ct_zone_ht, &ct_zone->linkage,
+
efx_tc_ct_zone_ht_params); + kfree(ct_zone); + return ERR_PTR(rc); +} + +void efx_tc_ct_unregister_zone(struct efx_nic *efx, + struct efx_tc_ct_zone *ct_zone) +{ + struct efx_tc_ct_entry *conn, *next; + + if (!refcount_dec_and_test(&ct_zone->ref)) + return; /* still in use */ + nf_flow_table_offload_del_cb(ct_zone->nf_ft, efx_tc_flow_block, ct_zone); + rhashtable_remove_fast(&efx->tc->ct_zone_ht, &ct_zone->linkage, + efx_tc_ct_zone_ht_params); + down_write(&ct_zone->rwsem); + list_for_each_entry(conn, &ct_zone->cts, list) + efx_tc_ct_remove(efx, conn); + synchronize_rcu(); + /* _safe because efx_tc_ct_remove_finish() frees conn */ + list_for_each_entry_safe(conn, next, &ct_zone->cts, list) + efx_tc_ct_remove_finish(efx, conn); + up_write(&ct_zone->rwsem); + ida_free(&efx->tc->domain_ida, ct_zone->domain); + netif_dbg(efx, drv, efx->net_dev, "Removed ct_zone for %u@%u\n", + ct_zone->zone, ct_zone->vni_mode); + kfree(ct_zone); +} +#else +struct efx_tc_ct_zone *efx_tc_ct_register_zone(struct efx_nic *efx, u16 zone, + u8 vni_mode, + struct nf_flowtable *ct_ft) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +void efx_tc_ct_unregister_zone(struct efx_nic *efx, + struct efx_tc_ct_zone *ct_zone) {} +#endif /* CONFIG_NF_FLOW_TABLE */ + +#endif /* EFX_CONNTRACK_OFFLOAD */ diff --git a/src/drivers/net/ethernet/sfc/tc_conntrack.h b/src/drivers/net/ethernet/sfc/tc_conntrack.h new file mode 100644 index 0000000..bfca28b --- /dev/null +++ b/src/drivers/net/ethernet/sfc/tc_conntrack.h @@ -0,0 +1,62 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2022 Xilinx Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
+ */ + +#ifndef EFX_TC_CONNTRACK_H +#define EFX_TC_CONNTRACK_H +#include "net_driver.h" + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_CONNTRACK_OFFLOAD) +#include +#if defined(EFX_USE_KCOMPAT) +/* nf_flow_table.h should include this, but on deb10 it's missing */ +#include +#endif +#include + +struct efx_tc_ct_zone { + u16 zone; + u8 vni_mode; /* MAE_CT_VNI_MODE enum */ + struct rhash_head linkage; + refcount_t ref; + struct nf_flowtable *nf_ft; + struct efx_nic *efx; + u16 domain; /* ID allocated for hardware use */ + struct rw_semaphore rwsem; /* protects cts list */ + struct list_head cts; /* list of efx_tc_ct_entry in this domain */ +}; + +struct efx_tc_ct_entry { + unsigned long cookie; + struct rhash_head linkage; + __be16 eth_proto; + u8 ip_proto; + bool dnat; + __be32 src_ip, dst_ip, nat_ip; + struct in6_addr src_ip6, dst_ip6; + __be16 l4_sport, l4_dport, l4_natport; /* Ports (UDP, TCP) */ + u16 domain; /* we'd rather have struct efx_tc_ct_zone *zone; but that's unsafe currently */ + u32 mark; + struct efx_tc_counter *cnt; + struct list_head list; /* entry on zone->cts */ +}; + +/* create/uncreate/teardown hashtables */ +int efx_tc_init_conntrack(struct efx_nic *efx); +void efx_tc_destroy_conntrack(struct efx_nic *efx); +void efx_tc_fini_conntrack(struct efx_nic *efx); + +struct efx_tc_ct_zone *efx_tc_ct_register_zone(struct efx_nic *efx, u16 zone, + u8 vni_mode, + struct nf_flowtable *ct_ft); +void efx_tc_ct_unregister_zone(struct efx_nic *efx, + struct efx_tc_ct_zone *ct_zone); + +#endif /* EFX_CONNTRACK_OFFLOAD */ + +#endif /* EFX_TC_CONNTRACK_H */ diff --git a/src/drivers/net/ethernet/sfc/tc_counters.c b/src/drivers/net/ethernet/sfc/tc_counters.c new file mode 100644 index 0000000..9c2d532 --- /dev/null +++ b/src/drivers/net/ethernet/sfc/tc_counters.c @@ -0,0 +1,600 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2022 Xilinx Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#ifdef EFX_USE_KCOMPAT +/* Must come before other headers */ +#include "kernel_compat.h" +#endif + +#include +#include "tc_counters.h" + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_TC_OFFLOAD) +#include "tc_encap_actions.h" +#include "tc.h" +#include "mae_counter_format.h" +#include "mae.h" +#include "rx_common.h" +#include "efx_common.h" + +/* Counter-management hashtables */ + +static const struct rhashtable_params efx_tc_counter_id_ht_params = { + .key_len = offsetof(struct efx_tc_counter_index, linkage), + .key_offset = 0, + .head_offset = offsetof(struct efx_tc_counter_index, linkage), +}; + +static const struct rhashtable_params efx_tc_counter_ht_params = { + .key_len = offsetof(struct efx_tc_counter, linkage), + .key_offset = 0, + .head_offset = offsetof(struct efx_tc_counter, linkage), +}; + +static void efx_tc_counter_free(void *ptr, void *__unused) +{ + struct efx_tc_counter *cnt = ptr; + + WARN_ON(!list_empty(&cnt->users)); + /* We'd like to synchronize_rcu() here, but unfortunately we aren't + * removing the element from the hashtable (it's not clear that's a + * safe thing to do in an rhashtable_free_and_destroy free_fn), so + * threads could still be obtaining new pointers to *cnt if they can + * race against this function at all. 
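+ * In practice this free_fn should only run from efx_tc_fini_counters()
+ * at teardown, by which point no new lookups ought to be arriving.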
+ */ + flush_work(&cnt->work); + EFX_WARN_ON_PARANOID(spin_is_locked(&cnt->lock)); + kfree(cnt); +} + +static void efx_tc_counter_id_free(void *ptr, void *__unused) +{ + struct efx_tc_counter_index *ctr = ptr; + + WARN_ON(refcount_read(&ctr->ref)); + kfree(ctr); +} + +int efx_tc_init_counters(struct efx_nic *efx) +{ + int rc; + + rc = rhashtable_init(&efx->tc->counter_id_ht, &efx_tc_counter_id_ht_params); + if (rc < 0) + goto fail_counter_id_ht; + rc = rhashtable_init(&efx->tc->counter_ht, &efx_tc_counter_ht_params); + if (rc < 0) + goto fail_counter_ht; + return 0; +fail_counter_ht: + rhashtable_destroy(&efx->tc->counter_id_ht); +fail_counter_id_ht: + return rc; +} + +/* Only call this in init failure teardown. + * Normal exit should fini instead as there may be entries in the table. + */ +void efx_tc_destroy_counters(struct efx_nic *efx) +{ + rhashtable_destroy(&efx->tc->counter_ht); + rhashtable_destroy(&efx->tc->counter_id_ht); +} + +void efx_tc_fini_counters(struct efx_nic *efx) +{ + rhashtable_free_and_destroy(&efx->tc->counter_id_ht, efx_tc_counter_id_free, NULL); + rhashtable_free_and_destroy(&efx->tc->counter_ht, efx_tc_counter_free, NULL); +} + +/* Counter allocation */ + +static void efx_tc_counter_work(struct work_struct *work); + +struct efx_tc_counter *efx_tc_flower_allocate_counter(struct efx_nic *efx, + int type) +{ + struct efx_tc_counter *cnt; + int rc, rc2; + + cnt = kzalloc(sizeof(*cnt), GFP_USER); + if (!cnt) + return ERR_PTR(-ENOMEM); + + spin_lock_init(&cnt->lock); + INIT_WORK(&cnt->work, efx_tc_counter_work); + cnt->touched = jiffies; + cnt->type = type; + + rc = efx_mae_allocate_counter(efx, cnt); + if (rc) + goto fail1; + INIT_LIST_HEAD(&cnt->users); + rc = rhashtable_insert_fast(&efx->tc->counter_ht, &cnt->linkage, + efx_tc_counter_ht_params); + if (rc) + goto fail2; + return cnt; +fail2: + /* If we get here, it implies that we couldn't insert into the table, + * which in turn probably means that the fw_id was already taken. + * In that case, it's unclear whether we really 'own' the fw_id; but + * the firmware seemed to think we did, so it's proper to free it. + */ + rc2 = efx_mae_free_counter(efx, cnt); + if (rc2) + netif_warn(efx, hw, efx->net_dev, + "Failed to free MAE counter %u, rc %d\n", + cnt->fw_id, rc2); +fail1: + kfree(cnt); + return ERR_PTR(rc > 0 ? -EIO : rc); +} + +void efx_tc_flower_release_counter(struct efx_nic *efx, + struct efx_tc_counter *cnt) +{ + int rc; + + rhashtable_remove_fast(&efx->tc->counter_ht, &cnt->linkage, + efx_tc_counter_ht_params); + rc = efx_mae_free_counter(efx, cnt); + if (rc) + netif_warn(efx, hw, efx->net_dev, + "Failed to free MAE counter %u, rc %d\n", + cnt->fw_id, rc); + WARN_ON(!list_empty(&cnt->users)); + /* This doesn't protect counter updates coming in arbitrarily long + * after we deleted the counter. The RCU just ensures that we won't + * free the counter while another thread has a pointer to it. + * Ensuring we don't update the wrong counter if the ID gets re-used + * is handled by the generation count. See SWNETLINUX-3595, and + * comments on CT-8026, for further discussion. 
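+ * (The (s32) generation comparison that implements this lives in
+ * efx_tc_counter_update().)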
+ */ + synchronize_rcu(); + flush_work(&cnt->work); + EFX_WARN_ON_PARANOID(spin_is_locked(&cnt->lock)); + kfree(cnt); +} + +static struct efx_tc_counter *efx_tc_flower_find_counter_by_fw_id( + struct efx_nic *efx, int type, u32 fw_id) +{ + struct efx_tc_counter key = {}; + + key.fw_id = fw_id; + key.type = type; + + return rhashtable_lookup_fast(&efx->tc->counter_ht, &key, + efx_tc_counter_ht_params); +} + +/* TC cookie to counter mapping */ + +void efx_tc_flower_put_counter_index(struct efx_nic *efx, + struct efx_tc_counter_index *ctr) +{ + if (!refcount_dec_and_test(&ctr->ref)) + return; /* still in use */ + rhashtable_remove_fast(&efx->tc->counter_id_ht, &ctr->linkage, + efx_tc_counter_id_ht_params); + efx_tc_flower_release_counter(efx, ctr->cnt); + kfree(ctr); +} + +struct efx_tc_counter_index *efx_tc_flower_get_counter_index( + struct efx_nic *efx, unsigned long cookie, + enum efx_tc_counter_type type) +{ + struct efx_tc_counter_index *ctr, *old; + struct efx_tc_counter *cnt; + + ctr = kzalloc(sizeof(*ctr), GFP_USER); + if (!ctr) + return ERR_PTR(-ENOMEM); + ctr->cookie = cookie; + old = rhashtable_lookup_get_insert_fast(&efx->tc->counter_id_ht, + &ctr->linkage, + efx_tc_counter_id_ht_params); + if (old) { + /* don't need our new entry */ + kfree(ctr); + if (!refcount_inc_not_zero(&old->ref)) + return ERR_PTR(-EAGAIN); + /* existing entry found */ + ctr = old; + } else { + cnt = efx_tc_flower_allocate_counter(efx, type); + if (IS_ERR(cnt)) { + rhashtable_remove_fast(&efx->tc->counter_id_ht, + &ctr->linkage, + efx_tc_counter_id_ht_params); + kfree(ctr); + return (void *)cnt; /* it's an ERR_PTR */ + } + ctr->cnt = cnt; + refcount_set(&ctr->ref, 1); + } + return ctr; +} + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_TC_ACTION_COOKIE) || defined(EFX_HAVE_TC_FLOW_OFFLOAD) +struct efx_tc_counter_index *efx_tc_flower_find_counter_index( + struct efx_nic *efx, unsigned long cookie) +{ + struct efx_tc_counter_index key = {}; + + key.cookie = cookie; + return rhashtable_lookup_fast(&efx->tc->counter_id_ht, &key, + efx_tc_counter_id_ht_params); +} +#endif + +/* TC Channel. Counter updates are delivered on this channel's RXQ. 
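+ * No ordinary network traffic is delivered here: the RXQ carries only
+ * MAE counter-update packets, which efx_tc_rx() consumes without ever
+ * passing them up the stack.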
*/ + +static void efx_tc_handle_no_channel(struct efx_nic *efx) +{ + netif_warn(efx, drv, efx->net_dev, + "MAE counters require MSI-X and 1 additional interrupt vector.\n"); +} + +static int efx_tc_probe_channel(struct efx_channel *channel) +{ + struct efx_rx_queue *rx_queue = &channel->rx_queue; + + channel->irq_moderation_us = 0; + rx_queue->core_index = 0; + + INIT_WORK(&rx_queue->grant_work, efx_mae_counters_grant_credits); + + return 0; +} + +static int efx_tc_start_channel(struct efx_channel *channel) +{ + struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel); + struct efx_nic *efx = channel->efx; + + return efx_mae_start_counters(efx, rx_queue); +} + +static void efx_tc_stop_channel(struct efx_channel *channel) +{ + struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel); + struct efx_nic *efx = channel->efx; + int rc; + + rc = efx_mae_stop_counters(efx, rx_queue); + if (rc) + netif_warn(efx, drv, efx->net_dev, + "Failed to stop MAE counters streaming, rc=%d.\n", + rc); + rx_queue->grant_credits = false; + flush_work(&rx_queue->grant_work); +} + +static void efx_tc_remove_channel(struct efx_channel *channel) +{ +} + +static void efx_tc_get_channel_name(struct efx_channel *channel, + char *buf, size_t len) +{ + snprintf(buf, len, "%s-mae", channel->efx->name); +} + +static void efx_tc_counter_work(struct work_struct *work) +{ + struct efx_tc_counter *cnt = container_of(work, struct efx_tc_counter, work); + struct efx_tc_encap_action *encap; + struct efx_tc_action_set *act; + unsigned long touched; + struct neighbour *n; + + spin_lock_bh(&cnt->lock); + touched = READ_ONCE(cnt->touched); + + list_for_each_entry(act, &cnt->users, count_user) { + encap = act->encap_md; + if (!encap) + continue; + if (!encap->neigh) /* can't happen */ + continue; + if (time_after_eq(encap->neigh->used, touched)) + continue; + encap->neigh->used = touched; + /* We have passed traffic using this ARP entry, so + * indicate to the ARP cache that it's still active + */ + if (encap->neigh->dst_ip) + n = neigh_lookup(&arp_tbl, &encap->neigh->dst_ip, + encap->neigh->egdev); + else +#ifdef CONFIG_IPV6 + n = neigh_lookup(ipv6_stub->nd_tbl, + &encap->neigh->dst_ip6, + encap->neigh->egdev); +#else + n = NULL; +#endif + if (!n) + continue; + + neigh_event_send(n, NULL); + neigh_release(n); + } + spin_unlock_bh(&cnt->lock); +} + +static void efx_tc_counter_update(struct efx_nic *efx, + enum efx_tc_counter_type counter_type, + u32 counter_idx, u64 packets, u64 bytes, + u32 mark) +{ + struct efx_tc_counter *cnt; + + rcu_read_lock(); /* Protect against deletion of 'cnt' */ + cnt = efx_tc_flower_find_counter_by_fw_id(efx, counter_type, counter_idx); + if (!cnt) { + /* This could theoretically happen due to a race where an + * update from the counter is generated between allocating + * it and adding it to the hashtable, in + * efx_tc_flower_allocate_counter(). But during that race + * window, the counter will not yet have been attached to + * any action, so should not have counted any packets; thus + * the HW should not be sending updates (zero squash). + * Alternatively, it can happen when a counter is removed, + * with updates for the counter still in-flight; this is + * particularly likely for CT counters, in which case the + * last ACK (post FIN/FIN-ACK) will be hitting the CT entry + * just as software is removing it in reaction to the FIN. + * For this reason we downgrade to netif_dbg() for CT. 
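+ * For other counter types a stray update is unexpected, so it is
+ * still logged at warn severity.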
+ */ + if (net_ratelimit()) + netif_cond_dbg(efx, drv, efx->net_dev, + counter_type == EFX_TC_COUNTER_TYPE_CT, + warn, + "Got update for unwanted MAE counter %u type %u\n", + counter_idx, counter_type); + goto out; + } + + spin_lock_bh(&cnt->lock); + if ((s32)mark - (s32)cnt->gen < 0) { + /* This counter update packet is from before the counter was + * allocated; thus it must be for a previous counter with + * the same ID that has since been freed, and it should be + * ignored. + */ + } else { + /* Update latest seen generation count. This ensures that + * even a long-lived counter won't start getting ignored if + * the generation count wraps around, unless it somehow + * manages to go 1<<31 generations without an update. + */ + cnt->gen = mark; + /* update counter values */ + cnt->packets += packets; + cnt->bytes += bytes; + cnt->touched = jiffies; + } + spin_unlock_bh(&cnt->lock); + schedule_work(&cnt->work); +out: + rcu_read_unlock(); +} + +static void efx_tc_rx_version_1(struct efx_nic *efx, const u8 *data, u32 mark) +{ + u16 n_counters, i; + + /* Header format: + * + | 0 | 1 | 2 | 3 | + * 0 |version | reserved | + * 4 | seq_index | n_counters | + */ + + n_counters = le16_to_cpu(*(const __le16 *)(data + 6)); + + /* Counter update entry format: + * | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | a | b | c | d | e | f | + * | counter_idx | packet_count | byte_count | + */ + for (i = 0; i < n_counters; i++) { + const void *entry = data + 8 + 16 * i; + u64 packet_count, byte_count; + u32 counter_idx; + + counter_idx = le32_to_cpu(*(const __le32 *)entry); + packet_count = le32_to_cpu(*(const __le32 *)(entry + 4)) | + ((u64)le16_to_cpu(*(const __le16 *)(entry + 8)) << 32); + byte_count = le16_to_cpu(*(const __le16 *)(entry + 10)) | + ((u64)le32_to_cpu(*(const __le32 *)(entry + 12)) << 16); + efx_tc_counter_update(efx, EFX_TC_COUNTER_TYPE_AR, counter_idx, + packet_count, byte_count, mark); + } +} + +#define TCV2_HDR_PTR(pkt, field) \ + ((void)BUILD_BUG_ON_ZERO(ERF_SC_PACKETISER_HEADER_##field##_LBN & 7), \ + (pkt) + ERF_SC_PACKETISER_HEADER_##field##_LBN / 8) +#define TCV2_HDR_BYTE(pkt, field) \ + ((void)BUILD_BUG_ON_ZERO(ERF_SC_PACKETISER_HEADER_##field##_WIDTH != 8),\ + *TCV2_HDR_PTR(pkt, field)) +#define TCV2_HDR_WORD(pkt, field) \ + ((void)BUILD_BUG_ON_ZERO(ERF_SC_PACKETISER_HEADER_##field##_WIDTH != 16),\ + (void)BUILD_BUG_ON_ZERO(ERF_SC_PACKETISER_HEADER_##field##_LBN & 15), \ + *(__force const __le16 *)TCV2_HDR_PTR(pkt, field)) +#define TCV2_PKT_PTR(pkt, poff, i, field) \ + ((void)BUILD_BUG_ON_ZERO(ERF_SC_PACKETISER_PAYLOAD_##field##_LBN & 7), \ + (pkt) + ERF_SC_PACKETISER_PAYLOAD_##field##_LBN/8 + poff + \ + i * ER_RX_SL_PACKETISER_PAYLOAD_WORD_SIZE) + +/* Read a little-endian 48-bit field with 16-bit alignment */ +static u64 efx_tc_read48(const __le16 *field) +{ + u64 out = 0; + int i; + + for (i = 0; i < 3; i++) + out |= (u64)le16_to_cpu(field[i]) << (i * 16); + return out; +} + +static enum efx_tc_counter_type efx_tc_rx_version_2(struct efx_nic *efx, + const u8 *data, u32 mark) +{ + u8 payload_offset, header_offset, ident; + enum efx_tc_counter_type type; + u16 n_counters, i; + + ident = TCV2_HDR_BYTE(data, IDENTIFIER); + switch (ident) { + case ERF_SC_PACKETISER_HEADER_IDENTIFIER_AR: + type = EFX_TC_COUNTER_TYPE_AR; + break; + case ERF_SC_PACKETISER_HEADER_IDENTIFIER_CT: + type = EFX_TC_COUNTER_TYPE_CT; + break; + case ERF_SC_PACKETISER_HEADER_IDENTIFIER_OR: + type = EFX_TC_COUNTER_TYPE_OR; + break; + default: + if (net_ratelimit()) + netif_err(efx, drv, efx->net_dev, + "ignored v2 MAE 
counter packet (bad identifier %u"
+ "), counters may be inaccurate\n", ident);
+ return EFX_TC_COUNTER_TYPE_MAX;
+ }
+ header_offset = TCV2_HDR_BYTE(data, HEADER_OFFSET);
+ /* mae_counter_format.h implies that this offset is fixed, since it
+ * carries on with SOP-based LBNs for the fields in this header
+ */
+ if (header_offset != ERF_SC_PACKETISER_HEADER_HEADER_OFFSET_DEFAULT) {
+ if (net_ratelimit())
+ netif_err(efx, drv, efx->net_dev,
+ "choked on v2 MAE counter packet (bad header_offset %u"
+ "), counters may be inaccurate\n", header_offset);
+ return EFX_TC_COUNTER_TYPE_MAX;
+ }
+ payload_offset = TCV2_HDR_BYTE(data, PAYLOAD_OFFSET);
+ n_counters = le16_to_cpu(TCV2_HDR_WORD(data, COUNT));
+
+ for (i = 0; i < n_counters; i++) {
+ const void *counter_idx_p, *packet_count_p, *byte_count_p;
+ u64 packet_count, byte_count;
+ u32 counter_idx;
+
+ /* 24-bit field with 32-bit alignment */
+ counter_idx_p = TCV2_PKT_PTR(data, payload_offset, i, COUNTER_INDEX);
+ BUILD_BUG_ON(ERF_SC_PACKETISER_PAYLOAD_COUNTER_INDEX_WIDTH != 24);
+ BUILD_BUG_ON(ERF_SC_PACKETISER_PAYLOAD_COUNTER_INDEX_LBN & 31);
+ counter_idx = le32_to_cpu(*(const __le32 *)counter_idx_p) & 0xffffff;
+ /* 48-bit field with 16-bit alignment */
+ packet_count_p = TCV2_PKT_PTR(data, payload_offset, i, PACKET_COUNT);
+ BUILD_BUG_ON(ERF_SC_PACKETISER_PAYLOAD_PACKET_COUNT_WIDTH != 48);
+ BUILD_BUG_ON(ERF_SC_PACKETISER_PAYLOAD_PACKET_COUNT_LBN & 15);
+ packet_count = efx_tc_read48((const __le16 *)packet_count_p);
+ /* 48-bit field with 16-bit alignment */
+ byte_count_p = TCV2_PKT_PTR(data, payload_offset, i, BYTE_COUNT);
+ BUILD_BUG_ON(ERF_SC_PACKETISER_PAYLOAD_BYTE_COUNT_WIDTH != 48);
+ BUILD_BUG_ON(ERF_SC_PACKETISER_PAYLOAD_BYTE_COUNT_LBN & 15);
+ byte_count = efx_tc_read48((const __le16 *)byte_count_p);
+
+ if (type == EFX_TC_COUNTER_TYPE_CT) {
+ /* CT counters are 1-bit saturating counters used to
+ * update the lastuse time in CT stats. A received CT
+ * counter should have a packet count of 0 and only
+ * the LSB set in the byte count.
+ */
+ if (packet_count || (byte_count != 1))
+ netdev_warn_once(efx->net_dev,
+ "CT counter with inconsistent state (%llu, %llu)\n",
+ packet_count, byte_count);
+ /* Zero the counters before updating the driver's
+ * CT counter object.
+ */
+ packet_count = 0;
+ byte_count = 0;
+ }
+
+ efx_tc_counter_update(efx, type, counter_idx, packet_count,
+ byte_count, mark);
+ }
+ return type;
+}
+
+/* We always swallow the packet, whether successful or not, since it's not
+ * a network packet and shouldn't ever be forwarded to the stack.
+ * @mark is the generation count for counter allocations.
+ */
+static bool efx_tc_rx(struct efx_rx_queue *rx_queue, u32 mark)
+{
+ struct efx_rx_buffer *rx_buf = efx_rx_buf_pipe(rx_queue);
+ const u8 *data = efx_rx_buf_va(rx_buf);
+ struct efx_nic *efx = rx_queue->efx;
+ enum efx_tc_counter_type type;
+ u8 version;
+
+ /* version is always first byte of packet */
+ version = *data;
+ switch (version) {
+ case 1:
+ type = EFX_TC_COUNTER_TYPE_AR;
+ efx_tc_rx_version_1(efx, data, mark);
+ break;
+ case ERF_SC_PACKETISER_HEADER_VERSION_VALUE: // 2
+ type = efx_tc_rx_version_2(efx, data, mark);
+ break;
+ default:
+ if (net_ratelimit())
+ netif_err(efx, drv, efx->net_dev,
+ "choked on MAE counter packet (bad version %u"
+ "); counters may be inaccurate\n",
+ version);
+ goto out;
+ }
+
+ /* Update seen_gen unconditionally, to avoid a missed wakeup if
+ * we race with efx_mae_stop_counters().
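+ * (efx_mae_stop_counters() sleeps on flush_wq until seen_gen catches
+ * up with flush_gen, so seen_gen must advance even for packets that
+ * arrive before flush_counters is set.)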
+ */ + efx->tc->seen_gen[type] = mark; + if (efx->tc->flush_counters && + (s32)(efx->tc->flush_gen[type] - mark) <= 0) + wake_up(&efx->tc->flush_wq); +out: + efx_free_rx_buffers(rx_queue, rx_buf, 1); + rx_queue->rx_pkt_n_frags = 0; + return true; +} + +static const char *efx_tc_get_queue_name(struct efx_channel *channel, bool tx) +{ + (void)channel; + + if (tx) + return "counter_unused"; + else + return "counter_updates"; +} + +const struct efx_channel_type efx_tc_channel_type = { + .handle_no_channel = efx_tc_handle_no_channel, + .pre_probe = efx_tc_probe_channel, + .start = efx_tc_start_channel, + .stop = efx_tc_stop_channel, + .post_remove = efx_tc_remove_channel, + .get_name = efx_tc_get_channel_name, + .receive_raw = efx_tc_rx, + .keep_eventq = true, + .hide_tx = true, + .get_queue_name = efx_tc_get_queue_name, +}; + +#endif /* EFX_TC_OFFLOAD */ diff --git a/src/drivers/net/ethernet/sfc/tc_counters.h b/src/drivers/net/ethernet/sfc/tc_counters.h new file mode 100644 index 0000000..eefeeac --- /dev/null +++ b/src/drivers/net/ethernet/sfc/tc_counters.h @@ -0,0 +1,74 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2022 Xilinx Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#ifndef EFX_TC_COUNTERS_H +#define EFX_TC_COUNTERS_H +#include "net_driver.h" + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_TC_OFFLOAD) +#include <linux/refcount.h> + +#include "mcdi_pcol.h" /* for MAE_COUNTER_TYPE_* */ + +enum efx_tc_counter_type { + EFX_TC_COUNTER_TYPE_AR = MAE_COUNTER_TYPE_AR, + EFX_TC_COUNTER_TYPE_CT = MAE_COUNTER_TYPE_CT, + EFX_TC_COUNTER_TYPE_OR = MAE_COUNTER_TYPE_OR, + EFX_TC_COUNTER_TYPE_MAX +}; + +struct efx_tc_counter { + u32 fw_id; /* index in firmware counter table */ + enum efx_tc_counter_type type; + struct rhash_head linkage; /* efx->tc->counter_ht */ + spinlock_t lock; /* Serialises updates to counter values */ + u32 gen; /* Generation count at which this counter is current */ + u64 packets, bytes; + u64 old_packets, old_bytes; /* Values last time passed to userspace */ + /* jiffies of the last time we saw packets increase */ + unsigned long touched; + struct work_struct work; /* For notifying encap actions */ + /* owners of corresponding count actions */ + struct list_head users; +}; + +struct efx_tc_counter_index { +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_TC_ACTION_COOKIE) + /* cookie is rule, not action, cookie */ +#endif + unsigned long cookie; + struct rhash_head linkage; /* efx->tc->counter_id_ht */ + refcount_t ref; + struct efx_tc_counter *cnt; +}; + +/* create/uncreate/teardown hashtables */ +int efx_tc_init_counters(struct efx_nic *efx); +void efx_tc_destroy_counters(struct efx_nic *efx); +void efx_tc_fini_counters(struct efx_nic *efx); + +struct efx_tc_counter *efx_tc_flower_allocate_counter(struct efx_nic *efx, + int type); +void efx_tc_flower_release_counter(struct efx_nic *efx, + struct efx_tc_counter *cnt); +struct efx_tc_counter_index *efx_tc_flower_get_counter_index( + struct efx_nic *efx, unsigned long cookie, + enum efx_tc_counter_type type); +void efx_tc_flower_put_counter_index(struct efx_nic *efx, + struct efx_tc_counter_index *ctr); +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_TC_ACTION_COOKIE) || defined(EFX_HAVE_TC_FLOW_OFFLOAD) +struct efx_tc_counter_index
*efx_tc_flower_find_counter_index( + struct efx_nic *efx, unsigned long cookie); +#endif + +extern const struct efx_channel_type efx_tc_channel_type; + +#endif /* EFX_TC_OFFLOAD */ + +#endif /* EFX_TC_COUNTERS_H */ diff --git a/src/drivers/net/ethernet/sfc/tc_debugfs.c b/src/drivers/net/ethernet/sfc/tc_debugfs.c new file mode 100644 index 0000000..b206257 --- /dev/null +++ b/src/drivers/net/ethernet/sfc/tc_debugfs.c @@ -0,0 +1,1126 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2022 Xilinx Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#ifdef EFX_USE_KCOMPAT +/* Must come before other headers */ +#include "kernel_compat.h" +#endif + +#include "tc_debugfs.h" +#include "tc.h" +#include "tc_encap_actions.h" +#include "tc_conntrack.h" +#include "nic.h" + +#ifdef CONFIG_DEBUG_FS +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_TC_OFFLOAD) +static void efx_tc_debugfs_dump_encap_match(struct seq_file *file, + struct efx_tc_encap_match *encap) +{ + char errbuf[16]; + + switch (encap->type) { + case EFX_TC_EM_DIRECT: + seq_printf(file, "\tencap_match (%#x)\n", encap->fw_id); + break; + case EFX_TC_EM_PSEUDO_OR: + seq_printf(file, "\tencap_match (pseudo for OR)\n"); + break; + case EFX_TC_EM_PSEUDO_TOS: + seq_printf(file, "\tencap_match (pseudo for IP ToS & %#04x)\n", + encap->child_ip_tos_mask); + break; + default: + seq_printf(file, "\tencap_match (pseudo type %d)\n", + encap->type); + break; + } +#ifdef CONFIG_IPV6 + if (encap->src_ip | encap->dst_ip) { +#endif + seq_printf(file, "\t\tsrc_ip = %pI4\n", &encap->src_ip); + seq_printf(file, "\t\tdst_ip = %pI4\n", &encap->dst_ip); +#ifdef CONFIG_IPV6 + } else { + seq_printf(file, "\t\tsrc_ip6 = %pI6c\n", &encap->src_ip6); + seq_printf(file, "\t\tdst_ip6 = %pI6c\n", &encap->dst_ip6); + } +#endif + seq_printf(file, "\t\tudp_dport = %u\n", be16_to_cpu(encap->udp_dport)); + if (encap->ip_tos_mask) { + if (encap->ip_tos_mask == 0xff) + seq_printf(file, "\t\tip_tos = %#04x (%u)\n", + encap->ip_tos, encap->ip_tos); + else + seq_printf(file, "\t\tip_tos & %#04x = %#04x\n", + encap->ip_tos_mask, encap->ip_tos); + } + seq_printf(file, "\t\ttun_type = %s\n", + efx_tc_encap_type_name(encap->tun_type, errbuf, + sizeof(errbuf))); + seq_printf(file, "\t\tref = %u\n", refcount_read(&encap->ref)); + if (encap->pseudo) { + seq_printf(file, "\t\thas_pseudo ::\n"); + efx_tc_debugfs_dump_encap_match(file, encap->pseudo); + } +} + +#define efx_tc_debugfs_dump_fmt_match_item(_file, _name, _key, _mask, _fmt, _amp)\ +do { \ + if (_mask) { \ + if (~_mask) \ + seq_printf(file, "\t%s & " _fmt " = " _fmt "\n", _name,\ + _amp _mask, _amp _key); \ + else \ + seq_printf(file, "\t%s = " _fmt "\n", _name, _amp _key);\ + } \ +} while (0) + +static void efx_tc_debugfs_dump_fmt_ptr_match_item(struct seq_file *file, + const char *name, void *key, + void *mask, size_t len, + const char *fmt) +{ + char maskbuf[40], keybuf[40]; + + if (!memchr_inv(mask, 0, len)) + return; /* mask is all-0s */ + snprintf(keybuf, sizeof(keybuf), fmt, key); + if (memchr_inv(mask, 0xff, len)) { /* masked */ + snprintf(maskbuf, sizeof(maskbuf), fmt, mask); + seq_printf(file, "\t%s & %s = %s\n", name, maskbuf, keybuf); + } else { /* mask is all-1s */ + seq_printf(file, "\t%s = %s\n", name, keybuf); + } +} + +static void 
efx_tc_debugfs_dump_one_match_item(struct seq_file *file, + const char *name, void *key, + void *mask, size_t len) +{ + if (!memchr_inv(mask, 0, len)) + return; /* mask is all-0s */ + if (memchr_inv(mask, 0xff, len)) /* masked */ + seq_printf(file, "\t%s & 0x%*phN = 0x%*phN\n", name, (int)len, mask, + (int)len, key); + else /* mask is all-1s */ + switch (len) { + case 1: + seq_printf(file, "\t%s = %#04x (%u)\n", name, + *(u8 *)key, *(u8 *)key); + break; + case 2: + seq_printf(file, "\t%s = %#06x (%u)\n", name, + be16_to_cpu(*(__be16 *)key), + be16_to_cpu(*(__be16 *)key)); + break; + case 4: + seq_printf(file, "\t%s = %#010x (%u)\n", name, + be32_to_cpu(*(__be32 *)key), + be32_to_cpu(*(__be32 *)key)); + break; + default: + seq_printf(file, "\t%s = 0x%*phN\n", name, (int)len, key); + break; + } +} + +static void efx_tc_debugfs_dump_ct_bits(struct seq_file *file, + struct efx_tc_match *match) +{ + if (!(match->mask.ct_state_trk || match->mask.ct_state_est || + match->mask.ct_state_rel || match->mask.ct_state_new)) + return; /* mask is all-0s */ + seq_printf(file, "\tct_state ="); +#define DUMP_ONE_BIT(name) \ + if (match->mask.ct_state_##name) \ + seq_printf(file, " %c%s", \ + match->value.ct_state_##name ? '+' : '-', \ + #name) + DUMP_ONE_BIT(trk); + DUMP_ONE_BIT(est); + DUMP_ONE_BIT(rel); + DUMP_ONE_BIT(new); +#undef DUMP_ONE_BIT + seq_printf(file, "\n"); +} + +static void efx_tc_debugfs_dump_match(struct seq_file *file, + struct efx_tc_match *match) +{ + if (match->encap) + efx_tc_debugfs_dump_encap_match(file, match->encap); +#define DUMP_ONE_MATCH(_field) \ + efx_tc_debugfs_dump_one_match_item(file, #_field, &match->value._field,\ + &match->mask._field, \ + sizeof(match->value._field)) +#define DUMP_FMT_MATCH(_field, _fmt) \ + efx_tc_debugfs_dump_fmt_match_item(file, #_field, match->value._field, \ + match->mask._field, _fmt, ) +#define DUMP_FMT_AMP_MATCH(_field, _fmt) \ + efx_tc_debugfs_dump_fmt_match_item(file, #_field, match->value._field, \ + match->mask._field, _fmt, &) +#define DUMP_FMT_PTR_MATCH(_field, _fmt) \ + efx_tc_debugfs_dump_fmt_ptr_match_item(file, #_field, \ + &match->value._field, \ + &match->mask._field, \ + sizeof(match->value._field), \ + _fmt) + DUMP_FMT_MATCH(ingress_port, "%#010x"); + DUMP_ONE_MATCH(eth_proto); + DUMP_ONE_MATCH(vlan_tci[0]); + DUMP_ONE_MATCH(vlan_proto[0]); + DUMP_ONE_MATCH(vlan_tci[1]); + DUMP_ONE_MATCH(vlan_proto[1]); + DUMP_FMT_PTR_MATCH(eth_saddr, "%pM"); + DUMP_FMT_PTR_MATCH(eth_daddr, "%pM"); + DUMP_ONE_MATCH(ip_proto); + DUMP_ONE_MATCH(ip_tos); + DUMP_ONE_MATCH(ip_ttl); + if (match->mask.ip_frag) + seq_printf(file, "\tip_frag = %d\n", match->value.ip_frag); + if (match->mask.ip_firstfrag) + seq_printf(file, "\tip_firstfrag = %d\n", match->value.ip_firstfrag); + DUMP_FMT_AMP_MATCH(src_ip, "%pI4"); + DUMP_FMT_AMP_MATCH(dst_ip, "%pI4"); +#ifdef CONFIG_IPV6 + DUMP_FMT_PTR_MATCH(src_ip6, "%pI6"); + DUMP_FMT_PTR_MATCH(dst_ip6, "%pI6"); +#endif + DUMP_ONE_MATCH(l4_sport); + DUMP_ONE_MATCH(l4_dport); + DUMP_ONE_MATCH(tcp_flags); + DUMP_ONE_MATCH(tcp_syn_fin_rst); + DUMP_FMT_AMP_MATCH(enc_src_ip, "%pI4"); + DUMP_FMT_AMP_MATCH(enc_dst_ip, "%pI4"); +#ifdef CONFIG_IPV6 + DUMP_FMT_PTR_MATCH(enc_src_ip6, "%pI6c"); + DUMP_FMT_PTR_MATCH(enc_dst_ip6, "%pI6c"); +#endif + DUMP_ONE_MATCH(enc_ip_tos); + DUMP_ONE_MATCH(enc_ip_ttl); + DUMP_ONE_MATCH(enc_sport); + DUMP_ONE_MATCH(enc_dport); + DUMP_ONE_MATCH(enc_keyid); + efx_tc_debugfs_dump_ct_bits(file, match); + DUMP_FMT_MATCH(ct_mark, "%#010x"); + DUMP_ONE_MATCH(recirc_id); +#undef DUMP_ONE_MATCH +#undef 
DUMP_FMT_MATCH +#undef DUMP_FMT_AMP_MATCH +#undef DUMP_FMT_PTR_MATCH +} + +static void efx_tc_debugfs_dump_one_rule(struct seq_file *file, + struct efx_tc_flow_rule *rule) +{ + struct efx_tc_action_set *act; + + seq_printf(file, "%#lx (%#x)\n", rule->cookie, rule->fw_id); + + efx_tc_debugfs_dump_match(file, &rule->match); + + seq_printf(file, "\taction_set_list (%#x)\n", rule->acts.fw_id); + list_for_each_entry(act, &rule->acts.list, list) { + seq_printf(file, "\t\taction_set (%#x)\n", act->fw_id); + if (act->decap) + seq_printf(file, "\t\t\tdecap\n"); + if (act->vlan_pop & BIT(1)) + seq_printf(file, "\t\t\tvlan1_pop\n"); + if (act->vlan_pop & BIT(0)) + seq_printf(file, "\t\t\tvlan0_pop\n"); + if (act->src_mac) { + seq_printf(file, "\t\t\tpedit src_mac (%#x)\n", + act->src_mac->fw_id); + seq_printf(file, "\t\t\t\th_addr=%pM\n", + act->src_mac->h_addr); + } + if (act->dst_mac) { + seq_printf(file, "\t\t\tpedit dst_mac (%#x)\n", + act->dst_mac->fw_id); + seq_printf(file, "\t\t\t\th_addr=%pM\n", + act->dst_mac->h_addr); + } + if (act->vlan_push & BIT(0)) + seq_printf(file, "\t\t\tvlan0_push tci=%u proto=%x\n", + be16_to_cpu(act->vlan_tci[0]), + be16_to_cpu(act->vlan_proto[0])); + if (act->vlan_push & BIT(1)) + seq_printf(file, "\t\t\tvlan1_push tci=%u proto=%x\n", + be16_to_cpu(act->vlan_tci[1]), + be16_to_cpu(act->vlan_proto[1])); + if (act->do_nat) + seq_printf(file, "\t\t\tnat\n"); + if (act->count) { + u32 fw_id; + + if (WARN_ON(!act->count->cnt)) + fw_id = MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_NULL; + else + fw_id = act->count->cnt->fw_id; +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_TC_FLOW_OFFLOAD) + seq_printf(file, "\t\t\tcount (%#x) act_idx=%d\n", + fw_id, act->count_action_idx); +#else + seq_printf(file, "\t\t\tcount (%#x)\n", fw_id); +#endif +#if defined(EFX_USE_KCOMPAT) && defined(EFX_HAVE_TC_ACTION_COOKIE) + seq_printf(file, "\t\t\t\tcookie = %#lx\n", + act->count->cookie); +#endif + } + if (act->encap_md) { + enum efx_encap_type type = act->encap_md->type & EFX_ENCAP_TYPES_MASK; + bool v6 = act->encap_md->type & EFX_ENCAP_FLAG_IPV6; + char errbuf[16]; + + seq_printf(file, "\t\t\tencap (%#x)\n", act->encap_md->fw_id); + seq_printf(file, "\t\t\t\ttype = %s IPv%d\n", + efx_tc_encap_type_name(type, errbuf, + sizeof(errbuf)), + v6 ? 6 : 4); + seq_printf(file, "\t\t\t\tkey\n"); + seq_printf(file, "\t\t\t\t\ttun_id = %llu\n", + be64_to_cpu(act->encap_md->key.tun_id)); + if (v6) { + seq_printf(file, "\t\t\t\t\tsrc = %pI6c\n", + &act->encap_md->key.u.ipv6.src); + seq_printf(file, "\t\t\t\t\tdst = %pI6c\n", + &act->encap_md->key.u.ipv6.dst); + } else { + seq_printf(file, "\t\t\t\t\tsrc = %pI4\n", + &act->encap_md->key.u.ipv4.src); + seq_printf(file, "\t\t\t\t\tdst = %pI4\n", + &act->encap_md->key.u.ipv4.dst); + } + seq_printf(file, "\t\t\t\t\ttun_flags = %#x\n", + be16_to_cpu(act->encap_md->key.tun_flags)); + seq_printf(file, "\t\t\t\t\ttos = %#x\n", + act->encap_md->key.tos); + seq_printf(file, "\t\t\t\t\tttl = %u\n", + act->encap_md->key.ttl); + seq_printf(file, "\t\t\t\t\tflow_label = %#x\n", + be32_to_cpu(act->encap_md->key.label)); + seq_printf(file, "\t\t\t\t\ttp_src = %u\n", + be16_to_cpu(act->encap_md->key.tp_src)); + seq_printf(file, "\t\t\t\t\ttp_dst = %u\n", + be16_to_cpu(act->encap_md->key.tp_dst)); + seq_printf(file, "\t\t\t\tneigh %svalid\n", + act->encap_md->n_valid ? 
"" : "in"); + } + if (act->deliver) + seq_printf(file, "\t\t\tdeliver %#010x\n", act->dest_mport); + } +} + +static int efx_tc_debugfs_dump_rules(struct seq_file *file, void *data) +{ + struct efx_tc_flow_rule *rule; + struct rhashtable_iter walk; + struct efx_nic *efx = data; + + mutex_lock(&efx->tc->mutex); + if (efx->tc->up) { + rhashtable_walk_enter(&efx->tc->match_action_ht, &walk); + rhashtable_walk_start(&walk); + while ((rule = rhashtable_walk_next(&walk)) != NULL) { + if (IS_ERR(rule)) + continue; + efx_tc_debugfs_dump_one_rule(file, rule); + } + rhashtable_walk_stop(&walk); + rhashtable_walk_exit(&walk); + } else { + seq_printf(file, "tc is down\n"); + } + mutex_unlock(&efx->tc->mutex); + + return 0; +} + +static int efx_tc_debugfs_dump_default_rules(struct seq_file *file, void *data) +{ + struct mae_mport_desc *mport; + struct rhashtable_iter walk; + struct efx_nic *efx = data; + struct net_device *rep_dev; + struct efx_rep *efv; + int i; + + mutex_lock(&efx->tc->mutex); + if (efx->tc->dflt.pf.fw_id != MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL) + efx_tc_debugfs_dump_one_rule(file, &efx->tc->dflt.pf); + if (efx->tc->dflt.wire.fw_id != MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL) + efx_tc_debugfs_dump_one_rule(file, &efx->tc->dflt.wire); + for (i = 0; i < EFX_TC_VF_MAX; i++) { + rep_dev = efx_get_vf_rep(efx, i); + if (IS_ERR_OR_NULL(rep_dev)) + continue; + efv = netdev_priv(rep_dev); + if (efv->dflt.fw_id != MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL) + efx_tc_debugfs_dump_one_rule(file, &efv->dflt); + } + if (!efx->mae) + goto out_unlock; + /* Takes RCU read lock, so entries cannot be freed under us */ + rhashtable_walk_enter(&efx->mae->mports_ht, &walk); + rhashtable_walk_start(&walk); + while ((mport = rhashtable_walk_next(&walk)) != NULL) { + efv = mport->efv; + if (!efv) + continue; + if (efv->dflt.fw_id != MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL) + efx_tc_debugfs_dump_one_rule(file, &efv->dflt); + } + rhashtable_walk_stop(&walk); + rhashtable_walk_exit(&walk); +out_unlock: + mutex_unlock(&efx->tc->mutex); + return 0; +} + +static const char *efx_mae_counter_type_name(enum efx_tc_counter_type type) +{ + switch (type) { + case EFX_TC_COUNTER_TYPE_AR: + return "AR"; + case EFX_TC_COUNTER_TYPE_CT: + return "CT"; + case EFX_TC_COUNTER_TYPE_OR: + return "OR"; + default: + return NULL; + } +}; + +static void efx_tc_debugfs_dump_one_counter(struct seq_file *file, + struct efx_tc_counter *cnt) +{ + u64 packets, bytes, old_packets, old_bytes; + enum efx_tc_counter_type type; + const char *type_name; + unsigned long age; + u32 gen; + + /* get a consistent view */ + spin_lock_bh(&cnt->lock); + packets = cnt->packets; + bytes = cnt->bytes; + old_packets = cnt->old_packets; + old_bytes = cnt->old_bytes; + age = jiffies - cnt->touched; + gen = cnt->gen; + type = cnt->type; + spin_unlock_bh(&cnt->lock); + + type_name = efx_mae_counter_type_name(type); + if (type_name) + seq_printf(file, "%s %#x: ", type_name, cnt->fw_id); + else + seq_printf(file, "unk-%d %#x: ", type, cnt->fw_id); + seq_printf(file, "%llu pkts %llu bytes (old %llu, %llu) gen %u age %lu\n", + packets, bytes, old_packets, old_bytes, gen, age); +} + +static int efx_tc_debugfs_dump_mae_counters(struct seq_file *file, void *data) +{ + struct rhashtable_iter walk; + struct efx_nic *efx = data; + struct efx_tc_counter *cnt; + + mutex_lock(&efx->tc->mutex); + if (efx->tc->up) { + rhashtable_walk_enter(&efx->tc->counter_ht, &walk); + rhashtable_walk_start(&walk); + while ((cnt = 
rhashtable_walk_next(&walk)) != NULL) { + if (IS_ERR(cnt)) + continue; + efx_tc_debugfs_dump_one_counter(file, cnt); + } + rhashtable_walk_stop(&walk); + rhashtable_walk_exit(&walk); + } else { + seq_printf(file, "tc is down\n"); + } + mutex_unlock(&efx->tc->mutex); + + return 0; +} + +static int efx_tc_debugfs_dump_mae_macs(struct seq_file *file, void *data) +{ + struct efx_tc_mac_pedit_action *ped; + struct rhashtable_iter walk; + struct efx_nic *efx = data; + + mutex_lock(&efx->tc->mutex); + if (efx->tc->up) { + rhashtable_walk_enter(&efx->tc->mac_ht, &walk); + rhashtable_walk_start(&walk); + while ((ped = rhashtable_walk_next(&walk)) != NULL) { + if (IS_ERR(ped)) + continue; + seq_printf(file, "%#x: %pM ref %u\n", ped->fw_id, + ped->h_addr, refcount_read(&ped->ref)); + } + rhashtable_walk_stop(&walk); + rhashtable_walk_exit(&walk); + } else { + seq_printf(file, "tc is down\n"); + } + mutex_unlock(&efx->tc->mutex); + + return 0; +} + +static int efx_tc_debugfs_dump_recirc_ids(struct seq_file *file, void *data) +{ + struct efx_tc_recirc_id *rid; + struct rhashtable_iter walk; + struct efx_nic *efx = data; + + mutex_lock(&efx->tc->mutex); + if (efx->tc->up) { + rhashtable_walk_enter(&efx->tc->recirc_ht, &walk); + rhashtable_walk_start(&walk); + while ((rid = rhashtable_walk_next(&walk)) != NULL) { + if (IS_ERR(rid)) + continue; + seq_printf(file, "%#x: %u\n", rid->fw_id, rid->chain_index); + if (rid->net_dev != efx->net_dev) + seq_printf(file, "\tnetdev %s\n", + netdev_name(rid->net_dev)); + } + rhashtable_walk_stop(&walk); + rhashtable_walk_exit(&walk); + } else { + seq_printf(file, "tc is down\n"); + } + mutex_unlock(&efx->tc->mutex); + + return 0; +} + +static void efx_tc_debugfs_dump_lhs_rule(struct seq_file *file, + struct efx_tc_lhs_rule *rule) +{ + struct efx_tc_lhs_action *act = &rule->lhs_act; + char errbuf[16]; + + seq_printf(file, "%#lx (%s %#x)\n", rule->cookie, + rule->is_ar ? "AR" : "OR", rule->fw_id); + + efx_tc_debugfs_dump_match(file, &rule->match); + + seq_printf(file, "\tlhs_action\n"); + if (act->tun_type) { + seq_printf(file, "\t\ttun_type = %s\n", + efx_tc_encap_type_name(act->tun_type, errbuf, + sizeof(errbuf))); + } + seq_printf(file, "\t\trecirc_id %#02x\n", act->rid ? 
act->rid->fw_id : 0); +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_CONNTRACK_OFFLOAD) + if (act->zone) { + seq_printf(file, "\t\tct domain %#x\n", act->zone->domain); + seq_printf(file, "\t\t\tzone %u\n", act->zone->zone); + seq_printf(file, "\t\t\tvni_mode %u\n", act->zone->vni_mode); + } +#endif + if (act->count) { + seq_printf(file, "\t\tcount %#x\n", act->count->cnt->fw_id); +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_TC_FLOW_OFFLOAD) + seq_printf(file, "\t\t\tact_idx=%d\n", + act->count_action_idx); +#endif +#if defined(EFX_USE_KCOMPAT) && defined(EFX_HAVE_TC_ACTION_COOKIE) + seq_printf(file, "\t\t\tcookie = %#lx\n", + act->count->cookie); +#else + /* count->cookie == rule->cookie, no point printing it again */ +#endif + } +} + +static int efx_tc_debugfs_dump_lhs_rules(struct seq_file *file, void *data) +{ + struct efx_tc_lhs_rule *rule; + struct rhashtable_iter walk; + struct efx_nic *efx = data; + + mutex_lock(&efx->tc->mutex); + if (efx->tc->up) { + rhashtable_walk_enter(&efx->tc->lhs_rule_ht, &walk); + rhashtable_walk_start(&walk); + while ((rule = rhashtable_walk_next(&walk)) != NULL) { + if (IS_ERR(rule)) + continue; + efx_tc_debugfs_dump_lhs_rule(file, rule); + } + rhashtable_walk_stop(&walk); + rhashtable_walk_exit(&walk); + } else { + seq_printf(file, "tc is down\n"); + } + mutex_unlock(&efx->tc->mutex); + + return 0; +} + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_CONNTRACK_OFFLOAD) +static void efx_tc_debugfs_dump_ct(struct seq_file *file, + struct efx_tc_ct_entry *conn) +{ + seq_printf(file, "%#lx\n", conn->cookie); + seq_printf(file, "\tdomain = %#x\n", conn->domain); + seq_printf(file, "\teth_proto = %#06x\n", be16_to_cpu(conn->eth_proto)); + seq_printf(file, "\tip_proto = %#04x (%u)\n", + conn->ip_proto, conn->ip_proto); + switch (conn->eth_proto) { + case htons(ETH_P_IP): + seq_printf(file, "\tsrc = %pI4:%u\n", &conn->src_ip, + be16_to_cpu(conn->l4_sport)); + seq_printf(file, "\tdst = %pI4:%u\n", &conn->dst_ip, + be16_to_cpu(conn->l4_dport)); + seq_printf(file, "\t%cnat = %pI4:%u\n", conn->dnat ? 
'd' : 's', + &conn->nat_ip, be16_to_cpu(conn->l4_natport)); + break; +#ifdef CONFIG_IPV6 + case htons(ETH_P_IPV6): + seq_printf(file, "\tsrc = %pI6c:%u\n", &conn->src_ip6, + be16_to_cpu(conn->l4_sport)); + seq_printf(file, "\tdst = %pI6c:%u\n", &conn->dst_ip6, + be16_to_cpu(conn->l4_dport)); + break; +#endif + default: + break; + } + seq_printf(file, "\tmark = %#x (%u)\n", conn->mark, conn->mark); + if (conn->cnt) + seq_printf(file, "\tcount %#x\n", conn->cnt->fw_id); +} + +static int efx_tc_debugfs_dump_cts(struct seq_file *file, void *data) +{ + struct efx_tc_ct_entry *conn; + struct rhashtable_iter walk; + struct efx_nic *efx = data; + + mutex_lock(&efx->tc->mutex); + if (efx->tc->up) { + rhashtable_walk_enter(&efx->tc->ct_ht, &walk); + rhashtable_walk_start(&walk); + while ((conn = rhashtable_walk_next(&walk)) != NULL) { + if (IS_ERR(conn)) + continue; + efx_tc_debugfs_dump_ct(file, conn); + } + rhashtable_walk_stop(&walk); + rhashtable_walk_exit(&walk); + } else { + seq_printf(file, "tc is down\n"); + } + mutex_unlock(&efx->tc->mutex); + + return 0; +} +#endif + +static const char *efx_mae_field_names[] = { +#define NAME(_name) [MAE_FIELD_##_name] = #_name + NAME(INGRESS_PORT), + NAME(MARK), + NAME(RECIRC_ID), + NAME(IS_IP_FRAG), + NAME(DO_CT), + NAME(CT_HIT), + NAME(CT_MARK), + NAME(CT_DOMAIN), + NAME(ETHER_TYPE), + NAME(CT_PRIVATE_FLAGS), + NAME(IS_FROM_NETWORK), + NAME(HAS_OVLAN), + NAME(HAS_IVLAN), + NAME(ENC_HAS_OVLAN), + NAME(ENC_HAS_IVLAN), + NAME(ENC_IP_FRAG), + NAME(VLAN0_TCI), + NAME(VLAN0_PROTO), + NAME(VLAN1_TCI), + NAME(VLAN1_PROTO), + NAME(ETH_SADDR), + NAME(ETH_DADDR), + NAME(SRC_IP4), + NAME(SRC_IP6), + NAME(DST_IP4), + NAME(DST_IP6), + NAME(IP_PROTO), + NAME(IP_TOS), + NAME(IP_TTL), + NAME(IP_FLAGS), + NAME(L4_SPORT), + NAME(L4_DPORT), + NAME(TCP_FLAGS), + NAME(TCP_SYN_FIN_RST), + NAME(IP_FIRST_FRAG), + NAME(ENCAP_TYPE), + NAME(OUTER_RULE_ID), + NAME(ENC_ETHER_TYPE), + NAME(ENC_VLAN0_TCI), + NAME(ENC_VLAN0_PROTO), + NAME(ENC_VLAN1_TCI), + NAME(ENC_VLAN1_PROTO), + NAME(ENC_ETH_SADDR), + NAME(ENC_ETH_DADDR), + NAME(ENC_SRC_IP4), + NAME(ENC_SRC_IP6), + NAME(ENC_DST_IP4), + NAME(ENC_DST_IP6), + NAME(ENC_IP_PROTO), + NAME(ENC_IP_TOS), + NAME(ENC_IP_TTL), + NAME(ENC_IP_FLAGS), + NAME(ENC_L4_SPORT), + NAME(ENC_L4_DPORT), + NAME(ENC_VNET_ID), +#undef NAME +}; + +static const char *efx_mae_field_support_names[] = { + [MAE_FIELD_UNSUPPORTED] = "UNSUPPORTED", +#define NAME(_name) [MAE_FIELD_SUPPORTED_MATCH_##_name] = #_name + NAME(NEVER), + NAME(ALWAYS), + NAME(OPTIONAL), + NAME(PREFIX), + NAME(MASK), +#undef NAME +}; + +static const char *efx_mae_field_support_name(unsigned int i) +{ + if (i < ARRAY_SIZE(efx_mae_field_support_names)) + return efx_mae_field_support_names[i]; + return "what is this I don't even"; +} + +static int efx_tc_debugfs_dump_mae_caps(struct seq_file *file, u8 *fields) +{ + int i; + + for (i = 0; i < MAE_NUM_FIELDS; i++) + if (efx_mae_field_names[i]) + seq_printf(file, "%s\t%s\n", efx_mae_field_names[i], + efx_mae_field_support_name(fields[i])); + else if(fields[i]) + seq_printf(file, "unknown-%d\t%s\n", i, + efx_mae_field_support_name(fields[i])); + return 0; +} + +static int efx_tc_debugfs_dump_mae_ar_caps(struct seq_file *file, void *data) +{ + struct efx_nic *efx = data; + + return efx_tc_debugfs_dump_mae_caps(file, efx->tc->caps->action_rule_fields); +} + +static int efx_tc_debugfs_dump_mae_or_caps(struct seq_file *file, void *data) +{ + struct efx_nic *efx = data; + + return efx_tc_debugfs_dump_mae_caps(file, efx->tc->caps->outer_rule_fields); +} + 
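+/* Print the encap type's name only when the MAE firmware reports support + * for it, so the mae_tunnel_caps debugfs file below lists exactly the + * tunnel types this adapter can offload. + */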
+static void efx_tc_debugfs_dump_mae_tunnel_cap(struct seq_file *file, + struct efx_nic *efx, + enum efx_encap_type encap) +{ + char errbuf[16]; + + if (!efx_mae_check_encap_type_supported(efx, encap)) + seq_printf(file, "%s\n", efx_tc_encap_type_name(encap, errbuf, + sizeof(errbuf))); +} + +static int efx_tc_debugfs_dump_mae_tunnel_caps(struct seq_file *file, void *data) +{ + struct efx_nic *efx = data; + + efx_tc_debugfs_dump_mae_tunnel_cap(file, efx, EFX_ENCAP_TYPE_VXLAN); + efx_tc_debugfs_dump_mae_tunnel_cap(file, efx, EFX_ENCAP_TYPE_NVGRE); + efx_tc_debugfs_dump_mae_tunnel_cap(file, efx, EFX_ENCAP_TYPE_GENEVE); + return 0; +} + +static int efx_tc_debugfs_dump_action_prios(struct seq_file *file, void *data) +{ + struct efx_nic *efx = data; + + seq_printf(file, "%u\n", efx->tc->caps->action_prios); + return 0; +} + +static int efx_tc_debugfs_dump_mae_neighs(struct seq_file *file, void *data) +{ + struct efx_neigh_binder *neigh; + struct rhashtable_iter walk; + struct efx_nic *efx = data; + + mutex_lock(&efx->tc->mutex); + rhashtable_walk_enter(&efx->tc->neigh_ht, &walk); + rhashtable_walk_start(&walk); + while ((neigh = rhashtable_walk_next(&walk)) != NULL) { + if (IS_ERR(neigh)) + continue; +#ifdef CONFIG_IPV6 + if (neigh->dst_ip) /* IPv4 */ +#endif + seq_printf(file, "%pI4: %svalid %pM ttl %hhu egdev %s ref %u\n", + &neigh->dst_ip, neigh->n_valid ? "" : "in", + neigh->ha, neigh->ttl, neigh->egdev->name, + refcount_read(&neigh->ref)); +#ifdef CONFIG_IPV6 + else /* IPv6 */ + seq_printf(file, "%pI6c: %svalid %pM ttl %hhu egdev %s ref %u\n", + &neigh->dst_ip6, neigh->n_valid ? "" : "in", + neigh->ha, neigh->ttl, neigh->egdev->name, + refcount_read(&neigh->ref)); +#endif + } + rhashtable_walk_stop(&walk); + rhashtable_walk_exit(&walk); + mutex_unlock(&efx->tc->mutex); + return 0; +} + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_RHASHTABLE) +static int efx_tc_debugfs_dump_mports(struct seq_file *file, void *data) +{ + const struct mae_mport_desc *m; + struct rhashtable_iter walk; + struct efx_nic *efx = data; + struct efx_mae *mae = efx->mae; + + if (!mae) + return 0; + + rhashtable_walk_enter(&mae->mports_ht, &walk); + rhashtable_walk_start(&walk); + while ((m = rhashtable_walk_next(&walk)) != NULL) { + char buf[120]; + size_t n; + + n = scnprintf(buf, sizeof(buf), "id %08x flags %02x cf %02x", + m->mport_id, m->flags, m->caller_flags); + if (m->caller_flags & MAE_MPORT_DESC_FLAG__MASK) + /* R = receive, T = transmit (deliver), X = delete, + * Z = zombie. + * Avoided using 'D' for T or X, as that's ambiguous + */ + n += scnprintf(buf + n, sizeof(buf) - n, + " (%c%c%c%c)", + (m->caller_flags & MAE_MPORT_DESC_FLAG_CAN_RECEIVE_ON) ? 'R' : 'r', + (m->caller_flags & MAE_MPORT_DESC_FLAG_CAN_DELIVER_TO) ? 'T' : 't', + (m->caller_flags & MAE_MPORT_DESC_FLAG_CAN_DELETE) ? 'X' : 'x', + (m->caller_flags & MAE_MPORT_DESC_FLAG_IS_ZOMBIE) ? 
'Z' : 'z'); + switch (m->mport_type) { + case MAE_MPORT_DESC_MPORT_TYPE_NET_PORT: + n += scnprintf(buf + n, sizeof(buf) - n, + " type net_port idx %u", m->port_idx); + break; + case MAE_MPORT_DESC_MPORT_TYPE_ALIAS: + n += scnprintf(buf + n, sizeof(buf) - n, + " type alias mport %u", m->alias_mport_id); + break; + case MAE_MPORT_DESC_MPORT_TYPE_VNIC: + n += scnprintf(buf + n, sizeof(buf) - n, " type vnic"); + switch (m->vnic_client_type) { + case MAE_MPORT_DESC_VNIC_CLIENT_TYPE_FUNCTION: + n += scnprintf(buf + n, sizeof(buf) - n, + " ct fn if %u pf %u", + m->interface_idx, m->pf_idx); + if (m->vf_idx != MAE_MPORT_DESC_VF_IDX_NULL) + n += scnprintf(buf + n, sizeof(buf) - n, + " vf %u", m->vf_idx); + break; + case MAE_MPORT_DESC_VNIC_CLIENT_TYPE_PLUGIN: + n += scnprintf(buf + n, sizeof(buf) - n, + " ct plugin"); + break; + default: + n += scnprintf(buf + n, sizeof(buf) - n, + " ct %u\n", m->vnic_client_type); + break; + + } + break; + default: + n += scnprintf(buf + n, sizeof(buf) - n, + " type %u", m->mport_type); + break; + + } + if (m->efv) + n += scnprintf(buf + n, sizeof(buf) - n, + " rep %s", netdev_name(m->efv->net_dev)); + /* Trailing + * '.' will be absent if line truncated to fit buf */ + snprintf(buf + n, sizeof(buf) - n, "."); + seq_printf(file, "%s\n", buf); + + } + + rhashtable_walk_stop(&walk); + rhashtable_walk_exit(&walk); + return 0; +} +#endif + +static const char *efx_mae_field_id_names[] = { +#define NAME(_name) [TABLE_FIELD_ID_##_name] = #_name + NAME(UNUSED), + NAME(SRC_MPORT), + NAME(DST_MPORT), + NAME(SRC_MGROUP_ID), + NAME(NETWORK_PORT_ID), + NAME(IS_FROM_NETWORK), + NAME(CH_VC), + NAME(CH_VC_LOW), + NAME(USER_MARK), + NAME(USER_FLAG), + NAME(COUNTER_ID), + NAME(DISCRIM), + NAME(DST_MAC), + NAME(SRC_MAC), + NAME(OVLAN_TPID_COMPRESSED), + NAME(OVLAN), + NAME(OVLAN_VID), + NAME(IVLAN_TPID_COMPRESSED), + NAME(IVLAN), + NAME(IVLAN_VID), + NAME(ETHER_TYPE), + NAME(SRC_IP), + NAME(DST_IP), + NAME(IP_TOS), + NAME(IP_PROTO), + NAME(SRC_PORT), + NAME(DST_PORT), + NAME(TCP_FLAGS), + NAME(VNI), + NAME(HAS_ENCAP), + NAME(HAS_ENC_OVLAN), + NAME(HAS_ENC_IVLAN), + NAME(HAS_ENC_IP), + NAME(HAS_ENC_IP4), + NAME(HAS_ENC_UDP), + NAME(HAS_OVLAN), + NAME(HAS_IVLAN), + NAME(HAS_IP), + NAME(HAS_L4), + NAME(IP_FRAG), + NAME(IP_FIRST_FRAG), + NAME(IP_TTL_LE_ONE), + NAME(TCP_INTERESTING_FLAGS), + NAME(RDP_PL_CHAN), + NAME(RDP_C_PL_EN), + NAME(RDP_C_PL), + NAME(RDP_D_PL_EN), + NAME(RDP_D_PL), + NAME(RDP_OUT_HOST_CHAN_EN), + NAME(RDP_OUT_HOST_CHAN), + NAME(RECIRC_ID), + NAME(DOMAIN), + NAME(CT_VNI_MODE), + NAME(CT_TCP_FLAGS_INHIBIT), + NAME(DO_CT_IP4_TCP), + NAME(DO_CT_IP4_UDP), + NAME(DO_CT_IP6_TCP), + NAME(DO_CT_IP6_UDP), + NAME(OUTER_RULE_ID), + NAME(ENCAP_TYPE), + NAME(ENCAP_TUNNEL_ID), + NAME(CT_ENTRY_ID), + NAME(NAT_PORT), + NAME(NAT_IP), + NAME(NAT_DIR), + NAME(CT_MARK), + NAME(CT_PRIV_FLAGS), + NAME(CT_HIT), + NAME(SUPPRESS_SELF_DELIVERY), + NAME(DO_DECAP), + NAME(DECAP_DSCP_COPY), + NAME(DECAP_ECN_RFC6040), + NAME(DO_REPLACE_DSCP), + NAME(DO_REPLACE_ECN), + NAME(DO_DECR_IP_TTL), + NAME(DO_SRC_MAC), + NAME(DO_DST_MAC), + NAME(DO_VLAN_POP), + NAME(DO_VLAN_PUSH), + NAME(DO_COUNT), + NAME(DO_ENCAP), + NAME(ENCAP_DSCP_COPY), + NAME(ENCAP_ECN_COPY), + NAME(DO_DELIVER), + NAME(DO_FLAG), + NAME(DO_MARK), + NAME(DO_SET_NET_CHAN), + NAME(DO_SET_SRC_MPORT), + NAME(ENCAP_HDR_ID), + NAME(DSCP_VALUE), + NAME(ECN_CONTROL), + NAME(SRC_MAC_ID), + NAME(DST_MAC_ID), + NAME(REPORTED_SRC_MPORT_OR_NET_CHAN), + NAME(CHUNK64), + NAME(CHUNK32), + NAME(CHUNK16), + NAME(CHUNK8), + NAME(CHUNK4), + NAME(CHUNK2), + 
NAME(HDR_LEN_W), + NAME(ENC_LACP_HASH_L23), + NAME(ENC_LACP_HASH_L4), + NAME(USE_ENC_LACP_HASHES), + NAME(DO_CT), + NAME(DO_NAT), + NAME(DO_RECIRC), + NAME(NEXT_ACTION_SET_PAYLOAD), + NAME(NEXT_ACTION_SET_ROW), + NAME(MC_ACTION_SET_PAYLOAD), + NAME(MC_ACTION_SET_ROW), + NAME(LACP_INC_L4), + NAME(LACP_PLUGIN), + NAME(BAL_TBL_BASE_DIV64), + NAME(BAL_TBL_LEN_ID), + NAME(UDP_PORT), + NAME(RSS_ON_OUTER), + NAME(STEER_ON_OUTER), + NAME(DST_QID), + NAME(DROP), + NAME(VLAN_STRIP), + NAME(MARK_OVERRIDE), + NAME(FLAG_OVERRIDE), + NAME(RSS_CTX_ID), + NAME(RSS_EN), + NAME(KEY), + NAME(TCP_V4_KEY_MODE), + NAME(TCP_V6_KEY_MODE), + NAME(UDP_V4_KEY_MODE), + NAME(UDP_V6_KEY_MODE), + NAME(OTHER_V4_KEY_MODE), + NAME(OTHER_V6_KEY_MODE), + NAME(SPREAD_MODE), + NAME(INDIR_TBL_BASE), + NAME(INDIR_TBL_LEN_ID), + NAME(INDIR_OFFSET), +#undef NAME +}; + +static const char *efx_mae_table_masking_names[] = { +#define NAME(_name) [TABLE_FIELD_DESCR_MASK_##_name] = #_name + NAME(NEVER), + NAME(EXACT), + NAME(TERNARY), + NAME(WHOLE_FIELD), + NAME(LPM), +#undef NAME +}; + +static void efx_tc_debugfs_dump_mae_table_field(struct seq_file *file, + const struct efx_tc_table_field_fmt *field, + bool resp) +{ + seq_printf(file, "\t%s ", resp ? "resp" : "key"); + if (field->field_id < ARRAY_SIZE(efx_mae_field_id_names) && + efx_mae_field_id_names[field->field_id]) + seq_printf(file, "%s: ", efx_mae_field_id_names[field->field_id]); + else + seq_printf(file, "unknown-%#x: ", field->field_id); + seq_printf(file, "%u @ %u; ", field->width, field->lbn); + if (field->masking < ARRAY_SIZE(efx_mae_table_masking_names) && + efx_mae_table_masking_names[field->masking]) + seq_printf(file, "mask %s ", + efx_mae_table_masking_names[field->masking]); + else + seq_printf(file, "mask unknown-%#x ", field->masking); + seq_printf(file, "scheme %u\n", field->scheme); +} + +static const char *efx_mae_table_type_names[] = { +#define NAME(_name) [MC_CMD_TABLE_DESCRIPTOR_OUT_TYPE_##_name] = #_name + NAME(DIRECT), + NAME(BCAM), + NAME(TCAM), + NAME(STCAM), +#undef NAME +}; + +static void efx_tc_debugfs_dump_mae_table(struct seq_file *file, + const char *name, + const struct efx_tc_table_desc *meta, + bool hooked) +{ + unsigned int i; + + seq_printf(file, "%s: ", name); + if (meta->type < ARRAY_SIZE(efx_mae_table_type_names) && + efx_mae_table_type_names[meta->type]) + seq_printf(file, "type %s ", + efx_mae_table_type_names[meta->type]); + else + seq_printf(file, "type unknown-%#x ", meta->type); + seq_printf(file, "kw %u rw %u; %u prios; flags %#x scheme %#x\n", + meta->key_width, meta->resp_width, meta->n_prios, + meta->flags, meta->scheme); + for (i = 0; i < meta->n_keys; i++) + efx_tc_debugfs_dump_mae_table_field(file, meta->keys + i, false); + for (i = 0; i < meta->n_resps; i++) + efx_tc_debugfs_dump_mae_table_field(file, meta->resps + i, true); + if (hooked) + seq_printf(file, "\thooked\n"); +} + +static int efx_tc_debugfs_dump_mae_tables(struct seq_file *file, void *data) +{ + struct efx_nic *efx = data; + + efx_tc_debugfs_dump_mae_table(file, "ct", &efx->tc->meta_ct.desc, + efx->tc->meta_ct.hooked); + return 0; +} + +const struct efx_debugfs_parameter efx_tc_debugfs[] = { + _EFX_RAW_PARAMETER(mae_rules, efx_tc_debugfs_dump_rules), + _EFX_RAW_PARAMETER(lhs_rules, efx_tc_debugfs_dump_lhs_rules), + _EFX_RAW_PARAMETER(mae_default_rules, efx_tc_debugfs_dump_default_rules), + _EFX_RAW_PARAMETER(mae_counters, efx_tc_debugfs_dump_mae_counters), + _EFX_RAW_PARAMETER(mae_pedit_macs, efx_tc_debugfs_dump_mae_macs), + _EFX_RAW_PARAMETER(mae_recirc_ids, 
efx_tc_debugfs_dump_recirc_ids), + _EFX_RAW_PARAMETER(mae_action_rule_caps, efx_tc_debugfs_dump_mae_ar_caps), + _EFX_RAW_PARAMETER(mae_outer_rule_caps, efx_tc_debugfs_dump_mae_or_caps), + _EFX_RAW_PARAMETER(mae_tunnel_caps, efx_tc_debugfs_dump_mae_tunnel_caps), + _EFX_RAW_PARAMETER(mae_prios, efx_tc_debugfs_dump_action_prios), + _EFX_RAW_PARAMETER(mae_neighs, efx_tc_debugfs_dump_mae_neighs), +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_CONNTRACK_OFFLOAD) + _EFX_RAW_PARAMETER(tracked_conns, efx_tc_debugfs_dump_cts), +#endif +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_RHASHTABLE) + _EFX_RAW_PARAMETER(mae_mport_map, efx_tc_debugfs_dump_mports), +#endif + _EFX_RAW_PARAMETER(mae_tables, efx_tc_debugfs_dump_mae_tables), + {NULL} +}; +#endif /* EFX_TC_OFFLOAD */ +#else /* CONFIG_DEBUG_FS */ +const struct efx_debugfs_parameter efx_tc_debugfs[] = { + {NULL} +}; +#endif /* CONFIG_DEBUG_FS */ diff --git a/src/drivers/net/ethernet/sfc/tc_debugfs.h b/src/drivers/net/ethernet/sfc/tc_debugfs.h new file mode 100644 index 0000000..d43e699 --- /dev/null +++ b/src/drivers/net/ethernet/sfc/tc_debugfs.h @@ -0,0 +1,20 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2022 Xilinx Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#ifndef EFX_TC_DEBUGFS_H +#define EFX_TC_DEBUGFS_H +#include "net_driver.h" + +#include "debugfs.h" + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_TC_OFFLOAD) +extern const struct efx_debugfs_parameter efx_tc_debugfs[]; +#endif + +#endif /* EFX_TC_DEBUGFS_H */ diff --git a/src/drivers/net/ethernet/sfc/tc_encap_actions.c b/src/drivers/net/ethernet/sfc/tc_encap_actions.c new file mode 100644 index 0000000..d81e31e --- /dev/null +++ b/src/drivers/net/ethernet/sfc/tc_encap_actions.c @@ -0,0 +1,765 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2022 Xilinx Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
+ */ + +#ifdef EFX_USE_KCOMPAT +/* Must come before other headers */ +#include "kernel_compat.h" +#endif + +#include "tc_encap_actions.h" + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_TC_OFFLOAD) +#include "tc.h" +#include "mae.h" +#include <net/vxlan.h> +#include <net/geneve.h> +#include <net/netevent.h> +#include <net/arp.h> + +static const struct rhashtable_params efx_neigh_ht_params = { + .key_len = offsetof(struct efx_neigh_binder, ha), + .key_offset = 0, + .head_offset = offsetof(struct efx_neigh_binder, linkage), +}; + +static const struct rhashtable_params efx_tc_encap_ht_params = { + .key_len = offsetofend(struct efx_tc_encap_action, key), + .key_offset = 0, + .head_offset = offsetof(struct efx_tc_encap_action, linkage), +}; + +static void efx_tc_encap_free(void *ptr, void *__unused) +{ + struct efx_tc_encap_action *enc = ptr; + + WARN_ON(refcount_read(&enc->ref)); + kfree(enc); +} + +static void efx_neigh_free(void *ptr, void *__unused) +{ + struct efx_neigh_binder *neigh = ptr; + + WARN_ON(refcount_read(&neigh->ref)); + WARN_ON(!list_empty(&neigh->users)); + put_net(neigh->net); + dev_put(neigh->egdev); + kfree(neigh); +} + +int efx_tc_init_encap_actions(struct efx_nic *efx) +{ + int rc; + + rc = rhashtable_init(&efx->tc->neigh_ht, &efx_neigh_ht_params); + if (rc < 0) + goto fail_neigh_ht; + rc = rhashtable_init(&efx->tc->encap_ht, &efx_tc_encap_ht_params); + if (rc < 0) + goto fail_encap_ht; + return 0; +fail_encap_ht: + rhashtable_destroy(&efx->tc->neigh_ht); +fail_neigh_ht: + return rc; +} + +/* Only call this in init failure teardown. + * Normal exit should fini instead as there may be entries in the table. + */ +void efx_tc_destroy_encap_actions(struct efx_nic *efx) +{ + rhashtable_destroy(&efx->tc->encap_ht); + rhashtable_destroy(&efx->tc->neigh_ht); +} + +void efx_tc_fini_encap_actions(struct efx_nic *efx) +{ + rhashtable_free_and_destroy(&efx->tc->encap_ht, efx_tc_encap_free, NULL); + rhashtable_free_and_destroy(&efx->tc->neigh_ht, efx_neigh_free, NULL); +} + +static void efx_neigh_update(struct work_struct *work); +static void efx_tc_update_encap(struct efx_nic *efx, + struct efx_tc_encap_action *encap); + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_FLOW_INDR_BLOCK_CB_REGISTER) || defined(EFX_HAVE_FLOW_INDR_DEV_REGISTER) +static void efx_release_neigh(struct efx_nic *efx, + struct efx_tc_encap_action *encap); + +static void efx_tc_remove_neigh_users(struct efx_nic *efx, struct efx_neigh_binder *neigh) +{ + struct efx_tc_encap_action *encap, *next; + + list_for_each_entry_safe(encap, next, &neigh->users, list) { + /* Should cause neigh usage count to fall to zero, freeing it */ + efx_release_neigh(efx, encap); + /* The encap has lost its neigh, so it's now unready */ + efx_tc_update_encap(efx, encap); + } +} + +void efx_tc_unregister_egdev(struct efx_nic *efx, struct net_device *net_dev) +{ + struct efx_neigh_binder *neigh; + struct rhashtable_iter walk; + + mutex_lock(&efx->tc->mutex); + rhashtable_walk_enter(&efx->tc->neigh_ht, &walk); + rhashtable_walk_start(&walk); + while ((neigh = rhashtable_walk_next(&walk)) != NULL) { + if (IS_ERR(neigh)) + continue; + if (neigh->egdev != net_dev) + continue; + neigh->dying = true; + rhashtable_walk_stop(&walk); + synchronize_rcu(); /* Make sure any updates see dying flag */ + efx_tc_remove_neigh_users(efx, neigh); /* might sleep */ + rhashtable_walk_start(&walk); + } + rhashtable_walk_stop(&walk); + rhashtable_walk_exit(&walk); + mutex_unlock(&efx->tc->mutex); +} +#endif + +static int efx_bind_neigh(struct efx_nic *efx, + struct efx_tc_encap_action *encap, struct net *net, +
struct netlink_ext_ack *extack) +{ + struct efx_neigh_binder *neigh, *old; +#ifdef CONFIG_IPV6 + struct flowi6 flow6 = {}; +#endif + struct flowi4 flow4 = {}; + int rc; + + /* GCC stupidly thinks that only values explicitly listed in the enum + * definition can _possibly_ be sensible case values, so without this + * cast it complains about the IPv6 versions. + */ + switch ((int)encap->type) { + case EFX_ENCAP_TYPE_VXLAN: + case EFX_ENCAP_TYPE_GENEVE: + flow4.flowi4_proto = IPPROTO_UDP; + flow4.fl4_dport = encap->key.tp_dst; + flow4.flowi4_tos = encap->key.tos; + flow4.daddr = encap->key.u.ipv4.dst; + flow4.saddr = encap->key.u.ipv4.src; + break; +#ifdef CONFIG_IPV6 + case EFX_ENCAP_TYPE_VXLAN | EFX_ENCAP_FLAG_IPV6: + case EFX_ENCAP_TYPE_GENEVE | EFX_ENCAP_FLAG_IPV6: + flow6.flowi6_proto = IPPROTO_UDP; + flow6.fl6_dport = encap->key.tp_dst; + flow6.flowlabel = ip6_make_flowinfo(RT_TOS(encap->key.tos), + encap->key.label); + flow6.daddr = encap->key.u.ipv6.dst; + flow6.saddr = encap->key.u.ipv6.src; + break; +#endif + default: + EFX_TC_ERR_MSG(efx, extack, "Unsupported encap type"); + return -EOPNOTSUPP; + } + + neigh = kzalloc(sizeof(*neigh), GFP_USER); + if (!neigh) + return -ENOMEM; + neigh->net = get_net(net); + neigh->dst_ip = flow4.daddr; +#ifdef CONFIG_IPV6 + neigh->dst_ip6 = flow6.daddr; +#endif + + old = rhashtable_lookup_get_insert_fast(&efx->tc->neigh_ht, + &neigh->linkage, + efx_neigh_ht_params); + if (old) { + /* don't need our new entry */ + put_net(neigh->net); + kfree(neigh); + if (!refcount_inc_not_zero(&old->ref)) + return -EAGAIN; + /* existing entry found, ref taken */ + neigh = old; + } else { + /* New entry. We need to initiate a lookup */ + struct neighbour *n; + struct rtable *rt; + +#ifdef CONFIG_IPV6 + if (encap->type & EFX_ENCAP_FLAG_IPV6) { + struct dst_entry *dst; +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_IPV6_STUBS_DST_LOOKUP_FLOW) + dst = ipv6_stub->ipv6_dst_lookup_flow(net, NULL, &flow6, + NULL); + rc = PTR_ERR_OR_ZERO(dst); +#else + rc = ipv6_stub->ipv6_dst_lookup(net, NULL, &dst, &flow6); +#endif + if (rc) { + EFX_TC_ERR_MSG(efx, extack, "Failed to lookup route for encap"); + goto out_free; + } + dev_hold(neigh->egdev = dst->dev); + neigh->ttl = ip6_dst_hoplimit(dst); + n = dst_neigh_lookup(dst, &flow6.daddr); + dst_release(dst); + } else { +#endif + rt = ip_route_output_key(net, &flow4); + if (IS_ERR_OR_NULL(rt)) { + rc = PTR_ERR(rt); + if (!rc) + rc = -EIO; + EFX_TC_ERR_MSG(efx, extack, "Failed to lookup route for encap"); + goto out_free; + } + dev_hold(neigh->egdev = rt->dst.dev); + neigh->ttl = ip4_dst_hoplimit(&rt->dst); + n = dst_neigh_lookup(&rt->dst, &flow4.daddr); + ip_rt_put(rt); +#ifdef CONFIG_IPV6 + } +#endif + if (!n) { + rc = -ENETUNREACH; + EFX_TC_ERR_MSG(efx, extack, "Failed to lookup neighbour for encap"); + dev_put(neigh->egdev); + goto out_free; + } + refcount_set(&neigh->ref, 1); + INIT_LIST_HEAD(&neigh->users); + read_lock_bh(&n->lock); + ether_addr_copy(neigh->ha, n->ha); + neigh->n_valid = n->nud_state & NUD_VALID; + read_unlock_bh(&n->lock); + rwlock_init(&neigh->lock); + INIT_WORK(&neigh->work, efx_neigh_update); + neigh->efx = efx; + neigh->used = jiffies; + if (!neigh->n_valid) + /* Prod ARP to find us a neighbour */ + neigh_event_send(n, NULL); + neigh_release(n); + } + /* Add us to this neigh */ + encap->neigh = neigh; + list_add_tail(&encap->list, &neigh->users); + return 0; + +out_free: + /* cleanup common to several error paths */ + rhashtable_remove_fast(&efx->tc->neigh_ht, &neigh->linkage, + efx_neigh_ht_params); + 
efx_neigh_ht_params); + synchronize_rcu(); + put_net(net); + kfree(neigh); + return rc; +} + +static void efx_free_neigh(struct efx_neigh_binder *neigh) +{ + struct efx_nic *efx = neigh->efx; + + rhashtable_remove_fast(&efx->tc->neigh_ht, &neigh->linkage, + efx_neigh_ht_params); + synchronize_rcu(); + dev_put(neigh->egdev); + put_net(neigh->net); + kfree(neigh); +} + +static void efx_release_neigh(struct efx_nic *efx, + struct efx_tc_encap_action *encap) +{ + struct efx_neigh_binder *neigh = encap->neigh; + + if (!neigh) + return; + list_del(&encap->list); + encap->neigh = NULL; + if (!refcount_dec_and_test(&neigh->ref)) + return; /* still in use */ + efx_free_neigh(neigh); +} + +static void efx_neigh_update(struct work_struct *work) +{ + struct efx_neigh_binder *neigh = container_of(work, struct efx_neigh_binder, work); + struct efx_tc_encap_action *encap; + struct efx_nic *efx = neigh->efx; + + mutex_lock(&efx->tc->mutex); + list_for_each_entry(encap, &neigh->users, list) + efx_tc_update_encap(neigh->efx, encap); + /* release ref taken in efx_neigh_event() */ + if (refcount_dec_and_test(&neigh->ref)) + efx_free_neigh(neigh); + mutex_unlock(&efx->tc->mutex); +} + +static int efx_neigh_event(struct efx_nic *efx, struct neighbour *n) +{ + struct efx_neigh_binder keys = {NULL}, *neigh; + char ha[ETH_ALEN]; + unsigned char ipv; + size_t keysize; + bool n_valid; + + if (WARN_ON(!efx->tc)) + return NOTIFY_DONE; + + /* Only care about the IPv4 (ARP) and, when enabled, IPv6 (ND) tables */ + if (n->tbl == &arp_tbl) { + ipv = 4; + keysize = sizeof(keys.dst_ip); +#ifdef CONFIG_IPV6 + } else if (n->tbl == &nd_tbl) { + ipv = 6; + keysize = sizeof(keys.dst_ip6); +#endif + } else { + return NOTIFY_DONE; + } + if (!n->parms) { + netif_warn(efx, drv, efx->net_dev, "neigh_event with no parms!\n"); + return NOTIFY_DONE; + } + keys.net = read_pnet(&n->parms->net); + if (n->tbl->key_len != keysize) { + netif_warn(efx, drv, efx->net_dev, "neigh_event with bad key_len %u\n", + n->tbl->key_len); + return NOTIFY_DONE; + } + read_lock_bh(&n->lock); /* Get a consistent view */ + memcpy(ha, n->ha, ETH_ALEN); + n_valid = (n->nud_state & NUD_VALID) && !n->dead; + read_unlock_bh(&n->lock); + switch (ipv) { + case 4: + memcpy(&keys.dst_ip, n->primary_key, n->tbl->key_len); + break; +#ifdef CONFIG_IPV6 + case 6: + memcpy(&keys.dst_ip6, n->primary_key, n->tbl->key_len); + break; +#endif + default: /* can't happen */ + return NOTIFY_DONE; + } + rcu_read_lock(); + neigh = rhashtable_lookup_fast(&efx->tc->neigh_ht, &keys, + efx_neigh_ht_params); + if (!neigh || neigh->dying) + /* We're not interested in this neighbour */ + goto done; + write_lock_bh(&neigh->lock); + if (n_valid == neigh->n_valid && !memcmp(ha, neigh->ha, ETH_ALEN)) { + write_unlock_bh(&neigh->lock); + /* Nothing has changed; no work to do */ + goto done; + } + neigh->n_valid = n_valid; + memcpy(neigh->ha, ha, ETH_ALEN); + write_unlock_bh(&neigh->lock); + if (refcount_inc_not_zero(&neigh->ref)) { + rcu_read_unlock(); + if (!schedule_work(&neigh->work)) + /* failed to schedule, release the ref we just took */ + if (refcount_dec_and_test(&neigh->ref)) + efx_free_neigh(neigh); + } else { +done: + rcu_read_unlock(); + } + return NOTIFY_DONE; +} + +static void efx_gen_tun_header_eth(struct efx_tc_encap_action *encap, u16 proto) +{ + struct efx_neigh_binder *neigh = encap->neigh; + struct ethhdr *eth; + + memset(encap->encap_hdr, 0, sizeof(encap->encap_hdr)); + encap->encap_hdr_len = sizeof(*eth); + + eth = (void *)encap->encap_hdr; + if (encap->neigh->n_valid) + ether_addr_copy(eth->h_dest, neigh->ha); + else +
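/* neighbour not yet resolved; leave an all-zeroes destination MAC */ +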
eth_zero_addr(eth->h_dest); + ether_addr_copy(eth->h_source, neigh->egdev->dev_addr); + eth->h_proto = htons(proto); +} + +static void efx_gen_tun_header_ipv4(struct efx_tc_encap_action *encap, u8 ipproto, u8 len) +{ + struct efx_neigh_binder *neigh = encap->neigh; + struct ip_tunnel_key *key = &encap->key; + struct iphdr *ip; + + ip = (void *)(encap->encap_hdr + encap->encap_hdr_len); + encap->encap_hdr_len += sizeof(*ip); + + ip->daddr = key->u.ipv4.dst; + ip->saddr = key->u.ipv4.src; + ip->ttl = neigh->ttl; + ip->protocol = ipproto; + ip->version = 0x4; + ip->ihl = 0x5; + ip->tot_len = cpu_to_be16(ip->ihl * 4 + len); + ip_send_check(ip); +} + +#ifdef CONFIG_IPV6 +static void efx_gen_tun_header_ipv6(struct efx_tc_encap_action *encap, u8 ipproto, u8 len) +{ + struct efx_neigh_binder *neigh = encap->neigh; + struct ip_tunnel_key *key = &encap->key; + struct ipv6hdr *ip; + + ip = (void *)(encap->encap_hdr + encap->encap_hdr_len); + encap->encap_hdr_len += sizeof(*ip); + + ip6_flow_hdr(ip, key->tos, key->label); + ip->daddr = key->u.ipv6.dst; + ip->saddr = key->u.ipv6.src; + ip->hop_limit = neigh->ttl; + ip->nexthdr = ipproto; + ip->version = 0x6; + ip->payload_len = cpu_to_be16(len); +} +#endif + +static void efx_gen_tun_header_udp(struct efx_tc_encap_action *encap, u8 len) +{ + struct ip_tunnel_key *key = &encap->key; + struct udphdr *udp; + + udp = (void *)(encap->encap_hdr + encap->encap_hdr_len); + encap->encap_hdr_len += sizeof(*udp); + udp->dest = key->tp_dst; + udp->len = cpu_to_be16(sizeof(*udp) + len); +} + +static void efx_gen_tun_header_vxlan(struct efx_tc_encap_action *encap) +{ + struct ip_tunnel_key *key = &encap->key; + struct vxlanhdr *vxlan; + + vxlan = (void *)(encap->encap_hdr + encap->encap_hdr_len); + encap->encap_hdr_len += sizeof(*vxlan); + vxlan->vx_flags = VXLAN_HF_VNI; + vxlan->vx_vni = vxlan_vni_field(tunnel_id_to_key32(key->tun_id)); +} + +static void efx_gen_tun_header_geneve(struct efx_tc_encap_action *encap) +{ + struct ip_tunnel_key *key = &encap->key; + struct genevehdr *geneve; + u32 vni; + + geneve = (void *)(encap->encap_hdr + encap->encap_hdr_len); + encap->encap_hdr_len += sizeof(*geneve); + geneve->proto_type = htons(ETH_P_TEB); + /* convert tun_id to host-endian so we can use host arithmetic to + * extract individual bytes. 
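+ * For example, tun_id 0x123456 is emitted as vni[] = { 0x12, 0x34, 0x56 }.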
+ */ + vni = ntohl(tunnel_id_to_key32(key->tun_id)); + geneve->vni[0] = vni >> 16; + geneve->vni[1] = vni >> 8; + geneve->vni[2] = vni; +} + +#define vxlan_header_l4_len (sizeof(struct udphdr) + sizeof(struct vxlanhdr)) +#define vxlan4_header_len (sizeof(struct ethhdr) + sizeof(struct iphdr) + vxlan_header_l4_len) +static void efx_gen_vxlan_header_ipv4(struct efx_tc_encap_action *encap) +{ + BUILD_BUG_ON(sizeof(encap->encap_hdr) < vxlan4_header_len); + efx_gen_tun_header_eth(encap, ETH_P_IP); + efx_gen_tun_header_ipv4(encap, IPPROTO_UDP, vxlan_header_l4_len); + efx_gen_tun_header_udp(encap, sizeof(struct vxlanhdr)); + efx_gen_tun_header_vxlan(encap); +} + +#define geneve_header_l4_len (sizeof(struct udphdr) + sizeof(struct genevehdr)) +#define geneve4_header_len (sizeof(struct ethhdr) + sizeof(struct iphdr) + geneve_header_l4_len) +static void efx_gen_geneve_header_ipv4(struct efx_tc_encap_action *encap) +{ + BUILD_BUG_ON(sizeof(encap->encap_hdr) < geneve4_header_len); + efx_gen_tun_header_eth(encap, ETH_P_IP); + efx_gen_tun_header_ipv4(encap, IPPROTO_UDP, geneve_header_l4_len); + efx_gen_tun_header_udp(encap, sizeof(struct genevehdr)); + efx_gen_tun_header_geneve(encap); +} + +#ifdef CONFIG_IPV6 +#define vxlan6_header_len (sizeof(struct ethhdr) + sizeof(struct ipv6hdr) + vxlan_header_l4_len) +static void efx_gen_vxlan_header_ipv6(struct efx_tc_encap_action *encap) +{ + BUILD_BUG_ON(sizeof(encap->encap_hdr) < vxlan6_header_len); + efx_gen_tun_header_eth(encap, ETH_P_IPV6); + efx_gen_tun_header_ipv6(encap, IPPROTO_UDP, vxlan_header_l4_len); + efx_gen_tun_header_udp(encap, sizeof(struct vxlanhdr)); + efx_gen_tun_header_vxlan(encap); +} + +#define geneve6_header_len (sizeof(struct ethhdr) + sizeof(struct ipv6hdr) + geneve_header_l4_len) +static void efx_gen_geneve_header_ipv6(struct efx_tc_encap_action *encap) +{ + BUILD_BUG_ON(sizeof(encap->encap_hdr) < geneve6_header_len); + efx_gen_tun_header_eth(encap, ETH_P_IPV6); + efx_gen_tun_header_ipv6(encap, IPPROTO_UDP, geneve_header_l4_len); + efx_gen_tun_header_udp(encap, sizeof(struct genevehdr)); + efx_gen_tun_header_geneve(encap); +} +#endif + +static void efx_gen_encap_header(struct efx_tc_encap_action *encap) +{ + encap->n_valid = encap->neigh->n_valid; + + memset(encap->encap_hdr, 0, sizeof(encap->encap_hdr)); + /* GCC stupidly thinks that only values explicitly listed in the enum + * definition can _possibly_ be sensible case values, so without this + * cast it complains about the IPv6 versions. + */ + switch ((int)encap->type) { + case EFX_ENCAP_TYPE_VXLAN: + efx_gen_vxlan_header_ipv4(encap); + break; + case EFX_ENCAP_TYPE_GENEVE: + efx_gen_geneve_header_ipv4(encap); + break; +#ifdef CONFIG_IPV6 + case EFX_ENCAP_TYPE_VXLAN | EFX_ENCAP_FLAG_IPV6: + efx_gen_vxlan_header_ipv6(encap); + break; + case EFX_ENCAP_TYPE_GENEVE | EFX_ENCAP_FLAG_IPV6: + efx_gen_geneve_header_ipv6(encap); + break; +#endif + default: + /* unhandled encap type, can't happen */ + WARN_ON(1); + /* Make sure we don't leak arbitrary bytes on the wire; + * set an all-0s ethernet header. + */ + encap->encap_hdr_len = ETH_HLEN; + break; + } +} + +bool efx_tc_check_ready(struct efx_nic *efx, struct efx_tc_flow_rule *rule) +{ + struct efx_tc_action_set *act; + + /* How's our neigh? 
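An encap action whose neighbour has not resolved makes the whole rule not ready, so efx_tc_update_encap() keeps it on its fallback action set.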
*/ + list_for_each_entry(act, &rule->acts.list, list) + if (act->encap_md && !act->encap_md->n_valid) + return false; /* ENOHORSE */ + return true; +} + +static void efx_tc_update_encap(struct efx_nic *efx, + struct efx_tc_encap_action *encap) +{ + struct efx_tc_action_set_list *acts, *fallback; + struct efx_tc_flow_rule *rule; + struct efx_tc_action_set *act; + int rc; + + if (encap->n_valid) { + /* Make sure no rules are using this encap while we change it */ + list_for_each_entry(act, &encap->users, encap_user) { + acts = act->user; + if (WARN_ON(!acts)) /* can't happen */ + continue; + rule = container_of(acts, struct efx_tc_flow_rule, acts); + if (rule->fallback) + fallback = rule->fallback; + else /* fallback fallback: deliver to PF */ + fallback = &efx->tc->facts.pf; + rc = efx_mae_update_rule(efx, fallback->fw_id, + rule->fw_id); + if (rc) + netif_err(efx, drv, efx->net_dev, + "Failed to update (f) rule %08x rc %d\n", + rule->fw_id, rc); + else + netif_dbg(efx, drv, efx->net_dev, "Updated (f) rule %08x\n", + rule->fw_id); + } + } + + if (encap->neigh) { + read_lock_bh(&encap->neigh->lock); + efx_gen_encap_header(encap); + read_unlock_bh(&encap->neigh->lock); + } else { + encap->n_valid = false; + memset(encap->encap_hdr, 0, sizeof(encap->encap_hdr)); + encap->encap_hdr_len = ETH_HLEN; + } + + rc = efx_mae_update_encap_md(efx, encap); + if (rc) { + netif_err(efx, drv, efx->net_dev, + "Failed to update encap hdr %08x rc %d\n", + encap->fw_id, rc); + return; + } + netif_dbg(efx, drv, efx->net_dev, "Updated encap hdr %08x\n", + encap->fw_id); + if (!encap->n_valid) + return; + /* Update rule users: use the action if they are now ready */ + list_for_each_entry(act, &encap->users, encap_user) { + acts = act->user; + if (WARN_ON(!acts)) /* can't happen */ + continue; + rule = container_of(acts, struct efx_tc_flow_rule, acts); + if (!efx_tc_check_ready(efx, rule)) + continue; + rc = efx_mae_update_rule(efx, acts->fw_id, rule->fw_id); + if (rc) + netif_err(efx, drv, efx->net_dev, + "Failed to update rule %08x rc %d\n", + rule->fw_id, rc); + else + netif_dbg(efx, drv, efx->net_dev, "Updated rule %08x\n", + rule->fw_id); + } +} + +struct efx_tc_encap_action *efx_tc_flower_create_encap_md( + struct efx_nic *efx, const struct ip_tunnel_info *info, + struct net_device *egdev, struct netlink_ext_ack *extack) +{ + enum efx_encap_type type = efx_tc_indr_netdev_type(egdev); + struct efx_tc_encap_action *encap, *old; + struct efx_rep *to_efv; + s64 rc; + + if (type == EFX_ENCAP_TYPE_NONE) { + /* dest is not an encap device */ + EFX_TC_ERR_MSG(efx, extack, "Not a (supported) tunnel device but tunnel_key is set"); + return ERR_PTR(-EOPNOTSUPP); + } + rc = efx_mae_check_encap_type_supported(efx, type); + if (rc < 0) { + EFX_TC_ERR_MSG(efx, extack, "Firmware reports no support for this tunnel type"); + return ERR_PTR(rc); + } + encap = kzalloc(sizeof(*encap), GFP_USER); + if (!encap) + return ERR_PTR(-ENOMEM); + /* No support yet for Geneve options */ + if (info->options_len) { + EFX_TC_ERR_MSG(efx, extack, "Unsupported tunnel options"); + rc = -EOPNOTSUPP; + goto out_free; + } + switch (info->mode) { + case IP_TUNNEL_INFO_TX: + break; + case IP_TUNNEL_INFO_TX | IP_TUNNEL_INFO_IPV6: + type |= EFX_ENCAP_FLAG_IPV6; + break; + default: + efx_tc_err(efx, "Unsupported tunnel mode %u\n", info->mode); + NL_SET_ERR_MSG_MOD(extack, "Unsupported tunnel mode"); + rc = -EOPNOTSUPP; + goto out_free; + } + encap->type = type; + encap->key = info->key; + INIT_LIST_HEAD(&encap->users); + old = 
rhashtable_lookup_get_insert_fast(&efx->tc->encap_ht, + &encap->linkage, + efx_tc_encap_ht_params); + if (old) { + /* don't need our new entry */ + kfree(encap); + if (!refcount_inc_not_zero(&old->ref)) + return ERR_PTR(-EAGAIN); + /* existing entry found, ref taken */ + return old; + } + + rc = efx_bind_neigh(efx, encap, dev_net(egdev), extack); + if (rc < 0) + goto out_remove; + to_efv = efx_tc_flower_lookup_efv(efx, encap->neigh->egdev); + if (IS_ERR(to_efv)) { + /* neigh->egdev isn't ours */ + EFX_TC_ERR_MSG(efx, extack, "Tunnel egress device not on switch"); + rc = PTR_ERR(to_efv); + goto out_release; + } + rc = efx_tc_flower_external_mport(efx, to_efv); + if (rc < 0) { + EFX_TC_ERR_MSG(efx, extack, "Failed to identify tunnel egress m-port"); + goto out_release; + } + encap->dest_mport = rc; + read_lock_bh(&encap->neigh->lock); + efx_gen_encap_header(encap); + read_unlock_bh(&encap->neigh->lock); + + rc = efx_mae_allocate_encap_md(efx, encap); + if (rc < 0) { + EFX_TC_ERR_MSG(efx, extack, "Failed to write tunnel header to hw"); + goto out_release; + } + + /* ref and return */ + refcount_set(&encap->ref, 1); + return encap; +out_release: + efx_release_neigh(efx, encap); +out_remove: + rhashtable_remove_fast(&efx->tc->encap_ht, &encap->linkage, + efx_tc_encap_ht_params); +out_free: + kfree(encap); + return ERR_PTR(rc); +} + +void efx_tc_flower_release_encap_md(struct efx_nic *efx, + struct efx_tc_encap_action *encap) +{ + if (!refcount_dec_and_test(&encap->ref)) + return; /* still in use */ + efx_release_neigh(efx, encap); + rhashtable_remove_fast(&efx->tc->encap_ht, &encap->linkage, + efx_tc_encap_ht_params); + efx_mae_free_encap_md(efx, encap); + kfree(encap); +} + +int efx_tc_netevent_event(struct efx_nic *efx, unsigned long event, + void *ptr) +{ + if (efx->type->is_vf) + return NOTIFY_DONE; + + switch (event) { + case NETEVENT_NEIGH_UPDATE: + return efx_neigh_event(efx, ptr); + case NETEVENT_DELAY_PROBE_TIME_UPDATE: + /* TODO care about these */ + default: + return NOTIFY_DONE; + } +} +#endif /* EFX_TC_OFFLOAD */ diff --git a/src/drivers/net/ethernet/sfc/tc_encap_actions.h b/src/drivers/net/ethernet/sfc/tc_encap_actions.h new file mode 100644 index 0000000..71d0dc1 --- /dev/null +++ b/src/drivers/net/ethernet/sfc/tc_encap_actions.h @@ -0,0 +1,107 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2022 Xilinx Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#ifndef EFX_TC_ENCAP_ACTIONS_H +#define EFX_TC_ENCAP_ACTIONS_H +#include "net_driver.h" + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_TC_OFFLOAD) +#include +#include + +/** + * struct efx_neigh_binder - driver state for a neighbour entry + * @net: the network namespace in which this neigh resides + * @dst_ip: the IPv4 destination address resolved by this neigh + * @dst_ip6: the IPv6 destination address resolved by this neigh + * @ha: the hardware (Ethernet) address of the neighbour + * @n_valid: true if the neighbour is in NUD_VALID state + * @lock: protects @ha and @n_valid + * @ttl: Time To Live associated with the route used + * @dying: set when egdev is going away, to skip further updates + * @egdev: egress device from the route lookup. 
Holds a reference + * @ref: counts encap actions referencing this entry + * @used: jiffies of last time traffic hit any encap action using this. + * When counter reads update this, a new neighbour event is sent to + * indicate that the neighbour entry is still in use. + * @users: list of &struct efx_tc_encap_action + * @linkage: entry in efx->neigh_ht (keys are @net, @dst_ip, @dst_ip6). + * @work: processes neighbour state changes, updates the encap actions + * @efx: owning NIC instance. + * + * Associates a neighbour entry with the encap actions that are + * interested in it, allowing the latter to be updated when the + * neighbour details change. + * Whichever of @dst_ip and @dst_ip6 is not in use will be all-zeroes, + * this distinguishes IPv4 from IPv6 entries. + */ +struct efx_neigh_binder { + struct net *net; + __be32 dst_ip; +#ifdef CONFIG_IPV6 + struct in6_addr dst_ip6; +#endif + char ha[ETH_ALEN]; + bool n_valid; + rwlock_t lock; + u8 ttl; + bool dying; + struct net_device *egdev; + refcount_t ref; + unsigned long used; + struct list_head users; + struct rhash_head linkage; + struct work_struct work; + struct efx_nic *efx; +}; + +#define EFX_TC_MAX_ENCAP_HDR 128 /* made-up for now, fw will decide */ +struct efx_tc_encap_action { + enum efx_encap_type type; + struct ip_tunnel_key key; /* 52 bytes */ + u32 dest_mport; /* is copied into struct efx_tc_action_set */ + u8 encap_hdr_len; + bool n_valid; + u8 encap_hdr[EFX_TC_MAX_ENCAP_HDR]; + struct efx_neigh_binder *neigh; + struct list_head list; /* entry on neigh->users list */ + struct list_head users; /* action sets using this encap_md */ + struct rhash_head linkage; /* efx->tc_encap_ht */ + refcount_t ref; + u32 fw_id; /* index of this entry in firmware encap table */ +}; + +/* create/uncreate/teardown hashtables */ +int efx_tc_init_encap_actions(struct efx_nic *efx); +void efx_tc_destroy_encap_actions(struct efx_nic *efx); +void efx_tc_fini_encap_actions(struct efx_nic *efx); + +struct efx_tc_flow_rule; +bool efx_tc_check_ready(struct efx_nic *efx, struct efx_tc_flow_rule *rule); + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_FLOW_INDR_BLOCK_CB_REGISTER) || defined(EFX_HAVE_FLOW_INDR_DEV_REGISTER) +void efx_tc_unregister_egdev(struct efx_nic *efx, struct net_device *net_dev); +#endif +struct efx_tc_encap_action *efx_tc_flower_create_encap_md( + struct efx_nic *efx, const struct ip_tunnel_info *info, + struct net_device *egdev, struct netlink_ext_ack *extack); +void efx_tc_flower_release_encap_md(struct efx_nic *efx, + struct efx_tc_encap_action *encap); +int efx_tc_netevent_event(struct efx_nic *efx, unsigned long event, + void *ptr); + +#else /* EFX_TC_OFFLOAD */ +static inline int efx_tc_netevent_event(struct efx_nic *efx, + unsigned long event, void *ptr) +{ + return NOTIFY_OK; +} +#endif /* EFX_TC_OFFLOAD */ + +#endif /* EFX_TC_ENCAP_ACTIONS_H */ diff --git a/src/drivers/net/ethernet/sfc/tx.c b/src/drivers/net/ethernet/sfc/tx.c new file mode 100644 index 0000000..3d55245 --- /dev/null +++ b/src/drivers/net/ethernet/sfc/tx.c @@ -0,0 +1,659 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2005-2017 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#if !defined(EFX_USE_KCOMPAT) +#include +#else +#include +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) +#include +#endif +#endif +#include +#include +#include "net_driver.h" +#include "efx.h" +#include "io.h" +#include "nic.h" +#include "efx_common.h" +#include "tx.h" +#include "tx_common.h" +#include "workarounds.h" +#include "ef10_regs.h" +#ifdef CONFIG_SFC_TRACING +#include +#endif + +/* Size of page-based copy buffers, used for TSO headers (normally), + * padding and linearisation. + * + * Must be power-of-2 before subtracting NET_IP_ALIGN. Values much + * less than 128 are fairly useless; values larger than EFX_PAGE_SIZE + * or PAGE_SIZE would be harder to support. + */ +#define TX_CB_ORDER_MIN 4 +#define TX_CB_ORDER_MAX min(12, PAGE_SHIFT) +#define TX_CB_ORDER_DEF 7 +unsigned int tx_cb_order __read_mostly = TX_CB_ORDER_DEF; +static unsigned int tx_cb_size __read_mostly = (1 << TX_CB_ORDER_DEF) - NET_IP_ALIGN; + +#if defined(EFX_NOT_UPSTREAM) +static int __init +#if !defined(EFX_USE_KCOMPAT) || !defined(EFX_HAVE_NON_CONST_KERNEL_PARAM) +tx_copybreak_set(const char *val, const struct kernel_param *kp) +#else +tx_copybreak_set(const char *val, struct kernel_param *kp) +#endif +{ + int rc; + + rc = param_set_uint(val, kp); + if (rc) + return rc; + + /* If disabled, copy buffers are still needed for VLAN tag insertion */ + if (!tx_cb_size) { + tx_cb_order = TX_CB_ORDER_MIN; + return 0; + } + + tx_cb_order = order_base_2(tx_cb_size + NET_IP_ALIGN); + if (tx_cb_order < TX_CB_ORDER_MIN) + tx_cb_order = TX_CB_ORDER_MIN; + else if (tx_cb_order > TX_CB_ORDER_MAX) + tx_cb_order = TX_CB_ORDER_MAX; + tx_cb_size = (1 << tx_cb_order) - NET_IP_ALIGN; + return 0; +} +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_KERNEL_PARAM_OPS) +static const struct kernel_param_ops tx_copybreak_ops = { + .set = tx_copybreak_set, + .get = param_get_uint, +}; +module_param_cb(tx_copybreak, &tx_copybreak_ops, &tx_cb_size, 0444); +#else +module_param_call(tx_copybreak, tx_copybreak_set, param_get_uint, + &tx_cb_size, 0444); +#endif +MODULE_PARM_DESC(tx_copybreak, + "Maximum size of packet that may be copied to a new buffer on transmit, minimum is 16 bytes or 0 to disable (uint)"); +#endif /* EFX_NOT_UPSTREAM */ + +#ifdef EFX_USE_PIO + +#define EFX_PIOBUF_SIZE_DEF ALIGN(256, L1_CACHE_BYTES) +unsigned int efx_piobuf_size __read_mostly = EFX_PIOBUF_SIZE_DEF; + +#ifdef EFX_NOT_UPSTREAM +/* The size of the on-hardware buffer should always be at least this big; + * it might be bigger but that's ok. 
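+ *
+ * efx_piobuf_size_set() below rounds the requested size up to a whole
+ * number of cachelines and clamps it to this maximum: for example,
+ * assuming 64-byte cachelines, a request of 100 bytes becomes 128,
+ * and anything above ER_DZ_TX_PIOBUF_SIZE is reduced to fit.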
+ */ +#define EFX_PIOBUF_SIZE_MAX ER_DZ_TX_PIOBUF_SIZE + +static int __init +#if !defined(EFX_USE_KCOMPAT) || !defined(EFX_HAVE_NON_CONST_KERNEL_PARAM) +efx_piobuf_size_set(const char *val, const struct kernel_param *kp) +#else +efx_piobuf_size_set(const char *val, struct kernel_param *kp) +#endif +{ + int rc; + + rc = param_set_uint(val, kp); + if (rc) + return rc; + + BUILD_BUG_ON(EFX_PIOBUF_SIZE_DEF > EFX_PIOBUF_SIZE_MAX); + + efx_piobuf_size = min_t(unsigned int, + ALIGN(efx_piobuf_size, L1_CACHE_BYTES), + EFX_PIOBUF_SIZE_MAX); + return 0; +} +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_KERNEL_PARAM_OPS) +static const struct kernel_param_ops efx_piobuf_size_ops = { + .set = efx_piobuf_size_set, + .get = param_get_uint, +}; +module_param_cb(piobuf_size, &efx_piobuf_size_ops, &efx_piobuf_size, 0444); +#else +module_param_call(piobuf_size, efx_piobuf_size_set, param_get_uint, + &efx_piobuf_size, 0444); +#endif +MODULE_PARM_DESC(piobuf_size, + "[SFC9100-family] Maximum size of packet that may be copied to a PIO buffer on transmit (uint)"); +#endif /* EFX_NOT_UPSTREAM */ + +#endif /* EFX_USE_PIO */ + +static inline u8 *efx_tx_get_copy_buffer(struct efx_tx_queue *tx_queue, + struct efx_tx_buffer *buffer) +{ + unsigned int index = efx_tx_queue_get_insert_index(tx_queue); + struct efx_buffer *page_buf = + &tx_queue->cb_page[index >> (PAGE_SHIFT - tx_cb_order)]; + unsigned int offset = + ((index << tx_cb_order) + NET_IP_ALIGN) & (PAGE_SIZE - 1); + + if (unlikely(!page_buf->addr) && + efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE, + GFP_ATOMIC)) + return NULL; + buffer->dma_addr = page_buf->dma_addr + offset; + buffer->unmap_len = 0; + return (u8 *)page_buf->addr + offset; +} + +u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue, + struct efx_tx_buffer *buffer, size_t len) +{ + if (len > tx_cb_size) + return NULL; + return efx_tx_get_copy_buffer(tx_queue, buffer); +} + +static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1) +{ + /* We need to consider both queues that the net core sees as one */ + struct efx_tx_queue *txq2; + struct efx_nic *efx = txq1->efx; + unsigned int fill_level; + + fill_level = efx_channel_tx_fill_level(txq1->channel); + if (likely(fill_level < efx->txq_stop_thresh)) + return; + + /* We used the stale old_read_count above, which gives us a + * pessimistic estimate of the fill level (which may even + * validly be >= efx->txq_entries). Now try again using + * read_count (more likely to be a cache miss). + * + * If we read read_count and then conditionally stop the + * queue, it is possible for the completion path to race with + * us and complete all outstanding descriptors in the middle, + * after which there will be no more completions to wake it. + * Therefore we stop the queue first, then read read_count + * (with a memory barrier to ensure the ordering), then + * restart the queue if the fill level turns out to be low + * enough. 
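+ *
+ * Schematically (the wake side lives in the TX completion path):
+ *
+ *	xmit path			completion path
+ *	stop queue			advance read_count
+ *	smp_mb()			wake queue if it was stopped
+ *	re-read read_count
+ *	wake queue if the fill
+ *	level is now low enough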
+ */ + netif_tx_stop_queue(txq1->core_txq); + smp_mb(); + efx_for_each_channel_tx_queue(txq2, txq1->channel) + txq2->old_read_count = READ_ONCE(txq2->read_count); + + fill_level = efx_channel_tx_fill_level(txq1->channel); + EFX_WARN_ON_ONCE_PARANOID(fill_level >= efx->txq_entries); + if (likely(fill_level < efx->txq_stop_thresh)) { + smp_mb(); + if (likely(!efx->loopback_selftest)) + netif_tx_start_queue(txq1->core_txq); + } +} + +static int efx_enqueue_skb_copy(struct efx_tx_queue *tx_queue, + struct sk_buff *skb) +{ + unsigned int copy_len = skb->len; + struct efx_tx_buffer *buffer; + u8 *copy_buffer; + int rc; + + EFX_WARN_ON_ONCE_PARANOID(copy_len > tx_cb_size); + + buffer = efx_tx_queue_get_insert_buffer(tx_queue); + + copy_buffer = efx_tx_get_copy_buffer(tx_queue, buffer); + if (unlikely(!copy_buffer)) + return -ENOMEM; + + rc = skb_copy_bits(skb, 0, copy_buffer, copy_len); + EFX_WARN_ON_PARANOID(rc); + buffer->len = copy_len; + + buffer->skb = skb; + buffer->flags = EFX_TX_BUF_SKB; + + ++tx_queue->insert_count; + return rc; +} + +#ifdef EFX_USE_PIO + +struct efx_short_copy_buffer { + int used; + u8 buf[L1_CACHE_BYTES]; +}; + +/* Copy in explicit 64-bit writes. */ +static void efx_memcpy_64(void __iomem *dest, void *src, size_t len) +{ + u64 *src64 = src; + u64 __iomem *dest64 = dest; + size_t l64 = len / 8; + size_t i; + + WARN_ON_ONCE(len % 8 != 0); + WARN_ON_ONCE(((u8 __iomem *) dest - (u8 __iomem *) 0) % 8 != 0); + + for(i = 0; i < l64; i++) + writeq(src64[i], &dest64[i]); +} + +/* Copy to PIO, respecting that writes to PIO buffers must be dword aligned. + * Advances piobuf pointer. Leaves additional data in the copy buffer. + */ +static void efx_memcpy_toio_aligned(struct efx_nic *efx, u8 __iomem **piobuf, + u8 *data, int len, + struct efx_short_copy_buffer *copy_buf) +{ + int block_len = len & ~(sizeof(copy_buf->buf) - 1); + + efx_memcpy_64(*piobuf, data, block_len); + *piobuf += block_len; + len -= block_len; + + if (len) { + data += block_len; + BUG_ON(copy_buf->used); + BUG_ON(len > sizeof(copy_buf->buf)); + memcpy(copy_buf->buf, data, len); + copy_buf->used = len; + } +} + +/* Copy to PIO, respecting dword alignment, popping data from copy buffer first. + * Advances piobuf pointer. Leaves additional data in the copy buffer. + */ +static void efx_memcpy_toio_aligned_cb(struct efx_nic *efx, u8 __iomem **piobuf, + u8 *data, int len, + struct efx_short_copy_buffer *copy_buf) +{ + if (copy_buf->used) { + /* if the copy buffer is partially full, fill it up and write */ + int copy_to_buf = + min_t(int, sizeof(copy_buf->buf) - copy_buf->used, len); + + memcpy(copy_buf->buf + copy_buf->used, data, copy_to_buf); + copy_buf->used += copy_to_buf; + + /* if we didn't fill it up then we're done for now */ + if (copy_buf->used < sizeof(copy_buf->buf)) + return; + + efx_memcpy_64(*piobuf, copy_buf->buf, sizeof(copy_buf->buf)); + *piobuf += sizeof(copy_buf->buf); + data += copy_to_buf; + len -= copy_to_buf; + copy_buf->used = 0; + } + + efx_memcpy_toio_aligned(efx, piobuf, data, len, copy_buf); +} + +static void efx_flush_copy_buffer(struct efx_nic *efx, u8 __iomem *piobuf, + struct efx_short_copy_buffer *copy_buf) +{ + /* if there's anything in it, write the whole buffer, including junk */ + if (copy_buf->used) + efx_memcpy_64(piobuf, copy_buf->buf, sizeof(copy_buf->buf)); +} + +/* Traverse skb structure and copy fragments in to PIO buffer. + * Advances piobuf pointer. 
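+ * Fragment pages may live in highmem, so each one is kmapped around
+ * its copy.  The short copy buffer carries any sub-cacheline
+ * remainder from one fragment into the next, keeping every write to
+ * the PIO aperture a whole, aligned cacheline.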
+ */ +static void efx_skb_copy_bits_to_pio(struct efx_nic *efx, struct sk_buff *skb, + u8 __iomem **piobuf, + struct efx_short_copy_buffer *copy_buf) +{ + int i; + + efx_memcpy_toio_aligned(efx, piobuf, skb->data, skb_headlen(skb), + copy_buf); + + for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) { + skb_frag_t *f = &skb_shinfo(skb)->frags[i]; + u8 *vaddr; + +#if defined(EFX_USE_KCOMPAT) && defined(EFX_HAVE_OLD_KMAP_ATOMIC) +#ifdef CONFIG_HIGHMEM + BUG_ON(in_irq()); + local_bh_disable(); +#endif + vaddr = kmap_atomic(skb_frag_page(f), KM_SKB_DATA_SOFTIRQ); +#else + vaddr = kmap_atomic(skb_frag_page(f)); +#endif + + efx_memcpy_toio_aligned_cb(efx, piobuf, vaddr + skb_frag_off(f), + skb_frag_size(f), copy_buf); +#if defined(EFX_USE_KCOMPAT) && defined(EFX_HAVE_OLD_KMAP_ATOMIC) + kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ); +#ifdef CONFIG_HIGHMEM + local_bh_enable(); +#endif +#else + kunmap_atomic(vaddr); +#endif + } + + EFX_WARN_ON_ONCE_PARANOID(skb_shinfo(skb)->frag_list); +} + +static int efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue, + struct sk_buff *skb) +{ + struct efx_tx_buffer *buffer = + efx_tx_queue_get_insert_buffer(tx_queue); + u8 __iomem *piobuf = tx_queue->piobuf; + + /* Copy to PIO buffer. Ensure the writes are padded to the end + * of a cache line, as this is required for write-combining to be + * effective on at least x86. + */ +#ifdef EFX_USE_KCOMPAT +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) && defined(CONFIG_SLOB) + #error "This function doesn't work with SLOB and Linux < 3.4" + /* SLOB is for tiny embedded systems; you probably want SLAB */ +#endif +#endif + + if (skb_shinfo(skb)->nr_frags) { + /* The size of the copy buffer will ensure all writes + * are the size of a cache line. + */ + struct efx_short_copy_buffer copy_buf; + + copy_buf.used = 0; + + efx_skb_copy_bits_to_pio(tx_queue->efx, skb, + &piobuf, ©_buf); + efx_flush_copy_buffer(tx_queue->efx, piobuf, ©_buf); + } else { + /* Pad the write to the size of a cache line. + * We can do this because we know the skb_shared_info struct is + * after the source, and the destination buffer is big enough. + */ + BUILD_BUG_ON(L1_CACHE_BYTES > + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); + efx_memcpy_64(tx_queue->piobuf, skb->data, + ALIGN(skb->len, L1_CACHE_BYTES)); + } + + buffer->skb = skb; + buffer->flags = EFX_TX_BUF_SKB | EFX_TX_BUF_OPTION; + + EFX_POPULATE_QWORD_5(buffer->option, + ESF_DZ_TX_DESC_IS_OPT, 1, + ESF_DZ_TX_OPTION_TYPE, 1 /* PIO */, + ESF_DZ_TX_PIO_CONT, 0, + ESF_DZ_TX_PIO_BYTE_CNT, skb->len, + ESF_DZ_TX_PIO_BUF_ADDR, + tx_queue->piobuf_offset); + ++tx_queue->insert_count; + return 0; +} + +/* Decide whether we can use TX PIO, ie. write packet data directly into + * a buffer on the device. This can reduce latency at the expense of + * throughput, so we only do this if both hardware and software TX rings + * are empty, including all queues for the channel. This also ensures that + * only one packet at a time can be using the PIO buffer. If the xmit_more + * flag is set then we don't use this - there'll be another packet along + * shortly and we want to hold off the doorbell. 
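+ *
+ * In __efx_enqueue_skb() this amounts to (simplified):
+ *
+ *	if (skb->len <= efx_piobuf_size && !netdev_xmit_more() &&
+ *	    efx_tx_may_pio(tx_queue))
+ *		rc = efx_enqueue_skb_pio(tx_queue, skb);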
+ */ +static inline bool efx_tx_may_pio(struct efx_tx_queue *tx_queue) +{ + struct efx_channel *channel = tx_queue->channel; + + if (!tx_queue->piobuf) + return false; + + EFX_WARN_ON_ONCE_PARANOID(!channel->efx->type->option_descriptors); + + efx_for_each_channel_tx_queue(tx_queue, channel) + if (!efx_nic_tx_is_empty(tx_queue)) + return false; + + return true; +} +#endif /* EFX_USE_PIO */ + +/* Send any pending traffic for a channel. xmit_more is shared across all + * queues for a channel, so we must check all of them. + */ +static void efx_tx_send_pending(struct efx_channel *channel) +{ + struct efx_tx_queue *q; + + efx_for_each_channel_tx_queue(q, channel) { + if (q->xmit_pending) + efx_nic_push_buffers(q); + } +} + +/* + * Add a socket buffer to a TX queue + * + * This maps all fragments of a socket buffer for DMA and adds them to + * the TX queue. The queue's insert pointer will be incremented by + * the number of fragments in the socket buffer. + * + * If any DMA mapping fails, any mapped fragments will be unmapped, + * the queue's insert pointer will be restored to its original value. + * + * This function is split out from efx_hard_start_xmit to allow the + * loopback test to direct packets via specific TX queues. + * + * Returns 0 on success, error code otherwise. + * You must hold netif_tx_lock() to call this function. + */ +int __efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) +{ + unsigned int old_insert_count = tx_queue->insert_count; + bool xmit_more = netdev_xmit_more(); + struct efx_channel *channel; + bool data_mapped = false; + unsigned int segments; + unsigned int skb_len; + int rc = 0; + + channel = tx_queue->channel; + + /* We're pretty likely to want a descriptor to do this tx. */ + prefetchw(__efx_tx_queue_get_insert_buffer(tx_queue)); + + EFX_WARN_ON_ONCE_PARANOID(!tx_queue->handle_vlan); + skb = tx_queue->handle_vlan(tx_queue, skb); + if (IS_ERR_OR_NULL(skb)) + goto err; + + /* Copy the length *after* VLAN handling, in case we've inserted a + * tag in software. + */ + skb_len = skb->len; + segments = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 0; + if (segments == 1) + segments = 0; /* Don't use TSO for a single segment. */ + + /* Handle TSO first - it's *possible* (although unlikely) that we might + * be passed a packet to segment that's smaller than the copybreak/PIO + * size limit. + */ + if (segments) { + EFX_WARN_ON_ONCE_PARANOID(!tx_queue->handle_tso); + rc = tx_queue->handle_tso(tx_queue, skb, &data_mapped); + if (rc == -EINVAL) { + rc = efx_tx_tso_fallback(tx_queue, skb); + tx_queue->tso_fallbacks++; + if (rc == 0) + return 0; + } + if (rc) + goto err; +#ifdef EFX_USE_PIO + } else if (skb_len <= efx_piobuf_size && !xmit_more && + efx_tx_may_pio(tx_queue)) { + /* Use PIO for short packets with an empty queue. */ + rc = efx_enqueue_skb_pio(tx_queue, skb); + if (rc) + goto err; + tx_queue->pio_packets++; + data_mapped = true; +#endif + } else if (skb->data_len && skb_len <= tx_cb_size) { + /* Coalesce short fragmented packets. */ + rc = efx_enqueue_skb_copy(tx_queue, skb); + if (rc) + goto err; + tx_queue->cb_packets++; + data_mapped = true; + } + + /* Map for DMA and create descriptors if we haven't done so already. */ + if (!data_mapped) { + rc = efx_tx_map_data(tx_queue, skb, segments); + if (rc) + goto err; + } + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_SKB_TX_TIMESTAMP) + skb_tx_timestamp(skb); +#endif + + efx_tx_maybe_stop_queue(tx_queue); + + tx_queue->xmit_pending = true; + + /* Pass to hardware. 
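+ * __netdev_tx_sent_queue() accounts the bytes to BQL and returns true
+ * when the doorbell must be rung now, either because no further packet
+ * is coming (!xmit_more) or because BQL has just stopped the queue;
+ * otherwise the descriptors stay pending and the doorbell write is
+ * batched with a later packet.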
*/ + if (__netdev_tx_sent_queue(tx_queue->core_txq, skb->len, xmit_more)) + efx_tx_send_pending(channel); + else + /* There's another TX on the way. Prefetch next descriptor. */ + prefetchw(__efx_tx_queue_get_insert_buffer(tx_queue)); + + if (segments) { + tx_queue->tso_bursts++; + tx_queue->tso_packets += segments; + tx_queue->tx_packets += segments; + } else { + tx_queue->tx_packets++; + } + tx_queue->tx_bytes += skb_len; + + return 0; + +err: + efx_enqueue_unwind(tx_queue, old_insert_count); + if (!IS_ERR_OR_NULL(skb)) + dev_kfree_skb_any(skb); + + /* If we're not expecting another transmit and we had something to push + * on this queue or a partner queue then we need to push here to get the + * previous packets out. + */ + if (!xmit_more) + efx_tx_send_pending(channel); + + return rc; +} + +/* Initiate a packet transmission. We use one channel per CPU + * (sharing when we have more CPUs than channels). On Falcon, the TX + * completion events will be directed back to the CPU that transmitted + * the packet, which should be cache-efficient. + * + * Context: non-blocking. + * Note that returning anything other than NETDEV_TX_OK will cause the + * OS to free the skb. + */ +netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb, + struct net_device *net_dev) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + struct efx_channel *channel; + struct efx_tx_queue *tx_queue; + +#ifdef CONFIG_SFC_TRACING + trace_sfc_transmit(skb, net_dev); +#endif + + channel = efx_get_tx_channel(efx, skb_get_queue_mapping(skb)); + +#if defined(CONFIG_SFC_PTP) + /* + * PTP "event" packet + */ + if (unlikely(efx_xmit_with_hwtstamp(skb)) && + ((efx_ptp_use_mac_tx_timestamps(efx) && efx->ptp_data) || + unlikely(efx_ptp_is_ptp_tx(efx, skb)))) { + /* There may be existing transmits on the channel that are + * waiting for this packet to trigger the doorbell write. + * We need to send the packets at this point. + */ + efx_tx_send_pending(channel); + return efx_ptp_tx(efx, skb); + } +#endif + + tx_queue = efx->select_tx_queue(channel, skb); + + __efx_enqueue_skb(tx_queue, skb); + return NETDEV_TX_OK; +} + +void efx_xmit_done_single(struct efx_tx_queue *tx_queue) +{ + unsigned int pkts_compl = 0, bytes_compl = 0; + unsigned int efv_pkts_compl = 0; + unsigned int read_ptr; + bool finished = false; + + read_ptr = tx_queue->read_count & tx_queue->ptr_mask; + + while (!finished) { + struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr]; + + if (!efx_tx_buffer_in_use(buffer)) { + struct efx_nic *efx = tx_queue->efx; + + netif_err(efx, hw, efx->net_dev, + "TX queue %d spurious single TX completion\n", + tx_queue->queue); + atomic_inc(&efx->errors.spurious_tx); + efx_schedule_reset(efx, RESET_TYPE_TX_SKIP); + return; + } + + /* Need to check the flag before dequeueing. 
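+ * efx_dequeue_buffer() clears buffer->flags, and EFX_TX_BUF_SKB marks
+ * the final buffer of a packet, so the flag has to be sampled before
+ * the dequeue to know when to stop.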
*/ + if (buffer->flags & EFX_TX_BUF_SKB) + finished = true; + efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl, + &efv_pkts_compl); + + ++tx_queue->read_count; + read_ptr = tx_queue->read_count & tx_queue->ptr_mask; + } + + tx_queue->pkts_compl += pkts_compl; + tx_queue->bytes_compl += bytes_compl; + + EFX_WARN_ON_PARANOID(pkts_compl + efv_pkts_compl != 1); + + efx_xmit_done_check_empty(tx_queue); +} diff --git a/src/drivers/net/ethernet/sfc/tx.h b/src/drivers/net/ethernet/sfc/tx.h new file mode 100644 index 0000000..5b6259f --- /dev/null +++ b/src/drivers/net/ethernet/sfc/tx.h @@ -0,0 +1,26 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2006-2017 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#ifndef EFX_TX_H +#define EFX_TX_H + +#include + +/** Driver internal tx-path related declarations. */ + +unsigned int efx_tx_limit_len( + struct efx_tx_queue *tx_queue, + dma_addr_t dma_addr, + unsigned int len); + +u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue, + struct efx_tx_buffer *buffer, size_t len); + +#endif /* EFX_TX_H */ diff --git a/src/drivers/net/ethernet/sfc/tx_common.c b/src/drivers/net/ethernet/sfc/tx_common.c new file mode 100644 index 0000000..5aa1c9c --- /dev/null +++ b/src/drivers/net/ethernet/sfc/tx_common.c @@ -0,0 +1,654 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2019 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#include "net_driver.h" +#include +#include "efx.h" +#include "nic.h" +#include "efx_common.h" +#include "tx_common.h" +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_GSO_H) +#include +#endif + +#if defined(EFX_NOT_UPSTREAM) && defined(EFX_USE_FAKE_VLAN_TX_ACCEL) +static struct sk_buff *efx_tx_vlan_sw(struct efx_tx_queue *tx_queue, + struct sk_buff *skb) +{ + if (skb_vlan_tag_present(skb)) { + struct vlan_ethhdr *veth; + int delta = 0; + + if (skb_headroom(skb) < VLAN_HLEN) + delta = VLAN_HLEN - skb_headroom(skb); + + if (delta || skb_header_cloned(skb)) { + int rc; + + /* pskb_expand_head will crash if skb_shared */ + if (skb_shared(skb)) { + struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); + struct sock *sk = skb->sk; + + if (unlikely(!nskb)) + return ERR_PTR(-ENOMEM); + + if (sk) + skb_set_owner_w(nskb, sk); + consume_skb(skb); + skb = nskb; + } + + rc = pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), + 0, GFP_ATOMIC); + if (rc) { + dev_kfree_skb_any(skb); + return NULL; + } + } + + veth = (struct vlan_ethhdr *)__skb_push(skb, VLAN_HLEN); + /* Move the mac addresses to the beginning of the new header. 
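+ * After __skb_push(skb, VLAN_HLEN) the frame is
+ *
+ *	[4 spare bytes][dst MAC][src MAC][proto][payload]
+ *
+ * and the memmove below plus the field writes turn it into
+ *
+ *	[dst MAC][src MAC][0x8100][TCI][proto][payload]
+ *
+ * i.e. the 802.1Q tag is spliced in behind the MAC addresses.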
*/ + memmove(skb->data, skb->data + VLAN_HLEN, 2 * ETH_ALEN); + veth->h_vlan_proto = htons(ETH_P_8021Q); + veth->h_vlan_TCI = htons(skb_vlan_tag_get(skb)); + skb->protocol = htons(ETH_P_8021Q); + + skb->mac_header -= VLAN_HLEN; + skb->vlan_tci = 0; + } + return skb; +} +#else +static struct sk_buff *efx_tx_vlan_noaccel(struct efx_tx_queue *tx_queue, + struct sk_buff *skb) +{ + if (skb_vlan_tag_present(skb)) { + WARN_ONCE(1, "VLAN tagging requested, but no support\n"); + dev_kfree_skb_any(skb); + return ERR_PTR(-EINVAL); + } + return skb; +} +#endif + +static unsigned int efx_tx_cb_page_count(struct efx_tx_queue *tx_queue) +{ + return DIV_ROUND_UP(tx_queue->ptr_mask + 1, PAGE_SIZE >> tx_cb_order); +} + +static bool efx_tx_cb_probe(struct efx_tx_queue *tx_queue) +{ + if (!tx_queue->efx->type->copy_break) + return true; + + tx_queue->cb_page = kcalloc(efx_tx_cb_page_count(tx_queue), + sizeof(tx_queue->cb_page[0]), GFP_KERNEL); + + return !!tx_queue->cb_page; +} + +static void efx_tx_cb_destroy(struct efx_tx_queue *tx_queue) +{ + unsigned int i; + + if (!tx_queue->efx->type->copy_break) + return; + + if (tx_queue->cb_page) { + for (i = 0; i < efx_tx_cb_page_count(tx_queue); i++) + efx_nic_free_buffer(tx_queue->efx, + &tx_queue->cb_page[i]); + kfree(tx_queue->cb_page); + tx_queue->cb_page = NULL; + } +} + +int efx_probe_tx_queue(struct efx_tx_queue *tx_queue) +{ + struct efx_nic *efx = tx_queue->efx; + unsigned int entries; + int rc; + + /* Create the smallest power-of-two aligned ring */ + entries = max(roundup_pow_of_two(efx->txq_entries), + efx_min_dmaq_size(efx)); + EFX_WARN_ON_PARANOID(entries > efx_max_dmaq_size(efx)); + tx_queue->ptr_mask = entries - 1; + + netif_dbg(efx, probe, efx->net_dev, + "creating TX queue %d size %#x mask %#x\n", + tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask); + + /* Allocate software ring */ + tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer), + GFP_KERNEL); + if (!tx_queue->buffer) + return -ENOMEM; + + if (!efx_tx_cb_probe(tx_queue)) { + rc = -ENOMEM; + goto fail1; + } + + /* Allocate hardware ring */ + rc = efx_nic_probe_tx(tx_queue); + if (rc) + goto fail2; + + return 0; + +fail2: + kfree(tx_queue->cb_page); + tx_queue->cb_page = NULL; +fail1: + kfree(tx_queue->buffer); + tx_queue->buffer = NULL; + return rc; +} + +int efx_init_tx_queue(struct efx_tx_queue *tx_queue) +{ + struct efx_nic *efx = tx_queue->efx; + + netif_dbg(efx, drv, efx->net_dev, + "initialising TX queue %d\n", tx_queue->queue); + + /* must be the inverse of lookup in efx_get_tx_channel */ + tx_queue->core_txq = + netdev_get_tx_queue(efx->net_dev, + tx_queue->channel->channel - + efx->tx_channel_offset); + + tx_queue->insert_count = 0; + tx_queue->notify_count = 0; + tx_queue->notify_jiffies = 0; + tx_queue->write_count = 0; + tx_queue->packet_write_count = 0; + tx_queue->old_write_count = 0; + tx_queue->read_count = 0; + tx_queue->read_jiffies = 0; + tx_queue->old_read_count = 0; + tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID; + tx_queue->xmit_pending = false; + + if (efx_ptp_use_mac_tx_timestamps(efx) && + tx_queue->channel == efx_ptp_channel(efx)) + tx_queue->timestamping = true; + else + tx_queue->timestamping = false; + tx_queue->completed_timestamp_major = 0; + tx_queue->completed_timestamp_minor = 0; + + tx_queue->xdp_tx = efx_channel_is_xdp_tx(tx_queue->channel); +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_SOCK) +#if defined(CONFIG_XDP_SOCKETS) +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XSK_POOL) + tx_queue->xsk_pool = NULL; + if 
(tx_queue->channel->zc && + efx_is_xsk_tx_queue(tx_queue)) { + tx_queue->xsk_pool = + xsk_get_pool_from_qid(efx->net_dev, + tx_queue->channel->channel); + if (!tx_queue->xsk_pool) + return 0; + } +#else + tx_queue->umem = NULL; + if (tx_queue->channel->zc && + efx_is_xsk_tx_queue(tx_queue)) { + tx_queue->umem = + xdp_get_umem_from_qid(efx->net_dev, + tx_queue->channel->channel); + if (!tx_queue->umem) + return 0; + } +#endif /* EFX_HAVE_XSK_POOL */ +#endif /* CONFIG_XDP_SOCKETS */ +#endif + + /* Set up default function pointers. These may get replaced by + * efx_nic_init_tx() based off NIC/queue capabilities. + */ +#if defined(EFX_NOT_UPSTREAM) && defined(EFX_USE_FAKE_VLAN_TX_ACCEL) + tx_queue->handle_vlan = efx_tx_vlan_sw; +#else + tx_queue->handle_vlan = efx_tx_vlan_noaccel; +#endif + tx_queue->handle_tso = efx_nic_tx_tso_sw; + + /* Set up TX descriptor ring */ + return efx_nic_init_tx(tx_queue); +} + +void efx_fini_tx_queue(struct efx_tx_queue *tx_queue) +{ + netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev, + "shutting down TX queue %d\n", tx_queue->queue); + + if (!tx_queue->buffer) + return; + + efx_purge_tx_queue(tx_queue); + tx_queue->xmit_pending = false; +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_SOCK) +#if defined(CONFIG_XDP_SOCKETS) +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XSK_POOL) + tx_queue->xsk_pool = NULL; +#else + tx_queue->umem = NULL; +#endif + if (!efx_is_xsk_tx_queue(tx_queue)) +#endif +#endif + if (tx_queue->core_txq) + netdev_tx_reset_queue(tx_queue->core_txq); +} + +void efx_destroy_tx_queue(struct efx_tx_queue *tx_queue) +{ + netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev, + "destroying TX queue %d\n", tx_queue->queue); + + efx_tx_cb_destroy(tx_queue); + + kfree(tx_queue->buffer); + tx_queue->buffer = NULL; +} + +void efx_purge_tx_queue(struct efx_tx_queue *tx_queue) +{ + while (tx_queue->read_count != tx_queue->insert_count) { + unsigned int pkts_compl = 0, bytes_compl = 0; + unsigned int efv_pkts_compl = 0; + struct efx_tx_buffer *buffer = + &tx_queue->buffer[tx_queue->read_count & + tx_queue->ptr_mask]; + + efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl, + &efv_pkts_compl); + ++tx_queue->read_count; + } +} + +void efx_dequeue_buffer(struct efx_tx_queue *tx_queue, + struct efx_tx_buffer *buffer, + unsigned int *pkts_compl, + unsigned int *bytes_compl, + unsigned int *efv_pkts_compl) +{ + if (buffer->unmap_len) { + struct device *dma_dev = &tx_queue->efx->pci_dev->dev; + dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset; + + if (buffer->flags & EFX_TX_BUF_MAP_SINGLE) + dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len, + DMA_TO_DEVICE); + else + dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len, + DMA_TO_DEVICE); + buffer->unmap_len = 0; + } + + if (buffer->flags & EFX_TX_BUF_SKB) { + struct sk_buff *skb = (struct sk_buff *)buffer->skb; + + if (unlikely(buffer->flags & EFX_TX_BUF_EFV)) { + EFX_WARN_ON_PARANOID(!efv_pkts_compl); + (*efv_pkts_compl)++; + } else { + EFX_WARN_ON_PARANOID(!pkts_compl); + EFX_WARN_ON_PARANOID(!bytes_compl); + (*pkts_compl)++; + (*bytes_compl) += skb->len; + } + + if (tx_queue->timestamping && + (tx_queue->completed_timestamp_major || + tx_queue->completed_timestamp_minor)) { + struct skb_shared_hwtstamps hwtstamp; + + hwtstamp.hwtstamp = + efx_ptp_nic_to_kernel_time(tx_queue); + skb_tstamp_tx(skb, &hwtstamp); + + tx_queue->completed_timestamp_major = 0; + tx_queue->completed_timestamp_minor = 0; + } + + dev_kfree_skb_any(skb); + netif_vdbg(tx_queue->efx, tx_done, 
tx_queue->efx->net_dev, + "TX queue %d transmission id %x complete\n", + tx_queue->queue, tx_queue->read_count); + } else if (buffer->flags & EFX_TX_BUF_HEAP) { + kfree(buffer->buf); +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_TX) + } else if (buffer->flags & EFX_TX_BUF_XDP) { +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_FRAME_API) + xdp_return_frame(buffer->xdpf); +#else + page_frag_free(buffer->buf); +#endif +#endif +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_SOCK) +#if defined(CONFIG_XDP_SOCKETS) + } else if (buffer->flags & EFX_TX_BUF_XSK) { +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XSK_POOL) + xsk_tx_completed(tx_queue->xsk_pool, 1); +#else + xsk_umem_complete_tx(tx_queue->umem, 1); +#endif +#endif /* CONFIG_XDP_SOCKETS */ +#endif + } + + buffer->len = 0; + buffer->flags = 0; +} + +/* Remove packets from the TX queue + * + * This removes packets from the TX queue, up to and including the + * specified index. + */ +static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue, + unsigned int index, + unsigned int *pkts_compl, + unsigned int *bytes_compl, + unsigned int *efv_pkts_compl) +{ + struct efx_nic *efx = tx_queue->efx; + unsigned int stop_index, read_ptr; + + stop_index = (index + 1) & tx_queue->ptr_mask; + read_ptr = tx_queue->read_count & tx_queue->ptr_mask; + + while (read_ptr != stop_index) { + struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr]; + + if (unlikely(!efx_tx_buffer_in_use(buffer))) { + netif_err(efx, hw, efx->net_dev, + "TX queue %d spurious TX completion id %d\n", + tx_queue->queue, read_ptr); + atomic_inc(&efx->errors.spurious_tx); + if (efx->type->revision != EFX_REV_EF100) { + efx_schedule_reset(efx, RESET_TYPE_TX_SKIP); + return; + } + } + + efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl, + efv_pkts_compl); + + ++tx_queue->read_count; + tx_queue->read_jiffies = jiffies; + read_ptr = tx_queue->read_count & tx_queue->ptr_mask; + } +} + +void efx_xmit_done_check_empty(struct efx_tx_queue *tx_queue) +{ + if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) { + tx_queue->old_write_count = READ_ONCE(tx_queue->write_count); + if (tx_queue->read_count == tx_queue->old_write_count) { + smp_mb(); + tx_queue->empty_read_count = + tx_queue->read_count | EFX_EMPTY_COUNT_VALID; + } + } +} + +void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) +{ + unsigned int pkts_compl = 0, efv_pkts_compl = 0, bytes_compl = 0; + + EFX_WARN_ON_ONCE_PARANOID(index > tx_queue->ptr_mask); + + efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl, + &efv_pkts_compl); + tx_queue->pkts_compl += pkts_compl; + tx_queue->bytes_compl += bytes_compl; + + if (pkts_compl + efv_pkts_compl > 1) + ++tx_queue->merge_events; +#if !defined(EFX_USE_KCOMPAT) || (defined(EFX_HAVE_XDP_SOCK) && defined(EFX_HAVE_XSK_NEED_WAKEUP)) +#if defined(CONFIG_XDP_SOCKETS) +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XSK_POOL) + if (tx_queue->xsk_pool && xsk_uses_need_wakeup(tx_queue->xsk_pool)) + xsk_set_tx_need_wakeup(tx_queue->xsk_pool); +#else + if (tx_queue->umem && xsk_umem_uses_need_wakeup(tx_queue->umem)) + xsk_set_tx_need_wakeup(tx_queue->umem); +#endif +#endif +#endif + + efx_xmit_done_check_empty(tx_queue); +} + +/* Remove buffers put into a tx_queue for the current packet. + * None of the buffers must have an skb attached. 
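+ *
+ * This is the error-unwind path used by __efx_enqueue_skb():
+ * descriptors written for a partially-mapped packet are walked
+ * backwards and their DMA mappings released.  The skb itself is freed
+ * by the caller, which is why no buffer may still own it here.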
+ */ +void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,unsigned int insert_count) +{ + struct efx_tx_buffer *buffer; + + /* Work backwards until we hit the original insert pointer value */ + while (tx_queue->insert_count != insert_count) { + unsigned int pkts_compl = 0, bytes_compl = 0; + unsigned int efv_pkts_compl = 0; + + --tx_queue->insert_count; + buffer = __efx_tx_queue_get_insert_buffer(tx_queue); + efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl, + &efv_pkts_compl); + } +} + +struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue, + dma_addr_t dma_addr, size_t len) +{ + const struct efx_nic_type *nic_type = tx_queue->efx->type; + struct efx_tx_buffer *buffer; + unsigned int dma_len; + + /* Map the fragment taking account of NIC-dependent DMA limits. */ + do { + buffer = __efx_tx_queue_get_insert_buffer(tx_queue); +#ifdef EFX_NOT_UPSTREAM + /* In a single queue system we can get resource contention on + * the TX buffer array, if another thread has descheduled + * before incrementing the insert_count below. + * The IP stack should protect us from this, but on older + * kernels during ifdown it may not do so. + */ +#endif + if (unlikely(efx_tx_buffer_in_use(buffer))) { + atomic_inc(&tx_queue->efx->errors.tx_desc_fetch); + return NULL; + } + + if (nic_type->tx_limit_len) + dma_len = nic_type->tx_limit_len(tx_queue, dma_addr, + len); + else + dma_len = len; + + buffer->len = dma_len; + buffer->dma_addr = dma_addr; + buffer->flags = EFX_TX_BUF_CONT; + len -= dma_len; + dma_addr += dma_len; + ++tx_queue->insert_count; + } while (len); + + return buffer; +} + +int efx_tx_tso_header_length(struct sk_buff *skb) +{ + size_t header_len; + +#ifdef EFX_NOT_UPSTREAM + if (efx_skb_encapsulation(skb)) +#else + if (skb->encapsulation) +#endif +#ifdef EFX_CAN_SUPPORT_ENCAP_TSO + header_len = skb_inner_transport_header(skb) - + skb->data + + (inner_tcp_hdr(skb)->doff << 2u); +#else + return -EINVAL; +#endif + else + header_len = skb_transport_header(skb) - skb->data + + (tcp_hdr(skb)->doff << 2u); + return header_len; +} + +/* Map all data from an SKB for DMA and create descriptors on the queue. + */ +int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb, + unsigned int segment_count) +{ + struct efx_nic *efx = tx_queue->efx; + struct device *dma_dev = &efx->pci_dev->dev; + unsigned int frag_index, nr_frags; + dma_addr_t dma_addr, unmap_addr; + struct efx_tx_buffer *buffer; + unsigned short dma_flags; + size_t len, unmap_len; +#ifdef EFX_NOT_UPSTREAM + /* Avoid -Wmaybe-uninitialised warning with gcc 4.8.5 */ + int header_len = 0; +#else + int header_len; +#endif + + if (segment_count) { + /* Ensure linear headers for TSO offload */ + header_len = efx_tx_tso_header_length(skb); + + if (header_len < 0) + { + /* We shouldn't have advertised encap TSO support, + * because this kernel doesn't have the bits we need + * to make it work. So let's complain loudly. + */ + WARN_ON_ONCE(1); + return -EINVAL; + } + if (unlikely(header_len > skb_headlen(skb))) { + /* Pull headers into linear area */ + if (!pskb_may_pull(skb, header_len)) + return -ENOMEM; + } + } + + /* Map header data. */ + len = skb_headlen(skb); + dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE); + dma_flags = EFX_TX_BUF_MAP_SINGLE; + unmap_len = len; + unmap_addr = dma_addr; + + if (unlikely(dma_mapping_error(dma_dev, dma_addr))) + return -EIO; + + if (segment_count) { + /* For TSO we need to put the header in to a separate + * descriptor. Map this separately if necessary. 
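+ * For example (illustrative numbers): with 66 bytes of headers in a
+ * 200-byte linear area, a 66-byte header descriptor is emitted first
+ * and the remaining 134 bytes of linear payload follow separately,
+ * so the hardware can replay the header for every segment.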
+ */ + if (header_len != len) { + buffer = efx_tx_map_chunk(tx_queue, dma_addr, header_len); + if (!buffer) + return -EBUSY; + len -= header_len; + dma_addr += header_len; + } + } + + /* Add descriptors for each fragment. */ + nr_frags = skb_shinfo(skb)->nr_frags; + frag_index = 0; + do { + skb_frag_t *fragment; + + buffer = efx_tx_map_chunk(tx_queue, dma_addr, len); + if (!buffer) + return -EBUSY; + + /* The final descriptor for a fragment is responsible for + * unmapping the whole fragment. + */ + buffer->flags = EFX_TX_BUF_CONT | dma_flags; + buffer->unmap_len = unmap_len; + buffer->dma_offset = buffer->dma_addr - unmap_addr; + + if (frag_index >= nr_frags) { + /* Store SKB details with the final buffer for + * the completion. + */ + buffer->skb = skb; + buffer->flags = EFX_TX_BUF_SKB | dma_flags; + return 0; + } + + /* Move on to the next fragment. */ + fragment = &skb_shinfo(skb)->frags[frag_index++]; + len = skb_frag_size(fragment); + dma_addr = skb_frag_dma_map(dma_dev, fragment, + 0, len, DMA_TO_DEVICE); + dma_flags = 0; + unmap_len = len; + unmap_addr = dma_addr; + + if (unlikely(dma_mapping_error(dma_dev, dma_addr))) + return -EIO; + } while (1); + + if (netdev_xmit_more()) + /* There's another TX on the way. Prefetch next descriptor. */ + prefetchw(__efx_tx_queue_get_insert_buffer(tx_queue)); +} + +/* + * Fallback to software TSO. + * + * This is used if we are unable to send a GSO packet through hardware TSO. + * This should only ever happen due to per-queue restrictions - unsupported + * packets should first be filtered by the feature flags and check_features. + * + * Returns 0 on success, error code otherwise. + */ +int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue, struct sk_buff *skb) +{ + struct sk_buff *segments, *next; + + segments = skb_gso_segment(skb, 0); + if (IS_ERR(segments)) + return PTR_ERR(segments); + + dev_consume_skb_any(skb); + + skb_list_walk_safe(segments, skb, next) { + skb_mark_not_on_list(skb); +#if defined(EFX_USE_KCOMPAT) && defined(EFX_HAVE_SKB_XMIT_MORE) + /* This explicitly sets the flag. Note that checks of this flag + * are buried in the netdev_xmit_more() kcompat macro. + */ + if (next) + skb->xmit_more = true; +#endif + efx_enqueue_skb(tx_queue, skb); + } + + return 0; +} diff --git a/src/drivers/net/ethernet/sfc/tx_common.h b/src/drivers/net/ethernet/sfc/tx_common.h new file mode 100644 index 0000000..282dee4 --- /dev/null +++ b/src/drivers/net/ethernet/sfc/tx_common.h @@ -0,0 +1,45 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2019 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
+ */ + +#ifndef EFX_TX_COMMON_H +#define EFX_TX_COMMON_H + +extern unsigned int tx_cb_order __read_mostly; + +int efx_probe_tx_queue(struct efx_tx_queue *tx_queue); +int efx_init_tx_queue(struct efx_tx_queue *tx_queue); +void efx_fini_tx_queue(struct efx_tx_queue *tx_queue); +void efx_destroy_tx_queue(struct efx_tx_queue *tx_queue); + +void efx_purge_tx_queue(struct efx_tx_queue *tx_queue); + +void efx_dequeue_buffer(struct efx_tx_queue *tx_queue, + struct efx_tx_buffer *buffer, + unsigned int *pkts_compl, + unsigned int *bytes_compl, + unsigned int *efv_pkts_compl); + +static inline bool efx_tx_buffer_in_use(struct efx_tx_buffer *buffer) +{ + return buffer->len || (buffer->flags & EFX_TX_BUF_OPTION); +} + +void efx_xmit_done_check_empty(struct efx_tx_queue *tx_queue); +void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index); + +void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,unsigned int insert_count); +struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue, + dma_addr_t dma_addr, size_t len); +int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb, + unsigned int segment_count); +int efx_tx_tso_header_length(struct sk_buff *skb); +int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue, struct sk_buff *skb); + +#endif + diff --git a/src/drivers/net/ethernet/sfc/tx_tso.c b/src/drivers/net/ethernet/sfc/tx_tso.c new file mode 100644 index 0000000..1e908ed --- /dev/null +++ b/src/drivers/net/ethernet/sfc/tx_tso.c @@ -0,0 +1,610 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2005-2017 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if !defined(EFX_USE_KCOMPAT) +#include +#else +#include +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20) +#include +#endif +#endif +#include +#include +#include "net_driver.h" +#include "efx.h" +#include "io.h" +#include "nic.h" +#include "tx.h" +#include "workarounds.h" +#include "ef10_regs.h" +#ifdef CONFIG_SFC_TRACING +#include +#endif + +/* Efx TCP segmentation acceleration. + * + * Why? Because by doing it here in the driver we can go significantly + * faster than the GSO. + * + * Requires TX checksum offload support. 
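+ *
+ * Worked example (illustrative numbers): a 14480-byte TCP payload
+ * with gso_size 1448 goes out as 10 packets.  Each packet reuses the
+ * original headers with the sequence number advanced by 1448, the
+ * IPv4 ID incremented by one and tot_len/payload_len rewritten for
+ * that segment; FIN/PSH are masked off everywhere but the final
+ * segment.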
+ */ + +#define PTR_DIFF(p1, p2) ((u8 *)(p1) - (u8 *)(p2)) + +/** + * struct tso_state - TSO state for an SKB + * @out_len: Remaining length in current segment + * @seqnum: Current sequence number + * @ipv4_id: Current IPv4 ID, host endian + * @packet_space: Remaining space in current packet + * @dma_addr: DMA address of current position + * @in_len: Remaining length in current SKB fragment + * @unmap_len: Length of SKB fragment + * @unmap_addr: DMA address of SKB fragment + * @dma_flags: TX buffer flags for DMA mapping - %EFX_TX_BUF_MAP_SINGLE or 0 + * @protocol: Network protocol (after any VLAN header) + * @ip_off: Offset of IP header + * @tcp_off: Offset of TCP header + * @header_len: Number of bytes of header + * @ip_base_len: IPv4 tot_len or IPv6 payload_len, before TCP payload + * @header_dma_addr: Header DMA address, when using option descriptors + * @header_unmap_len: Header DMA mapped length, or 0 if not using option + * descriptors + * + * The state used during segmentation. It is put into this data structure + * just to make it easy to pass into inline functions. + */ +struct tso_state { + /* Output position */ + unsigned int out_len; + unsigned int seqnum; + u16 ipv4_id; + unsigned int packet_space; + + /* Input position */ + dma_addr_t dma_addr; + unsigned int in_len; + unsigned int unmap_len; + dma_addr_t unmap_addr; + unsigned short dma_flags; + + __be16 protocol; + unsigned int ip_off; + unsigned int tcp_off; + unsigned int header_len; + unsigned int ip_base_len; + dma_addr_t header_dma_addr; + unsigned int header_unmap_len; +}; + +static inline void prefetch_ptr(struct efx_tx_queue *tx_queue) +{ + unsigned int insert_ptr = efx_tx_queue_get_insert_index(tx_queue); + char *ptr; + + ptr = (char *)(tx_queue->buffer + insert_ptr); + prefetch(ptr); + prefetch(ptr + 0x80); + + ptr = (char *)(((efx_qword_t *)tx_queue->txd.addr) + insert_ptr); + prefetch(ptr); + prefetch(ptr + 0x80); +} + +/** + * efx_tx_queue_insert - push descriptors onto the TX queue + * @tx_queue: Efx TX queue + * @dma_addr: DMA address of fragment + * @len: Length of fragment + * @final_buffer: The final buffer inserted into the queue + * + * Push descriptors onto the TX queue. + */ +static void efx_tx_queue_insert(struct efx_tx_queue *tx_queue, + dma_addr_t dma_addr, unsigned int len, + struct efx_tx_buffer **final_buffer) +{ + struct efx_tx_buffer *buffer; + unsigned int dma_len; + + EFX_WARN_ON_ONCE_PARANOID(len <= 0); + + while (1) { + buffer = efx_tx_queue_get_insert_buffer(tx_queue); + ++tx_queue->insert_count; + + EFX_WARN_ON_ONCE_PARANOID(tx_queue->insert_count - + tx_queue->read_count >= + tx_queue->efx->txq_entries); + + buffer->dma_addr = dma_addr; + + dma_len = tx_queue->efx->type->tx_limit_len(tx_queue, + dma_addr, len); + + /* If there's space for everything this is our last buffer. */ + if (dma_len >= len) + break; + + buffer->len = dma_len; + buffer->flags = EFX_TX_BUF_CONT; + dma_addr += dma_len; + len -= dma_len; + } + + EFX_WARN_ON_ONCE_PARANOID(!len); + buffer->len = len; + *final_buffer = buffer; +} + +/* + * Verify that our various assumptions about sk_buffs and the conditions + * under which TSO will be attempted hold true. Also determine and fill + * in the protocol number through the provided pointer. 
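+ * For a VLAN-tagged frame that means looking through the 802.1Q
+ * header: skb->protocol is ETH_P_8021Q and the real protocol comes
+ * from h_vlan_encapsulated_proto.  Anything other than TCP over IPv4
+ * or IPv6 is rejected, which makes the caller fall back to software
+ * GSO.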
+ */ +static int efx_tso_check_protocol(struct sk_buff *skb, __be16 *protocol) +{ + *protocol = skb->protocol; + + if (((struct ethhdr *)skb->data)->h_proto != *protocol) + return -EINVAL; + +#if !defined(EFX_USE_KCOMPAT) || defined(NETIF_F_VLAN_TSO) + if (*protocol == htons(ETH_P_8021Q)) { + struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; + + *protocol = veh->h_vlan_encapsulated_proto; + } +#endif + + /* For SW TSO or TSOv1 we can only deal with TCP in IPv4 or IPv6 */ + if (*protocol == htons(ETH_P_IP)) { + if (ip_hdr(skb)->protocol != IPPROTO_TCP) + return -EINVAL; + } else if (*protocol == htons(ETH_P_IPV6)) { + if (ipv6_hdr(skb)->nexthdr != NEXTHDR_TCP) + return -EINVAL; + } else { + return -EINVAL; + } + + /* All headers (incl. TCP header) must be in linear data area. This + * should always be the case, so warn if not. + */ + if (WARN_ON((PTR_DIFF(tcp_hdr(skb), skb->data) + + (tcp_hdr(skb)->doff << 2u)) > + skb_headlen(skb))) + return -EINVAL; + + return 0; +} + +static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue, + struct efx_tx_buffer *buffer, unsigned int len) +{ + u8 *result; + + EFX_WARN_ON_ONCE_PARANOID(buffer->len); + EFX_WARN_ON_ONCE_PARANOID(buffer->flags); + EFX_WARN_ON_ONCE_PARANOID(buffer->unmap_len); + + result = efx_tx_get_copy_buffer_limited(tx_queue, buffer, len); + + if (result) { + buffer->flags = EFX_TX_BUF_CONT; + } else { + buffer->buf = kmalloc(NET_IP_ALIGN + len, GFP_ATOMIC); + if (unlikely(!buffer->buf)) + return NULL; + tx_queue->tso_long_headers++; + result = (u8 *)buffer->buf + NET_IP_ALIGN; + buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_HEAP; + } + + buffer->len = len; + + return result; +} + +/* + * Put a TSO header into the TX queue. + * + * This is special-cased because we know that it is small enough to fit in + * a single fragment, and we know it doesn't cross a page boundary. It + * also allows us to not worry about end-of-packet etc. + */ +static int efx_tso_put_header(struct efx_tx_queue *tx_queue, + struct efx_tx_buffer *buffer, u8 *header) +{ + if (unlikely(buffer->flags & EFX_TX_BUF_HEAP)) { + buffer->dma_addr = dma_map_single(&tx_queue->efx->pci_dev->dev, + header, buffer->len, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(&tx_queue->efx->pci_dev->dev, + buffer->dma_addr))) { + kfree(buffer->buf); + buffer->len = 0; + buffer->flags = 0; + return -ENOMEM; + } + buffer->unmap_len = buffer->len; + buffer->dma_offset = 0; + buffer->flags |= EFX_TX_BUF_MAP_SINGLE; + } + + ++tx_queue->insert_count; + return 0; +} + + +/* Parse the SKB header and initialise state. 
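+ * Two strategies are set up here.  With TSOv1 option descriptors
+ * (tso_version == 1) the whole linear area is DMA-mapped once and
+ * the original headers are re-sent ahead of every segment; otherwise
+ * only the payload is mapped, and tso_start_new_packet() copies and
+ * patches a fresh header for each segment.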
*/ +static int tso_start(struct tso_state *st, struct efx_nic *efx, + struct efx_tx_queue *tx_queue, + const struct sk_buff *skb) +{ + struct device *dma_dev = &efx->pci_dev->dev; + unsigned int header_len, in_len; + bool use_opt_desc = false; + dma_addr_t dma_addr; + + if (tx_queue->tso_version == 1) + use_opt_desc = true; + + st->ip_off = skb_network_header(skb) - skb->data; + st->tcp_off = skb_transport_header(skb) - skb->data; + header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u); + in_len = skb_headlen(skb) - header_len; + st->header_len = header_len; + st->in_len = in_len; + if (st->protocol == htons(ETH_P_IP)) { + st->ip_base_len = st->header_len - st->ip_off; + st->ipv4_id = ntohs(ip_hdr(skb)->id); + } else { + st->ip_base_len = st->header_len - st->tcp_off; + st->ipv4_id = 0; + } + st->seqnum = ntohl(tcp_hdr(skb)->seq); + + EFX_WARN_ON_ONCE_PARANOID(tcp_hdr(skb)->urg); + EFX_WARN_ON_ONCE_PARANOID(tcp_hdr(skb)->syn); + EFX_WARN_ON_ONCE_PARANOID(tcp_hdr(skb)->rst); + + st->out_len = skb->len - header_len; + + if (!use_opt_desc) { + st->header_unmap_len = 0; + + if (likely(in_len == 0)) { + st->dma_flags = 0; + st->unmap_len = 0; + return 0; + } + + dma_addr = dma_map_single(dma_dev, skb->data + header_len, + in_len, DMA_TO_DEVICE); + st->dma_flags = EFX_TX_BUF_MAP_SINGLE; + st->dma_addr = dma_addr; + st->unmap_addr = dma_addr; + st->unmap_len = in_len; + } else { + dma_addr = dma_map_single(dma_dev, skb->data, + skb_headlen(skb), DMA_TO_DEVICE); + st->header_dma_addr = dma_addr; + st->header_unmap_len = skb_headlen(skb); + st->dma_flags = 0; + st->dma_addr = dma_addr + header_len; + st->unmap_len = 0; + } + + return unlikely(dma_mapping_error(dma_dev, dma_addr)) ? -ENOMEM : 0; +} + +static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx, + skb_frag_t *frag) +{ + st->unmap_addr = skb_frag_dma_map(&efx->pci_dev->dev, frag, 0, + skb_frag_size(frag), DMA_TO_DEVICE); + if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) { + st->dma_flags = 0; + st->unmap_len = skb_frag_size(frag); + st->in_len = skb_frag_size(frag); + st->dma_addr = st->unmap_addr; + return 0; + } + return -ENOMEM; +} + + +/** + * tso_fill_packet_with_fragment - form descriptors for the current fragment + * @tx_queue: Efx TX queue + * @skb: Socket buffer + * @st: TSO state + * + * Form descriptors for the current fragment, until we reach the end + * of fragment or end-of-packet. 
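+ *
+ * Each call consumes min(bytes left in the fragment, space left in
+ * the packet), so one fragment spanning several MSS-sized packets
+ * becomes several descriptors, and a packet built from several small
+ * fragments gets one descriptor per fragment.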
+ */ +static void tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue, + const struct sk_buff *skb, + struct tso_state *st) +{ + struct efx_tx_buffer *buffer; + int n; + + if (st->in_len == 0) + return; + if (st->packet_space == 0) + return; + + EFX_WARN_ON_ONCE_PARANOID(st->in_len <= 0); + EFX_WARN_ON_ONCE_PARANOID(st->packet_space <= 0); + + n = min(st->in_len, st->packet_space); + + st->packet_space -= n; + st->out_len -= n; + st->in_len -= n; + + efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer); + + if (st->out_len == 0) { + /* Transfer ownership of the skb */ + buffer->skb = skb; + buffer->flags = EFX_TX_BUF_SKB; + } else if (st->packet_space != 0) { + buffer->flags = EFX_TX_BUF_CONT; + } + + if (st->in_len == 0) { + /* Transfer ownership of the DMA mapping */ + buffer->unmap_len = st->unmap_len; + buffer->dma_offset = buffer->unmap_len - buffer->len; + buffer->flags |= st->dma_flags; + st->unmap_len = 0; + } + + st->dma_addr += n; +} + +/** + * tso_start_new_packet - generate a new header and prepare for the new packet + * @tx_queue: Efx TX queue + * @skb: Socket buffer + * @st: TSO state + * @is_first: true if this is the first packet + * + * Generate a new header and prepare for the new packet. + * + * Return: 0 on success, or -%ENOMEM if failed to alloc header. + */ +static int tso_start_new_packet(struct efx_tx_queue *tx_queue, + const struct sk_buff *skb, + struct tso_state *st, + bool is_first) +{ + struct efx_tx_buffer *buffer = + efx_tx_queue_get_insert_buffer(tx_queue); + bool is_last = st->out_len <= skb_shinfo(skb)->gso_size; + u8 tcp_flags_mask; + + if (!is_last) { + st->packet_space = skb_shinfo(skb)->gso_size; + tcp_flags_mask = TCPHDR_FIN | TCPHDR_PSH; + } else { + st->packet_space = st->out_len; + tcp_flags_mask = 0; + } + + if (!is_first) + tcp_flags_mask |= TCPHDR_CWR; /* Congestion control */ + + if (!st->header_unmap_len) { + /* Allocate and insert a DMA-mapped header buffer. */ + struct tcphdr *tsoh_th; + unsigned int ip_length; + u8 *header; + int rc; + + header = efx_tsoh_get_buffer(tx_queue, buffer, st->header_len); + if (!header) + return -ENOMEM; + + tsoh_th = (struct tcphdr *)(header + st->tcp_off); + + /* Copy and update the headers. */ + memcpy(header, skb->data, st->header_len); + + tsoh_th->seq = htonl(st->seqnum); + tcp_flag_byte(tsoh_th) &= ~tcp_flags_mask; + + ip_length = st->ip_base_len + st->packet_space; + + if (st->protocol == htons(ETH_P_IP)) { + struct iphdr *tsoh_iph = + (struct iphdr *)(header + st->ip_off); + + tsoh_iph->tot_len = htons(ip_length); + tsoh_iph->id = htons(st->ipv4_id); + } else { + struct ipv6hdr *tsoh_iph = + (struct ipv6hdr *)(header + st->ip_off); + + tsoh_iph->payload_len = htons(ip_length); + } + + rc = efx_tso_put_header(tx_queue, buffer, header); + if (unlikely(rc)) + return rc; + } else { + /* Send the original headers with a TSO option descriptor + * in front + */ + u8 tcp_flags = tcp_flag_byte(tcp_hdr(skb)) & ~tcp_flags_mask; + + buffer->flags = EFX_TX_BUF_OPTION; + buffer->len = 0; + buffer->unmap_len = 0; + EFX_POPULATE_QWORD_5(buffer->option, + ESF_DZ_TX_DESC_IS_OPT, 1, + ESF_DZ_TX_OPTION_TYPE, + ESE_DZ_TX_OPTION_DESC_TSO, + ESF_DZ_TX_TSO_TCP_FLAGS, tcp_flags, + ESF_DZ_TX_TSO_IP_ID, st->ipv4_id, + ESF_DZ_TX_TSO_TCP_SEQNO, st->seqnum); + ++tx_queue->insert_count; + + /* We mapped the headers in tso_start(). Unmap them + * when the last segment is completed. 
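+ * Every segment reuses the same header mapping, so only the final
+ * segment's buffer carries unmap_len and EFX_TX_BUF_MAP_SINGLE.
+ * Clearing header_unmap_len afterwards also stops a later DMA error
+ * rollback from unmapping the headers twice.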
+ */ + buffer = efx_tx_queue_get_insert_buffer(tx_queue); + buffer->dma_addr = st->header_dma_addr; + buffer->len = st->header_len; + if (is_last) { + buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_MAP_SINGLE; + buffer->unmap_len = st->header_unmap_len; + buffer->dma_offset = 0; + /* Ensure we only unmap them once in case of a + * later DMA mapping error and rollback + */ + st->header_unmap_len = 0; + } else { + buffer->flags = EFX_TX_BUF_CONT; + buffer->unmap_len = 0; + } + ++tx_queue->insert_count; + } + + st->seqnum += skb_shinfo(skb)->gso_size; + + /* Linux leaves suitable gaps in the IP ID space for us to fill. */ + ++st->ipv4_id; + + return 0; +} + +/** + * efx_nic_tx_tso_sw - segment and transmit a TSO socket buffer using SW or FATSOv1 + * @tx_queue: Efx TX queue + * @skb: Socket buffer + * @data_mapped: Did we map the data? Always set to true + * by this on success. + * + * Add socket buffer @skb to @tx_queue, doing TSO or return != 0 if + * @skb was not enqueued. In all cases @skb is consumed. + * + * Context: You must hold netif_tx_lock() to call this function. + * Return: %NETDEV_TX_OK. + */ +int efx_nic_tx_tso_sw(struct efx_tx_queue *tx_queue, struct sk_buff *skb, + bool *data_mapped) +{ + struct efx_nic *efx = tx_queue->efx; + int frag_i, rc; + struct tso_state state; + +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_GSO_MAX_SEGS) + /* Since the stack does not limit the number of segments per + * skb, we must do so. Otherwise an attacker may be able to + * make the TCP produce skbs that will never fit in our TX + * queue, causing repeated resets. + */ + if (unlikely(skb_shinfo(skb)->gso_segs > EFX_TSO_MAX_SEGS)) { + unsigned int excess = + (skb_shinfo(skb)->gso_segs - EFX_TSO_MAX_SEGS) * + skb_shinfo(skb)->gso_size; + if (__pskb_trim(skb, skb->len - excess)) + return -E2BIG; + } +#endif + + prefetch(skb->data); + + /* Find the packet protocol and sanity-check it */ + rc = efx_tso_check_protocol(skb, &state.protocol); + if (rc) + return rc; + + rc = tso_start(&state, efx, tx_queue, skb); + if (rc) + goto mem_err; + + if (likely(state.in_len == 0)) { + /* Grab the first payload fragment. */ + EFX_WARN_ON_ONCE_PARANOID(skb_shinfo(skb)->nr_frags < 1); + frag_i = 0; + rc = tso_get_fragment(&state, efx, + skb_shinfo(skb)->frags + frag_i); + if (rc) + goto mem_err; + } else { + /* Payload starts in the header area. */ + frag_i = -1; + } + + if (tso_start_new_packet(tx_queue, skb, &state, true) < 0) + goto mem_err; + + prefetch_ptr(tx_queue); + + while (1) { + tso_fill_packet_with_fragment(tx_queue, skb, &state); + + /* Move onto the next fragment? */ + if (state.in_len == 0) { + if (++frag_i >= skb_shinfo(skb)->nr_frags) + /* End of payload reached. */ + break; + rc = tso_get_fragment(&state, efx, + skb_shinfo(skb)->frags + frag_i); + if (rc) + goto mem_err; + } + + /* Start at new packet? 
 */
+		if (state.packet_space == 0 &&
+		    tso_start_new_packet(tx_queue, skb, &state, false) < 0)
+			goto mem_err;
+	}
+
+	*data_mapped = true;
+
+	return 0;
+
+ mem_err:
+	netif_err(efx, tx_err, efx->net_dev,
+		  "Out of memory for TSO headers, or DMA mapping error\n");
+
+	/* Free the DMA mapping we were in the process of writing out */
+	if (state.unmap_len) {
+		if (state.dma_flags & EFX_TX_BUF_MAP_SINGLE)
+			dma_unmap_single(&efx->pci_dev->dev, state.unmap_addr,
+					 state.unmap_len, DMA_TO_DEVICE);
+		else
+			dma_unmap_page(&efx->pci_dev->dev, state.unmap_addr,
+				       state.unmap_len, DMA_TO_DEVICE);
+	}
+
+	/* Free the header DMA mapping, if using option descriptors */
+	if (state.header_unmap_len)
+		dma_unmap_single(&efx->pci_dev->dev, state.header_dma_addr,
+				 state.header_unmap_len, DMA_TO_DEVICE);
+
+	return -ENOMEM;
+}
diff --git a/src/drivers/net/ethernet/sfc/workarounds.h b/src/drivers/net/ethernet/sfc/workarounds.h
new file mode 100644
index 0000000..5b7d658
--- /dev/null
+++ b/src/drivers/net/ethernet/sfc/workarounds.h
@@ -0,0 +1,44 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2006-2017 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_WORKAROUNDS_H
+#define EFX_WORKAROUNDS_H
+
+/*
+ * Hardware workarounds.
+ * Bug numbers are from Solarflare's Bugzilla.
+ */
+
+#define EFX_WORKAROUND_EF10(efx) (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
+
+/* Lockup when writing event block registers at gen2/gen3 */
+#define EFX_EF10_WORKAROUND_35388(efx) \
+	((struct efx_ef10_nic_data *)efx->nic_data)->workaround_35388
+#define EFX_WORKAROUND_35388(efx) \
+	(efx_nic_rev(efx) == EFX_REV_HUNT_A0 && EFX_EF10_WORKAROUND_35388(efx))
+
+#ifdef EFX_NOT_UPSTREAM
+/* RX doorbell seems to go AWOL on Stratus machines during breaker tests */
+#define EFX_WORKAROUND_59975(efx) 0
+/* Driverlink probe can take >1 sec to perform license challenge */
+#define EFX_WORKAROUND_62649 defined
+#ifdef EFX_HAVE_MTD_USECOUNT
+/* MTD can leave a bad usecount */
+#define EFX_WORKAROUND_63680
+#endif
+#endif
+
+/* Delay creation of MTD devices to avoid naming conflicts */
+#define EFX_WORKAROUND_87308 1
+
+/* Moderation timer access must go through MCDI */
+#define EFX_EF10_WORKAROUND_61265(efx) \
+	((struct efx_ef10_nic_data *)efx->nic_data)->workaround_61265
+
+#endif /* EFX_WORKAROUNDS_H */
diff --git a/src/drivers/net/ethernet/sfc/xdp.c b/src/drivers/net/ethernet/sfc/xdp.c
new file mode 100644
index 0000000..8f07bf5
--- /dev/null
+++ b/src/drivers/net/ethernet/sfc/xdp.c
@@ -0,0 +1,776 @@
+// SPDX-License-Identifier: GPL-2.0
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2019 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "net_driver.h"
+#include "xdp.h"
+#include "nic.h"
+#include "tx_common.h"
+#include "rx_common.h"
+#include "efx_channels.h"
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_TRACE)
+#include <trace/events/xdp.h>
+#endif
+
+/* Maximum rx prefix used by any architecture.
*/ +#define EFX_MAX_RX_PREFIX_SIZE 22 + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP) +unsigned int efx_xdp_max_mtu(struct efx_nic *efx) +{ + /* The maximum MTU that we can fit in a single page, allowing for + * framing, overhead and XDP headroom. + */ + int overhead = EFX_MAX_FRAME_LEN(0) + sizeof(struct efx_rx_page_state) + + efx->rx_prefix_size + efx->type->rx_buffer_padding + + efx->rx_ip_align + XDP_PACKET_HEADROOM; + + return PAGE_SIZE - overhead; +} + +int efx_xdp_setup_prog(struct efx_nic *efx, struct bpf_prog *prog) +{ + struct bpf_prog *old_prog; + + if (prog && efx->net_dev->mtu > efx_xdp_max_mtu(efx)) { + netif_err(efx, drv, efx->net_dev, + "Unable to configure XDP with MTU of %d (max: %d)\n", + efx->net_dev->mtu, efx_xdp_max_mtu(efx)); + return -EINVAL; + } + + old_prog = rtnl_dereference(efx->xdp_prog); + rcu_assign_pointer(efx->xdp_prog, prog); + /* Release the reference that was originally passed by the caller. */ + if (old_prog) + bpf_prog_put(old_prog); + + return 0; +} + +/* Context: process, rtnl_lock() held. */ +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_SOCK) +#if defined(CONFIG_XDP_SOCKETS) +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XSK_POOL) +static int efx_xsk_pool_enable(struct efx_nic *efx, struct xsk_buff_pool *pool, + u16 qid) +{ + struct efx_channel *channel; + bool if_running; + int err; + + err = xsk_pool_dma_map(pool, &efx->pci_dev->dev, 0); + if (err) + return err; + + channel = efx_get_channel(efx, qid); + if_running = (efx->state == STATE_NET_UP); + + if (if_running) { + err = efx_channel_stop_xsk_queue(channel); + if (err) { + netif_err(efx, drv, efx->net_dev, + "Channel %u Stop data path failed\n", + qid); + goto xsk_q_stop_fail; + } + } + + channel->zc = true; + + if (if_running) { + err = efx_channel_start_xsk_queue(channel); + if (err) { + netif_err(efx, drv, efx->net_dev, + "Channel %u Start data path failed\n", + qid); + goto xsk_q_start_fail; + } + } + + return 0; +xsk_q_start_fail: /* try to recover old configuration */ + channel->zc = false; + efx_channel_start_xsk_queue(channel); +xsk_q_stop_fail: + xsk_pool_dma_unmap(pool, 0); + return err; +} + +static int efx_xsk_pool_disable(struct efx_nic *efx, u16 qid) +{ + struct net_device *netdev = efx->net_dev; + struct efx_channel *channel; + struct xsk_buff_pool *pool; + bool if_running; + int rc; + + pool = xsk_get_pool_from_qid(netdev, qid); + if (!pool) + return -EINVAL; + + channel = efx_get_channel(efx, qid); + if_running = (efx->state == STATE_NET_UP); + + if (if_running) { + rc = efx_channel_stop_xsk_queue(channel); + WARN_ON(rc); + if (rc) + goto xsk_q_stop_fail; + } + + channel->zc = false; + + if (if_running) { + rc = efx_channel_start_xsk_queue(channel); + WARN_ON(rc); + } + + xsk_pool_dma_unmap(pool, 0); + + return 0; +xsk_q_stop_fail: + channel->zc = false; + xsk_pool_dma_unmap(pool, 0); + return rc; +} +#else +static int efx_xsk_umem_dma_map(struct pci_dev *pci_dev, struct xdp_umem *umem) +{ + struct device *dev = &pci_dev->dev; + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_USE_XSK_BUFFER_ALLOC) + return xsk_buff_dma_map(umem, dev, 0); +#else + unsigned int i, j; + dma_addr_t dma; + + for (i = 0; i < umem->npgs; i++) { + dma = dma_map_page(dev, umem->pgs[i], 0, PAGE_SIZE, + DMA_BIDIRECTIONAL); + if (dma_mapping_error(dev, dma)) + goto out_unmap; + + umem->pages[i].dma = dma; + } + return 0; +out_unmap: + for (j = 0; j < i; j++) { + dma_unmap_page(dev, umem->pages[j].dma, PAGE_SIZE, + DMA_BIDIRECTIONAL); + umem->pages[j].dma = 0; + } + + return -EINVAL; 
+#endif +} + +static void efx_xsk_umem_dma_unmap(struct pci_dev *pci_dev, + struct xdp_umem *umem) +{ +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_USE_XSK_BUFFER_ALLOC) + xsk_buff_dma_unmap(umem, 0); +#else + struct device *dev = &pci_dev->dev; + unsigned int i; + + for (i = 0; i < umem->npgs; i++) { + dma_unmap_page(dev, umem->pages[i].dma, PAGE_SIZE, + DMA_BIDIRECTIONAL); + + umem->pages[i].dma = 0; + } +#endif +} + +static int efx_xsk_umem_enable(struct efx_nic *efx, struct xdp_umem *umem, + u16 qid) +{ + struct efx_channel *channel = efx_get_channel(efx, qid); +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_USE_XSK_BUFFER_ALLOC) + struct xdp_umem_fq_reuse *reuseq; +#endif + bool if_running; + int err; + +#if defined(EFX_USE_KCOMPAT) && !defined(EFX_USE_XSK_BUFFER_ALLOC) + reuseq = xsk_reuseq_prepare(channel->rx_queue.ptr_mask + 1); + if (!reuseq) + return -ENOMEM; + + xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq)); +#endif + err = efx_xsk_umem_dma_map(efx->pci_dev, umem); + if (err) + return err; + + if_running = (efx->state == STATE_NET_UP); + + if (if_running) { + err = efx_channel_stop_xsk_queue(channel); + if (err) { + netif_err(efx, drv, efx->net_dev, + "Channel %u Stop data path failed\n", + qid); + goto xsk_q_stop_fail; + } + } + + channel->zc = true; + + if (if_running) { + err = efx_channel_start_xsk_queue(channel); + if (err) { + netif_err(efx, drv, efx->net_dev, + "Channel %u Start data path failed\n", + qid); + goto xsk_q_start_fail; + } + } + + return 0; +xsk_q_start_fail: /* try to recover old configuration */ + channel->zc = false; + efx_channel_start_xsk_queue(channel); +xsk_q_stop_fail: + efx_xsk_umem_dma_unmap(efx->pci_dev, umem); + return err; +} + +static int efx_xsk_umem_disable(struct efx_nic *efx, u16 qid) +{ + struct net_device *netdev = efx->net_dev; + struct efx_channel *channel; + struct xdp_umem *umem; + bool if_running; + int rc; + + umem = xdp_get_umem_from_qid(netdev, qid); + if (!umem) + return -EINVAL; + + channel = efx_get_channel(efx, qid); + if_running = (efx->state == STATE_NET_UP); + + if (if_running) { + rc = efx_channel_stop_xsk_queue(channel); + WARN_ON(rc); + if (rc) + goto xsk_q_stop_fail; + } + + channel->zc = false; + + if (if_running) { + rc = efx_channel_start_xsk_queue(channel); + WARN_ON(rc); + } + + efx_xsk_umem_dma_unmap(efx->pci_dev, umem); + + return 0; +xsk_q_stop_fail: + channel->zc = false; + efx_xsk_umem_dma_unmap(efx->pci_dev, umem); + return rc; +} + +static inline bool efx_xsk_umem_consume_tx(struct xdp_umem *umem, + struct xdp_desc *desc, + dma_addr_t *dma) +{ +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XSK_UMEM_CONS_TX_2PARAM) + if (xsk_umem_consume_tx(umem, desc)) { +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_USE_XSK_BUFFER_ALLOC) + *dma = xsk_buff_raw_get_dma(umem, desc->addr); +#else + *dma = xdp_umem_get_dma(umem, desc->addr); +#endif + + return true; + } +#else + return xsk_umem_consume_tx(umem, dma, &desc->len); +#endif + return false; +} +#endif /* EFX_HAVE_XSK_POOL */ + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XSK_POOL) +static int efx_xsk_pool_setup(struct efx_nic *efx, struct xsk_buff_pool *pool, + u16 qid) +#else +static int efx_xsk_umem_setup(struct efx_nic *efx, struct xdp_umem *umem, + u16 qid) +#endif +{ + struct net_device *netdev = efx->net_dev; + + if (qid >= netdev->real_num_rx_queues || + qid >= netdev->real_num_tx_queues) + return -EINVAL; + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XSK_POOL) + return pool ? 
efx_xsk_pool_enable(efx, pool, qid) : + efx_xsk_pool_disable(efx, qid); +#else + return umem ? efx_xsk_umem_enable(efx, umem, qid) : + efx_xsk_umem_disable(efx, qid); +#endif +} + +static void efx_xmit_zc(struct efx_tx_queue *tx_queue) +{ + struct efx_tx_buffer *tx_buf; + unsigned int total_bytes = 0; + unsigned int pkt_cnt = 0; + struct xdp_desc desc; + dma_addr_t dma; + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XSK_POOL) + while (tx_queue->xsk_pool) { + if (!xsk_tx_peek_desc(tx_queue->xsk_pool, &desc)) + break; + dma = xsk_buff_raw_get_dma(tx_queue->xsk_pool, desc.addr); +#else + while (tx_queue->umem) { + if (!efx_xsk_umem_consume_tx(tx_queue->umem, &desc, &dma)) + break; +#endif + prefetchw(__efx_tx_queue_get_insert_buffer(tx_queue)); + + tx_buf = efx_tx_map_chunk(tx_queue, dma, desc.len); + if (!tx_buf) + break; + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_USE_XSK_BUFFER_ALLOC) +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XSK_POOL) + xsk_buff_raw_dma_sync_for_device(tx_queue->xsk_pool, + dma, desc.len); +#else + xsk_buff_raw_dma_sync_for_device(tx_queue->umem, dma, desc.len); +#endif +#else + dma_sync_single_for_device(&tx_queue->efx->net_dev->dev, dma, + desc.len, DMA_TO_DEVICE); +#endif + tx_buf->flags |= EFX_TX_BUF_XSK; + tx_buf->flags &= ~EFX_TX_BUF_CONT; + pkt_cnt++; + total_bytes += desc.len; + } +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XSK_POOL) + if (tx_queue->xsk_pool && pkt_cnt) { +#else + if (tx_queue->umem && pkt_cnt) { +#endif + efx_nic_push_buffers(tx_queue); + + tx_queue->tx_packets += pkt_cnt; + tx_queue->tx_bytes += total_bytes; + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XSK_POOL) + xsk_tx_release(tx_queue->xsk_pool); +#else + xsk_umem_consume_tx_done(tx_queue->umem); +#endif + } +} + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XSK_NEED_WAKEUP) +int efx_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags) +#else +int efx_xsk_async_xmit(struct net_device *dev, u32 queue_id) +#endif +{ + struct efx_nic *efx = efx_netdev_priv(dev); + struct efx_tx_queue *tx_queue; + struct efx_channel *channel; + + if (!netif_running(dev)) + return -EINVAL; + + channel = efx_get_tx_channel(efx, queue_id); + if (!channel || !channel->zc) + return -EINVAL; + tx_queue = efx_channel_get_xsk_tx_queue(channel); +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XSK_POOL) + if (unlikely(!tx_queue || !tx_queue->xsk_pool || +#else + if (unlikely(!tx_queue || !tx_queue->umem || +#endif + !efx_is_xsk_tx_queue(tx_queue))) + return -EINVAL; + efx_xmit_zc(tx_queue); + + return 0; +} +#endif +#endif + +int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp) +{ + struct efx_nic *efx = efx_netdev_priv(dev); +#if defined(EFX_USE_KCOMPAT) && defined(EFX_HAVE_XDP_QUERY_PROG) + struct bpf_prog *xdp_prog; +#endif + + switch (xdp->command) { + case XDP_SETUP_PROG: + return efx_xdp_setup_prog(efx, xdp->prog); +#if defined(EFX_USE_KCOMPAT) && defined(EFX_HAVE_XDP_QUERY_PROG) + case XDP_QUERY_PROG: + xdp_prog = rtnl_dereference(efx->xdp_prog); +#if defined(EFX_USE_KCOMPAT) && (defined(EFX_HAVE_XDP_PROG_ATTACHED) || defined(EFX_HAVE_XDP_OLD)) + xdp->prog_attached = !!xdp_prog; +#endif +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_PROG_ID) || !defined(EFX_HAVE_XDP_OLD) + xdp->prog_id = xdp_prog ? 
xdp_prog->aux->id : 0; +#endif + return 0; +#endif +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XSK_POOL) +#if defined(CONFIG_XDP_SOCKETS) + case XDP_SETUP_XSK_POOL: + return efx_xsk_pool_setup(efx, xdp->xsk.pool, + xdp->xsk.queue_id); +#endif +#else +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_SOCK) +#if defined(CONFIG_XDP_SOCKETS) + case XDP_SETUP_XSK_UMEM: + return efx_xsk_umem_setup(efx, xdp->xsk.umem, + xdp->xsk.queue_id); +#endif +#endif +#endif /* EFX_HAVE_XSK_POOL*/ + default: + return -EINVAL; + } +} + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_REDIR) +/* Context: NAPI */ +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_TX_FLAGS) +int efx_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdpfs, + u32 flags) +{ + struct efx_nic *efx = efx_netdev_priv(dev); + + if (!netif_running(dev)) + return -EINVAL; + + return efx_xdp_tx_buffers(efx, n, xdpfs, flags & XDP_XMIT_FLUSH); +} +#else +int efx_xdp_xmit(struct net_device *dev, struct xdp_frame *xdpf) +{ + struct efx_nic *efx = efx_netdev_priv(dev); + int rc; + + if (!netif_running(dev)) + return -EINVAL; + + rc = efx_xdp_tx_buffers(efx, 1, &xdpf, false); + + if (rc == 1) + return 0; + if (rc == 0) + return -ENOSPC; + return rc; +} +#endif + +#if defined(EFX_USE_KCOMPAT) && defined(EFX_NEED_XDP_FLUSH) +/* Context: NAPI */ +void efx_xdp_flush(struct net_device *dev) +{ + efx_xdp_tx_buffers(efx_netdev_priv(dev), 0, NULL, true); +} +#endif /* NEED_XDP_FLUSH */ +#endif /* HAVE_XDP_REDIR */ +#endif /* HAVE_XDP */ + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_TX) +/* Transmit a packet from an XDP buffer + * + * Returns number of packets sent on success, error code otherwise. + * Runs in NAPI context, either in our poll (for XDP TX) or a different NIC + * (for XDP redirect). + */ +int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs, + bool flush) +{ + struct efx_tx_buffer *tx_buffer; + struct efx_tx_queue *tx_queue; + struct xdp_frame *xdpf; + unsigned int total = 0; + dma_addr_t dma_addr; + unsigned int len; + int space; + int cpu; + int i; + + cpu = raw_smp_processor_id(); + + if (!efx->xdp_tx_queue_count || + unlikely(cpu >= efx->xdp_tx_queue_count)) + return -EINVAL; + + tx_queue = efx->xdp_tx_queues[cpu]; + if (unlikely(!tx_queue)) + return -EINVAL; + + if (n && xdpfs) { + /* Check for available space. We should never need multiple + * descriptors per frame. + */ + space = efx->txq_entries + + tx_queue->read_count - tx_queue->insert_count; + n = min(n, space); + + for (i = 0; i < n; i++) { + xdpf = xdpfs[i]; + + /* We'll want a descriptor for this tx. */ + prefetchw(__efx_tx_queue_get_insert_buffer(tx_queue)); + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_FRAME_API) + len = xdpf->len; +#else + len = xdpf->data_end - xdpf->data; +#endif + +/* Map for DMA. */ + dma_addr = dma_map_single(&efx->pci_dev->dev, + xdpf->data, len, + DMA_TO_DEVICE); + if (dma_mapping_error(&efx->pci_dev->dev, dma_addr)) + return -EIO; + + /* Create descriptor and set up for unmapping DMA. */ + tx_buffer = efx_tx_map_chunk(tx_queue, dma_addr, len); + if (!tx_buffer) + return -EBUSY; +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_FRAME_API) + tx_buffer->xdpf = xdpf; +#else + tx_buffer->buf = xdpf->data; +#endif + tx_buffer->flags = EFX_TX_BUF_XDP | + EFX_TX_BUF_MAP_SINGLE; + tx_buffer->dma_offset = 0; + tx_buffer->unmap_len = len; + total += len; + } + } + + /* Pass to hardware. 
 */
+	if (flush)
+		efx_nic_push_buffers(tx_queue);
+
+	tx_queue->tx_packets += n;
+	tx_queue->tx_bytes += total;
+
+	return n;
+}
+#endif
+
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP)
+/** efx_xdp_rx: perform XDP processing on a received packet
+ *
+ * Returns the XDP action taken: %XDP_PASS if the packet should still
+ * be delivered, otherwise %XDP_TX, %XDP_REDIRECT, %XDP_DROP or
+ * %XDP_ABORTED.
+ */
+int efx_xdp_rx(struct efx_nic *efx, struct efx_rx_queue *rx_queue,
+	       struct efx_rx_buffer *rx_buf, u8 **ehp)
+{
+	u8 rx_prefix[EFX_MAX_RX_PREFIX_SIZE];
+	struct xdp_buff *xdp_ptr, xdp;
+	bool free_buf_on_fail = true;
+	struct bpf_prog *xdp_prog;
+	struct xdp_frame *xdpf;
+	u32 xdp_act;
+	s16 offset;
+	int rc;
+
+	rcu_read_lock();
+	xdp_prog = rcu_dereference(efx->xdp_prog);
+	if (!xdp_prog) {
+		rcu_read_unlock();
+		return XDP_PASS;
+	}
+
+	if (unlikely(rx_queue->rx_pkt_n_frags > 1)) {
+		/* We can't do XDP on fragmented packets - drop. */
+		rcu_read_unlock();
+		efx_free_rx_buffers(rx_queue, rx_buf,
+				    rx_queue->rx_pkt_n_frags);
+		if (net_ratelimit())
+			netif_err(efx, rx_err, efx->net_dev,
+				  "XDP is not possible with multiple receive fragments (%d)\n",
+				  rx_queue->rx_pkt_n_frags);
+		rx_queue->n_rx_xdp_bad_drops++;
+		return XDP_DROP;
+	}
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_SOCK)
+#if defined(CONFIG_XDP_SOCKETS)
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_USE_XSK_BUFFER_ALLOC)
+	if (rx_buf->flags & EFX_RX_BUF_ZC)
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XSK_POOL)
+		xsk_buff_dma_sync_for_cpu(rx_buf->xsk_buf, rx_queue->xsk_pool);
+#else
+		xsk_buff_dma_sync_for_cpu(rx_buf->xsk_buf);
+#endif
+	else
+#endif /* EFX_USE_XSK_BUFFER_ALLOC */
+#endif /* CONFIG_XDP_SOCKETS */
+		dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr,
+					rx_buf->len, DMA_FROM_DEVICE);
+#else
+	dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr,
+				rx_buf->len, DMA_FROM_DEVICE);
+#endif
+
+	/* Save the rx prefix. */
+	EFX_WARN_ON_PARANOID(efx->rx_prefix_size > EFX_MAX_RX_PREFIX_SIZE);
+	memcpy(rx_prefix, *ehp - efx->rx_prefix_size,
+	       efx->rx_prefix_size);
+
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_RXQ_INFO)
+	xdp_init_buff(&xdp, efx->rx_page_buf_step, &rx_queue->xdp_rxq_info);
+#else
+	xdp_init_buff(&xdp, efx->rx_page_buf_step);
+#endif
+
+	/* No support yet for XDP metadata */
+	xdp_prepare_buff(&xdp, *ehp - XDP_PACKET_HEADROOM, XDP_PACKET_HEADROOM,
+			 rx_buf->len, false);
+
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_SOCK)
+	if (efx_rx_queue_channel(rx_queue)->zc) {
+#if defined(EFX_USE_KCOMPAT) && defined(EFX_HAVE_XDP_SOCK) && defined(EFX_HAVE_XSK_OFFSET_ADJUST)
+		xdp.handle = rx_buf->handle;
+#endif
+		free_buf_on_fail = false;
+	}
+#endif
+	xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
+	rcu_read_unlock();
+
+	offset = (u8 *)xdp.data - *ehp;
+
+#if defined(EFX_USE_KCOMPAT) && defined(EFX_HAVE_XDP_SOCK) && defined(EFX_HAVE_XSK_OFFSET_ADJUST)
+#if defined(CONFIG_XDP_SOCKETS)
+	if (efx_rx_queue_channel(rx_queue)->zc)
+		xdp.handle = xsk_umem_adjust_offset(rx_queue->umem, xdp.handle,
+						    xdp.data -
+						    xdp.data_hard_start +
+						    efx->rx_prefix_size);
+#endif
+#endif
+	xdp_ptr = &xdp;
+	switch (xdp_act) {
+	case XDP_PASS:
+		/* Fix up rx prefix. */
+		if (offset) {
+			*ehp += offset;
+			rx_buf->page_offset += offset;
+			rx_buf->len -= offset;
+			memcpy(*ehp - efx->rx_prefix_size, rx_prefix,
+			       efx->rx_prefix_size);
+		}
+		break;
+
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_TX)
+	case XDP_TX:
+		/* Buffer ownership passes to tx on success.
*/ +#if !defined(EFX_USE_KCOMPAT) || (defined(EFX_HAVE_XDP_SOCK) && defined(EFX_USE_XSK_BUFFER_ALLOC)) +#if defined(CONFIG_XDP_SOCKETS) + if (rx_buf->flags & EFX_RX_BUF_ZC) { + xdp_ptr = rx_buf->xsk_buf; + xdp_ptr->data = xdp.data; + } +#endif +#endif +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_COVERT_XDP_BUFF_FRAME_API) + xdpf = xdp_convert_buff_to_frame(xdp_ptr); +#else +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_FRAME_API) + xdpf = convert_to_xdp_frame(xdp_ptr); +#else + xdpf = xdp_ptr; +#endif +#endif + rc = efx_xdp_tx_buffers(efx, 1, &xdpf, true); + if (rc != 1) { + if (free_buf_on_fail) + efx_free_rx_buffers(rx_queue, rx_buf, 1); + if (net_ratelimit()) + netif_err(efx, rx_err, efx->net_dev, + "XDP TX failed (%d)\n", rc); + rx_queue->n_rx_xdp_bad_drops++; + xdp_act = XDP_DROP; + } else { + rx_queue->n_rx_xdp_tx++; + } + break; +#endif + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_REDIR) + case XDP_REDIRECT: +#if !defined(EFX_USE_KCOMPAT) || (defined(EFX_HAVE_XDP_SOCK) && defined(EFX_USE_XSK_BUFFER_ALLOC)) +#if defined(CONFIG_XDP_SOCKETS) + if (rx_buf->flags & EFX_RX_BUF_ZC) { + xdp_ptr = rx_buf->xsk_buf; + xdp_ptr->data = xdp.data; + xdp_ptr->data_end = xdp.data_end; + } +#endif +#endif + rc = xdp_do_redirect(efx->net_dev, xdp_ptr, xdp_prog); + if (rc) { + if (free_buf_on_fail) + efx_free_rx_buffers(rx_queue, rx_buf, 1); + if (net_ratelimit()) + netif_err(efx, rx_err, efx->net_dev, + "XDP redirect failed (%d)\n", rc); + rx_queue->n_rx_xdp_bad_drops++; + xdp_act = XDP_DROP; + } else { + rx_queue->n_rx_xdp_redirect++; + } + break; +#endif + + default: +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_BPF_WARN_INVALID_XDP_ACTION_3PARAM) + bpf_warn_invalid_xdp_action(efx->net_dev, xdp_prog, xdp_act); +#else + bpf_warn_invalid_xdp_action(xdp_act); +#endif + fallthrough; + case XDP_ABORTED: + trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act); + if (free_buf_on_fail) + efx_free_rx_buffers(rx_queue, rx_buf, 1); + rx_queue->n_rx_xdp_bad_drops++; + break; + + case XDP_DROP: + if (free_buf_on_fail) + efx_free_rx_buffers(rx_queue, rx_buf, 1); + rx_queue->n_rx_xdp_drops++; + break; + } + + return xdp_act; +} +#endif + diff --git a/src/drivers/net/ethernet/sfc/xdp.h b/src/drivers/net/ethernet/sfc/xdp.h new file mode 100644 index 0000000..a981b33 --- /dev/null +++ b/src/drivers/net/ethernet/sfc/xdp.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2019 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
+ */ + +#include "net_driver.h" + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP) +unsigned int efx_xdp_max_mtu(struct efx_nic *efx); +int efx_xdp_setup_prog(struct efx_nic *efx, struct bpf_prog *prog); +int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp); +int efx_xdp_rx(struct efx_nic *efx, struct efx_rx_queue *rx_queue, + struct efx_rx_buffer *rx_buf, u8 **ehp); +#else +static inline int efx_xdp_rx(struct efx_nic *efx, struct efx_rx_queue *rx_queue, + struct efx_rx_buffer *rx_buf, u8 **ehp) +{ + return -ENOTSUPP; +} +#endif +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_REDIR) +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_TX_FLAGS) +int efx_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdpfs, + u32 flags); +#else +int efx_xdp_xmit(struct net_device *dev, struct xdp_frame *xdpf); +#endif +#if defined(EFX_USE_KCOMPAT) && defined(EFX_NEED_XDP_FLUSH) +void efx_xdp_flush(struct net_device *dev); +#endif +#endif + +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_SOCK) +#if defined(CONFIG_XDP_SOCKETS) +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XSK_NEED_WAKEUP) +int efx_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags); +#else +int efx_xsk_async_xmit(struct net_device *dev, u32 queue_id); +#endif +#endif +#endif +#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_XDP_TX) +int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs, + bool flush); +#endif + diff --git a/src/include/linux/net/sfc/sfc_rdma.h b/src/include/linux/net/sfc/sfc_rdma.h new file mode 100644 index 0000000..d13d76c --- /dev/null +++ b/src/include/linux/net/sfc/sfc_rdma.h @@ -0,0 +1,88 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * sfc_rdma.h - RDMA interface using the virtual bus + * + * Copyright 2019 Solarflare Communications Inc. + * Copyright 2019-2020 Xilinx Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#ifndef _SFC_RDMA_H +#define _SFC_RDMA_H + +#define SFC_RDMA_DEVNAME "rdma" + +/* RDMA client API */ +enum sfc_event_type { + SFC_EVENT_UNREGISTER, /**< Device is being removed */ + SFC_EVENT_RESET_DOWN, /**< Hardware is down for reset */ + SFC_EVENT_RESET_UP, /**< Hardware is back after reset */ +}; + +struct sfc_rdma_event { + enum sfc_event_type type; + bool value; /**< Link state */ +}; + +/** RDMA driver operations */ +struct sfc_rdma_drvops { + void (*handle_event)(struct auxiliary_device *auxdev, + const struct sfc_rdma_event *event); +}; + +struct sfc_rdma_client; + +/* RDMA server API */ +/** Device parameters */ +enum sfc_rdma_param { + SFC_RDMA_NETDEV, +}; + +struct sfc_rdma_param_value { + union { + struct net_device *net_dev; + }; +}; + +/** Remote Procedure Call to the firmware */ +struct sfc_rdma_rpc { + unsigned int cmd; + size_t inlen; + const u32 *inbuf; + size_t outlen; + size_t *outlen_actual; + u32 *outbuf; +}; + +/** + * RDMA device operations. + * + * @open: Clients need to open a device before using it. This will prevent it + * from being removed and provides a handle for further operations. + * @close: Closing a device unlocks it. 
+ * @get_param: Query a device parameter, e.g. the associated net_device.
+ * @fw_rpc: Forward a remote procedure call to the firmware.
+ */
+struct sfc_rdma_devops {
+	struct sfc_rdma_client *(*open)(struct auxiliary_device *auxdev,
+					const struct sfc_rdma_drvops *ops);
+	int (*close)(struct sfc_rdma_client *handle);
+
+	int (*get_param)(struct sfc_rdma_client *handle, enum sfc_rdma_param p,
+			 struct sfc_rdma_param_value *arg);
+	int (*fw_rpc)(struct sfc_rdma_client *handle, struct sfc_rdma_rpc *rpc);
+};
+
+/**
+ * RDMA device interface.
+ *
+ * @auxdev: The auxiliary bus device.
+ * @ops: Device API.
+ */
+struct sfc_rdma_device {
+	struct auxiliary_device auxdev;
+	const struct sfc_rdma_devops *ops;
+};
+
+#endif /* _SFC_RDMA_H */
diff --git a/src/include/trace/events/sfc.h b/src/include/trace/events/sfc.h
new file mode 100644
index 0000000..1e78e23
--- /dev/null
+++ b/src/include/trace/events/sfc.h
@@ -0,0 +1,193 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2013 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM sfc
+
+#if !defined(TRACE_EVENTS_SFC_H) || defined(TRACE_HEADER_MULTI_READ)
+#define TRACE_EVENTS_SFC_H
+
+#include <linux/tracepoint.h>
+#if !defined(EFX_USE_KCOMPAT) || defined(EFX_HAVE_NET_GRO_H)
+#include <net/gro.h>
+#endif
+
+TRACE_EVENT(sfc_receive,
+
+	TP_PROTO(const struct sk_buff *skb, bool gro, bool vlan_tagged, u16 vlan_tci),
+
+	TP_ARGS(skb, gro, vlan_tagged, vlan_tci),
+
+	TP_STRUCT__entry(
+		__string( dev_name, skb->dev->name )
+		__field( unsigned int, napi_id )
+		__field( u16, queue_mapping )
+		__field( const void *, skbaddr )
+		__field( bool, gro )
+		__field( bool, vlan_tagged )
+		__field( u16, vlan_proto )
+		__field( u16, vlan_tci )
+		__field( u16, protocol )
+		__field( u8, ip_summed )
+		__field( u32, rxhash )
+		__field( bool, l4_rxhash )
+		__field( unsigned int, len )
+		__field( unsigned int, data_len )
+		__field( unsigned int, truesize )
+		__field( bool, mac_header_valid)
+		__field( int, mac_header )
+		__field( unsigned char, nr_frags )
+		__field( u16, gso_size )
+		__field( u16, gso_type )
+	),
+
+	TP_fast_assign(
+		__assign_str(dev_name, skb->dev->name);
+#ifdef CONFIG_NET_LL_RX_POLL
+		__entry->napi_id = skb->napi_id;
+#else
+		__entry->napi_id = 0;
+#endif
+		__entry->queue_mapping = skb->queue_mapping;
+		__entry->skbaddr = skb;
+		__entry->gro = gro;
+#ifndef EFX_HAVE_VLAN_RX_PATH
+		/* Ignore vlan arguments and look at the skb, as the
+		 * vlan arguments are *not* being passed separately
+		 * to the kernel
+		 */
+		__entry->vlan_tagged = skb_vlan_tag_present(skb);
+#ifndef EFX_HAVE_OLD___VLAN_PUT_TAG
+		__entry->vlan_proto = ntohs(skb->vlan_proto);
+#else
+		__entry->vlan_proto = ETH_P_8021Q;
+#endif
+		__entry->vlan_tci = skb_vlan_tag_get(skb);
+#else
+		__entry->vlan_tagged = vlan_tagged;
+		__entry->vlan_proto = ETH_P_8021Q;
+		__entry->vlan_tci = vlan_tci;
+#endif
+		{
+			const struct ethhdr* eth;
+			unsigned int hlen;
+			unsigned int off;
+
+			off = skb_gro_offset(skb);
+			hlen = off + sizeof(*eth);
+			eth = skb_gro_header_hard((struct sk_buff *) skb, hlen) ?
+				skb_gro_header_slow((struct sk_buff *) skb, hlen, off) :
+				skb_gro_header_fast((struct sk_buff *) skb, off);
+			__entry->protocol = gro && eth ?
+ ntohs(eth->h_proto) : + ntohs(skb->protocol); + }; + __entry->ip_summed = skb->ip_summed; +#ifdef EFX_HAVE_SKB_HASH + __entry->rxhash = skb->hash; + __entry->l4_rxhash = skb->l4_hash; +#else + #ifdef EFX_HAVE_RXHASH_SUPPORT + __entry->rxhash = skb->rxhash; + #else + __entry->rxhash = 0; + #endif + #ifdef EFX_HAVE_L4_RXHASH + __entry->l4_rxhash = skb->l4_rxhash; + #else + __entry->l4_rxhash = false; + #endif +#endif + __entry->len = skb->len; + __entry->data_len = skb->data_len; + __entry->truesize = skb->truesize; + __entry->mac_header_valid = skb_mac_header_was_set(skb); + __entry->mac_header = skb_mac_header(skb) - skb->data; + __entry->nr_frags = skb_shinfo(skb)->nr_frags; + __entry->gso_size = skb_shinfo(skb)->gso_size; + __entry->gso_type = skb_shinfo(skb)->gso_type; + ), + + TP_printk("dev_name=%s napi_id=%#x queue_mapping=%u skbaddr=%p gro=%d vlan_tagged=%d vlan_proto=0x%04x vlan_tci=0x%04x protocol=0x%04x ip_summed=%d rxhash=0x%08x l4_rxhash=%d len=%u data_len=%u truesize=%u mac_header_valid=%d mac_header=%d nr_frags=%d gso_size=%d gso_type=%#x", + __get_str(dev_name), __entry->napi_id, __entry->queue_mapping, + __entry->skbaddr, __entry->gro, __entry->vlan_tagged, + __entry->vlan_proto, __entry->vlan_tci, __entry->protocol, + __entry->ip_summed, __entry->rxhash, __entry->l4_rxhash, + __entry->len, __entry->data_len, __entry->truesize, + __entry->mac_header_valid, __entry->mac_header, + __entry->nr_frags, __entry->gso_size, __entry->gso_type) +); + +TRACE_EVENT(sfc_transmit, + + TP_PROTO(const struct sk_buff *skb, const struct net_device *net_dev), + + TP_ARGS(skb, net_dev), + + TP_STRUCT__entry( + __string( dev_name, net_dev->name ) + __field( u16, queue_mapping ) + __field( const void *, skbaddr ) + __field( bool, vlan_tagged ) + __field( u16, vlan_proto ) + __field( u16, vlan_tci ) + __field( u16, protocol ) + __field( u8, ip_summed ) + __field( unsigned int, len ) + __field( unsigned int, data_len ) + __field( int, network_offset ) + __field( bool, transport_offset_valid) + __field( int, transport_offset) + __field( u8, tx_flags ) + __field( u16, gso_size ) + __field( u16, gso_segs ) + __field( u16, gso_type ) + ), + + TP_fast_assign( + __assign_str(dev_name, net_dev->name); + __entry->queue_mapping = skb->queue_mapping; + __entry->skbaddr = skb; + __entry->vlan_tagged = skb_vlan_tag_present(skb); +#ifndef EFX_HAVE_OLD___VLAN_PUT_TAG + __entry->vlan_proto = ntohs(skb->vlan_proto); +#else + __entry->vlan_proto = ETH_P_8021Q; +#endif + __entry->vlan_tci = skb_vlan_tag_get(skb); + __entry->protocol = ntohs(skb->protocol); + __entry->ip_summed = skb->ip_summed; + __entry->len = skb->len; + __entry->data_len = skb->data_len; + __entry->network_offset = skb_network_offset(skb); + __entry->transport_offset_valid = + skb_transport_header_was_set(skb); + __entry->transport_offset = skb_transport_offset(skb); +#if defined(EFX_HAVE_SKBTX_HW_TSTAMP) + __entry->tx_flags = skb_shinfo(skb)->tx_flags; +#else + __entry->tx_flags = skb_shinfo(skb)->tx_flags.flags; +#endif + __entry->gso_size = skb_shinfo(skb)->gso_size; + __entry->gso_segs = skb_shinfo(skb)->gso_segs; + __entry->gso_type = skb_shinfo(skb)->gso_type; + ), + + TP_printk("dev_name=%s queue_mapping=%u skbaddr=%p vlan_tagged=%d vlan_proto=0x%04x vlan_tci=0x%04x protocol=0x%04x ip_summed=%d len=%u data_len=%u network_offset=%d transport_offset_valid=%d transport_offset=%d tx_flags=%d gso_size=%d gso_segs=%d gso_type=%#x", + __get_str(dev_name), __entry->queue_mapping, __entry->skbaddr, + __entry->vlan_tagged, __entry->vlan_proto, 
__entry->vlan_tci,
+		  __entry->protocol, __entry->ip_summed, __entry->len, __entry->data_len,
+		  __entry->network_offset, __entry->transport_offset_valid,
+		  __entry->transport_offset, __entry->tx_flags,
+		  __entry->gso_size, __entry->gso_segs, __entry->gso_type)
+);
+
+#endif /* TRACE_EVENTS_SFC_H */
+
+#include <trace/define_trace.h>
diff --git a/src/scripts/Makefile.common b/src/scripts/Makefile.common
new file mode 100644
index 0000000..733d07b
--- /dev/null
+++ b/src/scripts/Makefile.common
@@ -0,0 +1,144 @@
+# SPDX-License-Identifier: GPL-2.0
+################################################################################
+#
+# Driver for Solarflare and Xilinx network controllers and boards
+# Copyright 2019 Solarflare Communications Inc.
+# Copyright 2019-2020 Xilinx Inc.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 as published
+# by the Free Software Foundation, incorporated herein by reference.
+#
+################################################################################
+#
+# Note: you MUST set TOPDIR before including this file.
+
+# dkms may set ${arch} to aarch64
+ifeq ($(ARCH),aarch64)
+ARCH:=arm64
+endif
+
+ifdef KERNELRELEASE
+# kbuild part of makefile
+#
+ifndef EFX_UPSTREAM
+# Compiler flags
+EXTRA_CFLAGS += -Wall -Wno-deprecated-declarations -DEFX_USE_KCOMPAT=1
+EXTRA_CFLAGS += -I$(TOPDIR)/include
+ifdef EFX_FOR_UPSTREAM
+EXTRA_CFLAGS += -include config.h
+else
+EXTRA_CFLAGS += -DEFX_NOT_UPSTREAM=1
+endif
+ifdef EFX_C_MODEL
+EXTRA_CFLAGS += -DEFX_C_MODEL
+endif
+# Debugging-enabled builds
+ifndef NDEBUG
+EXTRA_CFLAGS += -DDEBUG -g
+endif
+
+endif # !EFX_UPSTREAM
+
+# Define filechk if necessary
+ifndef filechk
+define filechk
+	$(Q)set -e; \
+	$(if Q,echo ' CHK $@';) \
+	mkdir -p $(dir $@); \
+	$(filechk_$(1)) < $< > $@.tmp; \
+	if [ -r $@ ] && cmp -s $@ $@.tmp; then \
+		rm -f $@.tmp; \
+	else \
+		$(if Q,echo ' UPD $@';) \
+		mv -f $@.tmp $@; \
+	fi
+endef
+endif
+
+ifndef EFX_UPSTREAM
+# autocompat.h depends on the kernel compiled against.
+# However, there is nothing stopping the user compiling on multiple
+# machines in the same directory. The .kpath target provides a simple
+# dependency check for this.
+#
+# Module(s).symvers also depends on the kernel compiled against, but
+# can simply be deleted here. However mmake does some more complicated
+# management of Module.symvers and does not allow changing the kernel
+# path, so don't touch it when invoked from an mmake tree.
+$(obj)/.kpath: FORCE
+	@if ! [ -f $@ ] || [ $$(cat $@) != $(objtree) ]; then \
+		echo $(objtree) >$@; \
+		$(if $(MMAKE_IN_KBUILD),,rm -f $(obj)/*.symvers;) \
+	fi
+
+ifdef KBUILD_SRC
+define filechk_autocompat.h
+	$(src)/kernel_compat.sh -k $(KBUILD_SRC) -o "$(CURDIR)" $(if $(filter 1,$(V)),-v,-q)
+endef
+else
+define filechk_autocompat.h
+	$(src)/kernel_compat.sh -k "$(CURDIR)" -o "$(CURDIR)" $(if $(filter 1,$(V)),-v,-q)
+endef
+endif
+
+$(obj)/autocompat.h: $(obj)/.kpath $(src)/kernel_compat.sh $(TOPDIR)/scripts/kernel_compat_funcs.sh FORCE
+	+$(call filechk,autocompat.h)
+
+endif # !EFX_UPSTREAM
+else
+# normal makefile
+#
+ifndef EFX_UPSTREAM
+# Get kernel version and source directory. Either may be specified and
+# we work out the other automatically. If neither is specified then we
+# assume the current kernel version.
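+# For example (hypothetical paths, not part of this makefile), a build
+# against an installed kernel tree might invoke:
+#   make KDIR=/lib/modules/5.3.16/build modules
+# in which case KVER is derived from that tree's utsrelease.h below.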
+# Compat: KDIR used to be called KPATH
+ifdef KPATH
+KDIR ?= $(KPATH)
+ifneq ($(MAKECMDGOALS),export-srpm)
+ifndef KVER
+KVER := $(shell sed -r 's/^\#define UTS_RELEASE "(.*)"/\1/; t; d' $(KDIR)/include/generated/utsrelease.h $(KDIR)/include/linux/utsrelease.h $(KDIR)/include/linux/version.h 2>/dev/null)
+ifeq ($(KVER),)
+$(error Failed to find kernel version for $(KDIR))
+endif
+endif # !KVER
+endif # !export-srpm
+endif # KPATH
+
+ifndef KVER
+KVER := $(shell uname -r)
+endif
+KDIR ?= /lib/modules/$(KVER)/build
+export KDIR KVER
+
+# Special build flags
+ifeq ($(origin CC),default)
+ifneq ($(CROSS_COMPILE),)
+CC=$(CROSS_COMPILE)gcc
+endif
+else
+ifneq ($(CC),)
+EXTRA_MAKEFLAGS += CC="$(CC)"
+endif
+endif # default CC
+ifneq ($(C),)
+EXTRA_MAKEFLAGS += C="$(C)"
+endif
+ifdef KMP_RELEASE
+EXTRA_MAKEFLAGS += KMP_RELEASE=1
+endif
+export EXTRA_MAKEFLAGS
+
+# Export Support
+EXPORT_CMD = $(TOPDIR)/scripts/export.sh
+export EXPORT_CMD
+
+# Install support
+ifeq ($(INSTALL_MOD_DIR),)
+INSTALL_MOD_DIR := updates
+endif
+
+endif # !EFX_UPSTREAM
+
+endif
diff --git a/src/scripts/kernel_compat_funcs.sh b/src/scripts/kernel_compat_funcs.sh
new file mode 100644
index 0000000..0739606
--- /dev/null
+++ b/src/scripts/kernel_compat_funcs.sh
@@ -0,0 +1,649 @@
+######################################################################
+#
+# Driver for Solarflare network controllers and boards
+# Copyright 2019 Solarflare Communications Inc.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 as published
+# by the Free Software Foundation, incorporated herein by reference.
+#
+######################################################################
+
+err () { echo >&2 "$*"; }
+log () { err "$me: $*"; }
+vlog () { $efx_verbose && err "$me: $*"; }
+fail () { log "$*"; exit 1; }
+try () { "$@" || fail "'$*' failed"; }
+vmsg () { $efx_quiet || log "$@"; }
+
+function usage()
+{
+	err
+	err "usage:"
+	err "  $me [options] <symbol1> <symbol2>..."
+	err
+	err "description:"
+	err "  Produce a list of kernel compatibility macros to match the "
+	err "  kernel_compat.c and kernel_compat.h files"
+	err
+	err "options:"
+	err "  -k KPATH   -- Specify the path to the kernel build source tree"
+	err "                defaults to /lib/modules/VERSION/build"
+	err "  -o PATH    -- Specify the output directory, if any"
+	err "                defaults to KPATH"
+	err "  -r VERSION -- Specify the kernel version to test"
+	err '                defaults to `uname -r`'
+	err "  -a ARCH    -- Set the architecture to ARCH"
+	err "                defaults to `uname -m`"
+	err "  -m MAP     -- Specify a System map for the build kernel."
+	err "                By default will look in KPATH and /boot"
+	err "  -q         -- Quieten the checks"
+	err "  -v         -- Verbose output"
+	err "  -s         -- Symbol list to use"
+	err "  <symbol1> <symbol2>... -- Symbols to evaluate."
+	err "                By default every symbol is evaluated"
+
+}
+
+######################################################################
+# Generic methods for standard symbol types
+
+# Look for up to 3 numeric components separated by dots and stop when
+# we find anything that doesn't match this. Convert to a number like
+# the LINUX_VERSION_CODE macro does.
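+# For example, "5.3.16-foo" yields 5*65536 + 3*256 + 16 = 328464, the
+# same value as the kernel's KERNEL_VERSION(5, 3, 16).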
+function string_to_version_code +{ + local ver="$1" + local code=0 + local place=65536 + local num + + while [ -n "$ver" ]; do + # Look for numeric component; if none found then we're done; + # otherwise add to the code + num=${ver%%[^0-9]*} + test -n "$num" || break + code=$((code + $num * $place)) + + # If this was the last component (place value = 1) then we're done; + # otherwise update place value + test $place -gt 1 || break + place=$((place / 256)) + + # Move past numeric component and following dot (if present) + ver=${ver#$num} + ver=${ver#.} + done + + echo $code +} + +# Test cases for string_to_version_code: +# test $(string_to_version_code 1.2.3) = $((1 * 65536 + 2 * 256 + 3)) +# test $(string_to_version_code 12.34.56) = $((12 * 65536 + 34 * 256 + 56)) +# test $(string_to_version_code 12.34.56foo) = $((12 * 65536 + 34 * 256 + 56)) +# test $(string_to_version_code 12.34.56.78) = $((12 * 65536 + 34 * 256 + 56)) +# test $(string_to_version_code 12.34.56.foo) = $((12 * 65536 + 34 * 256 + 56)) +# test $(string_to_version_code 12.34.56-foo) = $((12 * 65536 + 34 * 256 + 56)) +# test $(string_to_version_code 12.34) = $((12 * 65536 + 34 * 256)) +# test $(string_to_version_code 12.34.0) = $((12 * 65536 + 34 * 256)) +# test $(string_to_version_code 12.34foo) = $((12 * 65536 + 34 * 256)) +# test $(string_to_version_code 12.34-56) = $((12 * 65536 + 34 * 256)) +# test $(string_to_version_code 12.34.foo) = $((12 * 65536 + 34 * 256)) +# test $(string_to_version_code 12.34-foo) = $((12 * 65536 + 34 * 256)) + +function do_kver() +{ + shift 2; + local op="$1" + local right_ver="$2" + + local left=$(string_to_version_code "$KVER") + local right=$(string_to_version_code "$right_ver") + + local result=$((1 - ($left $op $right))) + local msg="$KVER $op $right_ver == $left $op $right == " + if [ $result = 0 ]; then + msg="$msg true" + else + msg="$msg false" + fi + vmsg "$msg" + return $result +} + +function do_symbol() { shift 2; test_symbol "$@"; } +function do_nsymbol() { shift 2; ! test_symbol "$@"; } +function do_symtype() { shift 2; defer_test_symtype pos "$@"; } +function do_nsymtype() { shift 2; defer_test_symtype neg "$@"; } +function do_member() { shift 2; defer_test_memtype pos "$@" void; } +function do_nmember() { shift 2; defer_test_memtype neg "$@" void; } +function do_memtype() { shift 2; defer_test_memtype pos "$@"; } +function do_nmemtype() { shift 2; defer_test_memtype neg "$@"; } +function do_bitfield() { shift 2; defer_test_bitfield pos "$@"; } +function do_nbitfield() { shift 2; defer_test_bitfield neg "$@"; } +function do_export() +{ + local sym=$3 + shift 3 + + # Only scan header files for the symbol + test_symbol $sym $(echo "$@" | sed -r 's/ [^ ]+\.c/ /g') || return + test_export $sym "$@" +} +function do_nexport() { ! do_export "$@"; } +function do_file() +{ + for file in "$@"; do + if [ -f $KBUILD_SRC/$file ]; then + return 0 + fi + done + return 1 +} +function do_nfile() { ! do_file "$@"; } + +function do_custom() { do_$1; } + +###################################################################### +# Implementation of kernel feature checking + +# Special return value for deferred test +DEFERRED=42 + +function atexit_cleanup() +{ + rc=$? 
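+	# Preserve the original exit status across the cleanup below.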
+ [ -n "$rmfiles" ] && rm -rf $rmfiles + return $rc +} + +function strip_comments() +{ + local file=$1 + + cat $1 | sed -e ' +/\/\*/!b +:a +/\*\//!{ +N +ba +} +s:/\*.*\*/::' | sed -e '/^#include/d' +} + +function test_symbol() +{ + local symbol=$1 + shift + local file + local prefix + local prefix_list + + for file in "$@"; do + # For speed, lets just grep through the file. The symbol may + # be of any of these forms: + # #define SYMBOL + # typedef void (SYMBOL)(void) + # extern void SYMBOL(void) + # void (*SYMBOL)(void) + # enum { SYMBOL, } void + # + # Since 3.7 headers can be in both $KBUILD_SRC/include + # or $KBUILD_SRC/include/uapi so check both + # If the file contains "include/linux" then build set of + # prefixes + + prefix=$(dirname $file) + file=$(basename $file) + if [ "$prefix" == "include/linux" ]; then + prefix_list="include/linux/ include/uapi/linux/" + else + prefix_list="$prefix/" + fi + + for prefix in $prefix_list; do + if [ $efx_verbose = true ]; then + echo >&2 "Looking for '$symbol' in '$KBUILD_SRC/$prefix$file'" + fi + [ -f "$KBUILD_SRC/$prefix$file" ] && \ + strip_comments $KBUILD_SRC/$prefix$file | \ + egrep -w "$symbol" >/dev/null && \ + return 0 + done + done + return 1 +} + +function defer_test_symtype() +{ + local sense=$1 + local symbol=$2 + local file=$3 + shift 3 + local type="$*" + + if [ ${file:0:8} != "include/" ]; then + fail "defer_test_symtype() can work in include/ - request was '$file'" + fi + + defer_test_compile $sense " +#include +#include +#include +#include <${file:8}> + +#include \"_autocompat.h\" + +#pragma GCC diagnostic ignored \"-Wmissing-format-attribute\" +typedef __typeof($type) kernel_compat_test_t; +kernel_compat_test_t *kernel_compat_dummy = &$symbol; +" +} + +function defer_test_memtype() +{ + local sense=$1 + local aggtype="${2/_/ }" + local memname=$3 + local file=$4 + shift 4 + local memtype="$*" + + if [ ${file:0:8} != "include/" ]; then + fail "defer_test_symtype() can work in include/ - request was '$file'" + fi + + defer_test_compile $sense " +#include <${file:8}> +$aggtype kernel_compat_dummy_1; +__typeof($memtype) *kernel_compat_dummy_2 = &kernel_compat_dummy_1.$memname; +" +} + +function defer_test_bitfield() +{ + local sense=$1 + local aggtype="${2/_/ }" + local memname=$3 + local file=$4 + shift 4 + + if [ ${file:0:8} != "include/" ]; then + fail "defer_test_bitfield() only works in include/ - request was '$file'" + fi + + defer_test_compile $sense " +#include <${file:8}> +$aggtype kernel_compat_dummy_1; +unsigned long test(void); +unsigned long test(void) { + return kernel_compat_dummy_1.$memname; +} +" +} + +function test_inline_symbol() +{ + local symbol=$1 + local file=$2 + local t=$(mktemp) + rmfiles="$rmfiles $t" + + [ -f "$KBUILD_SRC/$file" ] || return + + # TODO: This isn't very satisfactory. Alternative options are: + # 1. Come up with a clever sed version + # 2. Do a test compile, and look for an undefined symbol (extern) + + # look for the inline..symbol. This is complicated since the inline + # and the symbol may be on different lines. + strip_comments $KBUILD_SRC/$file | \ + egrep -m 1 -B 1 '(^|[,\* \(])'"$symbol"'($|[,; \(\)])' > $t + [ $? = 0 ] || return $? 
+
+	# there is either an inline on the final line, or an inline and
+	# no semicolon on the previous line
+	head -1 $t | egrep -q 'inline[^;]*$' && return
+	tail -1 $t | egrep -q 'inline' && return
+
+	return 1
+}
+
+function test_export()
+{
+	local symbol=$1
+	shift
+	local files="$@"
+	local file match
+
+	# Looks for the given export symbol $symbol, defined in $file
+	# Since this symbol is exported, we can look for it in:
+	#     1. $KBUILD_MODULE_SYMVERS
+	#     2. If the full source is installed, look in there.
+	#        May give a false positive if the export is conditional.
+	#     3. The MAP file if present. May give a false positive
+	#        because it lists all extern (not only exported) symbols.
+	if [ -f $KBUILD_MODULE_SYMVERS ]; then
+		if [ $efx_verbose = true ]; then
+			echo >&2 "Looking for export of $symbol in $KBUILD_MODULE_SYMVERS"
+		fi
+		[ -n "$(awk '/0x[0-9a-f]+[\t ]+'$symbol'[\t ]+/' $KBUILD_MODULE_SYMVERS)" ]
+	else
+		for file in $files; do
+			if [ $efx_verbose = true ]; then
+				echo >&2 "Looking for export of $symbol in $KBUILD_SRC/$file"
+			fi
+			if [ -f $KBUILD_SRC/$file ]; then
+				egrep -q 'EXPORT_(PER_CPU)?SYMBOL(_GPL)?\('"$symbol"'\)' $KBUILD_SRC/$file && return
+			fi
+		done
+		if [ -n "$MAP" ]; then
+			if [ $efx_verbose = true ]; then
+				echo >&2 "Looking for export of $symbol in $MAP"
+			fi
+			egrep -q "[A-Z] $symbol\$" $MAP && return
+		fi
+		return 1
+	fi
+}
+
+function test_compile()
+{
+	local source="$1"
+	local rc
+	local dir=$(mktemp -d)
+	echo "$source" > $dir/test.c
+	cat > $dir/Makefile <<EOF
+$makefile_prefix
+obj-m := test.o
+EOF
+	make -C $KPATH $EXTRA_MAKEFLAGS O=$KOUT ARCH=$ARCH M=$dir >$dir/log 2>&1
+	rc=$?
+
+	if [ $efx_verbose = true ]; then
+		echo >&2 "tried to compile:"
+		sed >&2 's/^/ /' $dir/test.c
+		echo >&2 "compiler output:"
+		sed >&2 's/^/ /' $dir/log
+	fi
+
+	rm -rf $dir
+	return $rc
+}
+
+function defer_test_compile()
+{
+	local sense=$1
+	local source="$2"
+	echo "$source" > "$compile_dir/test_$key.c"
+	echo "obj-m += test_$key.o" >> "$compile_dir/Makefile"
+	eval deferred_$sense=\"\$deferred_$sense $key\"
+	return $DEFERRED
+}
+
+function read_make_variables()
+{
+	local regexp=''
+	local split='('
+	local variable
+	local variables="$@"
+	local dir=$(mktemp -d)
+
+	for variable in $variables; do
+		echo "\$(warning $variable=\$($variable))" >> $dir/Makefile
+		regexp=$regexp$split$variable
+		split='|'
+	done
+	make -C $KPATH $EXTRA_MAKEFLAGS O=$KOUT ARCH=$ARCH M=$dir 2>&1 >/dev/null | sed -r "s#$dir/Makefile:.*: ($regexp)=.*$)#\1#; t; d"
+	rc=$?
+
+	rm -rf $dir
+	return $rc
+}
+
+function read_define()
+{
+	local variable="$1"
+	local file="$2"
+	cat $KOUT/$2 | sed -r 's/#define '"$variable"' (.*)/\1/; t; d'
+}
+
+efx_quiet=false
+efx_verbose=false
+
+KVER=
+KPATH=
+KOUT=
+FILTER=
+MAP=
+EXTRA_MAKEFLAGS=
+kompat_symbols=
+
+# These variables from an outer build will interfere with our test builds
+unset KBUILD_EXTMOD
+unset KBUILD_SRC
+unset M
+unset TOPDIR
+unset sub_make_done
+
+# Filter out make options except for job-server (parallel make)
+old_MAKEFLAGS="${MAKEFLAGS:-}"
+MAKEFLAGS=
+next=
+for word in $old_MAKEFLAGS; do
+	case "$word" in
+	'-j' | '-l')
+		export MAKEFLAGS="$MAKEFLAGS $word"
+		next=1
+		;;
+	'-j'* | '-l'*)
+		export MAKEFLAGS="$MAKEFLAGS $word"
+		;;
+	'--jobserver-fds'* | '--jobs='* | '--jobs' | '--load-average'*)
+		export MAKEFLAGS="$MAKEFLAGS $word"
+		;;
+	*)
+		test -n "$next" && export MAKEFLAGS="$MAKEFLAGS $word"
+		next=
+		;;
+	esac
+done
+
+# Clean-up temporary files when we exit.
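+# (atexit_cleanup above removes everything recorded in $rmfiles, including
+# the deferred-compile scratch directory registered later.)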
+rmfiles=
+trap atexit_cleanup EXIT
+
+while [ $# -gt 0 ]; do
+	case "$1" in
+	-r) KVER=$2; shift;;
+	-k) KPATH=$2; shift;;
+	-o) KOUT=$2; shift;;
+	-q) efx_quiet=true;;
+	-m) MAP=$2; shift;;
+	-v) efx_verbose=true;;
+	-s) kompat_symbols="$2"; shift;;
+	-a) ARCH=$2; shift;;
+	-*) usage; exit -1;;
+	*) [ -z $FILTER ] && FILTER=$1 || FILTER="$FILTER|$1";;
+	esac
+	shift
+done
+
+vmsg "MAKEFLAGS := $MAKEFLAGS"
+
+# resolve KVER and KPATH
+[ -z "$KVER" ] && [ -z "$KPATH" ] && KVER=`uname -r`
+[ -z "$KPATH" ] && KPATH=/lib/modules/$KVER/build
+[ -z "$KOUT" ] && KOUT="$KPATH"
+
+# Need to set CC explicitly on the kernel make line
+# Needs to override top-level kernel Makefile setting
+# Somehow this script does the wrong thing when a space is used in $CC,
+# particularly when ccache is used, so disable that.
+if [ -n "${CC:-}" ]; then
+	CC=${CC/ccache /}
+	CC=${CC/ /}
+	EXTRA_MAKEFLAGS=CC=${CC}
+fi
+
+if [ -n "${CROSS_COMPILE:-}" ]; then
+	EXTRA_MAKEFLAGS="${EXTRA_MAKEFLAGS} CROSS_COMPILE=${CROSS_COMPILE} ARCH=${ARCH}"
+fi
+
+# Select the right warnings - complicated by working out which options work
+makefile_prefix='
+ifndef try-run
+try-run = $(shell set -e; \
+	TMP="$(obj)/.$$$$.tmp"; \
+	TMPO="$(obj)/.$$$$.o"; \
+	if ($(1)) >/dev/null 2>&1; \
+	then echo "$(2)"; \
+	else echo "$(3)"; \
+	fi; \
+	rm -f "$$TMP" "$$TMPO")
+endif
+ifndef cc-disable-warning
+cc-disable-warning = $(call try-run,\
+	$(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) -W$(strip $(1)) -c -xc /dev/null -o "$$TMP",-Wno-$(strip $(1)))
+endif
+EXTRA_CFLAGS = -Werror $(call cc-disable-warning, unused-but-set-variable)
+'
+
+# Ensure it looks like a build tree and we can build a module
+[ -d "$KPATH" ] || fail "$KPATH is not a directory"
+[ -f "$KPATH/Makefile" ] || fail "$KPATH/Makefile is not present"
+test_compile "#include <linux/module.h>
+MODULE_LICENSE(\"GPL\");" || \
+	fail "Kernel build tree is unable to build modules"
+
+# strip the KVER out of UTS_RELEASE, and compare to the specified KVER
+_KVER=
+for F in include/generated/utsrelease.h include/linux/utsrelease.h include/linux/version.h; do
+	[ -f $KOUT/$F ] && _KVER="$(eval echo $(read_define UTS_RELEASE $F))" && break
+done
+[ -n "$_KVER" ] || fail "Unable to identify kernel version from $KOUT"
+if [ -n "$KVER" ]; then
+	[ "$KVER" = "$_KVER" ] || fail "$KOUT kernel version $_KVER does not match $KVER"
+fi
+KVER=$_KVER
+unset _KVER
+
+vmsg "KVER   := $KVER"
+vmsg "KPATH  := $KPATH"
+
+# Read the following variables from the Makefile:
+#   KBUILD_SRC:            Root of source tree (not the same as KPATH under SUSE)
+#   ARCH:                  Target architecture name
+#   SRCARCH:               Target architecture directory name (2.6.24 onward)
+#   CONFIG_X86_{32,64}:    Work around ARCH = x86 madness
+#   CONFIG_PTP_1588_CLOCK: PTP clock support
+
+[ -n "$ARCH" ] && export ARCH
+eval $(read_make_variables KBUILD_SRC ARCH SRCARCH CONFIG_X86_32 CONFIG_X86_64 CONFIG_PTP_1588_CLOCK abs_srctree)
+
+# Define:
+#   KBUILD_SRC:            Was renamed into abs_srctree in linux-5.3
+#   KBUILD_SRC:            If not already set, same as KPATH
+#   SRCARCH:               If not already set, same as ARCH
+#   WORDSUFFIX:            Suffix added to some filenames by the i386/amd64 merge
+[ -n "${KBUILD_SRC:-}" ] || KBUILD_SRC=${abs_srctree:-}
+[ -n "${KBUILD_SRC:-}" ] || KBUILD_SRC=$KPATH
+[ -n "${SRCARCH:-}" ] || SRCARCH=$ARCH
+if [ "$ARCH" = "i386" ] || [ "${CONFIG_X86_32:-}" = "y" ]; then
+	WORDSUFFIX=_32
+elif [ "$ARCH" = "x86_64" ] || [ "${CONFIG_X86_64:-}" = "y" ]; then
+	WORDSUFFIX=_64
+else
+	WORDSUFFIX=
+fi
+[ -f "$KBUILD_SRC/arch/$SRCARCH/Makefile" ] || fail "$KBUILD_SRC doesn't directly build $SRCARCH"
+
+vmsg "KBUILD_SRC := $KBUILD_SRC" +vmsg "SRCARCH := $SRCARCH" +vmsg "WORDSUFFIX := $WORDSUFFIX" + +if [ -f "$KPATH/Module.symvers" ] ; then + KBUILD_MODULE_SYMVERS=$KPATH/Module.symvers +elif [ -n "${O:-}" -a -f "${O:-}/Module.symvers" ] ; then + KBUILD_MODULE_SYMVERS="$O/Module.symvers" +elif [ -f "$PWD/Module.symvers" ] ; then + KBUILD_MODULE_SYMVERS="$PWD/Module.symvers" +else + KBUILD_MODULE_SYMVERS="" +fi +vmsg "KBUILD_MODULE_SYMVERS := $KBUILD_MODULE_SYMVERS" + +# try and find the System map [used by test_export] +if [ -z "$MAP" ]; then + if [ -f /boot/System.map-$KVER ]; then + MAP=/boot/System.map-$KVER + elif [ $KVER = "`uname -r`" ] && [ -f /proc/kallsyms ]; then + MAP=/proc/kallsyms + elif [ -f $KBUILD_MODULE_SYMVERS ]; then + # can use this to find external symbols only + true + else + log "!!Unable to find a valid System map. Export symbol checks may not work" + fi +fi + +if [ "$kompat_symbols" == "" ]; then + kompat_symbols="$(generate_kompat_symbols)" +fi + +# filter the available symbols +if [ -n "$FILTER" ]; then + kompat_symbols="$(echo "$kompat_symbols" | egrep "^($FILTER):")" +fi + +compile_dir="$(mktemp -d)" +rmfiles="$rmfiles $compile_dir" +echo >"$compile_dir/Makefile" "$makefile_prefix" +echo >"$compile_dir/_autocompat.h" +deferred_pos= +deferred_neg= + +# Note that for deferred tests this runs after the Makefile has run all tests +function do_one_symbol() { + local key=$1 + shift + # NB work is in the following if clause "do_${method}" + if "$@"; then + echo "#define $key yes" + # So that future compile tests can consume this + echo "#define $key yes" >> "${compile_dir}/_autocompat.h" + elif [ $? -ne $DEFERRED ]; then + echo "// #define $key" + fi +} + +# process each symbol +for symbol in $kompat_symbols; do + # split symbol at colons; disable globbing (pathname expansion) + set -o noglob + IFS=: + set -- $symbol + unset IFS + set +o noglob + + key="$1" + method="$2" + do_one_symbol $key do_${method} "$@" +done + +# Run the deferred compile tests +eval make -C $KPATH -k $EXTRA_MAKEFLAGS O="$KOUT" M="$compile_dir" \ + >"$compile_dir/log" 2>&1 \ + || true +if [ $efx_verbose = true ]; then + echo >&2 "compiler output:" + sed >&2 's/^/ /' "$compile_dir/log" +fi +for key in $deferred_pos; do + # Use existence of object file as evidence of compile without warning/errors + do_one_symbol $key test -f "$compile_dir/test_$key.o" +done +for key in $deferred_neg; do + do_one_symbol $key test ! -f "$compile_dir/test_$key.o" +done diff --git a/udev/75-xilinx.rules b/udev/75-xilinx.rules new file mode 100644 index 0000000..57f5abf --- /dev/null +++ b/udev/75-xilinx.rules @@ -0,0 +1,22 @@ +# +# Xilinx SmartNIC net representor device names +# +SUBSYSTEM!="net", GOTO="xlnx_net_rep_exit" +ACTION!="add", GOTO="xlnx_net_rep_exit" + +# This comparison will fail when there is no PCI vendor ID defined. +ENV{ID_VENDOR_ID}!="", GOTO="xlnx_net_rep_with_pci_id" + +# This will match representors and produce names for them. +ATTRS{phys_port_name}!="", ATTRS{phys_switch_id}!="", PROGRAM="/usr/lib/sfc/produce-representor-name.sh %k $attr{phys_port_name} $attr{phys_switch_id}", NAME="%c" +GOTO="xlnx_net_rep_exit" + +LABEL="xlnx_net_rep_with_pci_id" + +# Non-Xilinx interfaces are ignored. +ENV{ID_VENDOR_ID}!="0x10ee", GOTO="xlnx_net_rep_exit" + +# This will match PFs and VFs. 
+# This will match PFs and VFs.
+ATTRS{phys_port_id}!="", RUN+="/usr/lib/sfc/trigger-representors.sh %k $attr{phys_port_id}"
+
+LABEL="xlnx_net_rep_exit"
diff --git a/udev/produce-representor-name.sh b/udev/produce-representor-name.sh
new file mode 100755
index 0000000..234f70b
--- /dev/null
+++ b/udev/produce-representor-name.sh
@@ -0,0 +1,407 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0-only
+
+# Copyright 2021 Xilinx Inc.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 as published
+# by the Free Software Foundation, incorporated herein by reference.
+
+# $1 - current name of representor
+# $2 - phys_port_name of representor
+# $3 - phys_switch_id of representor
+orig_name="$1"
+phys_port_name="$2"
+phys_switch_id="$3"
+
+# By default, no renaming will happen.
+new_name="$orig_name"
+
+# This script's standard output is meant to be read by udev.
+# The contents written to stdout will become the name of the interface.
+
+# This file normally goes in the /usr/lib/sfc directory.
+
+LOCKDIR=/tmp/sfc_interface_renaming.lock
+
+# Change to a proper file name in order to enable debug logging.
+#LOGFILE=/tmp/sfc_produce_representor_name.log
+LOGFILE=/dev/null
+
+# Don't forget to change udev's log level via udevadm when changing this line.
+# Change to anything other than an empty value to enable logging to udev.
+LOG_TO_STDERR=
+
+# How long to sleep between interface listings. Value is in seconds.
+# Do not set to a value lower than 0.001 seconds (1 millisecond).
+INTERFACE_ENUMERATION_SLEEP_DURATION=0.1
+# Maximum amount of time in seconds to wait for the interface to show up,
+# plus one second due to lack of precision.
+INTERFACE_ENUMERATION_TIME_LIMIT=4
+# This is required because sysfs optimizations sometimes cause directories and
+# files (attributes) to be invisible to CPUs other than the ones which created
+# them in sysfs for a few seconds after creation.
+INTERFACE_ENUMERATION_START=0
+
+dbgPrint() {
+    echo "$$ $orig_name: $*" >> $LOGFILE
+    [ -n "$LOG_TO_STDERR" ] && echo "$$ $orig_name: $*" 1>&2
+}
+
+while ! mkdir "$LOCKDIR" > /dev/null 2> /dev/null; do
+    sleep 0.1
+done
+
+trap 'rm -rf "$LOCKDIR"' EXIT KILL TERM INT
+
+dbgPrint ==================================
+dbgPrint $(date)
+dbgPrint phys_port_name=$phys_port_name
+dbgPrint phys_switch_id=$phys_switch_id
+
+regexPFREP="^p([0-9]+)if([0-9]+)pf([0-9]+)$"
+regexVFREP="^p([0-9]+)pf([0-9]+)vf([0-9]+)$"
+regexSOCVFREP="^p([0-9]+)if([0-9]+)pf([0-9]+)vf([0-9]+)$"
+
+regexPFNAME0="^eth([0-9]+)$"
+regexPFNAME1="^ens([0-9]+)f([0-9]+)(n.+)?$"
+regexPFNAME2="^enp([0-9]+)s([0-9]+)f([0-9]+)(n.+)?$"
+regexPFNAME3="^enP([0-9]+)p([0-9]+)s([0-9]+)f([0-9]+)(n.+)?$"
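For orientation, here are names that satisfy these patterns; the examples are constructed from the regexes themselves, not taken from the patch:

    # phys_port_name formats recognised above (illustrative matches):
    #   p0if1pf0    -> PF representor     (regexPFREP: port 0, if 1, pf 0)
    #   p0pf0vf3    -> VF representor     (regexVFREP: port 0, pf 0, vf 3)
    #   p0if0pf0vf3 -> SoC VF representor (regexSOCVFREP)
    # PF interface-name formats recognised above:
    #   eth0        -> regexPFNAME0 (kernel default; not yet renamed by udev)
    #   ens7f0n1    -> regexPFNAME1 (slot 7, function 0, port name n1)
    #   enp2s0f1    -> regexPFNAME2 (bus 2, slot 0, function 1)
    #   enP1p2s0f0  -> regexPFNAME3 (domain 1, bus 2, slot 0, function 0)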
+
+# $1 - directory of the PF in sysfs
+# $2 - representor if number
+# $3 - representor pf number
+getPfRepName() {
+    local pf="$1"
+    local name="$(basename "$pf")"
+    local rep_if="$2"
+    local rep_pf="$3"
+
+    dbgPrint "Basename of $pf is $name"
+    dbgPrint "if $rep_if; pf $rep_pf"
+
+    if [[ $name =~ $regexPFNAME0 ]]; then
+        dbgPrint "Matching PF did not get a proper name yet."
+
+        # No name is returned in this situation so the search for a matching PF continues.
+        # This is useful, because this interface might get a proper name later.
+        return 2
+    elif [[ $name =~ $regexPFNAME1 ]]; then
+        local slotNumber="${BASH_REMATCH[1]}"
+        local portName="${BASH_REMATCH[3]}"
+
+        dbgPrint "PF name match #1: $slotNumber ${BASH_REMATCH[2]} $portName"
+
+        if [ "$rep_if" == "0" ]; then
+            # This represents the function on the *current* side
+            echo -n "ens${slotNumber}f${rep_pf}${portName}rep"
+            dbgPrint "Returning ens${slotNumber}f${rep_pf}${portName}rep"
+            return 0
+        else
+            # This represents the function on the *other* side
+            echo -n "ens${slotNumber}f${rep_pf}${portName}soc"
+            dbgPrint "Returning ens${slotNumber}f${rep_pf}${portName}soc"
+            return 0
+        fi
+    elif [[ $name =~ $regexPFNAME2 ]]; then
+        local portNumber="${BASH_REMATCH[1]}"
+        local slotNumber="${BASH_REMATCH[2]}"
+        local portName="${BASH_REMATCH[4]}"
+
+        dbgPrint "PF name match #2: $portNumber $slotNumber ${BASH_REMATCH[3]} $portName"
+
+        if [ "$rep_if" == "0" ]; then
+            echo -n "enp${portNumber}s${slotNumber}f${rep_pf}${portName}rep"
+            dbgPrint "Returning enp${portNumber}s${slotNumber}f${rep_pf}${portName}rep"
+            return 0
+        else
+            echo -n "enp${portNumber}s${slotNumber}f${rep_pf}${portName}soc"
+            dbgPrint "Returning enp${portNumber}s${slotNumber}f${rep_pf}${portName}soc"
+            return 0
+        fi
+    elif [[ $name =~ $regexPFNAME3 ]]; then
+        local domainNumber="${BASH_REMATCH[1]}"
+        local portNumber="${BASH_REMATCH[2]}"
+        local slotNumber="${BASH_REMATCH[3]}"
+        local portName="${BASH_REMATCH[5]}"
+
+        dbgPrint "PF name match #3: $domainNumber $portNumber $slotNumber ${BASH_REMATCH[4]} $portName"
+
+        if [ "$rep_if" == "0" ]; then
+            echo -n "enP${domainNumber}p${portNumber}s${slotNumber}f${rep_pf}${portName}rep"
+            dbgPrint "Returning enP${domainNumber}p${portNumber}s${slotNumber}f${rep_pf}${portName}rep"
+            return 0
+        else
+            echo -n "enP${domainNumber}p${portNumber}s${slotNumber}f${rep_pf}${portName}soc"
+            dbgPrint "Returning enP${domainNumber}p${portNumber}s${slotNumber}f${rep_pf}${portName}soc"
+            return 0
+        fi
+    else
+        dbgPrint "Name of matching PF did not match a known format:"
+        dbgPrint "$1"
+
+        # A non-empty "name" is returned so the search for a matching PF stops.
+        echo -n INVALID
+        return 1
+    fi
+
+    dbgPrint "THIS SHOULD NOT BE REACHED"
+}
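As a worked example, these input/output pairs follow directly from the branches above (the interface names themselves are invented):

    # Illustrative behaviour of getPfRepName:
    #   getPfRepName /sys/class/net/ens7f0 0 1  -> prints "ens7f1rep"
    #   getPfRepName /sys/class/net/ens7f0 1 1  -> prints "ens7f1soc"
    #   getPfRepName /sys/class/net/eth0   0 0  -> prints nothing, returns 2 (retry)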
+
+# $1 - directory of the PF in sysfs
+# $2 - representor pf number
+# $3 - representor vf number
+getVfRepName() {
+    local pf="$1"
+    local name="$(basename "$pf")"
+    local rep_pf="$2"
+    local rep_vf="$3"
+
+    dbgPrint "Basename of $pf is $name"
+    dbgPrint "pf $rep_pf; vf $rep_vf"
+
+    if [[ $name =~ $regexPFNAME0 ]]; then
+        dbgPrint "Matching PF did not get a proper name yet."
+
+        return 2
+    elif [[ $name =~ $regexPFNAME1 ]]; then
+        local slotNumber="${BASH_REMATCH[1]}"
+        local portName="${BASH_REMATCH[3]}"
+
+        dbgPrint "PF name match #1: $slotNumber ${BASH_REMATCH[2]} $portName"
+
+        # This represents the function on the *current* side
+        echo -n "ens${slotNumber}f${rep_pf}${portName}v${rep_vf}rep"
+        dbgPrint "Returning ens${slotNumber}f${rep_pf}${portName}v${rep_vf}rep"
+        return 0
+    elif [[ $name =~ $regexPFNAME2 ]]; then
+        local portNumber="${BASH_REMATCH[1]}"
+        local slotNumber="${BASH_REMATCH[2]}"
+        local portName="${BASH_REMATCH[4]}"
+
+        dbgPrint "PF name match #2: $portNumber $slotNumber ${BASH_REMATCH[3]} $portName"
+
+        echo -n "enp${portNumber}s${slotNumber}f${rep_pf}${portName}v${rep_vf}rep"
+        dbgPrint "Returning enp${portNumber}s${slotNumber}f${rep_pf}${portName}v${rep_vf}rep"
+        return 0
+    elif [[ $name =~ $regexPFNAME3 ]]; then
+        local domainNumber="${BASH_REMATCH[1]}"
+        local portNumber="${BASH_REMATCH[2]}"
+        local slotNumber="${BASH_REMATCH[3]}"
+        local portName="${BASH_REMATCH[5]}"
+
+        dbgPrint "PF name match #3: $domainNumber $portNumber $slotNumber ${BASH_REMATCH[4]} $portName"
+
+        echo -n "enP${domainNumber}p${portNumber}s${slotNumber}f${rep_pf}${portName}v${rep_vf}rep"
+        dbgPrint "Returning enP${domainNumber}p${portNumber}s${slotNumber}f${rep_pf}${portName}v${rep_vf}rep"
+        return 0
+    else
+        dbgPrint "Name of matching PF did not match a known format:"
+        dbgPrint "$1"
+
+        echo -n INVALID
+        return 1
+    fi
+
+    dbgPrint "THIS SHOULD NOT BE REACHED"
+}
+
+# $1 - Interface's sysfs node
+# $2 - value to match against the interface's phys_port_id (the representor's phys_switch_id)
+# returns 0 if this is a Xilinx ef100 PF, otherwise non-zero
+checkNetworkInterface() {
+    local nic="$1"
+    local phys_switch_id="$2"
+
+    dbgPrint "Looking at NIC $nic"
+
+    phys_port_id="$(cat "$nic/phys_port_id" 2> /dev/null)"
+
+    if [ "$?" != 0 ]; then
+        dbgPrint "Failed to read phys_port_id"
+        return 1
+    fi
+
+    if [ "$phys_switch_id" != "$phys_port_id" ]; then
+        dbgPrint "$phys_switch_id != $phys_port_id"
+        return 1
+    fi
+
+    device_vendor="$(cat "$nic/device/vendor" 2> /dev/null)"
+
+    if [ "$?" != 0 ]; then
+        dbgPrint "Failed to read PCI device vendor"
+        return 1
+    fi
+
+    if [ "$device_vendor" != "0x10ee" ]; then
+        dbgPrint "$device_vendor != 0x10ee"
+        return 1
+    fi
+
+    device_class="$(cat "$nic/device/class" 2> /dev/null)"
+
+    if [ "$?" != 0 ]; then
+        dbgPrint "Failed to read PCI device class"
+        return 1
+    fi
+
+    if [ "$device_class" != "0x020000" ]; then
+        dbgPrint "$device_class != 0x020000"
+        return 1
+    fi
+
+    dbgPrint "Found matching PF $nic"
+
+    return 0
+}
+
+shouldLookForInterfacesAgain() {
+    local CURRENT_TIME="$(date +%s)"
+    # rep_name being non-empty means that a suitable PF was found for the representor.
+    (( (CURRENT_TIME - INTERFACE_ENUMERATION_START) < INTERFACE_ENUMERATION_TIME_LIMIT )) && [ -z "$rep_name" ]
+}
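checkNetworkInterface above keys off standard net-device sysfs attributes. For reference, the same checks performed by hand look roughly like this; the interface name is an example, but 0x10ee is the Xilinx PCI vendor ID and 0x020000 the PCI class code for Ethernet controllers:

    # Manual equivalent of checkNetworkInterface (ens7f0 is a made-up name):
    cat /sys/class/net/ens7f0/phys_port_id  # must equal the representor's phys_switch_id
    cat /sys/class/net/ens7f0/device/vendor # expected: 0x10ee (Xilinx)
    cat /sys/class/net/ens7f0/device/class  # expected: 0x020000 (Ethernet controller)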
+
+INTERFACE_ENUMERATION_START="$(date +%s)"
+rep_name=""
+ret=0
+
+if [[ $phys_port_name =~ $regexPFREP ]]; then
+    # If this is a PF representor, look for the matching PF's interface,
+    # and name this representor according to the PF.
+
+    repPort="${BASH_REMATCH[1]}"
+    repSide="${BASH_REMATCH[2]}"
+    repPf="${BASH_REMATCH[3]}"
+
+    dbgPrint "Found matches for PF representor!"
+    dbgPrint "port: $repPort"
+    dbgPrint "side: $repSide"
+    dbgPrint "pf: $repPf"
+
+    while shouldLookForInterfacesAgain; do
+        for nic in /sys/class/net/*; do
+            checkNetworkInterface "$nic" "$phys_switch_id" || continue
+
+            rep_name="$(getPfRepName "$nic" "$repSide" "$repPf")"
+
+            if [ "$?" == 0 ]; then
+                new_name="$rep_name"
+            fi
+
+            # Whether a proper name was found or not, the loop stops because a suitable PF was found.
+            break
+        done
+
+        # If no matching PF was found, iteration will be attempted again,
+        # but a short sleep is useful to prevent excessive load on the system.
+        [ -z "$rep_name" ] && sleep "$INTERFACE_ENUMERATION_SLEEP_DURATION"
+    done
+elif [[ $phys_port_name =~ $regexVFREP ]]; then
+    # If this is a VF representor, look for the matching PF's interface. If found,
+    # the representor will be named according to the VF as deduced from the PF's name.
+    # The VF itself is not searched for, because it doesn't have a phys_port_id property that can be used.
+
+    repPort="${BASH_REMATCH[1]}"
+    repPf="${BASH_REMATCH[2]}"
+    repVf="${BASH_REMATCH[3]}"
+
+    dbgPrint "Found matches for VF representor!"
+    dbgPrint "port: $repPort"
+    dbgPrint "pf: $repPf"
+    dbgPrint "vf: $repVf"
+
+    while shouldLookForInterfacesAgain; do
+        for nic in /sys/class/net/*; do
+            checkNetworkInterface "$nic" "$phys_switch_id" || continue
+
+            rep_name="$(getVfRepName "$nic" "$repPf" "$repVf")"
+
+            if [ "$?" == 0 ]; then
+                new_name="$rep_name"
+            fi
+
+            break
+        done
+
+        [ -z "$rep_name" ] && sleep "$INTERFACE_ENUMERATION_SLEEP_DURATION"
+    done
+elif [[ $phys_port_name =~ $regexSOCVFREP ]]; then
+    # If this is a VF representor on the SoC, look for the matching PF's interface.
+    # If found, the representor will be named according to the VF as deduced from the PF's name.
+    # The VF itself is not searched for, because it doesn't have a phys_port_id property that can be used.
+
+    repPort="${BASH_REMATCH[1]}"
+    repSide="${BASH_REMATCH[2]}"
+    repPf="${BASH_REMATCH[3]}"
+    repVf="${BASH_REMATCH[4]}"
+
+    dbgPrint "Found matches for SoC VF representor!"
+    dbgPrint "port: $repPort"
+    dbgPrint "side: $repSide"
+    dbgPrint "pf: $repPf"
+    dbgPrint "vf: $repVf"
+
+    if [ "$repSide" != "0" ]; then
+        dbgPrint "Expected SoC VF representor if to be 0, got $repSide instead."
+    fi
+
+    while shouldLookForInterfacesAgain; do
+        for nic in /sys/class/net/*; do
+            checkNetworkInterface "$nic" "$phys_switch_id" || continue
+
+            rep_name="$(getVfRepName "$nic" "$repPf" "$repVf")"
+
+            if [ "$?" == 0 ]; then
+                new_name="$rep_name"
+            fi
+
+            break
+        done
+
+        [ -z "$rep_name" ] && sleep "$INTERFACE_ENUMERATION_SLEEP_DURATION"
+    done
+else
+    dbgPrint "Regexes don't match phys_port_name"
+
+    echo -n "$orig_name"
+    exit 1
+fi
+
+if [ -z "$rep_name" ]; then
+    CURRENT_TIME="$(date +%s)"
+
+    if (( (CURRENT_TIME - INTERFACE_ENUMERATION_START) >= INTERFACE_ENUMERATION_TIME_LIMIT )); then
+        dbgPrint "Tried searching for the interface for too long."
+    fi
+
+    echo -n "$orig_name"
+    exit 2
+fi
+
+# Once a suitable name is produced for the representor, attempt to set it as an altname first.
+
+if [ "$new_name" != "$orig_name" ]; then
+    dbgPrint "Trying to add altname $new_name"
+    if [ -n "$LOG_TO_STDERR" ]; then
+        ip link property add dev "$orig_name" altname "$new_name" 1>&2
+        ret=$?
+    elif [ -n "$LOGFILE" ]; then
+        ip link property add dev "$orig_name" altname "$new_name" >> $LOGFILE 2>> $LOGFILE
+        ret=$?
+    else
+        ip link property add dev "$orig_name" altname "$new_name" > /dev/null 2> /dev/null
+        ret=$?
+    fi
+else
+    # If the produced name equals the current name, adding an altname is not attempted.
+    ret=0
+fi
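The `ip link property add ... altname` calls above use the kernel's alternative-names feature (available in roughly Linux 5.5 and later with a matching iproute2): an altname is an extra name the device answers to, so the udev-assigned name can stay put while the derived representor name remains usable. An illustrative session, with invented names:

    # Give eth5 an additional name, then address the device by it:
    ip link property add dev eth5 altname ens7f0v1rep
    ip link show ens7f0v1rep   # resolves to the same device as eth5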
+
+if [ "$ret" == 0 ]; then
+    # Altname was successfully set (or skipped entirely), therefore the name of the interface is preserved.
+    echo -n "$orig_name"
+else
+    # Altname failed to set, therefore the interface is renamed.
+    echo -n "$new_name"
+fi
+
diff --git a/udev/trigger-representors.sh b/udev/trigger-representors.sh
new file mode 100755
index 0000000..16040c2
--- /dev/null
+++ b/udev/trigger-representors.sh
@@ -0,0 +1,68 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0-only
+
+# Copyright 2021 Xilinx Inc.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 as published
+# by the Free Software Foundation, incorporated herein by reference.
+
+
+# This file normally goes in the /usr/lib/sfc directory.
+
+# $1 - current name of interface
+# $2 - phys_port_id of interface
+orig_name="$1"
+
+LOCKDIR=/tmp/sfc_interface_renaming.lock
+
+# Change to a proper file name in order to enable debug logging.
+#LOGFILE=/tmp/sfc_trigger.log
+LOGFILE=/dev/null
+
+# Don't forget to change udev's log level via udevadm when changing this line.
+# Change to anything other than an empty value to enable logging to udev.
+LOG_TO_STDERR=
+
+function dbgPrint {
+    echo "$$ $orig_name: $*" >> $LOGFILE
+    [ -n "$LOG_TO_STDERR" ] && echo "$$ $orig_name: $*" 1>&2
+}
+
+# How long to sleep between mutex directory checks. Value is in seconds.
+# Do not set to a value lower than 0.001 seconds (1 millisecond).
+SLEEP_DURATION=0.1
+# Maximum amount of time in seconds to wait for the mutex.
+SLEEP_MAX=30
+SLEEP_TIMER=0
+
+# Conversion done in order to be able to use arithmetic operations.
+SLEEP_DURATION_IN_MS="$(printf %.0f "${SLEEP_DURATION}e3")"
+
+while ! mkdir "$LOCKDIR" > /dev/null 2> /dev/null; do
+    sleep "$SLEEP_DURATION"
+
+    if (( (SLEEP_TIMER += SLEEP_DURATION_IN_MS) >= (SLEEP_MAX * 1000) )); then
+        dbgPrint "Timed out while waiting for the mutex directory."
+        break
+    fi
+done
+
+if (( SLEEP_TIMER < (SLEEP_MAX * 1000) )); then
+    # If the loop above timed out, removing the lock directory is not a sensible idea.
+    trap 'rm -rf "$LOCKDIR"' EXIT KILL TERM INT
+fi
+
+dbgPrint ==================================
+dbgPrint $(date)
+dbgPrint "$2"
+
+if [ -n "$LOG_TO_STDERR" ]; then
+    udevadm trigger -v -c add -s net -a "phys_switch_id=$2" 1>&2
+elif [ -n "$LOGFILE" ]; then
+    udevadm trigger -v -c add -s net -a "phys_switch_id=$2" >> "$LOGFILE" 2>> "$LOGFILE"
+else
+    udevadm trigger -v -c add -s net -a "phys_switch_id=$2" > /dev/null 2> /dev/null
+fi
+
+exit 0
-- 
Gitee
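For readers who want to observe the rules and helpers above in action, the standard udev tooling should suffice; the device path below is an example only:

    # Dry-run the rules against one interface and print the matching steps:
    udevadm test /sys/class/net/eth5
    # Raise udev's log level so the helpers' LOG_TO_STDERR output is captured:
    udevadm control --log-priority=debug
    # Re-fire "add" events for net interfaces, as trigger-representors.sh does:
    udevadm trigger -c add -s net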