From fc62a710b149be7a2c2da9db10a6d2cec1268fbe Mon Sep 17 00:00:00 2001 From: wangkaiyuan Date: Fri, 11 Oct 2024 18:48:55 +0800 Subject: [PATCH] Merge bnxt_en and bnxt_re build Signed-off-by: wangkaiyuan --- Makefile | 34 + bnxt_en | 1 + .../.Module.symvers.cmd | 0 .../.modules.order.cmd | 0 {src => bnxt_en-1.10.3-229.0.139.0}/COPYING | 0 {src => bnxt_en-1.10.3-229.0.139.0}/ChangeLog | 0 {src => bnxt_en-1.10.3-229.0.139.0}/MANIFEST | 0 {src => bnxt_en-1.10.3-229.0.139.0}/Makefile | 0 .../README.TXT | 0 {src => bnxt_en-1.10.3-229.0.139.0}/bnxt.c | 0 {src => bnxt_en-1.10.3-229.0.139.0}/bnxt.h | 0 .../bnxt_auxbus_compat.c | 0 .../bnxt_auxbus_compat.h | 0 .../bnxt_compat.h | 0 .../bnxt_compat_link_modes.c | 0 .../bnxt_compat_link_modes.h | 0 .../bnxt_coredump.c | 0 .../bnxt_coredump.h | 0 .../bnxt_dbr.h | 0 .../bnxt_dcb.c | 0 .../bnxt_dcb.h | 0 .../bnxt_debugfs.c | 0 .../bnxt_debugfs.h | 0 .../bnxt_debugfs_cpt.c | 0 .../bnxt_devlink.c | 0 .../bnxt_devlink.h | 0 .../bnxt_dim.c | 0 .../bnxt_dim.h | 0 .../bnxt_en.mod | 0 .../bnxt_ethtool.c | 0 .../bnxt_ethtool.h | 0 .../bnxt_ethtool_compat.c | 0 .../bnxt_extra_ver.h | 0 .../bnxt_fw_hdr.h | 0 .../bnxt_hdbr.c | 0 .../bnxt_hdbr.h | 0 .../bnxt_hsi.h | 0 .../bnxt_hwmon.c | 0 .../bnxt_hwmon.h | 0 .../bnxt_hwrm.c | 0 .../bnxt_hwrm.h | 0 .../bnxt_ktls.c | 0 .../bnxt_ktls.h | 0 .../bnxt_lfc.c | 0 .../bnxt_lfc.h | 0 .../bnxt_lfc_ioctl.h | 0 .../bnxt_log.c | 0 .../bnxt_log.h | 0 .../bnxt_log_data.c | 0 .../bnxt_log_data.h | 0 .../bnxt_mpc.c | 0 .../bnxt_mpc.h | 0 .../bnxt_netlink.c | 0 .../bnxt_netlink.h | 0 .../bnxt_netmap_linux.h | 0 .../bnxt_nvm_defs.h | 0 .../bnxt_ptp.c | 0 .../bnxt_ptp.h | 0 .../bnxt_sriov.c | 0 .../bnxt_sriov.h | 0 .../bnxt_sriov_sysfs.c | 0 .../bnxt_sriov_sysfs.h | 0 {src => bnxt_en-1.10.3-229.0.139.0}/bnxt_tc.c | 0 {src => bnxt_en-1.10.3-229.0.139.0}/bnxt_tc.h | 0 .../bnxt_tc_compat.h | 0 .../bnxt_tfc.c | 0 .../bnxt_tfc.h | 0 .../bnxt_udcc.c | 0 .../bnxt_udcc.h | 0 .../bnxt_ulp.c | 0 .../bnxt_ulp.h | 0 .../bnxt_vfr.c | 0 .../bnxt_vfr.h | 0 .../bnxt_xdp.c | 0 .../bnxt_xdp.h | 0 .../find_src.awk | 0 .../hcapi/bitalloc.c | 0 .../hcapi/bitalloc.h | 0 .../hcapi/cfa/cfa_p40_hw.h | 0 .../hcapi/cfa/cfa_p58_hw.h | 0 .../hcapi/cfa/hcapi_cfa.h | 0 .../hcapi/cfa/hcapi_cfa_defs.h | 0 .../hcapi/cfa/hcapi_cfa_p4.c | 0 .../hcapi/cfa/hcapi_cfa_p4.h | 0 .../hcapi/cfa/hcapi_cfa_p58.c | 0 .../hcapi/cfa/hcapi_cfa_p58.h | 0 .../hcapi/cfa_v3/include/cfa_resources.h | 0 .../hcapi/cfa_v3/include/cfa_types.h | 0 .../hcapi/cfa_v3/include/cfa_util.h | 0 .../hcapi/cfa_v3/include/sys_util.h | 0 .../hcapi/cfa_v3/mm/cfa_mm.c | 0 .../hcapi/cfa_v3/mm/include/cfa_mm.h | 0 .../hcapi/cfa_v3/mm/include/sys_util.h | 0 .../hcapi/cfa_v3/mpc/cfa_bld_mpc.c | 0 .../cfa_v3/mpc/cfa_bld_p70_host_mpc_wrapper.c | 0 .../hcapi/cfa_v3/mpc/cfa_bld_p70_mpc.c | 0 .../hcapi/cfa_v3/mpc/cfa_bld_p70_mpcops.c | 0 .../hcapi/cfa_v3/mpc/include/cfa_bld_defs.h | 0 .../mpc/include/cfa_bld_mpc_field_ids.h | 0 .../hcapi/cfa_v3/mpc/include/cfa_bld_mpcops.h | 0 .../include/cfa_bld_p70_host_mpc_wrapper.h | 0 .../cfa_v3/mpc/include/cfa_bld_p70_mpc.h | 0 .../cfa_v3/mpc/include/cfa_bld_p70_mpc_defs.h | 0 .../cfa_v3/mpc/include/cfa_bld_p70_mpcops.h | 0 .../mpc/include/cfa_p70_mpc_field_ids.h | 0 .../mpc/include/cfa_p70_mpc_field_mapping.h | 0 .../cfa_v3/mpc/include/cfa_p70_mpc_structs.h | 0 .../hcapi/cfa_v3/tim/cfa_tim.c | 0 .../hcapi/cfa_v3/tim/include/cfa_tim.h | 0 .../hcapi/cfa_v3/tpm/cfa_tpm.c | 0 .../hcapi/cfa_v3/tpm/include/cfa_tpm.h | 0 .../tf_core/cfa_resource_types.h | 0 .../tf_core/cfa_tcam_mgr.c 
| 0 .../tf_core/cfa_tcam_mgr.h | 0 .../tf_core/cfa_tcam_mgr_device.h | 0 .../tf_core/cfa_tcam_mgr_hwop_msg.c | 0 .../tf_core/cfa_tcam_mgr_hwop_msg.h | 0 .../tf_core/cfa_tcam_mgr_p4.c | 0 .../tf_core/cfa_tcam_mgr_p4.h | 0 .../tf_core/cfa_tcam_mgr_p58.c | 0 .../tf_core/cfa_tcam_mgr_p58.h | 0 .../tf_core/dpool.c | 0 .../tf_core/dpool.h | 0 .../tf_core/rand.c | 0 .../tf_core/rand.h | 0 .../tf_core/tf_core.c | 0 .../tf_core/tf_core.h | 0 .../tf_core/tf_device.c | 0 .../tf_core/tf_device.h | 0 .../tf_core/tf_device_p4.c | 0 .../tf_core/tf_device_p4.h | 0 .../tf_core/tf_device_p58.c | 0 .../tf_core/tf_device_p58.h | 0 .../tf_core/tf_em.h | 0 .../tf_core/tf_em_hash_internal.c | 0 .../tf_core/tf_em_internal.c | 0 .../tf_core/tf_ext_flow_handle.h | 0 .../tf_core/tf_global_cfg.c | 0 .../tf_core/tf_global_cfg.h | 0 .../tf_core/tf_identifier.c | 0 .../tf_core/tf_identifier.h | 0 .../tf_core/tf_if_tbl.c | 0 .../tf_core/tf_if_tbl.h | 0 .../tf_core/tf_msg.c | 0 .../tf_core/tf_msg.h | 0 .../tf_core/tf_rm.c | 0 .../tf_core/tf_rm.h | 0 .../tf_core/tf_session.c | 0 .../tf_core/tf_session.h | 0 .../tf_core/tf_sram_mgr.c | 0 .../tf_core/tf_sram_mgr.h | 0 .../tf_core/tf_tbl.c | 0 .../tf_core/tf_tbl.h | 0 .../tf_core/tf_tbl_sram.c | 0 .../tf_core/tf_tbl_sram.h | 0 .../tf_core/tf_tcam.c | 0 .../tf_core/tf_tcam.h | 0 .../tf_core/tf_tcam_mgr_msg.c | 0 .../tf_core/tf_tcam_mgr_msg.h | 0 .../tf_core/tf_util.c | 0 .../tf_core/tf_util.h | 0 .../tf_ulp/bnxt_tf_common.h | 0 .../tf_ulp/bnxt_tf_tc_shim.c | 0 .../tf_ulp/bnxt_tf_tc_shim.h | 0 .../tf_ulp/bnxt_tf_ulp.c | 0 .../tf_ulp/bnxt_tf_ulp.h | 0 .../tf_ulp/bnxt_tf_ulp_tf.c | 0 .../tf_ulp/bnxt_tf_ulp_tf.h | 0 .../tf_ulp/bnxt_tf_ulp_tfc.c | 0 .../tf_ulp/bnxt_tf_ulp_tfc.h | 0 .../tf_ulp/bnxt_ulp_flow.h | 0 .../tf_ulp/bnxt_ulp_linux_flow.c | 0 .../tf_ulp/bnxt_ulp_meter.c | 0 .../generic_templates/ulp_template_db_act.c | 0 .../generic_templates/ulp_template_db_class.c | 0 .../generic_templates/ulp_template_db_enum.h | 0 .../generic_templates/ulp_template_db_field.h | 0 .../generic_templates/ulp_template_db_tbl.c | 0 .../generic_templates/ulp_template_db_tbl.h | 0 .../ulp_template_db_thor2_act.c | 0 .../ulp_template_db_thor2_act.o | Bin .../ulp_template_db_thor2_class.c | 0 .../ulp_template_db_thor2_class.o | Bin .../ulp_template_db_thor_act.c | 0 .../ulp_template_db_thor_class.c | 0 .../ulp_template_db_wh_plus_act.c | 0 .../ulp_template_db_wh_plus_class.c | 0 .../tf_ulp/ulp_def_rules.c | 0 .../tf_ulp/ulp_fc_mgr.c | 0 .../tf_ulp/ulp_fc_mgr.h | 0 .../tf_ulp/ulp_fc_mgr_tf.c | 0 .../tf_ulp/ulp_fc_mgr_tfc.c | 0 .../tf_ulp/ulp_flow_db.c | 0 .../tf_ulp/ulp_flow_db.h | 0 .../tf_ulp/ulp_gen_tbl.c | 0 .../tf_ulp/ulp_gen_tbl.h | 0 .../tf_ulp/ulp_generic_flow_offload.c | 0 .../tf_ulp/ulp_generic_flow_offload.h | 0 .../tf_ulp/ulp_linux.h | 0 .../tf_ulp/ulp_mapper.c | 0 .../tf_ulp/ulp_mapper.h | 0 .../tf_ulp/ulp_mapper_tf.c | 0 .../tf_ulp/ulp_mapper_tfc.c | 0 .../tf_ulp/ulp_mark_mgr.c | 0 .../tf_ulp/ulp_mark_mgr.h | 0 .../tf_ulp/ulp_matcher.c | 0 .../tf_ulp/ulp_matcher.h | 0 .../tf_ulp/ulp_port_db.c | 0 .../tf_ulp/ulp_port_db.h | 0 .../tf_ulp/ulp_tc_handler_tbl.c | 0 .../tf_ulp/ulp_tc_parser.c | 0 .../tf_ulp/ulp_tc_parser.h | 0 .../tf_ulp/ulp_template_debug.c | 0 .../tf_ulp/ulp_template_debug.h | 0 .../tf_ulp/ulp_template_debug_proto.h | 0 .../tf_ulp/ulp_template_struct.h | 0 .../tf_ulp/ulp_tf_debug.c | 0 .../tf_ulp/ulp_tf_debug.h | 0 .../tf_ulp/ulp_udcc.c | 0 .../tf_ulp/ulp_udcc.h | 0 .../tf_ulp/ulp_utils.c | 0 .../tf_ulp/ulp_utils.h | 0 .../tfc_v3/tfc.h | 0 .../tfc_v3/tfc_act.c | 0 
.../tfc_v3/tfc_action_handle.h | 0 .../tfc_v3/tfc_cpm.c | 0 .../tfc_v3/tfc_cpm.h | 0 .../tfc_v3/tfc_debug.h | 0 .../tfc_v3/tfc_em.c | 0 .../tfc_v3/tfc_em.h | 0 .../tfc_v3/tfc_flow_handle.h | 0 .../tfc_v3/tfc_global_id.c | 0 .../tfc_v3/tfc_ident.c | 0 .../tfc_v3/tfc_idx_tbl.c | 0 .../tfc_v3/tfc_if_tbl.c | 0 .../tfc_v3/tfc_init.c | 0 .../tfc_v3/tfc_mpc_table.c | 0 .../tfc_v3/tfc_msg.c | 0 .../tfc_v3/tfc_msg.h | 0 .../tfc_v3/tfc_priv.c | 0 .../tfc_v3/tfc_priv.h | 0 .../tfc_v3/tfc_session.c | 0 .../tfc_v3/tfc_tbl_scope.c | 0 .../tfc_v3/tfc_tcam.c | 0 .../tfc_v3/tfc_util.c | 0 .../tfc_v3/tfc_util.h | 0 .../tfc_v3/tfc_vf2pf_msg.c | 0 .../tfc_v3/tfc_vf2pf_msg.h | 0 .../tfc_v3/tfo.c | 0 .../tfc_v3/tfo.h | 0 bnxt_re | 1 + bnxt_re-1.10.3-229.0.139.0/COPYING | 339 + bnxt_re-1.10.3-229.0.139.0/Makefile | 1026 +++ bnxt_re-1.10.3-229.0.139.0/README.TXT | 916 ++ bnxt_re-1.10.3-229.0.139.0/bnxt_re-abi.h | 188 + bnxt_re-1.10.3-229.0.139.0/bnxt_re.h | 934 ++ bnxt_re-1.10.3-229.0.139.0/bnxt_setupcc.sh | 528 ++ bnxt_re-1.10.3-229.0.139.0/compat.c | 842 ++ bnxt_re-1.10.3-229.0.139.0/compat.h | 650 ++ bnxt_re-1.10.3-229.0.139.0/configfs.c | 4406 ++++++++++ bnxt_re-1.10.3-229.0.139.0/configfs.h | 112 + bnxt_re-1.10.3-229.0.139.0/dcb.c | 289 + bnxt_re-1.10.3-229.0.139.0/dcb.h | 48 + bnxt_re-1.10.3-229.0.139.0/debugfs.c | 1238 +++ bnxt_re-1.10.3-229.0.139.0/debugfs.h | 49 + bnxt_re-1.10.3-229.0.139.0/hdbr.c | 642 ++ bnxt_re-1.10.3-229.0.139.0/hdbr.h | 88 + bnxt_re-1.10.3-229.0.139.0/hw_counters.c | 511 ++ bnxt_re-1.10.3-229.0.139.0/hw_counters.h | 149 + bnxt_re-1.10.3-229.0.139.0/ib_verbs.c | 7585 +++++++++++++++++ bnxt_re-1.10.3-229.0.139.0/ib_verbs.h | 697 ++ bnxt_re-1.10.3-229.0.139.0/main.c | 5911 +++++++++++++ bnxt_re-1.10.3-229.0.139.0/qplib_fp.c | 3833 +++++++++ bnxt_re-1.10.3-229.0.139.0/qplib_fp.h | 651 ++ bnxt_re-1.10.3-229.0.139.0/qplib_rcfw.c | 1429 ++++ bnxt_re-1.10.3-229.0.139.0/qplib_rcfw.h | 300 + bnxt_re-1.10.3-229.0.139.0/qplib_res.c | 1224 +++ bnxt_re-1.10.3-229.0.139.0/qplib_res.h | 984 +++ bnxt_re-1.10.3-229.0.139.0/qplib_sp.c | 1304 +++ bnxt_re-1.10.3-229.0.139.0/qplib_sp.h | 453 + bnxt_re-1.10.3-229.0.139.0/qplib_tlv.h | 193 + bnxt_re-1.10.3-229.0.139.0/roce_hsi.h | 6615 ++++++++++++++ bnxt_re-1.10.3-229.0.139.0/stats.c | 476 ++ bnxt_re-1.10.3-229.0.139.0/stats.h | 232 + kmod-bnxt_en.spec => kmod-bnxt.spec | 387 +- 285 files changed, 45240 insertions(+), 25 deletions(-) create mode 100644 Makefile create mode 120000 bnxt_en rename {src => bnxt_en-1.10.3-229.0.139.0}/.Module.symvers.cmd (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/.modules.order.cmd (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/COPYING (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/ChangeLog (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/MANIFEST (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/Makefile (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/README.TXT (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/bnxt.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/bnxt.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/bnxt_auxbus_compat.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/bnxt_auxbus_compat.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/bnxt_compat.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/bnxt_compat_link_modes.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/bnxt_compat_link_modes.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/bnxt_coredump.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/bnxt_coredump.h (100%) rename {src => 
bnxt_en-1.10.3-229.0.139.0}/bnxt_dbr.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/bnxt_dcb.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/bnxt_dcb.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/bnxt_debugfs.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/bnxt_debugfs.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/bnxt_debugfs_cpt.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/bnxt_devlink.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/bnxt_devlink.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/bnxt_dim.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/bnxt_dim.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/bnxt_en.mod (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/bnxt_ethtool.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/bnxt_ethtool.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/bnxt_ethtool_compat.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/bnxt_extra_ver.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/bnxt_fw_hdr.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/bnxt_hdbr.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/bnxt_hdbr.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/bnxt_hsi.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/bnxt_hwmon.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/bnxt_hwmon.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/bnxt_hwrm.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/bnxt_hwrm.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/bnxt_ktls.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/bnxt_ktls.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/bnxt_lfc.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/bnxt_lfc.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/bnxt_lfc_ioctl.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/bnxt_log.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/bnxt_log.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/bnxt_log_data.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/bnxt_log_data.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/bnxt_mpc.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/bnxt_mpc.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/bnxt_netlink.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/bnxt_netlink.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/bnxt_netmap_linux.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/bnxt_nvm_defs.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/bnxt_ptp.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/bnxt_ptp.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/bnxt_sriov.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/bnxt_sriov.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/bnxt_sriov_sysfs.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/bnxt_sriov_sysfs.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/bnxt_tc.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/bnxt_tc.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/bnxt_tc_compat.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/bnxt_tfc.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/bnxt_tfc.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/bnxt_udcc.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/bnxt_udcc.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/bnxt_ulp.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/bnxt_ulp.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/bnxt_vfr.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/bnxt_vfr.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/bnxt_xdp.c (100%) rename {src => 
bnxt_en-1.10.3-229.0.139.0}/bnxt_xdp.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/find_src.awk (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/hcapi/bitalloc.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/hcapi/bitalloc.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/hcapi/cfa/cfa_p40_hw.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/hcapi/cfa/cfa_p58_hw.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/hcapi/cfa/hcapi_cfa.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/hcapi/cfa/hcapi_cfa_defs.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/hcapi/cfa/hcapi_cfa_p4.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/hcapi/cfa/hcapi_cfa_p4.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/hcapi/cfa/hcapi_cfa_p58.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/hcapi/cfa/hcapi_cfa_p58.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/hcapi/cfa_v3/include/cfa_resources.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/hcapi/cfa_v3/include/cfa_types.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/hcapi/cfa_v3/include/cfa_util.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/hcapi/cfa_v3/include/sys_util.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/hcapi/cfa_v3/mm/cfa_mm.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/hcapi/cfa_v3/mm/include/cfa_mm.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/hcapi/cfa_v3/mm/include/sys_util.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/hcapi/cfa_v3/mpc/cfa_bld_mpc.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/hcapi/cfa_v3/mpc/cfa_bld_p70_host_mpc_wrapper.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/hcapi/cfa_v3/mpc/cfa_bld_p70_mpc.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/hcapi/cfa_v3/mpc/cfa_bld_p70_mpcops.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/hcapi/cfa_v3/mpc/include/cfa_bld_defs.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/hcapi/cfa_v3/mpc/include/cfa_bld_mpc_field_ids.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/hcapi/cfa_v3/mpc/include/cfa_bld_mpcops.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/hcapi/cfa_v3/mpc/include/cfa_bld_p70_host_mpc_wrapper.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/hcapi/cfa_v3/mpc/include/cfa_bld_p70_mpc.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/hcapi/cfa_v3/mpc/include/cfa_bld_p70_mpc_defs.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/hcapi/cfa_v3/mpc/include/cfa_bld_p70_mpcops.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/hcapi/cfa_v3/mpc/include/cfa_p70_mpc_field_ids.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/hcapi/cfa_v3/mpc/include/cfa_p70_mpc_field_mapping.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/hcapi/cfa_v3/mpc/include/cfa_p70_mpc_structs.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/hcapi/cfa_v3/tim/cfa_tim.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/hcapi/cfa_v3/tim/include/cfa_tim.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/hcapi/cfa_v3/tpm/cfa_tpm.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/hcapi/cfa_v3/tpm/include/cfa_tpm.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_core/cfa_resource_types.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_core/cfa_tcam_mgr.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_core/cfa_tcam_mgr.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_core/cfa_tcam_mgr_device.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_core/cfa_tcam_mgr_hwop_msg.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_core/cfa_tcam_mgr_hwop_msg.h (100%) rename {src => 
bnxt_en-1.10.3-229.0.139.0}/tf_core/cfa_tcam_mgr_p4.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_core/cfa_tcam_mgr_p4.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_core/cfa_tcam_mgr_p58.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_core/cfa_tcam_mgr_p58.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_core/dpool.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_core/dpool.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_core/rand.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_core/rand.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_core/tf_core.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_core/tf_core.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_core/tf_device.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_core/tf_device.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_core/tf_device_p4.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_core/tf_device_p4.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_core/tf_device_p58.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_core/tf_device_p58.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_core/tf_em.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_core/tf_em_hash_internal.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_core/tf_em_internal.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_core/tf_ext_flow_handle.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_core/tf_global_cfg.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_core/tf_global_cfg.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_core/tf_identifier.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_core/tf_identifier.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_core/tf_if_tbl.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_core/tf_if_tbl.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_core/tf_msg.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_core/tf_msg.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_core/tf_rm.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_core/tf_rm.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_core/tf_session.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_core/tf_session.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_core/tf_sram_mgr.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_core/tf_sram_mgr.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_core/tf_tbl.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_core/tf_tbl.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_core/tf_tbl_sram.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_core/tf_tbl_sram.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_core/tf_tcam.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_core/tf_tcam.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_core/tf_tcam_mgr_msg.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_core/tf_tcam_mgr_msg.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_core/tf_util.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_core/tf_util.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_ulp/bnxt_tf_common.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_ulp/bnxt_tf_tc_shim.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_ulp/bnxt_tf_tc_shim.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_ulp/bnxt_tf_ulp.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_ulp/bnxt_tf_ulp.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_ulp/bnxt_tf_ulp_tf.c (100%) 
rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_ulp/bnxt_tf_ulp_tf.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_ulp/bnxt_tf_ulp_tfc.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_ulp/bnxt_tf_ulp_tfc.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_ulp/bnxt_ulp_flow.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_ulp/bnxt_ulp_linux_flow.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_ulp/bnxt_ulp_meter.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_ulp/generic_templates/ulp_template_db_act.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_ulp/generic_templates/ulp_template_db_class.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_ulp/generic_templates/ulp_template_db_enum.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_ulp/generic_templates/ulp_template_db_field.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_ulp/generic_templates/ulp_template_db_tbl.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_ulp/generic_templates/ulp_template_db_tbl.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_ulp/generic_templates/ulp_template_db_thor2_act.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_ulp/generic_templates/ulp_template_db_thor2_act.o (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_ulp/generic_templates/ulp_template_db_thor2_class.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_ulp/generic_templates/ulp_template_db_thor2_class.o (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_ulp/generic_templates/ulp_template_db_thor_act.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_ulp/generic_templates/ulp_template_db_thor_class.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_ulp/generic_templates/ulp_template_db_wh_plus_act.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_ulp/generic_templates/ulp_template_db_wh_plus_class.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_ulp/ulp_def_rules.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_ulp/ulp_fc_mgr.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_ulp/ulp_fc_mgr.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_ulp/ulp_fc_mgr_tf.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_ulp/ulp_fc_mgr_tfc.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_ulp/ulp_flow_db.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_ulp/ulp_flow_db.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_ulp/ulp_gen_tbl.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_ulp/ulp_gen_tbl.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_ulp/ulp_generic_flow_offload.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_ulp/ulp_generic_flow_offload.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_ulp/ulp_linux.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_ulp/ulp_mapper.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_ulp/ulp_mapper.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_ulp/ulp_mapper_tf.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_ulp/ulp_mapper_tfc.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_ulp/ulp_mark_mgr.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_ulp/ulp_mark_mgr.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_ulp/ulp_matcher.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_ulp/ulp_matcher.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_ulp/ulp_port_db.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_ulp/ulp_port_db.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_ulp/ulp_tc_handler_tbl.c (100%) rename 
{src => bnxt_en-1.10.3-229.0.139.0}/tf_ulp/ulp_tc_parser.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_ulp/ulp_tc_parser.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_ulp/ulp_template_debug.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_ulp/ulp_template_debug.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_ulp/ulp_template_debug_proto.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_ulp/ulp_template_struct.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_ulp/ulp_tf_debug.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_ulp/ulp_tf_debug.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_ulp/ulp_udcc.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_ulp/ulp_udcc.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_ulp/ulp_utils.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tf_ulp/ulp_utils.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tfc_v3/tfc.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tfc_v3/tfc_act.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tfc_v3/tfc_action_handle.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tfc_v3/tfc_cpm.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tfc_v3/tfc_cpm.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tfc_v3/tfc_debug.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tfc_v3/tfc_em.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tfc_v3/tfc_em.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tfc_v3/tfc_flow_handle.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tfc_v3/tfc_global_id.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tfc_v3/tfc_ident.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tfc_v3/tfc_idx_tbl.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tfc_v3/tfc_if_tbl.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tfc_v3/tfc_init.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tfc_v3/tfc_mpc_table.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tfc_v3/tfc_msg.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tfc_v3/tfc_msg.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tfc_v3/tfc_priv.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tfc_v3/tfc_priv.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tfc_v3/tfc_session.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tfc_v3/tfc_tbl_scope.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tfc_v3/tfc_tcam.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tfc_v3/tfc_util.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tfc_v3/tfc_util.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tfc_v3/tfc_vf2pf_msg.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tfc_v3/tfc_vf2pf_msg.h (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tfc_v3/tfo.c (100%) rename {src => bnxt_en-1.10.3-229.0.139.0}/tfc_v3/tfo.h (100%) create mode 120000 bnxt_re create mode 100644 bnxt_re-1.10.3-229.0.139.0/COPYING create mode 100644 bnxt_re-1.10.3-229.0.139.0/Makefile create mode 100644 bnxt_re-1.10.3-229.0.139.0/README.TXT create mode 100644 bnxt_re-1.10.3-229.0.139.0/bnxt_re-abi.h create mode 100644 bnxt_re-1.10.3-229.0.139.0/bnxt_re.h create mode 100644 bnxt_re-1.10.3-229.0.139.0/bnxt_setupcc.sh create mode 100644 bnxt_re-1.10.3-229.0.139.0/compat.c create mode 100644 bnxt_re-1.10.3-229.0.139.0/compat.h create mode 100644 bnxt_re-1.10.3-229.0.139.0/configfs.c create mode 100644 bnxt_re-1.10.3-229.0.139.0/configfs.h create mode 100644 bnxt_re-1.10.3-229.0.139.0/dcb.c create mode 100644 bnxt_re-1.10.3-229.0.139.0/dcb.h create mode 100644 
bnxt_re-1.10.3-229.0.139.0/debugfs.c create mode 100644 bnxt_re-1.10.3-229.0.139.0/debugfs.h create mode 100644 bnxt_re-1.10.3-229.0.139.0/hdbr.c create mode 100644 bnxt_re-1.10.3-229.0.139.0/hdbr.h create mode 100644 bnxt_re-1.10.3-229.0.139.0/hw_counters.c create mode 100644 bnxt_re-1.10.3-229.0.139.0/hw_counters.h create mode 100644 bnxt_re-1.10.3-229.0.139.0/ib_verbs.c create mode 100644 bnxt_re-1.10.3-229.0.139.0/ib_verbs.h create mode 100644 bnxt_re-1.10.3-229.0.139.0/main.c create mode 100644 bnxt_re-1.10.3-229.0.139.0/qplib_fp.c create mode 100644 bnxt_re-1.10.3-229.0.139.0/qplib_fp.h create mode 100644 bnxt_re-1.10.3-229.0.139.0/qplib_rcfw.c create mode 100644 bnxt_re-1.10.3-229.0.139.0/qplib_rcfw.h create mode 100644 bnxt_re-1.10.3-229.0.139.0/qplib_res.c create mode 100644 bnxt_re-1.10.3-229.0.139.0/qplib_res.h create mode 100644 bnxt_re-1.10.3-229.0.139.0/qplib_sp.c create mode 100644 bnxt_re-1.10.3-229.0.139.0/qplib_sp.h create mode 100644 bnxt_re-1.10.3-229.0.139.0/qplib_tlv.h create mode 100644 bnxt_re-1.10.3-229.0.139.0/roce_hsi.h create mode 100644 bnxt_re-1.10.3-229.0.139.0/stats.c create mode 100644 bnxt_re-1.10.3-229.0.139.0/stats.h rename kmod-bnxt_en.spec => kmod-bnxt.spec (48%)
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..a245a33
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,34 @@
+#!/usr/bin/make
+
+KVER=
+ifeq ($(KVER),)
+ KVER=$(shell uname -r)
+endif
+
+PREFIX=
+
+default: build
+
+
+l2install:
+	make -C bnxt_en KVER=$(KVER) PREFIX=$(PREFIX) install
+
+l2clean:
+	make -C bnxt_en clean
+
+roceinstall:
+	make -C bnxt_re KVER=$(KVER) PREFIX=$(PREFIX) install
+
+roceclean:
+	make -C bnxt_re clean
+
+build:
+	make -C bnxt_en KVER=$(KVER) PREFIX=$(PREFIX)
+	make -C bnxt_re KVER=$(KVER) PREFIX=$(PREFIX)
+
+install: build l2install roceinstall
+
+clean: l2clean roceclean
+
+.PHONY: all clean install
+
diff --git a/bnxt_en b/bnxt_en
new file mode 120000
index 0000000..0f68053
--- /dev/null
+++ b/bnxt_en
@@ -0,0 +1 @@
+bnxt_en-1.10.3-229.0.139.0
\ No newline at end of file
diff --git a/src/.Module.symvers.cmd b/bnxt_en-1.10.3-229.0.139.0/.Module.symvers.cmd similarity index 100% rename from src/.Module.symvers.cmd rename to bnxt_en-1.10.3-229.0.139.0/.Module.symvers.cmd diff --git a/src/.modules.order.cmd b/bnxt_en-1.10.3-229.0.139.0/.modules.order.cmd similarity index 100% rename from src/.modules.order.cmd rename to bnxt_en-1.10.3-229.0.139.0/.modules.order.cmd diff --git a/src/COPYING b/bnxt_en-1.10.3-229.0.139.0/COPYING similarity index 100% rename from src/COPYING rename to bnxt_en-1.10.3-229.0.139.0/COPYING diff --git a/src/ChangeLog b/bnxt_en-1.10.3-229.0.139.0/ChangeLog similarity index 100% rename from src/ChangeLog rename to bnxt_en-1.10.3-229.0.139.0/ChangeLog diff --git a/src/MANIFEST b/bnxt_en-1.10.3-229.0.139.0/MANIFEST similarity index 100% rename from src/MANIFEST rename to bnxt_en-1.10.3-229.0.139.0/MANIFEST diff --git a/src/Makefile b/bnxt_en-1.10.3-229.0.139.0/Makefile similarity index 100% rename from src/Makefile rename to bnxt_en-1.10.3-229.0.139.0/Makefile diff --git a/src/README.TXT b/bnxt_en-1.10.3-229.0.139.0/README.TXT similarity index 100% rename from src/README.TXT rename to bnxt_en-1.10.3-229.0.139.0/README.TXT diff --git a/src/bnxt.c b/bnxt_en-1.10.3-229.0.139.0/bnxt.c similarity index 100% rename from src/bnxt.c rename to bnxt_en-1.10.3-229.0.139.0/bnxt.c diff --git a/src/bnxt.h b/bnxt_en-1.10.3-229.0.139.0/bnxt.h similarity index 100% rename from src/bnxt.h rename to bnxt_en-1.10.3-229.0.139.0/bnxt.h diff --git
a/src/bnxt_auxbus_compat.c b/bnxt_en-1.10.3-229.0.139.0/bnxt_auxbus_compat.c similarity index 100% rename from src/bnxt_auxbus_compat.c rename to bnxt_en-1.10.3-229.0.139.0/bnxt_auxbus_compat.c diff --git a/src/bnxt_auxbus_compat.h b/bnxt_en-1.10.3-229.0.139.0/bnxt_auxbus_compat.h similarity index 100% rename from src/bnxt_auxbus_compat.h rename to bnxt_en-1.10.3-229.0.139.0/bnxt_auxbus_compat.h diff --git a/src/bnxt_compat.h b/bnxt_en-1.10.3-229.0.139.0/bnxt_compat.h similarity index 100% rename from src/bnxt_compat.h rename to bnxt_en-1.10.3-229.0.139.0/bnxt_compat.h diff --git a/src/bnxt_compat_link_modes.c b/bnxt_en-1.10.3-229.0.139.0/bnxt_compat_link_modes.c similarity index 100% rename from src/bnxt_compat_link_modes.c rename to bnxt_en-1.10.3-229.0.139.0/bnxt_compat_link_modes.c diff --git a/src/bnxt_compat_link_modes.h b/bnxt_en-1.10.3-229.0.139.0/bnxt_compat_link_modes.h similarity index 100% rename from src/bnxt_compat_link_modes.h rename to bnxt_en-1.10.3-229.0.139.0/bnxt_compat_link_modes.h diff --git a/src/bnxt_coredump.c b/bnxt_en-1.10.3-229.0.139.0/bnxt_coredump.c similarity index 100% rename from src/bnxt_coredump.c rename to bnxt_en-1.10.3-229.0.139.0/bnxt_coredump.c diff --git a/src/bnxt_coredump.h b/bnxt_en-1.10.3-229.0.139.0/bnxt_coredump.h similarity index 100% rename from src/bnxt_coredump.h rename to bnxt_en-1.10.3-229.0.139.0/bnxt_coredump.h diff --git a/src/bnxt_dbr.h b/bnxt_en-1.10.3-229.0.139.0/bnxt_dbr.h similarity index 100% rename from src/bnxt_dbr.h rename to bnxt_en-1.10.3-229.0.139.0/bnxt_dbr.h diff --git a/src/bnxt_dcb.c b/bnxt_en-1.10.3-229.0.139.0/bnxt_dcb.c similarity index 100% rename from src/bnxt_dcb.c rename to bnxt_en-1.10.3-229.0.139.0/bnxt_dcb.c diff --git a/src/bnxt_dcb.h b/bnxt_en-1.10.3-229.0.139.0/bnxt_dcb.h similarity index 100% rename from src/bnxt_dcb.h rename to bnxt_en-1.10.3-229.0.139.0/bnxt_dcb.h diff --git a/src/bnxt_debugfs.c b/bnxt_en-1.10.3-229.0.139.0/bnxt_debugfs.c similarity index 100% rename from src/bnxt_debugfs.c rename to bnxt_en-1.10.3-229.0.139.0/bnxt_debugfs.c diff --git a/src/bnxt_debugfs.h b/bnxt_en-1.10.3-229.0.139.0/bnxt_debugfs.h similarity index 100% rename from src/bnxt_debugfs.h rename to bnxt_en-1.10.3-229.0.139.0/bnxt_debugfs.h diff --git a/src/bnxt_debugfs_cpt.c b/bnxt_en-1.10.3-229.0.139.0/bnxt_debugfs_cpt.c similarity index 100% rename from src/bnxt_debugfs_cpt.c rename to bnxt_en-1.10.3-229.0.139.0/bnxt_debugfs_cpt.c diff --git a/src/bnxt_devlink.c b/bnxt_en-1.10.3-229.0.139.0/bnxt_devlink.c similarity index 100% rename from src/bnxt_devlink.c rename to bnxt_en-1.10.3-229.0.139.0/bnxt_devlink.c diff --git a/src/bnxt_devlink.h b/bnxt_en-1.10.3-229.0.139.0/bnxt_devlink.h similarity index 100% rename from src/bnxt_devlink.h rename to bnxt_en-1.10.3-229.0.139.0/bnxt_devlink.h diff --git a/src/bnxt_dim.c b/bnxt_en-1.10.3-229.0.139.0/bnxt_dim.c similarity index 100% rename from src/bnxt_dim.c rename to bnxt_en-1.10.3-229.0.139.0/bnxt_dim.c diff --git a/src/bnxt_dim.h b/bnxt_en-1.10.3-229.0.139.0/bnxt_dim.h similarity index 100% rename from src/bnxt_dim.h rename to bnxt_en-1.10.3-229.0.139.0/bnxt_dim.h diff --git a/src/bnxt_en.mod b/bnxt_en-1.10.3-229.0.139.0/bnxt_en.mod similarity index 100% rename from src/bnxt_en.mod rename to bnxt_en-1.10.3-229.0.139.0/bnxt_en.mod diff --git a/src/bnxt_ethtool.c b/bnxt_en-1.10.3-229.0.139.0/bnxt_ethtool.c similarity index 100% rename from src/bnxt_ethtool.c rename to bnxt_en-1.10.3-229.0.139.0/bnxt_ethtool.c diff --git a/src/bnxt_ethtool.h 
b/bnxt_en-1.10.3-229.0.139.0/bnxt_ethtool.h similarity index 100% rename from src/bnxt_ethtool.h rename to bnxt_en-1.10.3-229.0.139.0/bnxt_ethtool.h diff --git a/src/bnxt_ethtool_compat.c b/bnxt_en-1.10.3-229.0.139.0/bnxt_ethtool_compat.c similarity index 100% rename from src/bnxt_ethtool_compat.c rename to bnxt_en-1.10.3-229.0.139.0/bnxt_ethtool_compat.c diff --git a/src/bnxt_extra_ver.h b/bnxt_en-1.10.3-229.0.139.0/bnxt_extra_ver.h similarity index 100% rename from src/bnxt_extra_ver.h rename to bnxt_en-1.10.3-229.0.139.0/bnxt_extra_ver.h diff --git a/src/bnxt_fw_hdr.h b/bnxt_en-1.10.3-229.0.139.0/bnxt_fw_hdr.h similarity index 100% rename from src/bnxt_fw_hdr.h rename to bnxt_en-1.10.3-229.0.139.0/bnxt_fw_hdr.h diff --git a/src/bnxt_hdbr.c b/bnxt_en-1.10.3-229.0.139.0/bnxt_hdbr.c similarity index 100% rename from src/bnxt_hdbr.c rename to bnxt_en-1.10.3-229.0.139.0/bnxt_hdbr.c diff --git a/src/bnxt_hdbr.h b/bnxt_en-1.10.3-229.0.139.0/bnxt_hdbr.h similarity index 100% rename from src/bnxt_hdbr.h rename to bnxt_en-1.10.3-229.0.139.0/bnxt_hdbr.h diff --git a/src/bnxt_hsi.h b/bnxt_en-1.10.3-229.0.139.0/bnxt_hsi.h similarity index 100% rename from src/bnxt_hsi.h rename to bnxt_en-1.10.3-229.0.139.0/bnxt_hsi.h diff --git a/src/bnxt_hwmon.c b/bnxt_en-1.10.3-229.0.139.0/bnxt_hwmon.c similarity index 100% rename from src/bnxt_hwmon.c rename to bnxt_en-1.10.3-229.0.139.0/bnxt_hwmon.c diff --git a/src/bnxt_hwmon.h b/bnxt_en-1.10.3-229.0.139.0/bnxt_hwmon.h similarity index 100% rename from src/bnxt_hwmon.h rename to bnxt_en-1.10.3-229.0.139.0/bnxt_hwmon.h diff --git a/src/bnxt_hwrm.c b/bnxt_en-1.10.3-229.0.139.0/bnxt_hwrm.c similarity index 100% rename from src/bnxt_hwrm.c rename to bnxt_en-1.10.3-229.0.139.0/bnxt_hwrm.c diff --git a/src/bnxt_hwrm.h b/bnxt_en-1.10.3-229.0.139.0/bnxt_hwrm.h similarity index 100% rename from src/bnxt_hwrm.h rename to bnxt_en-1.10.3-229.0.139.0/bnxt_hwrm.h diff --git a/src/bnxt_ktls.c b/bnxt_en-1.10.3-229.0.139.0/bnxt_ktls.c similarity index 100% rename from src/bnxt_ktls.c rename to bnxt_en-1.10.3-229.0.139.0/bnxt_ktls.c diff --git a/src/bnxt_ktls.h b/bnxt_en-1.10.3-229.0.139.0/bnxt_ktls.h similarity index 100% rename from src/bnxt_ktls.h rename to bnxt_en-1.10.3-229.0.139.0/bnxt_ktls.h diff --git a/src/bnxt_lfc.c b/bnxt_en-1.10.3-229.0.139.0/bnxt_lfc.c similarity index 100% rename from src/bnxt_lfc.c rename to bnxt_en-1.10.3-229.0.139.0/bnxt_lfc.c diff --git a/src/bnxt_lfc.h b/bnxt_en-1.10.3-229.0.139.0/bnxt_lfc.h similarity index 100% rename from src/bnxt_lfc.h rename to bnxt_en-1.10.3-229.0.139.0/bnxt_lfc.h diff --git a/src/bnxt_lfc_ioctl.h b/bnxt_en-1.10.3-229.0.139.0/bnxt_lfc_ioctl.h similarity index 100% rename from src/bnxt_lfc_ioctl.h rename to bnxt_en-1.10.3-229.0.139.0/bnxt_lfc_ioctl.h diff --git a/src/bnxt_log.c b/bnxt_en-1.10.3-229.0.139.0/bnxt_log.c similarity index 100% rename from src/bnxt_log.c rename to bnxt_en-1.10.3-229.0.139.0/bnxt_log.c diff --git a/src/bnxt_log.h b/bnxt_en-1.10.3-229.0.139.0/bnxt_log.h similarity index 100% rename from src/bnxt_log.h rename to bnxt_en-1.10.3-229.0.139.0/bnxt_log.h diff --git a/src/bnxt_log_data.c b/bnxt_en-1.10.3-229.0.139.0/bnxt_log_data.c similarity index 100% rename from src/bnxt_log_data.c rename to bnxt_en-1.10.3-229.0.139.0/bnxt_log_data.c diff --git a/src/bnxt_log_data.h b/bnxt_en-1.10.3-229.0.139.0/bnxt_log_data.h similarity index 100% rename from src/bnxt_log_data.h rename to bnxt_en-1.10.3-229.0.139.0/bnxt_log_data.h diff --git a/src/bnxt_mpc.c b/bnxt_en-1.10.3-229.0.139.0/bnxt_mpc.c similarity index 
100% rename from src/bnxt_mpc.c rename to bnxt_en-1.10.3-229.0.139.0/bnxt_mpc.c diff --git a/src/bnxt_mpc.h b/bnxt_en-1.10.3-229.0.139.0/bnxt_mpc.h similarity index 100% rename from src/bnxt_mpc.h rename to bnxt_en-1.10.3-229.0.139.0/bnxt_mpc.h diff --git a/src/bnxt_netlink.c b/bnxt_en-1.10.3-229.0.139.0/bnxt_netlink.c similarity index 100% rename from src/bnxt_netlink.c rename to bnxt_en-1.10.3-229.0.139.0/bnxt_netlink.c diff --git a/src/bnxt_netlink.h b/bnxt_en-1.10.3-229.0.139.0/bnxt_netlink.h similarity index 100% rename from src/bnxt_netlink.h rename to bnxt_en-1.10.3-229.0.139.0/bnxt_netlink.h diff --git a/src/bnxt_netmap_linux.h b/bnxt_en-1.10.3-229.0.139.0/bnxt_netmap_linux.h similarity index 100% rename from src/bnxt_netmap_linux.h rename to bnxt_en-1.10.3-229.0.139.0/bnxt_netmap_linux.h diff --git a/src/bnxt_nvm_defs.h b/bnxt_en-1.10.3-229.0.139.0/bnxt_nvm_defs.h similarity index 100% rename from src/bnxt_nvm_defs.h rename to bnxt_en-1.10.3-229.0.139.0/bnxt_nvm_defs.h diff --git a/src/bnxt_ptp.c b/bnxt_en-1.10.3-229.0.139.0/bnxt_ptp.c similarity index 100% rename from src/bnxt_ptp.c rename to bnxt_en-1.10.3-229.0.139.0/bnxt_ptp.c diff --git a/src/bnxt_ptp.h b/bnxt_en-1.10.3-229.0.139.0/bnxt_ptp.h similarity index 100% rename from src/bnxt_ptp.h rename to bnxt_en-1.10.3-229.0.139.0/bnxt_ptp.h diff --git a/src/bnxt_sriov.c b/bnxt_en-1.10.3-229.0.139.0/bnxt_sriov.c similarity index 100% rename from src/bnxt_sriov.c rename to bnxt_en-1.10.3-229.0.139.0/bnxt_sriov.c diff --git a/src/bnxt_sriov.h b/bnxt_en-1.10.3-229.0.139.0/bnxt_sriov.h similarity index 100% rename from src/bnxt_sriov.h rename to bnxt_en-1.10.3-229.0.139.0/bnxt_sriov.h diff --git a/src/bnxt_sriov_sysfs.c b/bnxt_en-1.10.3-229.0.139.0/bnxt_sriov_sysfs.c similarity index 100% rename from src/bnxt_sriov_sysfs.c rename to bnxt_en-1.10.3-229.0.139.0/bnxt_sriov_sysfs.c diff --git a/src/bnxt_sriov_sysfs.h b/bnxt_en-1.10.3-229.0.139.0/bnxt_sriov_sysfs.h similarity index 100% rename from src/bnxt_sriov_sysfs.h rename to bnxt_en-1.10.3-229.0.139.0/bnxt_sriov_sysfs.h diff --git a/src/bnxt_tc.c b/bnxt_en-1.10.3-229.0.139.0/bnxt_tc.c similarity index 100% rename from src/bnxt_tc.c rename to bnxt_en-1.10.3-229.0.139.0/bnxt_tc.c diff --git a/src/bnxt_tc.h b/bnxt_en-1.10.3-229.0.139.0/bnxt_tc.h similarity index 100% rename from src/bnxt_tc.h rename to bnxt_en-1.10.3-229.0.139.0/bnxt_tc.h diff --git a/src/bnxt_tc_compat.h b/bnxt_en-1.10.3-229.0.139.0/bnxt_tc_compat.h similarity index 100% rename from src/bnxt_tc_compat.h rename to bnxt_en-1.10.3-229.0.139.0/bnxt_tc_compat.h diff --git a/src/bnxt_tfc.c b/bnxt_en-1.10.3-229.0.139.0/bnxt_tfc.c similarity index 100% rename from src/bnxt_tfc.c rename to bnxt_en-1.10.3-229.0.139.0/bnxt_tfc.c diff --git a/src/bnxt_tfc.h b/bnxt_en-1.10.3-229.0.139.0/bnxt_tfc.h similarity index 100% rename from src/bnxt_tfc.h rename to bnxt_en-1.10.3-229.0.139.0/bnxt_tfc.h diff --git a/src/bnxt_udcc.c b/bnxt_en-1.10.3-229.0.139.0/bnxt_udcc.c similarity index 100% rename from src/bnxt_udcc.c rename to bnxt_en-1.10.3-229.0.139.0/bnxt_udcc.c diff --git a/src/bnxt_udcc.h b/bnxt_en-1.10.3-229.0.139.0/bnxt_udcc.h similarity index 100% rename from src/bnxt_udcc.h rename to bnxt_en-1.10.3-229.0.139.0/bnxt_udcc.h diff --git a/src/bnxt_ulp.c b/bnxt_en-1.10.3-229.0.139.0/bnxt_ulp.c similarity index 100% rename from src/bnxt_ulp.c rename to bnxt_en-1.10.3-229.0.139.0/bnxt_ulp.c diff --git a/src/bnxt_ulp.h b/bnxt_en-1.10.3-229.0.139.0/bnxt_ulp.h similarity index 100% rename from src/bnxt_ulp.h rename to 
bnxt_en-1.10.3-229.0.139.0/bnxt_ulp.h diff --git a/src/bnxt_vfr.c b/bnxt_en-1.10.3-229.0.139.0/bnxt_vfr.c similarity index 100% rename from src/bnxt_vfr.c rename to bnxt_en-1.10.3-229.0.139.0/bnxt_vfr.c diff --git a/src/bnxt_vfr.h b/bnxt_en-1.10.3-229.0.139.0/bnxt_vfr.h similarity index 100% rename from src/bnxt_vfr.h rename to bnxt_en-1.10.3-229.0.139.0/bnxt_vfr.h diff --git a/src/bnxt_xdp.c b/bnxt_en-1.10.3-229.0.139.0/bnxt_xdp.c similarity index 100% rename from src/bnxt_xdp.c rename to bnxt_en-1.10.3-229.0.139.0/bnxt_xdp.c diff --git a/src/bnxt_xdp.h b/bnxt_en-1.10.3-229.0.139.0/bnxt_xdp.h similarity index 100% rename from src/bnxt_xdp.h rename to bnxt_en-1.10.3-229.0.139.0/bnxt_xdp.h diff --git a/src/find_src.awk b/bnxt_en-1.10.3-229.0.139.0/find_src.awk similarity index 100% rename from src/find_src.awk rename to bnxt_en-1.10.3-229.0.139.0/find_src.awk diff --git a/src/hcapi/bitalloc.c b/bnxt_en-1.10.3-229.0.139.0/hcapi/bitalloc.c similarity index 100% rename from src/hcapi/bitalloc.c rename to bnxt_en-1.10.3-229.0.139.0/hcapi/bitalloc.c diff --git a/src/hcapi/bitalloc.h b/bnxt_en-1.10.3-229.0.139.0/hcapi/bitalloc.h similarity index 100% rename from src/hcapi/bitalloc.h rename to bnxt_en-1.10.3-229.0.139.0/hcapi/bitalloc.h diff --git a/src/hcapi/cfa/cfa_p40_hw.h b/bnxt_en-1.10.3-229.0.139.0/hcapi/cfa/cfa_p40_hw.h similarity index 100% rename from src/hcapi/cfa/cfa_p40_hw.h rename to bnxt_en-1.10.3-229.0.139.0/hcapi/cfa/cfa_p40_hw.h diff --git a/src/hcapi/cfa/cfa_p58_hw.h b/bnxt_en-1.10.3-229.0.139.0/hcapi/cfa/cfa_p58_hw.h similarity index 100% rename from src/hcapi/cfa/cfa_p58_hw.h rename to bnxt_en-1.10.3-229.0.139.0/hcapi/cfa/cfa_p58_hw.h diff --git a/src/hcapi/cfa/hcapi_cfa.h b/bnxt_en-1.10.3-229.0.139.0/hcapi/cfa/hcapi_cfa.h similarity index 100% rename from src/hcapi/cfa/hcapi_cfa.h rename to bnxt_en-1.10.3-229.0.139.0/hcapi/cfa/hcapi_cfa.h diff --git a/src/hcapi/cfa/hcapi_cfa_defs.h b/bnxt_en-1.10.3-229.0.139.0/hcapi/cfa/hcapi_cfa_defs.h similarity index 100% rename from src/hcapi/cfa/hcapi_cfa_defs.h rename to bnxt_en-1.10.3-229.0.139.0/hcapi/cfa/hcapi_cfa_defs.h diff --git a/src/hcapi/cfa/hcapi_cfa_p4.c b/bnxt_en-1.10.3-229.0.139.0/hcapi/cfa/hcapi_cfa_p4.c similarity index 100% rename from src/hcapi/cfa/hcapi_cfa_p4.c rename to bnxt_en-1.10.3-229.0.139.0/hcapi/cfa/hcapi_cfa_p4.c diff --git a/src/hcapi/cfa/hcapi_cfa_p4.h b/bnxt_en-1.10.3-229.0.139.0/hcapi/cfa/hcapi_cfa_p4.h similarity index 100% rename from src/hcapi/cfa/hcapi_cfa_p4.h rename to bnxt_en-1.10.3-229.0.139.0/hcapi/cfa/hcapi_cfa_p4.h diff --git a/src/hcapi/cfa/hcapi_cfa_p58.c b/bnxt_en-1.10.3-229.0.139.0/hcapi/cfa/hcapi_cfa_p58.c similarity index 100% rename from src/hcapi/cfa/hcapi_cfa_p58.c rename to bnxt_en-1.10.3-229.0.139.0/hcapi/cfa/hcapi_cfa_p58.c diff --git a/src/hcapi/cfa/hcapi_cfa_p58.h b/bnxt_en-1.10.3-229.0.139.0/hcapi/cfa/hcapi_cfa_p58.h similarity index 100% rename from src/hcapi/cfa/hcapi_cfa_p58.h rename to bnxt_en-1.10.3-229.0.139.0/hcapi/cfa/hcapi_cfa_p58.h diff --git a/src/hcapi/cfa_v3/include/cfa_resources.h b/bnxt_en-1.10.3-229.0.139.0/hcapi/cfa_v3/include/cfa_resources.h similarity index 100% rename from src/hcapi/cfa_v3/include/cfa_resources.h rename to bnxt_en-1.10.3-229.0.139.0/hcapi/cfa_v3/include/cfa_resources.h diff --git a/src/hcapi/cfa_v3/include/cfa_types.h b/bnxt_en-1.10.3-229.0.139.0/hcapi/cfa_v3/include/cfa_types.h similarity index 100% rename from src/hcapi/cfa_v3/include/cfa_types.h rename to bnxt_en-1.10.3-229.0.139.0/hcapi/cfa_v3/include/cfa_types.h diff --git 
a/src/hcapi/cfa_v3/include/cfa_util.h b/bnxt_en-1.10.3-229.0.139.0/hcapi/cfa_v3/include/cfa_util.h similarity index 100% rename from src/hcapi/cfa_v3/include/cfa_util.h rename to bnxt_en-1.10.3-229.0.139.0/hcapi/cfa_v3/include/cfa_util.h diff --git a/src/hcapi/cfa_v3/include/sys_util.h b/bnxt_en-1.10.3-229.0.139.0/hcapi/cfa_v3/include/sys_util.h similarity index 100% rename from src/hcapi/cfa_v3/include/sys_util.h rename to bnxt_en-1.10.3-229.0.139.0/hcapi/cfa_v3/include/sys_util.h diff --git a/src/hcapi/cfa_v3/mm/cfa_mm.c b/bnxt_en-1.10.3-229.0.139.0/hcapi/cfa_v3/mm/cfa_mm.c similarity index 100% rename from src/hcapi/cfa_v3/mm/cfa_mm.c rename to bnxt_en-1.10.3-229.0.139.0/hcapi/cfa_v3/mm/cfa_mm.c diff --git a/src/hcapi/cfa_v3/mm/include/cfa_mm.h b/bnxt_en-1.10.3-229.0.139.0/hcapi/cfa_v3/mm/include/cfa_mm.h similarity index 100% rename from src/hcapi/cfa_v3/mm/include/cfa_mm.h rename to bnxt_en-1.10.3-229.0.139.0/hcapi/cfa_v3/mm/include/cfa_mm.h diff --git a/src/hcapi/cfa_v3/mm/include/sys_util.h b/bnxt_en-1.10.3-229.0.139.0/hcapi/cfa_v3/mm/include/sys_util.h similarity index 100% rename from src/hcapi/cfa_v3/mm/include/sys_util.h rename to bnxt_en-1.10.3-229.0.139.0/hcapi/cfa_v3/mm/include/sys_util.h diff --git a/src/hcapi/cfa_v3/mpc/cfa_bld_mpc.c b/bnxt_en-1.10.3-229.0.139.0/hcapi/cfa_v3/mpc/cfa_bld_mpc.c similarity index 100% rename from src/hcapi/cfa_v3/mpc/cfa_bld_mpc.c rename to bnxt_en-1.10.3-229.0.139.0/hcapi/cfa_v3/mpc/cfa_bld_mpc.c diff --git a/src/hcapi/cfa_v3/mpc/cfa_bld_p70_host_mpc_wrapper.c b/bnxt_en-1.10.3-229.0.139.0/hcapi/cfa_v3/mpc/cfa_bld_p70_host_mpc_wrapper.c similarity index 100% rename from src/hcapi/cfa_v3/mpc/cfa_bld_p70_host_mpc_wrapper.c rename to bnxt_en-1.10.3-229.0.139.0/hcapi/cfa_v3/mpc/cfa_bld_p70_host_mpc_wrapper.c diff --git a/src/hcapi/cfa_v3/mpc/cfa_bld_p70_mpc.c b/bnxt_en-1.10.3-229.0.139.0/hcapi/cfa_v3/mpc/cfa_bld_p70_mpc.c similarity index 100% rename from src/hcapi/cfa_v3/mpc/cfa_bld_p70_mpc.c rename to bnxt_en-1.10.3-229.0.139.0/hcapi/cfa_v3/mpc/cfa_bld_p70_mpc.c diff --git a/src/hcapi/cfa_v3/mpc/cfa_bld_p70_mpcops.c b/bnxt_en-1.10.3-229.0.139.0/hcapi/cfa_v3/mpc/cfa_bld_p70_mpcops.c similarity index 100% rename from src/hcapi/cfa_v3/mpc/cfa_bld_p70_mpcops.c rename to bnxt_en-1.10.3-229.0.139.0/hcapi/cfa_v3/mpc/cfa_bld_p70_mpcops.c diff --git a/src/hcapi/cfa_v3/mpc/include/cfa_bld_defs.h b/bnxt_en-1.10.3-229.0.139.0/hcapi/cfa_v3/mpc/include/cfa_bld_defs.h similarity index 100% rename from src/hcapi/cfa_v3/mpc/include/cfa_bld_defs.h rename to bnxt_en-1.10.3-229.0.139.0/hcapi/cfa_v3/mpc/include/cfa_bld_defs.h diff --git a/src/hcapi/cfa_v3/mpc/include/cfa_bld_mpc_field_ids.h b/bnxt_en-1.10.3-229.0.139.0/hcapi/cfa_v3/mpc/include/cfa_bld_mpc_field_ids.h similarity index 100% rename from src/hcapi/cfa_v3/mpc/include/cfa_bld_mpc_field_ids.h rename to bnxt_en-1.10.3-229.0.139.0/hcapi/cfa_v3/mpc/include/cfa_bld_mpc_field_ids.h diff --git a/src/hcapi/cfa_v3/mpc/include/cfa_bld_mpcops.h b/bnxt_en-1.10.3-229.0.139.0/hcapi/cfa_v3/mpc/include/cfa_bld_mpcops.h similarity index 100% rename from src/hcapi/cfa_v3/mpc/include/cfa_bld_mpcops.h rename to bnxt_en-1.10.3-229.0.139.0/hcapi/cfa_v3/mpc/include/cfa_bld_mpcops.h diff --git a/src/hcapi/cfa_v3/mpc/include/cfa_bld_p70_host_mpc_wrapper.h b/bnxt_en-1.10.3-229.0.139.0/hcapi/cfa_v3/mpc/include/cfa_bld_p70_host_mpc_wrapper.h similarity index 100% rename from src/hcapi/cfa_v3/mpc/include/cfa_bld_p70_host_mpc_wrapper.h rename to bnxt_en-1.10.3-229.0.139.0/hcapi/cfa_v3/mpc/include/cfa_bld_p70_host_mpc_wrapper.h diff 
--git a/src/hcapi/cfa_v3/mpc/include/cfa_bld_p70_mpc.h b/bnxt_en-1.10.3-229.0.139.0/hcapi/cfa_v3/mpc/include/cfa_bld_p70_mpc.h similarity index 100% rename from src/hcapi/cfa_v3/mpc/include/cfa_bld_p70_mpc.h rename to bnxt_en-1.10.3-229.0.139.0/hcapi/cfa_v3/mpc/include/cfa_bld_p70_mpc.h diff --git a/src/hcapi/cfa_v3/mpc/include/cfa_bld_p70_mpc_defs.h b/bnxt_en-1.10.3-229.0.139.0/hcapi/cfa_v3/mpc/include/cfa_bld_p70_mpc_defs.h similarity index 100% rename from src/hcapi/cfa_v3/mpc/include/cfa_bld_p70_mpc_defs.h rename to bnxt_en-1.10.3-229.0.139.0/hcapi/cfa_v3/mpc/include/cfa_bld_p70_mpc_defs.h diff --git a/src/hcapi/cfa_v3/mpc/include/cfa_bld_p70_mpcops.h b/bnxt_en-1.10.3-229.0.139.0/hcapi/cfa_v3/mpc/include/cfa_bld_p70_mpcops.h similarity index 100% rename from src/hcapi/cfa_v3/mpc/include/cfa_bld_p70_mpcops.h rename to bnxt_en-1.10.3-229.0.139.0/hcapi/cfa_v3/mpc/include/cfa_bld_p70_mpcops.h diff --git a/src/hcapi/cfa_v3/mpc/include/cfa_p70_mpc_field_ids.h b/bnxt_en-1.10.3-229.0.139.0/hcapi/cfa_v3/mpc/include/cfa_p70_mpc_field_ids.h similarity index 100% rename from src/hcapi/cfa_v3/mpc/include/cfa_p70_mpc_field_ids.h rename to bnxt_en-1.10.3-229.0.139.0/hcapi/cfa_v3/mpc/include/cfa_p70_mpc_field_ids.h diff --git a/src/hcapi/cfa_v3/mpc/include/cfa_p70_mpc_field_mapping.h b/bnxt_en-1.10.3-229.0.139.0/hcapi/cfa_v3/mpc/include/cfa_p70_mpc_field_mapping.h similarity index 100% rename from src/hcapi/cfa_v3/mpc/include/cfa_p70_mpc_field_mapping.h rename to bnxt_en-1.10.3-229.0.139.0/hcapi/cfa_v3/mpc/include/cfa_p70_mpc_field_mapping.h diff --git a/src/hcapi/cfa_v3/mpc/include/cfa_p70_mpc_structs.h b/bnxt_en-1.10.3-229.0.139.0/hcapi/cfa_v3/mpc/include/cfa_p70_mpc_structs.h similarity index 100% rename from src/hcapi/cfa_v3/mpc/include/cfa_p70_mpc_structs.h rename to bnxt_en-1.10.3-229.0.139.0/hcapi/cfa_v3/mpc/include/cfa_p70_mpc_structs.h diff --git a/src/hcapi/cfa_v3/tim/cfa_tim.c b/bnxt_en-1.10.3-229.0.139.0/hcapi/cfa_v3/tim/cfa_tim.c similarity index 100% rename from src/hcapi/cfa_v3/tim/cfa_tim.c rename to bnxt_en-1.10.3-229.0.139.0/hcapi/cfa_v3/tim/cfa_tim.c diff --git a/src/hcapi/cfa_v3/tim/include/cfa_tim.h b/bnxt_en-1.10.3-229.0.139.0/hcapi/cfa_v3/tim/include/cfa_tim.h similarity index 100% rename from src/hcapi/cfa_v3/tim/include/cfa_tim.h rename to bnxt_en-1.10.3-229.0.139.0/hcapi/cfa_v3/tim/include/cfa_tim.h diff --git a/src/hcapi/cfa_v3/tpm/cfa_tpm.c b/bnxt_en-1.10.3-229.0.139.0/hcapi/cfa_v3/tpm/cfa_tpm.c similarity index 100% rename from src/hcapi/cfa_v3/tpm/cfa_tpm.c rename to bnxt_en-1.10.3-229.0.139.0/hcapi/cfa_v3/tpm/cfa_tpm.c diff --git a/src/hcapi/cfa_v3/tpm/include/cfa_tpm.h b/bnxt_en-1.10.3-229.0.139.0/hcapi/cfa_v3/tpm/include/cfa_tpm.h similarity index 100% rename from src/hcapi/cfa_v3/tpm/include/cfa_tpm.h rename to bnxt_en-1.10.3-229.0.139.0/hcapi/cfa_v3/tpm/include/cfa_tpm.h diff --git a/src/tf_core/cfa_resource_types.h b/bnxt_en-1.10.3-229.0.139.0/tf_core/cfa_resource_types.h similarity index 100% rename from src/tf_core/cfa_resource_types.h rename to bnxt_en-1.10.3-229.0.139.0/tf_core/cfa_resource_types.h diff --git a/src/tf_core/cfa_tcam_mgr.c b/bnxt_en-1.10.3-229.0.139.0/tf_core/cfa_tcam_mgr.c similarity index 100% rename from src/tf_core/cfa_tcam_mgr.c rename to bnxt_en-1.10.3-229.0.139.0/tf_core/cfa_tcam_mgr.c diff --git a/src/tf_core/cfa_tcam_mgr.h b/bnxt_en-1.10.3-229.0.139.0/tf_core/cfa_tcam_mgr.h similarity index 100% rename from src/tf_core/cfa_tcam_mgr.h rename to bnxt_en-1.10.3-229.0.139.0/tf_core/cfa_tcam_mgr.h diff --git a/src/tf_core/cfa_tcam_mgr_device.h 
b/bnxt_en-1.10.3-229.0.139.0/tf_core/cfa_tcam_mgr_device.h similarity index 100% rename from src/tf_core/cfa_tcam_mgr_device.h rename to bnxt_en-1.10.3-229.0.139.0/tf_core/cfa_tcam_mgr_device.h diff --git a/src/tf_core/cfa_tcam_mgr_hwop_msg.c b/bnxt_en-1.10.3-229.0.139.0/tf_core/cfa_tcam_mgr_hwop_msg.c similarity index 100% rename from src/tf_core/cfa_tcam_mgr_hwop_msg.c rename to bnxt_en-1.10.3-229.0.139.0/tf_core/cfa_tcam_mgr_hwop_msg.c diff --git a/src/tf_core/cfa_tcam_mgr_hwop_msg.h b/bnxt_en-1.10.3-229.0.139.0/tf_core/cfa_tcam_mgr_hwop_msg.h similarity index 100% rename from src/tf_core/cfa_tcam_mgr_hwop_msg.h rename to bnxt_en-1.10.3-229.0.139.0/tf_core/cfa_tcam_mgr_hwop_msg.h diff --git a/src/tf_core/cfa_tcam_mgr_p4.c b/bnxt_en-1.10.3-229.0.139.0/tf_core/cfa_tcam_mgr_p4.c similarity index 100% rename from src/tf_core/cfa_tcam_mgr_p4.c rename to bnxt_en-1.10.3-229.0.139.0/tf_core/cfa_tcam_mgr_p4.c diff --git a/src/tf_core/cfa_tcam_mgr_p4.h b/bnxt_en-1.10.3-229.0.139.0/tf_core/cfa_tcam_mgr_p4.h similarity index 100% rename from src/tf_core/cfa_tcam_mgr_p4.h rename to bnxt_en-1.10.3-229.0.139.0/tf_core/cfa_tcam_mgr_p4.h diff --git a/src/tf_core/cfa_tcam_mgr_p58.c b/bnxt_en-1.10.3-229.0.139.0/tf_core/cfa_tcam_mgr_p58.c similarity index 100% rename from src/tf_core/cfa_tcam_mgr_p58.c rename to bnxt_en-1.10.3-229.0.139.0/tf_core/cfa_tcam_mgr_p58.c diff --git a/src/tf_core/cfa_tcam_mgr_p58.h b/bnxt_en-1.10.3-229.0.139.0/tf_core/cfa_tcam_mgr_p58.h similarity index 100% rename from src/tf_core/cfa_tcam_mgr_p58.h rename to bnxt_en-1.10.3-229.0.139.0/tf_core/cfa_tcam_mgr_p58.h diff --git a/src/tf_core/dpool.c b/bnxt_en-1.10.3-229.0.139.0/tf_core/dpool.c similarity index 100% rename from src/tf_core/dpool.c rename to bnxt_en-1.10.3-229.0.139.0/tf_core/dpool.c diff --git a/src/tf_core/dpool.h b/bnxt_en-1.10.3-229.0.139.0/tf_core/dpool.h similarity index 100% rename from src/tf_core/dpool.h rename to bnxt_en-1.10.3-229.0.139.0/tf_core/dpool.h diff --git a/src/tf_core/rand.c b/bnxt_en-1.10.3-229.0.139.0/tf_core/rand.c similarity index 100% rename from src/tf_core/rand.c rename to bnxt_en-1.10.3-229.0.139.0/tf_core/rand.c diff --git a/src/tf_core/rand.h b/bnxt_en-1.10.3-229.0.139.0/tf_core/rand.h similarity index 100% rename from src/tf_core/rand.h rename to bnxt_en-1.10.3-229.0.139.0/tf_core/rand.h diff --git a/src/tf_core/tf_core.c b/bnxt_en-1.10.3-229.0.139.0/tf_core/tf_core.c similarity index 100% rename from src/tf_core/tf_core.c rename to bnxt_en-1.10.3-229.0.139.0/tf_core/tf_core.c diff --git a/src/tf_core/tf_core.h b/bnxt_en-1.10.3-229.0.139.0/tf_core/tf_core.h similarity index 100% rename from src/tf_core/tf_core.h rename to bnxt_en-1.10.3-229.0.139.0/tf_core/tf_core.h diff --git a/src/tf_core/tf_device.c b/bnxt_en-1.10.3-229.0.139.0/tf_core/tf_device.c similarity index 100% rename from src/tf_core/tf_device.c rename to bnxt_en-1.10.3-229.0.139.0/tf_core/tf_device.c diff --git a/src/tf_core/tf_device.h b/bnxt_en-1.10.3-229.0.139.0/tf_core/tf_device.h similarity index 100% rename from src/tf_core/tf_device.h rename to bnxt_en-1.10.3-229.0.139.0/tf_core/tf_device.h diff --git a/src/tf_core/tf_device_p4.c b/bnxt_en-1.10.3-229.0.139.0/tf_core/tf_device_p4.c similarity index 100% rename from src/tf_core/tf_device_p4.c rename to bnxt_en-1.10.3-229.0.139.0/tf_core/tf_device_p4.c diff --git a/src/tf_core/tf_device_p4.h b/bnxt_en-1.10.3-229.0.139.0/tf_core/tf_device_p4.h similarity index 100% rename from src/tf_core/tf_device_p4.h rename to bnxt_en-1.10.3-229.0.139.0/tf_core/tf_device_p4.h diff 
--git a/src/tf_core/tf_device_p58.c b/bnxt_en-1.10.3-229.0.139.0/tf_core/tf_device_p58.c similarity index 100% rename from src/tf_core/tf_device_p58.c rename to bnxt_en-1.10.3-229.0.139.0/tf_core/tf_device_p58.c diff --git a/src/tf_core/tf_device_p58.h b/bnxt_en-1.10.3-229.0.139.0/tf_core/tf_device_p58.h similarity index 100% rename from src/tf_core/tf_device_p58.h rename to bnxt_en-1.10.3-229.0.139.0/tf_core/tf_device_p58.h diff --git a/src/tf_core/tf_em.h b/bnxt_en-1.10.3-229.0.139.0/tf_core/tf_em.h similarity index 100% rename from src/tf_core/tf_em.h rename to bnxt_en-1.10.3-229.0.139.0/tf_core/tf_em.h diff --git a/src/tf_core/tf_em_hash_internal.c b/bnxt_en-1.10.3-229.0.139.0/tf_core/tf_em_hash_internal.c similarity index 100% rename from src/tf_core/tf_em_hash_internal.c rename to bnxt_en-1.10.3-229.0.139.0/tf_core/tf_em_hash_internal.c diff --git a/src/tf_core/tf_em_internal.c b/bnxt_en-1.10.3-229.0.139.0/tf_core/tf_em_internal.c similarity index 100% rename from src/tf_core/tf_em_internal.c rename to bnxt_en-1.10.3-229.0.139.0/tf_core/tf_em_internal.c diff --git a/src/tf_core/tf_ext_flow_handle.h b/bnxt_en-1.10.3-229.0.139.0/tf_core/tf_ext_flow_handle.h similarity index 100% rename from src/tf_core/tf_ext_flow_handle.h rename to bnxt_en-1.10.3-229.0.139.0/tf_core/tf_ext_flow_handle.h diff --git a/src/tf_core/tf_global_cfg.c b/bnxt_en-1.10.3-229.0.139.0/tf_core/tf_global_cfg.c similarity index 100% rename from src/tf_core/tf_global_cfg.c rename to bnxt_en-1.10.3-229.0.139.0/tf_core/tf_global_cfg.c diff --git a/src/tf_core/tf_global_cfg.h b/bnxt_en-1.10.3-229.0.139.0/tf_core/tf_global_cfg.h similarity index 100% rename from src/tf_core/tf_global_cfg.h rename to bnxt_en-1.10.3-229.0.139.0/tf_core/tf_global_cfg.h diff --git a/src/tf_core/tf_identifier.c b/bnxt_en-1.10.3-229.0.139.0/tf_core/tf_identifier.c similarity index 100% rename from src/tf_core/tf_identifier.c rename to bnxt_en-1.10.3-229.0.139.0/tf_core/tf_identifier.c diff --git a/src/tf_core/tf_identifier.h b/bnxt_en-1.10.3-229.0.139.0/tf_core/tf_identifier.h similarity index 100% rename from src/tf_core/tf_identifier.h rename to bnxt_en-1.10.3-229.0.139.0/tf_core/tf_identifier.h diff --git a/src/tf_core/tf_if_tbl.c b/bnxt_en-1.10.3-229.0.139.0/tf_core/tf_if_tbl.c similarity index 100% rename from src/tf_core/tf_if_tbl.c rename to bnxt_en-1.10.3-229.0.139.0/tf_core/tf_if_tbl.c diff --git a/src/tf_core/tf_if_tbl.h b/bnxt_en-1.10.3-229.0.139.0/tf_core/tf_if_tbl.h similarity index 100% rename from src/tf_core/tf_if_tbl.h rename to bnxt_en-1.10.3-229.0.139.0/tf_core/tf_if_tbl.h diff --git a/src/tf_core/tf_msg.c b/bnxt_en-1.10.3-229.0.139.0/tf_core/tf_msg.c similarity index 100% rename from src/tf_core/tf_msg.c rename to bnxt_en-1.10.3-229.0.139.0/tf_core/tf_msg.c diff --git a/src/tf_core/tf_msg.h b/bnxt_en-1.10.3-229.0.139.0/tf_core/tf_msg.h similarity index 100% rename from src/tf_core/tf_msg.h rename to bnxt_en-1.10.3-229.0.139.0/tf_core/tf_msg.h diff --git a/src/tf_core/tf_rm.c b/bnxt_en-1.10.3-229.0.139.0/tf_core/tf_rm.c similarity index 100% rename from src/tf_core/tf_rm.c rename to bnxt_en-1.10.3-229.0.139.0/tf_core/tf_rm.c diff --git a/src/tf_core/tf_rm.h b/bnxt_en-1.10.3-229.0.139.0/tf_core/tf_rm.h similarity index 100% rename from src/tf_core/tf_rm.h rename to bnxt_en-1.10.3-229.0.139.0/tf_core/tf_rm.h diff --git a/src/tf_core/tf_session.c b/bnxt_en-1.10.3-229.0.139.0/tf_core/tf_session.c similarity index 100% rename from src/tf_core/tf_session.c rename to bnxt_en-1.10.3-229.0.139.0/tf_core/tf_session.c diff --git 
a/src/tf_core/tf_session.h b/bnxt_en-1.10.3-229.0.139.0/tf_core/tf_session.h similarity index 100% rename from src/tf_core/tf_session.h rename to bnxt_en-1.10.3-229.0.139.0/tf_core/tf_session.h diff --git a/src/tf_core/tf_sram_mgr.c b/bnxt_en-1.10.3-229.0.139.0/tf_core/tf_sram_mgr.c similarity index 100% rename from src/tf_core/tf_sram_mgr.c rename to bnxt_en-1.10.3-229.0.139.0/tf_core/tf_sram_mgr.c diff --git a/src/tf_core/tf_sram_mgr.h b/bnxt_en-1.10.3-229.0.139.0/tf_core/tf_sram_mgr.h similarity index 100% rename from src/tf_core/tf_sram_mgr.h rename to bnxt_en-1.10.3-229.0.139.0/tf_core/tf_sram_mgr.h diff --git a/src/tf_core/tf_tbl.c b/bnxt_en-1.10.3-229.0.139.0/tf_core/tf_tbl.c similarity index 100% rename from src/tf_core/tf_tbl.c rename to bnxt_en-1.10.3-229.0.139.0/tf_core/tf_tbl.c diff --git a/src/tf_core/tf_tbl.h b/bnxt_en-1.10.3-229.0.139.0/tf_core/tf_tbl.h similarity index 100% rename from src/tf_core/tf_tbl.h rename to bnxt_en-1.10.3-229.0.139.0/tf_core/tf_tbl.h diff --git a/src/tf_core/tf_tbl_sram.c b/bnxt_en-1.10.3-229.0.139.0/tf_core/tf_tbl_sram.c similarity index 100% rename from src/tf_core/tf_tbl_sram.c rename to bnxt_en-1.10.3-229.0.139.0/tf_core/tf_tbl_sram.c diff --git a/src/tf_core/tf_tbl_sram.h b/bnxt_en-1.10.3-229.0.139.0/tf_core/tf_tbl_sram.h similarity index 100% rename from src/tf_core/tf_tbl_sram.h rename to bnxt_en-1.10.3-229.0.139.0/tf_core/tf_tbl_sram.h diff --git a/src/tf_core/tf_tcam.c b/bnxt_en-1.10.3-229.0.139.0/tf_core/tf_tcam.c similarity index 100% rename from src/tf_core/tf_tcam.c rename to bnxt_en-1.10.3-229.0.139.0/tf_core/tf_tcam.c diff --git a/src/tf_core/tf_tcam.h b/bnxt_en-1.10.3-229.0.139.0/tf_core/tf_tcam.h similarity index 100% rename from src/tf_core/tf_tcam.h rename to bnxt_en-1.10.3-229.0.139.0/tf_core/tf_tcam.h diff --git a/src/tf_core/tf_tcam_mgr_msg.c b/bnxt_en-1.10.3-229.0.139.0/tf_core/tf_tcam_mgr_msg.c similarity index 100% rename from src/tf_core/tf_tcam_mgr_msg.c rename to bnxt_en-1.10.3-229.0.139.0/tf_core/tf_tcam_mgr_msg.c diff --git a/src/tf_core/tf_tcam_mgr_msg.h b/bnxt_en-1.10.3-229.0.139.0/tf_core/tf_tcam_mgr_msg.h similarity index 100% rename from src/tf_core/tf_tcam_mgr_msg.h rename to bnxt_en-1.10.3-229.0.139.0/tf_core/tf_tcam_mgr_msg.h diff --git a/src/tf_core/tf_util.c b/bnxt_en-1.10.3-229.0.139.0/tf_core/tf_util.c similarity index 100% rename from src/tf_core/tf_util.c rename to bnxt_en-1.10.3-229.0.139.0/tf_core/tf_util.c diff --git a/src/tf_core/tf_util.h b/bnxt_en-1.10.3-229.0.139.0/tf_core/tf_util.h similarity index 100% rename from src/tf_core/tf_util.h rename to bnxt_en-1.10.3-229.0.139.0/tf_core/tf_util.h diff --git a/src/tf_ulp/bnxt_tf_common.h b/bnxt_en-1.10.3-229.0.139.0/tf_ulp/bnxt_tf_common.h similarity index 100% rename from src/tf_ulp/bnxt_tf_common.h rename to bnxt_en-1.10.3-229.0.139.0/tf_ulp/bnxt_tf_common.h diff --git a/src/tf_ulp/bnxt_tf_tc_shim.c b/bnxt_en-1.10.3-229.0.139.0/tf_ulp/bnxt_tf_tc_shim.c similarity index 100% rename from src/tf_ulp/bnxt_tf_tc_shim.c rename to bnxt_en-1.10.3-229.0.139.0/tf_ulp/bnxt_tf_tc_shim.c diff --git a/src/tf_ulp/bnxt_tf_tc_shim.h b/bnxt_en-1.10.3-229.0.139.0/tf_ulp/bnxt_tf_tc_shim.h similarity index 100% rename from src/tf_ulp/bnxt_tf_tc_shim.h rename to bnxt_en-1.10.3-229.0.139.0/tf_ulp/bnxt_tf_tc_shim.h diff --git a/src/tf_ulp/bnxt_tf_ulp.c b/bnxt_en-1.10.3-229.0.139.0/tf_ulp/bnxt_tf_ulp.c similarity index 100% rename from src/tf_ulp/bnxt_tf_ulp.c rename to bnxt_en-1.10.3-229.0.139.0/tf_ulp/bnxt_tf_ulp.c diff --git a/src/tf_ulp/bnxt_tf_ulp.h 
b/bnxt_en-1.10.3-229.0.139.0/tf_ulp/bnxt_tf_ulp.h similarity index 100% rename from src/tf_ulp/bnxt_tf_ulp.h rename to bnxt_en-1.10.3-229.0.139.0/tf_ulp/bnxt_tf_ulp.h diff --git a/src/tf_ulp/bnxt_tf_ulp_tf.c b/bnxt_en-1.10.3-229.0.139.0/tf_ulp/bnxt_tf_ulp_tf.c similarity index 100% rename from src/tf_ulp/bnxt_tf_ulp_tf.c rename to bnxt_en-1.10.3-229.0.139.0/tf_ulp/bnxt_tf_ulp_tf.c diff --git a/src/tf_ulp/bnxt_tf_ulp_tf.h b/bnxt_en-1.10.3-229.0.139.0/tf_ulp/bnxt_tf_ulp_tf.h similarity index 100% rename from src/tf_ulp/bnxt_tf_ulp_tf.h rename to bnxt_en-1.10.3-229.0.139.0/tf_ulp/bnxt_tf_ulp_tf.h diff --git a/src/tf_ulp/bnxt_tf_ulp_tfc.c b/bnxt_en-1.10.3-229.0.139.0/tf_ulp/bnxt_tf_ulp_tfc.c similarity index 100% rename from src/tf_ulp/bnxt_tf_ulp_tfc.c rename to bnxt_en-1.10.3-229.0.139.0/tf_ulp/bnxt_tf_ulp_tfc.c diff --git a/src/tf_ulp/bnxt_tf_ulp_tfc.h b/bnxt_en-1.10.3-229.0.139.0/tf_ulp/bnxt_tf_ulp_tfc.h similarity index 100% rename from src/tf_ulp/bnxt_tf_ulp_tfc.h rename to bnxt_en-1.10.3-229.0.139.0/tf_ulp/bnxt_tf_ulp_tfc.h diff --git a/src/tf_ulp/bnxt_ulp_flow.h b/bnxt_en-1.10.3-229.0.139.0/tf_ulp/bnxt_ulp_flow.h similarity index 100% rename from src/tf_ulp/bnxt_ulp_flow.h rename to bnxt_en-1.10.3-229.0.139.0/tf_ulp/bnxt_ulp_flow.h diff --git a/src/tf_ulp/bnxt_ulp_linux_flow.c b/bnxt_en-1.10.3-229.0.139.0/tf_ulp/bnxt_ulp_linux_flow.c similarity index 100% rename from src/tf_ulp/bnxt_ulp_linux_flow.c rename to bnxt_en-1.10.3-229.0.139.0/tf_ulp/bnxt_ulp_linux_flow.c diff --git a/src/tf_ulp/bnxt_ulp_meter.c b/bnxt_en-1.10.3-229.0.139.0/tf_ulp/bnxt_ulp_meter.c similarity index 100% rename from src/tf_ulp/bnxt_ulp_meter.c rename to bnxt_en-1.10.3-229.0.139.0/tf_ulp/bnxt_ulp_meter.c diff --git a/src/tf_ulp/generic_templates/ulp_template_db_act.c b/bnxt_en-1.10.3-229.0.139.0/tf_ulp/generic_templates/ulp_template_db_act.c similarity index 100% rename from src/tf_ulp/generic_templates/ulp_template_db_act.c rename to bnxt_en-1.10.3-229.0.139.0/tf_ulp/generic_templates/ulp_template_db_act.c diff --git a/src/tf_ulp/generic_templates/ulp_template_db_class.c b/bnxt_en-1.10.3-229.0.139.0/tf_ulp/generic_templates/ulp_template_db_class.c similarity index 100% rename from src/tf_ulp/generic_templates/ulp_template_db_class.c rename to bnxt_en-1.10.3-229.0.139.0/tf_ulp/generic_templates/ulp_template_db_class.c diff --git a/src/tf_ulp/generic_templates/ulp_template_db_enum.h b/bnxt_en-1.10.3-229.0.139.0/tf_ulp/generic_templates/ulp_template_db_enum.h similarity index 100% rename from src/tf_ulp/generic_templates/ulp_template_db_enum.h rename to bnxt_en-1.10.3-229.0.139.0/tf_ulp/generic_templates/ulp_template_db_enum.h diff --git a/src/tf_ulp/generic_templates/ulp_template_db_field.h b/bnxt_en-1.10.3-229.0.139.0/tf_ulp/generic_templates/ulp_template_db_field.h similarity index 100% rename from src/tf_ulp/generic_templates/ulp_template_db_field.h rename to bnxt_en-1.10.3-229.0.139.0/tf_ulp/generic_templates/ulp_template_db_field.h diff --git a/src/tf_ulp/generic_templates/ulp_template_db_tbl.c b/bnxt_en-1.10.3-229.0.139.0/tf_ulp/generic_templates/ulp_template_db_tbl.c similarity index 100% rename from src/tf_ulp/generic_templates/ulp_template_db_tbl.c rename to bnxt_en-1.10.3-229.0.139.0/tf_ulp/generic_templates/ulp_template_db_tbl.c diff --git a/src/tf_ulp/generic_templates/ulp_template_db_tbl.h b/bnxt_en-1.10.3-229.0.139.0/tf_ulp/generic_templates/ulp_template_db_tbl.h similarity index 100% rename from src/tf_ulp/generic_templates/ulp_template_db_tbl.h rename to 
bnxt_en-1.10.3-229.0.139.0/tf_ulp/generic_templates/ulp_template_db_tbl.h diff --git a/src/tf_ulp/generic_templates/ulp_template_db_thor2_act.c b/bnxt_en-1.10.3-229.0.139.0/tf_ulp/generic_templates/ulp_template_db_thor2_act.c similarity index 100% rename from src/tf_ulp/generic_templates/ulp_template_db_thor2_act.c rename to bnxt_en-1.10.3-229.0.139.0/tf_ulp/generic_templates/ulp_template_db_thor2_act.c diff --git a/src/tf_ulp/generic_templates/ulp_template_db_thor2_act.o b/bnxt_en-1.10.3-229.0.139.0/tf_ulp/generic_templates/ulp_template_db_thor2_act.o similarity index 100% rename from src/tf_ulp/generic_templates/ulp_template_db_thor2_act.o rename to bnxt_en-1.10.3-229.0.139.0/tf_ulp/generic_templates/ulp_template_db_thor2_act.o diff --git a/src/tf_ulp/generic_templates/ulp_template_db_thor2_class.c b/bnxt_en-1.10.3-229.0.139.0/tf_ulp/generic_templates/ulp_template_db_thor2_class.c similarity index 100% rename from src/tf_ulp/generic_templates/ulp_template_db_thor2_class.c rename to bnxt_en-1.10.3-229.0.139.0/tf_ulp/generic_templates/ulp_template_db_thor2_class.c diff --git a/src/tf_ulp/generic_templates/ulp_template_db_thor2_class.o b/bnxt_en-1.10.3-229.0.139.0/tf_ulp/generic_templates/ulp_template_db_thor2_class.o similarity index 100% rename from src/tf_ulp/generic_templates/ulp_template_db_thor2_class.o rename to bnxt_en-1.10.3-229.0.139.0/tf_ulp/generic_templates/ulp_template_db_thor2_class.o diff --git a/src/tf_ulp/generic_templates/ulp_template_db_thor_act.c b/bnxt_en-1.10.3-229.0.139.0/tf_ulp/generic_templates/ulp_template_db_thor_act.c similarity index 100% rename from src/tf_ulp/generic_templates/ulp_template_db_thor_act.c rename to bnxt_en-1.10.3-229.0.139.0/tf_ulp/generic_templates/ulp_template_db_thor_act.c diff --git a/src/tf_ulp/generic_templates/ulp_template_db_thor_class.c b/bnxt_en-1.10.3-229.0.139.0/tf_ulp/generic_templates/ulp_template_db_thor_class.c similarity index 100% rename from src/tf_ulp/generic_templates/ulp_template_db_thor_class.c rename to bnxt_en-1.10.3-229.0.139.0/tf_ulp/generic_templates/ulp_template_db_thor_class.c diff --git a/src/tf_ulp/generic_templates/ulp_template_db_wh_plus_act.c b/bnxt_en-1.10.3-229.0.139.0/tf_ulp/generic_templates/ulp_template_db_wh_plus_act.c similarity index 100% rename from src/tf_ulp/generic_templates/ulp_template_db_wh_plus_act.c rename to bnxt_en-1.10.3-229.0.139.0/tf_ulp/generic_templates/ulp_template_db_wh_plus_act.c diff --git a/src/tf_ulp/generic_templates/ulp_template_db_wh_plus_class.c b/bnxt_en-1.10.3-229.0.139.0/tf_ulp/generic_templates/ulp_template_db_wh_plus_class.c similarity index 100% rename from src/tf_ulp/generic_templates/ulp_template_db_wh_plus_class.c rename to bnxt_en-1.10.3-229.0.139.0/tf_ulp/generic_templates/ulp_template_db_wh_plus_class.c diff --git a/src/tf_ulp/ulp_def_rules.c b/bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_def_rules.c similarity index 100% rename from src/tf_ulp/ulp_def_rules.c rename to bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_def_rules.c diff --git a/src/tf_ulp/ulp_fc_mgr.c b/bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_fc_mgr.c similarity index 100% rename from src/tf_ulp/ulp_fc_mgr.c rename to bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_fc_mgr.c diff --git a/src/tf_ulp/ulp_fc_mgr.h b/bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_fc_mgr.h similarity index 100% rename from src/tf_ulp/ulp_fc_mgr.h rename to bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_fc_mgr.h diff --git a/src/tf_ulp/ulp_fc_mgr_tf.c b/bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_fc_mgr_tf.c similarity index 100% rename from src/tf_ulp/ulp_fc_mgr_tf.c rename 
to bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_fc_mgr_tf.c diff --git a/src/tf_ulp/ulp_fc_mgr_tfc.c b/bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_fc_mgr_tfc.c similarity index 100% rename from src/tf_ulp/ulp_fc_mgr_tfc.c rename to bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_fc_mgr_tfc.c diff --git a/src/tf_ulp/ulp_flow_db.c b/bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_flow_db.c similarity index 100% rename from src/tf_ulp/ulp_flow_db.c rename to bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_flow_db.c diff --git a/src/tf_ulp/ulp_flow_db.h b/bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_flow_db.h similarity index 100% rename from src/tf_ulp/ulp_flow_db.h rename to bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_flow_db.h diff --git a/src/tf_ulp/ulp_gen_tbl.c b/bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_gen_tbl.c similarity index 100% rename from src/tf_ulp/ulp_gen_tbl.c rename to bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_gen_tbl.c diff --git a/src/tf_ulp/ulp_gen_tbl.h b/bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_gen_tbl.h similarity index 100% rename from src/tf_ulp/ulp_gen_tbl.h rename to bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_gen_tbl.h diff --git a/src/tf_ulp/ulp_generic_flow_offload.c b/bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_generic_flow_offload.c similarity index 100% rename from src/tf_ulp/ulp_generic_flow_offload.c rename to bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_generic_flow_offload.c diff --git a/src/tf_ulp/ulp_generic_flow_offload.h b/bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_generic_flow_offload.h similarity index 100% rename from src/tf_ulp/ulp_generic_flow_offload.h rename to bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_generic_flow_offload.h diff --git a/src/tf_ulp/ulp_linux.h b/bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_linux.h similarity index 100% rename from src/tf_ulp/ulp_linux.h rename to bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_linux.h diff --git a/src/tf_ulp/ulp_mapper.c b/bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_mapper.c similarity index 100% rename from src/tf_ulp/ulp_mapper.c rename to bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_mapper.c diff --git a/src/tf_ulp/ulp_mapper.h b/bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_mapper.h similarity index 100% rename from src/tf_ulp/ulp_mapper.h rename to bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_mapper.h diff --git a/src/tf_ulp/ulp_mapper_tf.c b/bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_mapper_tf.c similarity index 100% rename from src/tf_ulp/ulp_mapper_tf.c rename to bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_mapper_tf.c diff --git a/src/tf_ulp/ulp_mapper_tfc.c b/bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_mapper_tfc.c similarity index 100% rename from src/tf_ulp/ulp_mapper_tfc.c rename to bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_mapper_tfc.c diff --git a/src/tf_ulp/ulp_mark_mgr.c b/bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_mark_mgr.c similarity index 100% rename from src/tf_ulp/ulp_mark_mgr.c rename to bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_mark_mgr.c diff --git a/src/tf_ulp/ulp_mark_mgr.h b/bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_mark_mgr.h similarity index 100% rename from src/tf_ulp/ulp_mark_mgr.h rename to bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_mark_mgr.h diff --git a/src/tf_ulp/ulp_matcher.c b/bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_matcher.c similarity index 100% rename from src/tf_ulp/ulp_matcher.c rename to bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_matcher.c diff --git a/src/tf_ulp/ulp_matcher.h b/bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_matcher.h similarity index 100% rename from src/tf_ulp/ulp_matcher.h rename to bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_matcher.h diff --git a/src/tf_ulp/ulp_port_db.c b/bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_port_db.c 
similarity index 100% rename from src/tf_ulp/ulp_port_db.c rename to bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_port_db.c diff --git a/src/tf_ulp/ulp_port_db.h b/bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_port_db.h similarity index 100% rename from src/tf_ulp/ulp_port_db.h rename to bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_port_db.h diff --git a/src/tf_ulp/ulp_tc_handler_tbl.c b/bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_tc_handler_tbl.c similarity index 100% rename from src/tf_ulp/ulp_tc_handler_tbl.c rename to bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_tc_handler_tbl.c diff --git a/src/tf_ulp/ulp_tc_parser.c b/bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_tc_parser.c similarity index 100% rename from src/tf_ulp/ulp_tc_parser.c rename to bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_tc_parser.c diff --git a/src/tf_ulp/ulp_tc_parser.h b/bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_tc_parser.h similarity index 100% rename from src/tf_ulp/ulp_tc_parser.h rename to bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_tc_parser.h diff --git a/src/tf_ulp/ulp_template_debug.c b/bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_template_debug.c similarity index 100% rename from src/tf_ulp/ulp_template_debug.c rename to bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_template_debug.c diff --git a/src/tf_ulp/ulp_template_debug.h b/bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_template_debug.h similarity index 100% rename from src/tf_ulp/ulp_template_debug.h rename to bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_template_debug.h diff --git a/src/tf_ulp/ulp_template_debug_proto.h b/bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_template_debug_proto.h similarity index 100% rename from src/tf_ulp/ulp_template_debug_proto.h rename to bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_template_debug_proto.h diff --git a/src/tf_ulp/ulp_template_struct.h b/bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_template_struct.h similarity index 100% rename from src/tf_ulp/ulp_template_struct.h rename to bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_template_struct.h diff --git a/src/tf_ulp/ulp_tf_debug.c b/bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_tf_debug.c similarity index 100% rename from src/tf_ulp/ulp_tf_debug.c rename to bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_tf_debug.c diff --git a/src/tf_ulp/ulp_tf_debug.h b/bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_tf_debug.h similarity index 100% rename from src/tf_ulp/ulp_tf_debug.h rename to bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_tf_debug.h diff --git a/src/tf_ulp/ulp_udcc.c b/bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_udcc.c similarity index 100% rename from src/tf_ulp/ulp_udcc.c rename to bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_udcc.c diff --git a/src/tf_ulp/ulp_udcc.h b/bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_udcc.h similarity index 100% rename from src/tf_ulp/ulp_udcc.h rename to bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_udcc.h diff --git a/src/tf_ulp/ulp_utils.c b/bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_utils.c similarity index 100% rename from src/tf_ulp/ulp_utils.c rename to bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_utils.c diff --git a/src/tf_ulp/ulp_utils.h b/bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_utils.h similarity index 100% rename from src/tf_ulp/ulp_utils.h rename to bnxt_en-1.10.3-229.0.139.0/tf_ulp/ulp_utils.h diff --git a/src/tfc_v3/tfc.h b/bnxt_en-1.10.3-229.0.139.0/tfc_v3/tfc.h similarity index 100% rename from src/tfc_v3/tfc.h rename to bnxt_en-1.10.3-229.0.139.0/tfc_v3/tfc.h diff --git a/src/tfc_v3/tfc_act.c b/bnxt_en-1.10.3-229.0.139.0/tfc_v3/tfc_act.c similarity index 100% rename from src/tfc_v3/tfc_act.c rename to bnxt_en-1.10.3-229.0.139.0/tfc_v3/tfc_act.c diff --git a/src/tfc_v3/tfc_action_handle.h 
b/bnxt_en-1.10.3-229.0.139.0/tfc_v3/tfc_action_handle.h similarity index 100% rename from src/tfc_v3/tfc_action_handle.h rename to bnxt_en-1.10.3-229.0.139.0/tfc_v3/tfc_action_handle.h diff --git a/src/tfc_v3/tfc_cpm.c b/bnxt_en-1.10.3-229.0.139.0/tfc_v3/tfc_cpm.c similarity index 100% rename from src/tfc_v3/tfc_cpm.c rename to bnxt_en-1.10.3-229.0.139.0/tfc_v3/tfc_cpm.c diff --git a/src/tfc_v3/tfc_cpm.h b/bnxt_en-1.10.3-229.0.139.0/tfc_v3/tfc_cpm.h similarity index 100% rename from src/tfc_v3/tfc_cpm.h rename to bnxt_en-1.10.3-229.0.139.0/tfc_v3/tfc_cpm.h diff --git a/src/tfc_v3/tfc_debug.h b/bnxt_en-1.10.3-229.0.139.0/tfc_v3/tfc_debug.h similarity index 100% rename from src/tfc_v3/tfc_debug.h rename to bnxt_en-1.10.3-229.0.139.0/tfc_v3/tfc_debug.h diff --git a/src/tfc_v3/tfc_em.c b/bnxt_en-1.10.3-229.0.139.0/tfc_v3/tfc_em.c similarity index 100% rename from src/tfc_v3/tfc_em.c rename to bnxt_en-1.10.3-229.0.139.0/tfc_v3/tfc_em.c diff --git a/src/tfc_v3/tfc_em.h b/bnxt_en-1.10.3-229.0.139.0/tfc_v3/tfc_em.h similarity index 100% rename from src/tfc_v3/tfc_em.h rename to bnxt_en-1.10.3-229.0.139.0/tfc_v3/tfc_em.h diff --git a/src/tfc_v3/tfc_flow_handle.h b/bnxt_en-1.10.3-229.0.139.0/tfc_v3/tfc_flow_handle.h similarity index 100% rename from src/tfc_v3/tfc_flow_handle.h rename to bnxt_en-1.10.3-229.0.139.0/tfc_v3/tfc_flow_handle.h diff --git a/src/tfc_v3/tfc_global_id.c b/bnxt_en-1.10.3-229.0.139.0/tfc_v3/tfc_global_id.c similarity index 100% rename from src/tfc_v3/tfc_global_id.c rename to bnxt_en-1.10.3-229.0.139.0/tfc_v3/tfc_global_id.c diff --git a/src/tfc_v3/tfc_ident.c b/bnxt_en-1.10.3-229.0.139.0/tfc_v3/tfc_ident.c similarity index 100% rename from src/tfc_v3/tfc_ident.c rename to bnxt_en-1.10.3-229.0.139.0/tfc_v3/tfc_ident.c diff --git a/src/tfc_v3/tfc_idx_tbl.c b/bnxt_en-1.10.3-229.0.139.0/tfc_v3/tfc_idx_tbl.c similarity index 100% rename from src/tfc_v3/tfc_idx_tbl.c rename to bnxt_en-1.10.3-229.0.139.0/tfc_v3/tfc_idx_tbl.c diff --git a/src/tfc_v3/tfc_if_tbl.c b/bnxt_en-1.10.3-229.0.139.0/tfc_v3/tfc_if_tbl.c similarity index 100% rename from src/tfc_v3/tfc_if_tbl.c rename to bnxt_en-1.10.3-229.0.139.0/tfc_v3/tfc_if_tbl.c diff --git a/src/tfc_v3/tfc_init.c b/bnxt_en-1.10.3-229.0.139.0/tfc_v3/tfc_init.c similarity index 100% rename from src/tfc_v3/tfc_init.c rename to bnxt_en-1.10.3-229.0.139.0/tfc_v3/tfc_init.c diff --git a/src/tfc_v3/tfc_mpc_table.c b/bnxt_en-1.10.3-229.0.139.0/tfc_v3/tfc_mpc_table.c similarity index 100% rename from src/tfc_v3/tfc_mpc_table.c rename to bnxt_en-1.10.3-229.0.139.0/tfc_v3/tfc_mpc_table.c diff --git a/src/tfc_v3/tfc_msg.c b/bnxt_en-1.10.3-229.0.139.0/tfc_v3/tfc_msg.c similarity index 100% rename from src/tfc_v3/tfc_msg.c rename to bnxt_en-1.10.3-229.0.139.0/tfc_v3/tfc_msg.c diff --git a/src/tfc_v3/tfc_msg.h b/bnxt_en-1.10.3-229.0.139.0/tfc_v3/tfc_msg.h similarity index 100% rename from src/tfc_v3/tfc_msg.h rename to bnxt_en-1.10.3-229.0.139.0/tfc_v3/tfc_msg.h diff --git a/src/tfc_v3/tfc_priv.c b/bnxt_en-1.10.3-229.0.139.0/tfc_v3/tfc_priv.c similarity index 100% rename from src/tfc_v3/tfc_priv.c rename to bnxt_en-1.10.3-229.0.139.0/tfc_v3/tfc_priv.c diff --git a/src/tfc_v3/tfc_priv.h b/bnxt_en-1.10.3-229.0.139.0/tfc_v3/tfc_priv.h similarity index 100% rename from src/tfc_v3/tfc_priv.h rename to bnxt_en-1.10.3-229.0.139.0/tfc_v3/tfc_priv.h diff --git a/src/tfc_v3/tfc_session.c b/bnxt_en-1.10.3-229.0.139.0/tfc_v3/tfc_session.c similarity index 100% rename from src/tfc_v3/tfc_session.c rename to bnxt_en-1.10.3-229.0.139.0/tfc_v3/tfc_session.c diff --git 
a/src/tfc_v3/tfc_tbl_scope.c b/bnxt_en-1.10.3-229.0.139.0/tfc_v3/tfc_tbl_scope.c similarity index 100% rename from src/tfc_v3/tfc_tbl_scope.c rename to bnxt_en-1.10.3-229.0.139.0/tfc_v3/tfc_tbl_scope.c diff --git a/src/tfc_v3/tfc_tcam.c b/bnxt_en-1.10.3-229.0.139.0/tfc_v3/tfc_tcam.c similarity index 100% rename from src/tfc_v3/tfc_tcam.c rename to bnxt_en-1.10.3-229.0.139.0/tfc_v3/tfc_tcam.c diff --git a/src/tfc_v3/tfc_util.c b/bnxt_en-1.10.3-229.0.139.0/tfc_v3/tfc_util.c similarity index 100% rename from src/tfc_v3/tfc_util.c rename to bnxt_en-1.10.3-229.0.139.0/tfc_v3/tfc_util.c diff --git a/src/tfc_v3/tfc_util.h b/bnxt_en-1.10.3-229.0.139.0/tfc_v3/tfc_util.h similarity index 100% rename from src/tfc_v3/tfc_util.h rename to bnxt_en-1.10.3-229.0.139.0/tfc_v3/tfc_util.h diff --git a/src/tfc_v3/tfc_vf2pf_msg.c b/bnxt_en-1.10.3-229.0.139.0/tfc_v3/tfc_vf2pf_msg.c similarity index 100% rename from src/tfc_v3/tfc_vf2pf_msg.c rename to bnxt_en-1.10.3-229.0.139.0/tfc_v3/tfc_vf2pf_msg.c diff --git a/src/tfc_v3/tfc_vf2pf_msg.h b/bnxt_en-1.10.3-229.0.139.0/tfc_v3/tfc_vf2pf_msg.h similarity index 100% rename from src/tfc_v3/tfc_vf2pf_msg.h rename to bnxt_en-1.10.3-229.0.139.0/tfc_v3/tfc_vf2pf_msg.h diff --git a/src/tfc_v3/tfo.c b/bnxt_en-1.10.3-229.0.139.0/tfc_v3/tfo.c similarity index 100% rename from src/tfc_v3/tfo.c rename to bnxt_en-1.10.3-229.0.139.0/tfc_v3/tfo.c diff --git a/src/tfc_v3/tfo.h b/bnxt_en-1.10.3-229.0.139.0/tfc_v3/tfo.h similarity index 100% rename from src/tfc_v3/tfo.h rename to bnxt_en-1.10.3-229.0.139.0/tfc_v3/tfo.h diff --git a/bnxt_re b/bnxt_re new file mode 120000 index 0000000..768a01a --- /dev/null +++ b/bnxt_re @@ -0,0 +1 @@ +bnxt_re-1.10.3-229.0.139.0 \ No newline at end of file diff --git a/bnxt_re-1.10.3-229.0.139.0/COPYING b/bnxt_re-1.10.3-229.0.139.0/COPYING new file mode 100644 index 0000000..d159169 --- /dev/null +++ b/bnxt_re-1.10.3-229.0.139.0/COPYING @@ -0,0 +1,339 @@ + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Lesser General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. 
+ + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. 
+ + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. 
However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. 
Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. 
+
+                    END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License along
+    with this program; if not, write to the Free Software Foundation, Inc.,
+    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+    Gnomovision version 69, Copyright (C) year name of author
+    Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary. Here is a sample; alter the names:
+
+  Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+  `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+  <signature of Ty Coon>, 1 April 1989
+  Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs. If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.
diff --git a/bnxt_re-1.10.3-229.0.139.0/Makefile b/bnxt_re-1.10.3-229.0.139.0/Makefile
new file mode 100644
index 0000000..d96dcb5
--- /dev/null
+++ b/bnxt_re-1.10.3-229.0.139.0/Makefile
@@ -0,0 +1,1026 @@
+#!/usr/bin/make
+# Makefile for building Linux Broadcom Gigabit ethernet RDMA driver as a module.
+# $id$
+KVER :=
+ifeq ($(KVER),)
+  KVER := $(shell uname -r)
+endif
+
+__ARCH := $(shell uname -m)
+
+ifeq ($(BNXT_EN_INC),)
+  BNXT_EN_INC := $(shell pwd)/../bnxt_en
+  export BNXT_EN_INC
+endif
+
+ifeq ($(BNXT_QPLIB_INC),)
+  BNXT_QPLIB_INC := $(shell pwd)
+  export BNXT_QPLIB_INC
+endif
+
+ifneq ($(SPARSE_EXEC_PATH), )
+  BNXT_SPARSE_CMD := CHECK="$(SPARSE_EXEC_PATH) -p=kernel" C=2 CF="-D__CHECK_ENDIAN__"
+endif
+
+ifneq ($(SMATCH_EXEC_PATH), )
+  BNXT_SMATCH_CMD := CHECK="$(SMATCH_EXEC_PATH) -p=kernel" C=1
+endif
+
+# PREFIX may be set by the RPM build to set the effective root.
+PREFIX :=
+# ifeq ($(shell ls /lib/modules/$(KVER)/build > /dev/null 2>&1 && echo build),)
+#   # SuSE source RPMs
+#   _KVER := $(shell echo $(KVER) | cut -d "-" -f1,2)
+#   _KFLA := $(shell echo $(KVER) | cut -d "-" -f3)
+#   _ARCH := $(shell file -b /lib/modules/$(shell uname -r)/build | cut -d "/" -f5)
+#   ifeq ($(_ARCH),)
+#     _ARCH := $(__ARCH)
+#   endif
+#   ifeq ($(shell ls /usr/src/linux-$(_KVER)-obj > /dev/null 2>&1 && echo linux),)
+#     ifeq ($(shell ls /usr/src/kernels/$(KVER)-$(__ARCH) > /dev/null 2>&1 && echo linux),)
+#       LINUX :=
+#     else
+#       LINUX := /usr/src/kernels/$(KVER)-$(__ARCH)
+#       LINUXSRC := $(LINUX)
+#     endif
+#   else
+#     LINUX := /usr/src/linux-$(_KVER)-obj/$(_ARCH)/$(_KFLA)
+#     LINUXSRC := /usr/src/linux-$(_KVER)
+#   endif
+# else
+#   LINUX := /lib/modules/$(KVER)/build
+#   ifeq ($(shell ls /lib/modules/$(KVER)/source > /dev/null 2>&1 && echo source),)
+#     LINUXSRC := $(LINUX)
+#   else
+#     LINUXSRC := /lib/modules/$(KVER)/source
+#   endif
+# endif
+
+# ifneq ($(KDIR),)
+#   LINUX := $(KDIR)
+#   LINUXSRC := $(LINUX)
+# endif
+
+KDIR ?= /usr/src/kernels/${KVER}
+LINUX := $(KDIR)
+LINUXSRC := $(LINUX)
+
+ifeq ($(shell ls $(LINUXSRC)/include/uapi > /dev/null 2>&1 && echo uapi),)
+  UAPI :=
+else
+  UAPI := uapi
+endif
+
+ifeq ($(BCMMODDIR),)
+  ifeq ($(shell ls /lib/modules/$(KVER)/updates > /dev/null 2>&1 && echo 1),1)
+    BCMMODDIR := /lib/modules/$(KVER)/updates/drivers/infiniband/hw/bnxt_re
+  else
+    ifeq ($(shell grep -q "search.*[[:space:]]updates" /etc/depmod.conf > /dev/null 2>&1 && echo 1),1)
+      BCMMODDIR := /lib/modules/$(KVER)/updates/drivers/infiniband/hw/bnxt_re
+    else
+      ifeq ($(shell grep -q "search.*[[:space:]]updates" /etc/depmod.d/* > /dev/null 2>&1 && echo 1),1)
+        BCMMODDIR := /lib/modules/$(KVER)/updates/drivers/infiniband/hw/bnxt_re
+      else
+        BCMMODDIR := /lib/modules/$(KVER)/kernel/drivers/infiniband/hw/bnxt_re
+      endif
+    endif
+  endif
+endif
+
+ifeq ($(OFED_VERSION), )
+  $(warning Using native IB stack)
+  OFED_VERSION := OFED-NATIVE
+endif
+
+#find OFED version and compat-includes
+ofed_major := $(filter OFED-3.% OFED-4.%, $(OFED_VERSION))
+ifneq ($(ofed_major), )
+exists := $(shell if [ -e /usr/src/compat-rdma$(OFED_VERSION) ];\
+           then echo y; fi)
+ifeq ($(exists), )
+$(shell ln -s /usr/src/compat-rdma\
+        /usr/src/compat-rdma$(OFED_VERSION))
+endif
+OFA_BUILD_PATH := /usr/src/compat-rdma$(OFED_VERSION)
+OFA_KERNEL_PATH := /usr/src/compat-rdma$(OFED_VERSION)
+EXTRA_CFLAGS += -DOFED_3_x
+ofed_4_17_x := $(filter OFED-4.17%, $(ofed_major))
+ifneq ($(ofed_4_17_x), )
+EXTRA_CFLAGS += -D__OFED_BUILD__
+endif
+EXTRA_CFLAGS += -include $(OFA_KERNEL_PATH)/include/linux/compat-2.6.h
+
+AUTOCONF_H := -include $(shell /bin/ls -1 $(LINUX)/include/*/autoconf.h 2> /dev/null | head -1)
+endif #end non 3.x OFED
+
+ifeq (OFED-NATIVE, $(findstring OFED-NATIVE, $(OFED_VERSION)))
+OFA_KERNEL_PATH := $(LINUXSRC)
+OFA_BUILD_PATH := $(LINUX)
+else
+# Add OFED symbols only if external OFED is used
+KBUILD_EXTRA_SYMBOLS :=
$(OFA_BUILD_PATH)/Module.symvers +endif + +ifneq ($(BNXT_PEER_MEM_INC),) +KBUILD_EXTRA_SYMBOLS += $(BNXT_PEER_MEM_INC)/Module.symvers +endif + +ifeq ($(shell ls /lib/modules/$(KVER)/source > /dev/null 2>&1 && echo source),) +OFA_KERNEL_PATH := $(OFA_BUILD_PATH) +endif + +EXTRA_CFLAGS += -I$(BNXT_EN_INC) + +# Distro specific compilation flags +DISTRO_CFLAG := -D__LINUX + +ifneq ($(shell grep "netdev_notifier_info_to_dev" $(LINUXSRC)/include/linux/netdevice.h > /dev/null 2>&1 && echo netdev_not),) + DISTRO_CFLAG += -DHAVE_NETDEV_NOTIFIER_INFO_TO_DEV +endif + +ifneq ($(shell grep "NETDEV_PRE_CHANGEADDR" $(LINUXSRC)/include/linux/netdevice.h > /dev/null 2>&1 && echo netdev_not),) + DISTRO_CFLAG += -DHAVE_NETDEV_PRE_CHANGEADDR +endif + +ifneq ($(shell grep "NETDEV_CVLAN_FILTER_PUSH_INFO" $(LINUXSRC)/include/linux/netdevice.h > /dev/null 2>&1 && echo netdev_not),) + DISTRO_CFLAG += -DHAVE_NETDEV_CVLAN_FILTER_PUSH_INFO +endif + +ifneq ($(shell grep "NETDEV_UDP_TUNNEL_DROP_INFO" $(LINUXSRC)/include/linux/netdevice.h > /dev/null 2>&1 && echo netdev_not),) + DISTRO_CFLAG += -DHAVE_NETDEV_UDP_TUNNEL_DROP_INFO +endif + +ifneq ($(shell grep -o "NETDEV_CHANGE_TX_QUEUE_LEN" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_NETDEV_CHANGE_TX_QUEUE_LEN +endif + +ifneq ($(shell grep -o "NETDEV_PRECHANGEUPPER" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_NETDEV_PRECHANGEUPPER +endif + +ifneq ($(shell grep -o "NETDEV_CHANGELOWERSTATE" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_NETDEV_CHANGELOWERSTATE +endif + +ifneq ($(shell grep "register_netdevice_notifier_rh" $(LINUXSRC)/include/linux/netdevice.h > /dev/null 2>&1 && echo register_net),) + DISTRO_CFLAG += -DHAVE_REGISTER_NETDEVICE_NOTIFIER_RH +endif + +ifneq ($(shell grep "__vlan_find_dev_deep_rcu" $(LINUXSRC)/include/linux/if_vlan.h > /dev/null 2>&1 && echo vlan_find_dev_deep_rcu),) + DISTRO_CFLAG += -DHAVE_VLAN_FIND_DEV_DEEP_RCU +endif + +ifneq ($(shell grep -so "ib_mw_type" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h > /dev/null 2>&1 && echo ib_mw_type),) + DISTRO_CFLAG += -DHAVE_IB_MW_TYPE +endif + +ifneq ($(shell grep -A 3 "alloc_mw" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h |grep "struct ib_udata" > /dev/null 2>&1 && echo ib_udata),) + DISTRO_CFLAG += -DHAVE_ALLOW_MW_WITH_UDATA +endif + +ifneq ($(shell grep "ib_fmr" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h > /dev/null 2>&1 && echo ib_fmr),) + DISTRO_CFLAG += -DHAVE_IB_FMR +endif + +ifneq ($(shell grep "rdma_ah_init_attr" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h > /dev/null 2>&1 && echo ib_fmr),) + DISTRO_CFLAG += -DHAVE_RDMA_AH_INIT_ATTR +endif + +ifneq ($(shell grep -so "ib_bind_mw" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h > /dev/null 2>&1 && echo ib_bind_mw),) + DISTRO_CFLAG += -DHAVE_IB_BIND_MW +endif + +ifneq ($(shell grep "ib_create_mr" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h > /dev/null 2>&1 && echo ib_create_mr),) + DISTRO_CFLAG += -DHAVE_IB_CREATE_MR +endif + +ifneq ($(shell grep "ib_flow" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h > /dev/null 2>&1 && echo ib_flow),) + DISTRO_CFLAG += -DHAVE_IB_FLOW +endif + +ifneq ($(shell grep "rereg_user_mr" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h > /dev/null 2>&1 && echo rereg_user_mr),) + DISTRO_CFLAG += -DHAVE_IB_REREG_USER_MR +endif + +ifneq ($(shell grep "MEM_WINDOW_TYPE" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h > /dev/null 2>&1 && echo mem_window_type),) + DISTRO_CFLAG += -DHAVE_IB_MEM_WINDOW_TYPE +endif + +ifneq ($(shell grep "odp_caps" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h 
> /dev/null 2>&1 && echo odp_caps),) + DISTRO_CFLAG += -DHAVE_IB_ODP_CAPS +endif + +ifneq ($(shell grep "IP_BASED_GIDS" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h > /dev/null 2>&1 && echo ip_based_gids),) + DISTRO_CFLAG += -DHAVE_IB_BASED_GIDS +endif + +ifneq ($(shell grep "IB_GID_TYPE_ROCE_UDP_ENCAP" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h |grep "IB_UVERBS_GID_TYPE_ROCE_V2" ),) + DISTRO_CFLAG += -DHAVE_GID_TYPE_ROCE_UDP_ENCAP_ROCEV2 +endif + +ifneq ($(shell grep "dmac" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h > /dev/null 2>&1 && echo dmac),) + DISTRO_CFLAG += -DHAVE_IB_AH_DMAC +endif + +ifneq ($(shell grep "IB_ZERO_BASED" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h > /dev/null 2>&1 && echo zero),) + DISTRO_CFLAG += -DHAVE_IB_ZERO_BASED +endif + +ifneq ($(shell grep "IB_ACCESS_ON_DEMAND" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h > /dev/null 2>&1 && echo demand),) + DISTRO_CFLAG += -DHAVE_IB_ACCESS_ON_DEMAND +endif + +ifneq ($(shell grep "sg_table" $(OFA_KERNEL_PATH)/include/rdma/ib_umem.h > /dev/null 2>&1 && echo sg_table),) + DISTRO_CFLAG += -DHAVE_IB_UMEM_SG_TABLE +endif + +ifneq ($(shell grep "sg_append_table" $(OFA_KERNEL_PATH)/include/rdma/ib_umem.h > /dev/null 2>&1 && echo sg_append_table),) + DISTRO_CFLAG += -DHAVE_IB_UMEM_SG_APPEND_TABLE +endif + +ifneq ($(shell grep "ib_mr_init_attr" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h > /dev/null 2>&1 && echo mr_init_attr),) + DISTRO_CFLAG += -DHAVE_IB_MR_INIT_ATTR +endif + +# add_gid/del_gid replaced the modify_gid +ifneq ($(shell grep "add_gid" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h > /dev/null 2>&1 && echo add_gid),) + DISTRO_CFLAG += -DHAVE_IB_ADD_DEL_GID +endif + +ifneq ($(shell grep "modify_gid" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h > /dev/null 2>&1 && echo modify_gid),) + DISTRO_CFLAG += -DHAVE_IB_MODIFY_GID +endif + +ifneq ($(shell grep -A 3 "struct ib_gid_attr" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h > /dev/null 2>&1 && echo struct ib_gid_attr),) + DISTRO_CFLAG += -DHAVE_IB_GID_ATTR +endif + +ifneq ($(shell grep -A 3 "struct ib_bind_mw_wr" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h > /dev/null 2>&1 && echo struct ib_bind_mw_wr),) + DISTRO_CFLAG += -DHAVE_IB_BIND_MW_WR +endif + +ifneq ($(shell grep "alloc_mr" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h > /dev/null 2>&1 && echo alloc_mr),) + DISTRO_CFLAG += -DHAVE_IB_ALLOC_MR +endif + +ifneq ($(shell grep "query_mr" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h > /dev/null 2>&1 && echo query_mr),) + DISTRO_CFLAG += -DHAVE_IB_QUERY_MR +endif + +ifneq ($(shell grep "alloc_fast_reg_mr" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h > /dev/null 2>&1 && echo fast_reg_mr),) + DISTRO_CFLAG += -DHAVE_IB_FAST_REG_MR +endif + +ifneq ($(shell grep "map_mr_sg" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h > /dev/null 2>&1 && echo map_mr_sg),) + DISTRO_CFLAG += -DHAVE_IB_MAP_MR_SG +endif + +ifneq ($(shell grep -A 2 "int ib_map_mr_sg" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h | grep sg_offset > /dev/null 2>&1 && echo sg_offset),) + DISTRO_CFLAG += -DHAVE_IB_MAP_MR_SG_OFFSET +endif + +ifneq ($(shell grep -A 2 "int ib_map_mr_sg" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h | grep page_size > /dev/null 2>&1 && echo page_size),) + DISTRO_CFLAG += -DHAVE_IB_MAP_MR_SG_PAGE_SIZE +endif + +ifneq ($(shell grep "IB_WR_REG_MR" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h > /dev/null 2>&1 && echo wr_reg_mr),) + DISTRO_CFLAG += -DHAVE_IB_REG_MR_WR +endif + +ifneq ($(shell grep "ib_mw_bind_info" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h > /dev/null 2>&1 && echo bind_mw),) + DISTRO_CFLAG += 
-DHAVE_IB_MW_BIND_INFO +endif + +ifneq ($(shell grep "rdma_wr" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h > /dev/null 2>&1 && echo rdma_wr),) + DISTRO_CFLAG += -DHAVE_IB_RDMA_WR +endif + +ifneq ($(shell grep "reg_phys_mr" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h > /dev/null 2>&1 && echo reg_phys_mr),) + DISTRO_CFLAG += -DHAVE_IB_REG_PHYS_MR +endif + +ifneq ($(shell grep "ud_wr" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h > /dev/null 2>&1 && echo ud_wr),) + DISTRO_CFLAG += -DHAVE_IB_UD_WR +endif + +ifneq ($(shell grep "get_netdev" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h > /dev/null 2>&1 && echo get_netdev),) + DISTRO_CFLAG += -DHAVE_IB_GET_NETDEV +endif + +ifneq ($(shell grep "get_port_immutable" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h > /dev/null 2>&1 && echo port_immutable),) + DISTRO_CFLAG += -DHAVE_IB_GET_PORT_IMMUTABLE +endif + +ifneq ($(shell grep -o "get_dev_fw_str" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h > /dev/null 2>&1 && echo get_dev_fw_str),) + DISTRO_CFLAG += -DHAVE_IB_GET_DEV_FW_STR +ifneq ($(shell grep "get_dev_fw_str" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h|grep -o "str_len" > /dev/null 2>&1 && echo str_len),) + DISTRO_CFLAG += -DIB_GET_DEV_FW_STR_HAS_STRLEN +endif +endif + +ifneq ($(shell grep "WIDTH_2X" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h > /dev/null 2>&1 && echo width_2x),) + DISTRO_CFLAG += -DHAVE_IB_WIDTH_2X +endif + +ifneq ($(shell grep -o "sriov_configure" $(LINUXSRC)/include/linux/pci.h),) + DISTRO_CFLAG += -DPCIE_SRIOV_CONFIGURE + ifneq ($(shell grep -A 2 "pci_driver_rh" $(LINUXSRC)/include/linux/pci.h | \ + grep -o "sriov_configure"),) + DISTRO_CFLAG += -DSRIOV_CONF_DEF_IN_PCI_DRIVER_RH + endif +endif + +ifneq ($(shell ls $(LINUXSRC)/include/net/flow_offload.h > /dev/null 2>&1 && echo flow_offload),) + DISTRO_CFLAG += -DHAVE_FLOW_OFFLOAD_H + ifneq ($(shell grep -so "struct flow_cls_offload" $(LINUXSRC)/include/net/flow_offload.h),) + DISTRO_CFLAG += -DHAVE_TC_FLOW_CLS_OFFLOAD + endif + ifneq ($(shell grep -o "flow_block_cb_setup_simple" $(LINUXSRC)/include/net/flow_offload.h),) + DISTRO_CFLAG += -DHAVE_SETUP_TC_BLOCK_HELPER + endif + ifneq ($(shell grep -o "__flow_indr_block_cb_register" $(LINUXSRC)/include/net/flow_offload.h || \ + grep -o "flow_indr_block_bind_cb_t" $(LINUXSRC)/include/net/flow_offload.h),) + DISTRO_CFLAG += -DHAVE_FLOW_INDR_BLOCK_CB + ifneq ($(shell grep -A 1 "void flow_indr_dev_unregister" $(LINUXSRC)/include/net/flow_offload.h | grep -o "flow_setup_cb_t \*setup_cb"),) + DISTRO_CFLAG += -DHAVE_OLD_FLOW_INDR_DEV_UNRGTR + endif + endif + ifneq ($(shell grep -o "FLOW_ACTION_POLICE" $(LINUXSRC)/include/net/flow_offload.h),) + DISTRO_CFLAG += -DHAVE_FLOW_ACTION_POLICE + endif + ifneq ($(shell grep -o "flow_action_basic_hw_stats_check" $(LINUXSRC)/include/net/flow_offload.h),) + DISTRO_CFLAG += -DHAVE_FLOW_ACTION_BASIC_HW_STATS_CHECK + endif + ifneq ($(shell grep -o "flow_indr_dev_register" $(LINUXSRC)/include/net/flow_offload.h),) + DISTRO_CFLAG += -DHAVE_FLOW_INDR_DEV_RGTR + endif + ifneq ($(shell grep -A 2 "flow_stats_update" $(LINUXSRC)/include/net/flow_offload.h | grep -o drops),) + DISTRO_CFLAG += -DHAVE_FLOW_STATS_DROPS + endif + ifneq ($(shell grep -A 3 "flow_indr_block_bind_cb_t" $(LINUXSRC)/include/net/flow_offload.h | grep -o cleanup),) + DISTRO_CFLAG += -DHAVE_FLOW_INDR_BLOCK_CLEANUP + endif + ifneq ($(shell grep -o "cb_list_head" $(LINUXSRC)/include/net/flow_offload.h),) + DISTRO_CFLAG += -DHAVE_FLOW_INDIR_BLK_PROTECTION + endif +endif + +ifneq ($(shell grep -s "devlink_ops" $(LINUXSRC)/include/net/devlink.h),) + 
DISTRO_CFLAG += -DHAVE_DEVLINK + ifeq ($(shell grep -o "devlink_register(struct devlink \*devlink);" $(LINUXSRC)/include/net/devlink.h),) + DISTRO_CFLAG += -DHAVE_DEVLINK_REGISTER_DEV + endif +endif + +ifneq ($(shell grep -s -A 7 "devlink_port_attrs" $(LINUXSRC)/include/net/devlink.h | grep -o "netdev_phys_item_id"),) + DISTRO_CFLAG += -DHAVE_DEVLINK_PORT_ATTRS +endif + +ifneq ($(shell grep -s -A 1 "devlink_port_attrs_set" $(LINUXSRC)/include/net/devlink.h | grep -o "struct devlink_port_attrs"),) + DISTRO_CFLAG += -DHAVE_DEVLINK_PORT_ATTRS_SET_NEW +endif + +ifneq ($(shell grep -s "devlink_param" $(LINUXSRC)/include/net/devlink.h),) + DISTRO_CFLAG += -DHAVE_DEVLINK_PARAM + ifneq ($(shell grep -s -A 2 "int (\*validate)" $(LINUXSRC)/include/net/devlink.h | grep "struct netlink_ext_ack \*extack"),) + DISTRO_CFLAG += -DHAVE_DEVLINK_VALIDATE_NEW + endif +endif + +ifneq ($(shell grep -o "ndo_get_port_parent_id" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_NDO_GET_PORT_PARENT_ID +endif + +ifneq ($(shell grep -s "switchdev_ops" $(LINUXSRC)/include/net/switchdev.h),) + DISTRO_CFLAG += -DHAVE_SWITCHDEV +endif + +ifneq ($(shell grep -o "net_device_ops_extended" $(LINUXSRC)/include/linux/netdevice.h),) + ifneq ($(shell grep -o "ndo_xdp_xmit" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_EXT_NDO_XDP_XMIT + endif +else ifneq ($(shell grep -o "ndo_xdp" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_NDO_XDP + ifneq ($(shell grep -o "ndo_bpf" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_NDO_BPF + endif + ifneq ($(shell ls $(LINUXSRC)/include/linux/bpf_trace.h > /dev/null 2>&1 && echo bpf_trace),) + DISTRO_CFLAG += -DHAVE_BPF_TRACE + endif + ifneq ($(shell grep -o "skb_metadata_set" $(LINUXSRC)/include/linux/skbuff.h),) + DISTRO_CFLAG += -DHAVE_XDP_DATA_META + endif + ifneq ($(shell grep -o "void bpf_prog_add" $(LINUXSRC)/include/linux/bpf.h),) + DISTRO_CFLAG += -DHAVE_VOID_BPF_PROG_ADD + endif + ifneq ($(shell grep "void bpf_warn_invalid_xdp_action" $(LINUXSRC)/include/linux/filter.h | grep -o "struct net_device"),) + DISTRO_CFLAG += -DHAVE_BPF_WARN_INVALID_XDP_ACTION_EXT + endif +endif + +ifneq ($(shell grep -o "udp_tunnel_nic" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_UDP_TUNNEL_NIC +endif + +ifneq ($(shell grep -A 2 "process_mad" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h | grep "u32 port_num"),) + DISTRO_CFLAG += -DHAVE_PROCESS_MAD_U32_PORT +else + ifneq ($(shell grep "ib_mad_hdr" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h > /dev/null 2>&1 && echo ib_mad_hdr),) + DISTRO_CFLAG += -DHAVE_PROCESS_MAD_IB_MAD_HDR + endif +endif + +ifneq ($(shell grep "query_device" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h -A2 | grep udata > /dev/null 2>&1 && echo query_device),) + DISTRO_CFLAG += -DHAVE_IB_QUERY_DEVICE_UDATA +endif + +ifneq ($(shell grep "cq_init_attr" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h > /dev/null 2>&1 && echo cq_init_attr),) + DISTRO_CFLAG += -DHAVE_IB_CQ_INIT_ATTR +endif + +ifneq ($(shell grep "drain_rq" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h > /dev/null 2>&1 && echo drain_rq),) + DISTRO_CFLAG += -DHAVE_IB_DRAIN +endif + +ifneq ($(shell grep "RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h > /dev/null 2>&1 && echo roce_v2_enable),) + DISTRO_CFLAG += -DENABLE_ROCEV2_QP1 +endif + +ifneq (,$(shell grep -so "ETHTOOL_LINK_MODE_25000baseCR_Full_BIT" $(LINUXSRC)/include/$(UAPI)/linux/ethtool.h $(LINUXSRC)/include/linux/ethtool.h)) + DISTRO_CFLAG += 
-DHAVE_ETHTOOL_GLINKSETTINGS_25G +endif + +ifneq (,$(shell grep -so "IB_USER_VERBS_EX_CMD_MODIFY_QP" $(OFA_KERNEL_PATH)/include/$(UAPI)/rdma/ib_user_verbs.h)) + DISTRO_CFLAG += -DHAVE_IB_USER_VERBS_EX_CMD_MODIFY_QP +endif + +ifneq (,$(shell grep -so "struct ib_mr \*(\*rereg_user_mr)" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h)) + DISTRO_CFLAG += -DHAVE_REREG_USER_MR_RET_PTR +endif + +ifneq (,$(shell grep -so "uverbs_ex_cmd_mask" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h)) + DISTRO_CFLAG += -DHAVE_IB_UVERBS_CMD_MASK_IN_DRIVER +endif + +ifneq (,$(shell grep -so "IB_QP_ATTR_STANDARD_BITS" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h)) + DISTRO_CFLAG += -DHAVE_IB_QP_ATTR_STANDARD_BITS +endif + +ifneq ($(shell grep -o "rdma_addr_find_l2_eth_by_grh" $(OFA_KERNEL_PATH)/include/rdma/ib_addr.h),) + DISTRO_CFLAG += -DHAVE_RDMA_ADDR_FIND_L2_ETH_BY_GRH +endif + +ifneq ($(shell grep "rdma_addr_find_l2_eth_by_grh" $(OFA_KERNEL_PATH)/include/rdma/ib_addr.h -A2 | grep net_device ),) + DISTRO_CFLAG += -DHAVE_RDMA_ADDR_FIND_L2_ETH_BY_GRH_WITH_NETDEV +endif + +ifneq ($(shell grep -A 2 "rdma_addr_find_dmac_by_grh" $(OFA_KERNEL_PATH)/include/rdma/ib_addr.h | grep if_index),) + DISTRO_CFLAG += -DHAVE_RDMA_ADDR_FIND_DMAC_BY_GRH_V2 +endif + +ifneq (,$(shell grep -o "if_list" $(LINUXSRC)/include/net/if_inet6.h)) + DISTRO_CFLAG += -DHAVE_INET6_IF_LIST +endif + +ifneq ($(shell grep -o "PKT_HASH_TYPE" $(LINUXSRC)/include/linux/skbuff.h),) + DISTRO_CFLAG += -DHAVE_SKB_HASH_TYPE +endif + +ifneq ($(shell grep "create_ah" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h -A2 | grep udata > /dev/null 2>&1 && echo create_ah),) + DISTRO_CFLAG += -DHAVE_IB_CREATE_AH_UDATA +endif + +ifneq ($(shell grep "*create_user_ah" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h > /dev/null 2>&1 && echo create_user_ah),) + DISTRO_CFLAG += -DHAVE_IB_CREATE_USER_AH +endif + +ifneq ($(shell grep -o "ether_addr_copy" $(LINUXSRC)/include/linux/etherdevice.h),) + DISTRO_CFLAG += -DHAVE_ETHER_ADDR_COPY +endif + +ifneq ($(shell grep -o "page_shift" $(OFA_KERNEL_PATH)/include/rdma/ib_umem.h),) + DISTRO_CFLAG += -DHAVE_IB_UMEM_PAGE_SHIFT +endif + +ifneq ($(shell grep -o "ib_umem_page_count" $(OFA_KERNEL_PATH)/include/rdma/ib_umem.h),) + DISTRO_CFLAG += -DHAVE_IB_UMEM_PAGE_COUNT +endif + +ifneq ($(shell grep -o "npages" $(OFA_KERNEL_PATH)/include/rdma/ib_umem.h),) + DISTRO_CFLAG += -DHAVE_NPAGES_IB_UMEM +endif + +ifneq ($(shell grep -o "page_size" $(OFA_KERNEL_PATH)/include/rdma/ib_umem.h),) + DISTRO_CFLAG += -DHAVE_IB_UMEM_PAGE_SIZE +endif + +ifneq ($(shell grep -o "rdma_ah_attr" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h),) + DISTRO_CFLAG += -DHAVE_RDMA_AH_ATTR +endif + +ifneq ($(shell grep -o "roce_ah_attr" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h),) + DISTRO_CFLAG += -DHAVE_ROCE_AH_ATTR +endif + +ifneq ($(shell grep -o "ib_resolve_eth_dmac" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h),) + DISTRO_CFLAG += -DHAVE_IB_RESOLVE_ETH_DMAC +endif + +ifneq ($(shell grep -o "rdma_umem_for_each_dma_block" $(OFA_KERNEL_PATH)/include/rdma/ib_umem.h),) + DISTRO_CFLAG += -DHAVE_RDMA_UMEM_FOR_EACH_DMA_BLOCK +endif + +ifneq ($(shell grep -o "disassociate_ucontext" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h),) + DISTRO_CFLAG += -DHAVE_DISASSOCIATE_UCNTX +endif + +ifneq ($(shell if [ -e $(LINUXSRC)/include/net/bonding.h ]; then echo y; fi),) + DISTRO_CFLAG += -DHAVE_NET_BONDING_H +endif + +ifneq ($(shell if [ -e $(LINUXSRC)/include/linux/sched/mm.h ]; then echo y; fi),) + DISTRO_CFLAG += -DHAVE_SCHED_MM_H +endif + +ifneq ($(shell if [ -e $(LINUXSRC)/include/linux/sched/task.h 
]; then echo y; fi),) + DISTRO_CFLAG += -DHAVE_SCHED_TASK_H +endif + +ifneq ($(shell grep "ib_register_device" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h | grep name),) +ifneq ($(shell grep "ib_register_device" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h -A1 | grep "dma_device"),) + DISTRO_CFLAG += -DHAVE_DMA_DEVICE_IN_IB_REGISTER_DEVICE +else + DISTRO_CFLAG += -DHAVE_NAME_IN_IB_REGISTER_DEVICE +endif +endif + + +ifneq ($(shell grep "ib_modify_qp_is_ok" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h -A2 | grep rdma_link_layer),) + DISTRO_CFLAG += -DHAVE_LL_IN_IB_MODIFY_QP_IS_OK +endif + +ifneq ($(shell grep "rdma_user_mmap_io" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h),) + DISTRO_CFLAG += -DHAVE_RDMA_USER_MMAP_IO +endif + +ifneq ($(shell grep "rdma_user_mmap_io" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h -A2 |grep rdma_user_mmap_entry),) + DISTRO_CFLAG += -DHAVE_RDMA_USER_MMAP_IO_USE_MMAP_ENTRY +endif + +ifneq ($(shell grep "ib_counters_read_attr" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h),) +# Kind of a misnomer to handle disassociate_ucontext in RH8.0. This is the best key +# in ib_verbs.h. + DISTRO_CFLAG += -DHAVE_NO_MM_MMAP_SEM +endif + +# Configfs stuff +ifneq ($(shell grep -w "CONFIGFS_ATTR" $(LINUXSRC)/include/linux/configfs.h|grep -o _pfx),) + HAVE_CONFIGFS_ENABLED=y + EXTRA_CFLAGS += -DHAVE_CONFIGFS_ENABLED + ifneq ($(shell grep -o "configfs_add_default_group" $(LINUXSRC)/include/linux/configfs.h),) + DISTRO_CFLAG += -DHAVE_CFGFS_ADD_DEF_GRP + endif +else + ifneq ($(shell grep -w "__CONFIGFS_ATTR" $(LINUXSRC)/include/linux/configfs.h|grep -o _show),) + HAVE_CONFIGFS_ENABLED=y + EXTRA_CFLAGS += -DHAVE_CONFIGFS_ENABLED -DHAVE_OLD_CONFIGFS_API + endif +endif + +ifneq ($(shell grep -o "ib_umem_get_flags" $(OFA_KERNEL_PATH)/include/rdma/ib_umem.h),) + DISTRO_CFLAG += -DHAVE_IB_UMEM_GET_FLAGS -DCONFIG_INFINIBAND_PEER_MEM +endif + +ifneq ($(shell grep -o "ib_umem_get_peer" $(OFA_KERNEL_PATH)/include/rdma/ib_umem.h),) + DISTRO_CFLAG += -DHAVE_IB_UMEM_GET_PEER -DCONFIG_INFINIBAND_PEER_MEM +endif + +ifneq ($(shell grep -o "ib_umem_dmabuf_get" $(OFA_KERNEL_PATH)/include/rdma/ib_umem.h),) + DISTRO_CFLAG += -DHAVE_IB_UMEM_DMABUF + ifneq ($(shell grep -o "ib_umem_dmabuf_get_pinned" $(OFA_KERNEL_PATH)/include/rdma/ib_umem.h),) + DISTRO_CFLAG += -DHAVE_IB_UMEM_DMABUF_PINNED + endif +endif + +ifneq ($(shell grep -o "ib_umem_stop_invalidation_notifier" $(OFA_KERNEL_PATH)/include/rdma/ib_umem.h),) + DISTRO_CFLAG += -DHAVE_IB_UMEM_STOP_INVALIDATION +endif + +ifneq ($(shell grep -o "NETDEV_BONDING_FAILOVER" $(LINUXSRC)/include/linux/netdevice.h),) + ifeq ("$(shell test -e $(LINUXSRC)/include/net/bonding.h && echo test)", "test") + DISTRO_CFLAG += -DLEGACY_BOND_SUPPORT + endif +endif + +ifneq ($(shell grep -o "netdev_master_upper_dev_get" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_NETDEV_MASTER_UPPER_DEV_GET +endif + +ifneq ($(shell grep -o "dev_get_stats64" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_DEV_GET_STATS64 +endif + +ifneq ($(shell grep -o "ndo_get_stats64" $(LINUXSRC)/include/linux/netdevice.h),) + ifeq ($(shell grep -o "net_device_ops_ext" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DNETDEV_GET_STATS64 + endif + ifneq ($(shell grep -o "net_device_ops_extended" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DNETDEV_GET_STATS64 + endif + ifneq ($(shell grep "ndo_get_stats64" $(LINUXSRC)/include/linux/netdevice.h | grep -o "void"),) + DISTRO_CFLAG += -DNETDEV_GET_STATS64_VOID + endif +endif + +ifneq ($(shell grep -o 
"ndo_do_ioctl" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_NDO_DO_IOCTL +endif + +ifneq ($(shell grep -o "ndo_eth_ioctl" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_NDO_ETH_IOCTL +endif + +ifneq ($(BNXT_PEER_MEM_INC),) + export BNXT_PEER_MEM_INC + ifneq ($(shell grep -o "ib_umem_get_flags" $(BNXT_PEER_MEM_INC)/peer_umem.h),) + DISTRO_CFLAG += -DHAVE_IB_UMEM_GET_FLAGS -DCONFIG_INFINIBAND_PEER_MEM + endif + EXTRA_CFLAGS += -DIB_PEER_MEM_MOD_SUPPORT + EXTRA_CFLAGS += -I$(BNXT_PEER_MEM_INC) +endif + +ifneq ($(shell ls $(LINUXSRC)/include/net/flow_dissector.h > /dev/null 2>&1 && echo flow),) + DISTRO_CFLAG += -DHAVE_FLOW_DISSECTOR +endif + +ifneq ($(shell ls $(LINUXSRC)/include/linux/dim.h > /dev/null 2>&1 && echo dim),) + DISTRO_CFLAG += -DHAVE_DIM +endif + +ifneq ($(shell ls $(OFA_KERNEL_PATH)/include/rdma/uverbs_ioctl.h > /dev/null 2>&1 && echo uverbs_ioctl),) + DISTRO_CFLAG += -DHAVE_UVERBS_IOCTL_H + ifneq ($(shell grep "rdma_udata_to_drv_context" $(OFA_KERNEL_PATH)/include/rdma/uverbs_ioctl.h),) + DISTRO_CFLAG += -DHAVE_RDMA_UDATA_TO_DRV_CONTEXT + endif +endif + +ifneq ($(shell grep -A 1 "(*post_srq_recv)" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h | grep -o "const struct ib_recv_wr"),) + DISTRO_CFLAG += -DHAVE_IB_ARG_CONST_CHANGE +endif + +ifneq ($(shell grep -A 1 "(*get_netdev)" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h | grep -o "u32 port_num"),) + DISTRO_CFLAG += -DHAVE_IB_SUPPORT_MORE_RDMA_PORTS +endif + +ifneq ($(shell grep -A 4 "(*create_flow)" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h | grep -o "struct ib_udata"),) + DISTRO_CFLAG += -DHAVE_UDATA_FOR_CREATE_FLOW +endif + +ifneq ($(shell grep -A 15 "struct ib_device_attr {" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h | grep -o "max_send_sge"),) + DISTRO_CFLAG += -DHAVE_SEPARATE_SEND_RECV_SGE +endif + +ifneq ($(shell grep "ib_get_cached_gid" $(OFA_KERNEL_PATH)/include/rdma/ib_cache.h > /dev/null 2>&1 && echo ib_get_cached_gid),) + DISTRO_CFLAG += -DHAVE_IB_GET_CACHED_GID +endif + +ifneq ($(shell grep "rdma_create_user_ah" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h > /dev/null 2>&1 && echo rdma_create_user_ah),) + DISTRO_CFLAG += -DHAVE_CREATE_USER_AH +endif + +ifeq ($(ofed_major), OFED-4.17) + ifeq ($(shell grep -A 1 "(*add_gid)" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h | grep -o "port_num"),) + DISTRO_CFLAG += -DHAVE_SIMPLIFIED_ADD_DEL_GID + endif +else + ifneq ($(shell grep -A 2 "struct ib_gid_attr {" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h | grep -o "struct ib_device"),) + ifeq ($(shell grep -A 1 "(*add_gid)" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h | grep -o "port_num"),) + DISTRO_CFLAG += -DHAVE_SIMPLIFIED_ADD_DEL_GID + endif + ifeq ($(shell grep -A 1 "(*add_gid)" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h | grep -o "union ib_gid"),) + DISTRO_CFLAG += -DHAVE_SIMPLER_ADD_GID + endif + endif +endif + +ifneq ($(shell grep -A 6 "struct ib_ah {" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h | grep -o "sgid_attr"),) + DISTRO_CFLAG += -DHAVE_GID_ATTR_IN_IB_AH +endif +ifneq ($(shell grep "rdma_gid_attr_network_type" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h),) + DISTRO_CFLAG += -DHAVE_RDMA_GID_ATTR_NETWORK_TYPE +endif + +ifneq ($(shell grep "ib_set_device_ops" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h),) + DISTRO_CFLAG += -DHAVE_IB_SET_DEV_OPS +endif + +ifneq ($(shell grep "RDMA_CREATE_AH_SLEEPABLE" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h),) + DISTRO_CFLAG += -DHAVE_SLEEPABLE_AH +endif + +ifneq ($(shell grep "IB_POLL_UNBOUND_WORKQUEUE" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h),) + 
DISTRO_CFLAG += -DHAVE_IB_POLL_UNBOUND_WORKQUEUE +endif + +ifneq ($(shell grep "dma_zalloc_coherent" $(LINUXSRC)/include/linux/dma-mapping.h),) + DISTRO_CFLAG += -DHAVE_DMA_ZALLOC_COHERENT +endif + +ifneq ($(shell grep "for_each_sg_dma_page" $(LINUXSRC)/include/linux/scatterlist.h),) + DISTRO_CFLAG += -DHAVE_FOR_EACH_SG_DMA_PAGE +endif + +ifneq ($(shell grep "has_secondary_link" $(LINUXSRC)/include/linux/pci.h),) + DISTRO_CFLAG += -DHAS_PCI_SECONDARY_LINK +endif + +ifneq ($(shell grep "pci_enable_atomic_ops_to_root" $(LINUXSRC)/include/linux/pci.h),) + DISTRO_CFLAGS += -DHAS_ENABLE_ATOMIC_OPS +endif + +ifneq ($(shell grep "tasklet_setup" $(LINUXSRC)/include/linux/interrupt.h),) + DISTRO_CFLAG += -DHAS_TASKLET_SETUP +endif + +ifneq ($(shell grep "sysfs_emit" $(LINUXSRC)/include/linux/sysfs.h),) + DISTRO_CFLAG += -DHAS_SYSFS_EMIT +endif + +ifneq ($(shell grep "DECLARE_RDMA_OBJ_SIZE" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h | grep -o "ib_pd"),) + DISTRO_CFLAG += -DHAVE_PD_ALLOC_IN_IB_CORE +endif + +ifneq ($(shell grep "DECLARE_RDMA_OBJ_SIZE" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h | grep -o "ib_cq"),) + DISTRO_CFLAG += -DHAVE_CQ_ALLOC_IN_IB_CORE +endif + +ifneq ($(shell grep "DECLARE_RDMA_OBJ_SIZE" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h | grep -o "ib_qp"),) + DISTRO_CFLAG += -DHAVE_QP_ALLOC_IN_IB_CORE +endif + +ifneq ($(shell grep "(\*alloc_pd)" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h | grep -o "ib_ucontext"),) + DISTRO_CFLAG += -DHAVE_UCONTEXT_IN_ALLOC_PD +endif + +ifneq ($(shell grep "(\*alloc_pd)" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h -A1| grep -o "ib_ucontext"),) + DISTRO_CFLAG += -DHAVE_UCONTEXT_IN_ALLOC_PD +endif + +ifneq ($(shell grep "ib_device_ops" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h -A1| grep -o "owner"),) + DISTRO_CFLAG += -DHAVE_IB_OWNER_IN_DEVICE_OPS +endif + +ifneq ($(shell grep "DECLARE_RDMA_OBJ_SIZE" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h | grep -o "ib_ah"),) + DISTRO_CFLAG += -DHAVE_AH_ALLOC_IN_IB_CORE +endif + +ifneq ($(shell grep "DECLARE_RDMA_OBJ_SIZE" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h | grep -o "ib_srq"),) + DISTRO_CFLAG += -DHAVE_SRQ_CREATE_IN_IB_CORE +endif + +ifneq ($(shell grep "(\*dealloc_pd)" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h | grep -o "udata"),) + DISTRO_CFLAG += -DHAVE_DEALLOC_PD_UDATA +endif +ifneq ($(shell grep "(\*dealloc_pd)" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h | grep -o "void"),) + DISTRO_CFLAG += -DHAVE_DEALLOC_PD_RET_VOID +endif +ifneq ($(shell grep "(\*destroy_srq)" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h | grep -o "udata"),) + DISTRO_CFLAG += -DHAVE_DESTROY_SRQ_UDATA +endif + +ifneq ($(shell grep "(\*destroy_cq)" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h | grep -o "udata"),) + DISTRO_CFLAG += -DHAVE_DESTROY_CQ_UDATA +endif + +ifneq ($(shell grep "(\*destroy_qp)" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h | grep -o "udata"),) + DISTRO_CFLAG += -DHAVE_DESTROY_QP_UDATA +endif + +ifneq ($(shell grep "(\*destroy_ah)" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h | grep -o "void"),) + DISTRO_CFLAG += -DHAVE_DESTROY_AH_RET_VOID +endif + +ifneq ($(shell grep "(\*destroy_srq)" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h | grep -o "void"),) + DISTRO_CFLAG += -DHAVE_DESTROY_SRQ_RET_VOID +endif + +ifneq ($(shell grep "(\*alloc_mw)" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h | grep -o "int"),) + DISTRO_CFLAG += -DHAVE_ALLOC_MW_RET_INT +endif + +ifneq ($(shell grep "(\*destroy_cq)" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h | grep -o "void"),) + DISTRO_CFLAG += -DHAVE_DESTROY_CQ_RET_VOID +endif + +ifneq ($(shell grep 
"(\*create_cq)" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h -A2 | grep -o "ib_ucontext"),) + DISTRO_CFLAG += -DHAVE_CREATE_CQ_UCONTEXT +endif + +ifneq ($(shell grep "(\*dereg_mr)" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h | grep -o "udata"),) + DISTRO_CFLAG += -DHAVE_DEREG_MR_UDATA +endif + +ifneq ($(shell grep "(\*alloc_mr)" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h -A1 | grep -o "ib_udata"),) + DISTRO_CFLAG += -DHAVE_ALLOC_MR_UDATA +endif + +ifneq ($(shell grep "DECLARE_RDMA_OBJ_SIZE" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h | grep -o "ib_ucontext"),) + DISTRO_CFLAG += -DHAVE_UCONTEXT_ALLOC_IN_IB_CORE +endif + +ifneq ($(shell grep "DECLARE_RDMA_OBJ_SIZE" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h | grep -o "ib_mw"),) + DISTRO_CFLAG += -DHAVE_ALLOC_MW_IN_IB_CORE +endif + +ifneq ($(shell grep "ib_alloc_device" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h | grep -o "member"),) + DISTRO_CFLAG += -DHAVE_MEMBER_IN_IB_ALLOC_DEVICE +endif + +ifneq ($(shell grep "ib_umem_get" $(OFA_KERNEL_PATH)/include/rdma/ib_umem.h | grep -o "udata"),) + DISTRO_CFLAG += -DHAVE_UDATA_IN_IB_UMEM_GET +endif + +ifneq ($(shell grep "ib_umem_get" $(OFA_KERNEL_PATH)/include/rdma/ib_umem.h | grep -o "ib_device"),) + DISTRO_CFLAG += -DHAVE_IB_DEVICE_IN_IB_UMEM_GET +endif + +ifneq ($(shell grep "ib_umem_get" $(OFA_KERNEL_PATH)/include/rdma/ib_umem.h -A1| grep -o "dmasync"),) + DISTRO_CFLAG += -DHAVE_DMASYNC_IB_UMEM_GET +endif + +ifneq ($(shell grep "ib_umem_num_pages" $(OFA_KERNEL_PATH)/include/rdma/ib_umem.h),) + DISTRO_CFLAG += -DHAVE_IB_UMEM_NUM_PAGES +endif + +ifneq ($(shell grep -o "size_t ib_umem_num_dma_blocks" $(OFA_KERNEL_PATH)/include/rdma/ib_umem.h),) + DISTRO_CFLAG += -DHAVE_IB_UMEM_NUM_DMA_BLOCKS +endif + +ifneq ($(shell grep -o "long ib_umem_find_best_pgsz" $(OFA_KERNEL_PATH)/include/rdma/ib_umem.h),) + DISTRO_CFLAG += -DHAVE_IB_UMEM_FIND_BEST_PGSZ +endif + +ifneq ($(shell grep "init_port" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h),) + DISTRO_CFLAG += -DHAVE_VERB_INIT_PORT +endif + +ifneq ($(shell grep "rdma_set_device_sysfs_group" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h),) + DISTRO_CFLAG += -DHAVE_RDMA_SET_DEVICE_SYSFS_GROUP +endif + +ifneq ($(shell grep "ib_device_set_netdev" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h),) + DISTRO_CFLAG += -DHAVE_IB_DEVICE_SET_NETDEV +endif + +ifneq ($(shell grep "rdma_driver_id" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h),) + DISTRO_CFLAG += -DHAVE_RDMA_DRIVER_ID +endif + +ifneq ($(shell grep "rdma_for_each_block" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h),) + DISTRO_CFLAG += -DHAVE_DMA_BLOCK_ITERATOR +endif + +ifneq ($(shell grep "ib_port_phys_state" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h),) + DISTRO_CFLAG += -DHAVE_PHYS_PORT_STATE_ENUM +endif + +ifneq ($(shell grep "ib_get_eth_speed" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h),) + DISTRO_CFLAG += -DHAVE_IB_GET_ETH_SPEED +endif + +ifneq ($(shell grep "vlan_id" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h),) + DISTRO_CFLAG += -DHAVE_IB_WC_VLAN_ID +endif + +ifneq ($(shell grep "smac" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h),) + DISTRO_CFLAG += -DHAVE_IB_WC_SMAC +endif + +ifneq ($(shell grep -s "METADATA_HW_PORT_MUX" $(LINUXSRC)/include/net/dst_metadata.h),) + DISTRO_CFLAG += -DHAVE_METADATA_HW_PORT_MUX +endif + +ifneq ($(shell grep "pci_num_vf" $(LINUXSRC)/include/linux/pci.h),) + DISTRO_CFLAG += -DHAVE_PCI_NUM_VF +endif + +ifneq ($(shell grep "ib_kernel_cap_flags" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h),) + DISTRO_CFLAG += -DHAVE_IB_KERNEL_CAP_FLAGS +endif + +ifneq ($(shell grep -so "ida_alloc" 
$(LINUXSRC)/include/linux/idr.h),) + DISTRO_CFLAG += -DHAVE_IDA_ALLOC +endif + +ifneq ($(shell grep -o "struct auxiliary_device_id" $(LINUXSRC)/include/linux/mod_devicetable.h),) + DISTRO_CFLAG += -DHAVE_AUX_DEVICE_ID +endif + +ifneq ($(shell ls $(LINUXSRC)/include/linux/auxiliary_bus.h > /dev/null 2>&1 && echo auxiliary_driver),) + ifneq ($(CONFIG_AUXILIARY_BUS),) + DISTRO_CFLAG += -DHAVE_AUXILIARY_DRIVER + endif +endif + +ifneq ($(shell grep -so "auxiliary_set_drvdata" $(LINUXSRC)/include/linux/auxiliary_bus.h),) + DISTRO_CFLAG += -DHAVE_AUX_SET_DRVDATA +endif + +ifneq ($(shell grep -so "auxiliary_get_drvdata" $(LINUXSRC)/include/linux/auxiliary_bus.h),) + DISTRO_CFLAG += -DHAVE_AUX_GET_DRVDATA +endif + +ifneq ($(shell grep -o "struct rdma_stat_desc {" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h),) + DISTRO_CFLAG += -DHAVE_RDMA_STAT_DESC +endif + +ifneq ($(shell grep -o "alloc_hw_port_stats" $(OFA_KERNEL_PATH)/include/rdma/ib_verbs.h),) + DISTRO_CFLAG += -DHAVE_ALLOC_HW_PORT_STATS +endif + +ifneq ($(shell grep -so "vmalloc_array" $(LINUXSRC)/include/linux/vmalloc.h),) + DISTRO_CFLAG += -DHAVE_VMALLOC_ARRAY +endif + +ifneq ($(shell grep -so "addrconf_addr_eui48" $(LINUXSRC)/include/net/addrconf.h),) + DISTRO_CFLAG += -DHAVE_ADDRCONF_ADDR_EUI48 +endif + +KBUILD_EXTRA_SYMBOLS += $(BNXT_EN_INC)/Module.symvers + +EXTRA_CFLAGS += ${DISTRO_CFLAG} -DFPGA -g -DCONFIG_BNXT_SRIOV \ + -DCONFIG_BNXT_DCB -DENABLE_DEBUGFS -DCONFIG_BNXT_RE \ + -DPOST_QP1_DUMMY_WQE + +EXTRA_CFLAGS += -I$(BNXT_QPLIB_INC) -I$(BNXT_EN_INC) + +BCM_DRV := bnxt_re.ko + +KSRC := $(LINUXSRC) + +ifneq (OFED-NATIVE, $(findstring OFED-NATIVE, $(OFED_VERSION))) +OFED_INCLUDES := LINUXINCLUDE=' \ + $(AUTOCONF_H) \ + -I$(OFA_KERNEL_PATH)/include \ + -I$(OFA_KERNEL_PATH)/include/uapi \ + $$(if $$(CONFIG_XEN),-D__XEN_INTERFACE_VERSION__=$$(CONFIG_XEN_INTERFACE_VERSION)) \ + $$(if $$(CONFIG_XEN),-I$$(KSRC)/arch/x86/include/mach-xen) \ + -I$(OFA_KERNEL_PATH)/arch/$$(SRCARCH)/include/generated/uapi \ + -I$(OFA_KERNEL_PATH)/arch/$$(SRCARCH)/include/generated \ + -Iinclude \ + -I$(KSRC)/include \ + -I$(KSRC)/arch/$$(SRCARCH)/include \ + -I$(KSRC)/include/generated/uapi \ + -I$(KSRC)/include/uapi \ + -I$(KSRC)/arch/$$(SRCARCH)/include/uapi \ + -I$(KSRC)/arch/$$(SRCARCH)/include/generated \ + -I$(KSRC)/arch/$$(SRCARCH)/include/generated/uapi \ + -I$(KDIR)/include/generated/uapi \ + -I$(KDIR)/arch/$$(SRCARCH)/include/generated \ + -I$(KDIR)/arch/$$(SRCARCH)/include/generated/uapi' + +OFA_KERNEL_LINK := $(OFA_KERNEL_PATH) +OFA_BUILD_LINK := $(OFA_BUILD_PATH) +endif + +cflags-y += $(EXTRA_CFLAGS) + +ifneq ($(KERNELRELEASE),) + +obj-m += bnxt_re.o +bnxt_re-y := main.o ib_verbs.o \ + debugfs.o compat.o \ + qplib_res.o qplib_rcfw.o \ + qplib_sp.o qplib_fp.o \ + stats.o dcb.o hdbr.o \ + hw_counters.o + +bnxt_re-$(HAVE_CONFIGFS_ENABLED) += configfs.o + +endif + +default: + $(MAKE) -C $(LINUX) M=$(shell pwd) $(OFED_INCLUDES) \ + $(BNXT_SPARSE_CMD) $(BNXT_SMATCH_CMD) modules + +yocto_all: + $(MAKE) -C $(LINUXSRC) M=$(shell pwd) + +modules_install: + $(MAKE) -C $(LINUXSRC) M=$(shell pwd) modules_install + + +install: default + echo $(PREFIX) + echo $(BCMMODDIR) + echo $(BCM_DRV) + mkdir -p $(PREFIX)/$(BCMMODDIR); + install -m 444 $(BCM_DRV) $(PREFIX)/$(BCMMODDIR); + @if [ "$(PREFIX)" = "" ]; then /sbin/depmod -a ;\ + else echo " *** Run '/sbin/depmod -a' to update the module database.";\ + fi + +.PHONEY: all clean install + +clean: + $(MAKE) -C $(LINUX) M=$(shell pwd) clean diff --git a/bnxt_re-1.10.3-229.0.139.0/README.TXT 
b/bnxt_re-1.10.3-229.0.139.0/README.TXT new file mode 100644 index 0000000..5b09afa --- /dev/null +++ b/bnxt_re-1.10.3-229.0.139.0/README.TXT @@ -0,0 +1,916 @@ + README Notes + Broadcom bnxt_re Linux RoCE Driver + + Broadcom Limited + 5300 California Avenue, + Irvine, CA 92617 + + Copyright (c) 2015 - 2016 Broadcom Corporation + Copyright (c) 2016 - 2018 Broadcom Limited + Copyright (c) 2018 - 2024 Broadcom Inc. + All rights reserved + + +Table of Contents +================= + + Introduction + BNXT_RE Driver Dependencies + BNXT_RE Driver compilation + Configuration Tips + Limitations + BNXT_RE Dynamic Debug Messages + BNXT_RE Compiler Switches + BNXT_RE Driver Defaults + DCB Settings + Congestion Control + SR-IOV and VF Resource Distribution + Link Aggregation + BNXT_RE Driver Statistics + QP Information in debugfs + + +Introduction +============ + +This file describes the bnxt_re Linux RoCE driver for the Broadcom NetXtreme-C +and NetXtreme-E 10/25/40/50/100/200 Gbps Ethernet Network Controllers. + +Note: Starting from 219.0 release, driver supports only RoCE v2. +RoCE v1 support is deprecated. Please refer Broadcom Linux RoCE Configuration +Guide for details. + +BNXT_RE Driver Dependencies +=========================== + +The RoCE driver has dependencies on the bnxt_en Ethernet counterpart. + + - It also has dependencies on the IB verbs kernel component + (Details given below). + + +BNXT_RE Driver compilation +========================== + +bnxt_re driver compilation depends on whether IB stack is available along with +the OS distribution or an external OFED is required. + + => Distros that has IB Stack available along with OS distribution: + RH7.1/7.2/7.3/6.7/6.8, RH8.x, SLES12SPx,SLES15SPx and Ubuntu 16.04/14.04 and later. + All kernels from 4.5.x onwards + +To compile bnxt_re: + - Untar netxtreme-bnxt_en-.tar.gz + $make + + => Some older distros that doesn't have RoCE v2 support needs upstream OFED + + Please refer OFED release notes from the following link and install + OFED before compiling bnxt_re driver. + http://downloads.openfabrics.org/downloads/OFED/release_notes/OFED_3.18-2_release_notes + + To compile bnxt_re: + $export OFED_VERSION=OFED-4.8 + - For OFEDs other than 4.8, 4.8 needs to be updated + with the current OFED version installed on the system + $make + + => The bnxt_en driver must be built first before building the bnxt_re driver. + Also, while loading the drivers bnxt_en driver must be loaded first. + +Configuration Tips +================== + +- It is recommended to use same host OS version on client and server while + running NFS-RDMA/iSER/NVMEoF tests. Heterogeneous host OS may lead to unexpected + results. This is due to the incompatible ULP server and Client kernel modules. + +- It is recommended to assign at least 3GB RAM to VMs used for memory intensive + applications like NFSoRDMA, iSER, NVMoF etc. + +- When using large number of QPs (close to maximum supported) along with large + message sizes it is recommended to increase the `max_map_count` kernel parameter + using sysctl to avoid memory map failures in the application. + Please refer to https://www.kernel.org/doc/Documentation/sysctl/vm.txt on how to tune + this kernel parameter. + +- When TCP/IP and RoCE traffic are running simultaneously with high work load or + RoCE traffic with high work load, high CPU utilization can result, + leading to CPU soft lockup. Hence it is recommended to spread the workload + across the available CPU cores. 
This can be achieved by setting the SMP
+ affinity of the interrupts and RoCE applications.
+ Please refer to OS documentation for setting smp_affinity and specific
+ commands like taskset etc.
+
+- To avoid the SQ full errors reported during iSER stress testing (kernels SLES 12 and later,
+ RHEL 7.4 and later), configure the minimum tx depth for the QPs to 4096.
+ All connections established after setting min_tx_depth use the user-specified values.
+
+ echo 4096 > /sys/kernel/config/bnxt_re/bnxt_re0/ports/1/tunables/min_tx_depth
+
+- For heavy RDMA-READ workloads with a large number of active QPs,
+ a higher ack-timeout value is recommended.
+ For example:
+ ib_read_bw with -q 4096 would require ack timeout 18. Ack timeout is
+ controlled by option "-u".
+ ib_read_bw --report_gbits -F -m 4096 -q 4096 -d bnxt_re2 -x 3 -u 18 -D 60 -s 65536
+
+- For use cases where the adapter QP limit is exercised or active QPs are
+ close to adapter limits, the ack timeout needs to be increased to 24 to avoid
+ retransmissions and loss of performance.
+ For example:
+ For multiple instances of ib_send_bw/ib_read_bw/ib_write_bw, which create a
+ total of 64K QPs, specify a higher ack timeout in each application instance
+ using -u 24.
+- The "restrict_mrs" module parameter is used to limit the number of MRs supported
+ by the driver. For now the parameter applies only to 574xx devices.
+ Setting this flag to 1 would reduce the number of MRs supported by the device
+ to 64K.
+
+Limitations
+===========
+- GIDs corresponding to IPv4 and IPv6 addresses may be missing after
+ device creation sequences such as driver load or device error recovery.
+
+ e.g. when RoCE v1 and RoCE v2 are enabled on the adapter,
+ ibv_devinfo -d -vvv
+ Shows:
+ GID[ 0]: fe80:0000:0000:0000:5e6f:69ff:fe1e:2f3e, RoCE v1
+ GID[ 1]: fe80::5e6f:69ff:fe1e:2f3e, RoCE v2
+
+ Should show:
+ GID[ 0]: fe80:0000:0000:0000:5e6f:69ff:fe1e:2f3e, RoCE v1
+ GID[ 1]: fe80::5e6f:69ff:fe1e:2f3e, RoCE v2
+ GID[ 2]: 0000:0000:0000:0000:0000:ffff:c0a8:0033, RoCE v1
+ GID[ 3]: ::ffff:192.168.0.51, RoCE v2
+ GID[ 4]: 2001:0000:0000:0000:0000:0000:0000:0051, RoCE v1
+ GID[ 5]: 2001::51, RoCE v2
+ This is due to the device creation sequence running from netdev event context.
+ The design change to avoid these failures will be available in future
+ releases. As a workaround, bring down the L2 interface (ifconfig down)
+ and bring it up (ifconfig up). This will force the stack to add the GIDs again.
+
+- Stack traces are seen with the following message during link
+ down and other administrative events like PFC enable/disable.
+ "task ib_write_bw:406494 blocked for more than 120 seconds"
+ This is because of the un-graceful destroy of the resources, and FW
+ can take more time to destroy these resources. The RoCE driver can
+ wait up to 240 seconds before hitting the timeout. These error messages
+ will stop once all resources are destroyed.
+
+- When the applications are run simultaneously, there is a chance of commands
+ failing with the error message "send failed - retries:2000". This is
+ because the CMD Queue gets full when applications create/destroy
+ resources simultaneously. This is also observed when applications are
+ ungracefully killed and restarted before the active resources of the killed
+ instances are cleaned up. If this issue is seen, restart the applications
+ with a delay between them.
+
+- For error recovery to succeed, the interface should be in the ifup state
+ with no disruptions during the process that might reconfigure the device.
+ In other words, for reliable error recovery, it is recommended not to make
+ any configuration changes (such as unloading the RoCE driver, bonding interface
+ changes, ethtool self-tests etc.) while error recovery is in progress.
+ If such changes are made and recovery does not succeed, try the below
+ actions to recover:
+ - Unload and reload both the RoCE and L2 drivers
+ - Unbind and rebind the PCIe function using sysfs
+
+- Error messages are seen during RoCE driver load after a live FW update/FW reset,
+ if the L2 interface is down during the reset.
+ To avoid these errors, bring all ethernet interfaces up before loading the RoCE driver.
+
+- When remote directories are mounted using NFS-RDMA, unloading bnxt_re shall
+ cause a system hang and the system needs a reboot for normal operation.
+ Always unmount all active NFS mounts over the bnxt_re interface before unloading
+ the bnxt_re driver.
+
+- Using the same interface MTU on both client and server is recommended. Users can
+ see unexpected results if there is a mismatch in interface MTUs on the client
+ and server.
+
+- Changing the MAC address of the interface while bnxt_re is loaded can trigger a failure
+ during GID deletion. Unload the bnxt_re driver before changing the interface MAC address.
+
+- The legacy FMR Pool is not supported yet.
+
+- Raw Ethertype QP is not supported yet.
+
+- Tunnel is not supported yet.
+
+- On the SLES11 SP4 default kernel (3.0.101-63-default), the tc command to map the
+ priority to a traffic class throws an error and hence ETS bandwidth will not get
+ honored when NIC + RoCE traffic is run together.
+ This issue is fixed in 3.0.101-91-default. Users are advised to upgrade to
+ this kernel while testing ETS.
+
+- iSCSI ping timeouts are reported in dmesg during 128 VF testing over 8
+ RHEL 8.3 VMs. Some of the connections report recovery timeout
+ during scale testing. Reduce the number of VFs to 64 and use a
+ smaller number of VMs per host to avoid the recovery failures.
+
+- When RoCE VFs are created, destroying the VFs may take a longer time to complete.
+ For example, destroying 64 VFs may take up to 20 seconds.
+
+- Avoid running ethtool offline selftest when QPs are active.
+
+- Avoid performing a PCI reset when QPs are active. The driver has no way to know
+ about this reset, which eventually causes PCI fatal errors and a system crash when
+ QPs are active and Doorbell recovery/pacing is enabled.
+
+- On AMD64 chipsets (recently noticed on AMD EPYC 9554) with IOMMU enabled,
+ users can notice the below error strings from the bnxt_re driver.
+
+ infiniband bnxt_re0: bnxt_re_build_reg_wqe: bnxt_re_mr 0xff211d4fb9eaa800 len (65536 > 4096)
+ infiniband bnxt_re0: bnxt_re_build_reg_wqe: build_reg_wqe page[0] = 0xffffffffffff0000
+ infiniband bnxt_re0: bad_wr seen with opcode = 0x20
+
+ The primary issue is that the AMD IOMMU provides an IOVA reaching the maximum U64
+ value, which is not expected. Contact Broadcom support for additional information.
+
+- The driver no longer supports the max_msix_vec module parameter.
+ The num_comp_vectors in the output of "ibv_devinfo -v" is controlled
+ by the L2 driver ring counts before loading the bnxt_re driver. If users
+ want more completion vectors for RoCE (i.e. up to 64 or num_cpus), unload the
+ bnxt_re driver, reduce the L2 rings using ethtool and then load the RoCE driver.
+
+BNXT_RE Dynamic Debug Messages
+==============================
+The bnxt_re driver supports the Linux dynamic debug feature.
+
+All error, warning and info messages are logged by default.
+Debug messages, if needed, can be enabled by writing to
+the standard /dynamic_debug/control file.
+Debug messages can be enabled/disabled at various granularities
+such as module, file, function, a range of line numbers or a
+specific line number.
+
+The following kernel document describes this in detail with examples:
+https://www.kernel.org/doc/Documentation/dynamic-debug-howto.txt
+
+A few examples on how to use this with the bnxt_re driver:
+
+1) To check the debug messages that are available in bnxt_re:
+# cat /sys/kernel/debug/dynamic_debug/control | grep bnxt_re
+
+2) To enable all debug messages in bnxt_re during load time:
+# insmod bnxt_re.ko dyndbg==p
+
+3) To enable all debug messages in bnxt_re after loading:
+# echo "module bnxt_re +p" > /sys/kernel/debug/dynamic_debug/control
+
+4) To disable all debug messages in bnxt_re after loading:
+# echo "module bnxt_re -p" > /sys/kernel/debug/dynamic_debug/control
+
+5) To enable a debug message at a specific line number in a file:
+# echo -n "file qplib_fp.c line 2554 +p" > /sys/kernel/debug/dynamic_debug/control
+
+
+BNXT_RE Compiler Switches
+=========================
+
+ENABLE_DEBUGFS - Enable debugFS operation
+
+ENABLE_RE_FP_SPINLOCK - Enable spinlocks on the fast path bnxt_re_qp queue
+ resources
+
+ENABLE_FP_SPINLOCAK - Enable spinlocks on the fast path bnxt_qplib queue
+ resources
+
+ENABLE_DEBUG_SGE - Enable the dumping of SGE info to the journal log
+
+BNXT_RE Driver Defaults
+=======================
+The driver enables 3 traffic classes (L2, RoCE and CNP) during load.
+The driver configures the default RoCE and CNP priorities and DSCP values and
+enables PFC and CC by default. No other configuration is required on the host
+if the switches are configured with default values.
+
+Following are the default CC/QoS values.
+
+Default priority and DSCP
+-------------------------
+RoCE Traffic Priority: 3
+RoCE Traffic DSCP: 26
+CNP Traffic Priority: 7
+CNP Traffic DSCP: 48
+
+Default traffic classes
+----------------------
+TC0: L2 traffic
+TC1: RoCE Traffic
+TC2: CNP Traffic
+
+Default PFC priority
+--------------------
+Enabled for priority 3
+
+Default ETS Configuration
+-------------------------
+Assigned bandwidth to L2 traffic: 50%
+Assigned bandwidth to RoCE traffic: 50%
+
+DCB Settings
+============
+
+The users can change the above default values using bnxt_setupcc.sh or
+other tools. PFC settings can be configured with bnxtqos or lldptool.
+While adding new settings, please make sure that the default settings
+(say, app TLVs) are removed before programming the new values.
+Refer to bnxt_setupcc.sh for usage of lldptool/bnxtqos and configfs.
+
+Note: Since the bnxt_re driver enables PFC on the RoCE priority during driver load,
+ PFC must be disabled using bnxtqos/lldptool before changing any TC
+ mapping. This ensures proper mapping of the user traffic class
+ to HW queues.
+
+Note: Unloading the bnxt_re driver would change the current DCBx settings on
+ the adapter. This might include some of the settings done by users
+ after loading bnxt_re. If bnxt_re is unloaded after the user changes
+ settings, please clear all DCBx settings before loading bnxt_re again.
+
+ To confirm the DCBx settings, use bnxtqos or lldptool as follows.
+
+ $lldptool get-tlv -n -i
+ or
+ $ bnxtqos -dev=p6p1 get_qos
+
+
+Example usages of bnxtqos and lldptool are given below.
+
+bnxtqos
+-------
+#Set APP TLV for RoCE v2 traffic with RoCE priority
+ bnxtqos -dev= set_apptlv app=,3,4791
+E.g. To set roce_prio=5, selector=3 and protocol=4791.
+ bnxtqos -dev=eth0 set_apptlv app=5,3,4791
+
+#Enable PFC on RoCE priority
+ bnxtqos -dev= set_pfc enabled=
+E.g.
To enable PFC for roce_prio=5 + bnxtqos -dev=eth0 set_pfc enabled=5 + +#Set APP TLV for RoCE dscp and priority + bnxtqos -dev= set_apptlv app=,5, + echo > /sys/kernel/config/bnxt_re//ports/1/cc/roce_dscp +E.g. To set roce_prio=5 and roce_dscp=28 + bnxtqos -dev=eth0 set_apptlv app=5,5,28 + echo 0x1c > /sys/kernel/config/bnxt_re/bnxt_re2/ports/1/cc/roce_dscp + +#Set CNP priority and dscp +#For this command to execute, make sure "service prof type" is supported. +#Following is the command to know the "service prof type" is supported: +#cat /sys/kernel/debug/bnxt_re//info | grep fw_service_prof_type_sup + bnxtqos -dev= set_apptlv app=,5, + echo > /sys/kernel/config/bnxt_re//ports/1/cc/cnp_dscp +E.g. To set cnp_prio=4 and cnp_dscp=40 for selector=5 + bnxtqos -dev=eth0 set_apptlv app=4,5,40 + echo 0x28 > /sys/kernel/config/bnxt_re/bnxt_re2/ports/1/cc/cnp_dscp + +#Set the ETS and Priority to Traffic Class mapping + bnxtqos -dev= set_ets tsa=0:ets,1:ets,2:strict,3:strict,4:strict,5:strict,6:strict,7:strict priority2tc= tcbw=, +E.g. + bnxtqos -dev= set_ets tsa=0:ets,1:ets priority2tc=0:0,1:0,2:0,3:0,4:0,5:1,6:0,7:0 tcbw=20,80 + +#Dump existing settings +bnxtqos -dev= get_qos + +#Delete the existing app tlvs + bnxtqos -dev= set_apptlv -d app= +E.g. To delete roce_prio=5, selector=3 and protocol=4791 + bnxtqos -dev=eth0 set_apptlv -d app=5,3,4791 + +#Disable PFC + bnxtqos -dev= set_pfc enabled=none + +lldptool +-------- +Note: If the switches are capable of handling RoCE TLVs, the following +settings are not required as adapter will override local settings, if any, +with the switch settings. + +Following steps are recommended to configure +the local adapter to set DCB parameters, in case switches are not capable +of DCB negotiations. + +# Load L2 driver and make sure port and Link are UP + service lldpad start + lldptool -L -i p6p1 adminStatus=rxtx +#Disable PFC +lldptool -T -i -V PFC enabled=none +#Delete the existing app TLVs. For eg: +lldptool -T -i -V APP -d app=3,5,26 +lldptool -T -i -V APP -d app=3,3,4791 +#For RoCE-V2 protocol with Priority-5 + lldptool -T -i p6p1 -V APP app=5,3,4791 + lldptool -T -i p6p1 -V ETS-CFG tsa=0:ets,1:ets,2:strict,3:strict,4:strict,5:strict,6:strict,7:strict \ + up2tc=0:0,1:0,2:0,3:0,4:0,5:1,6:0,7:0 tcbw=10,90,0,0,0,0,0,0 + lldptool -T -i p6p1 -V PFC enabled=5 + service lldpad restart + +Note: Please refer man pages of lldptool, lldptool-app, +lldptool-ets, lldptool-pfc, etc. for more details + +Note: VF inherits the PFC settings of the PF. VF doesn't have privilege to +set DCB parameters using lldptool. No need of running lldpad service on the VM. + +Note: The driver supports only one priority for RoCE traffic. + +Note: The driver by default supports Priority VLAN Tagging i.e it adds a NULL +VLAN tag if a priority is configured for RoCE Traffic, without VLANs being +configured. However, for customers who are interested only in PFC via DSCP, +driver provides a knob to disable the auto VLAN 0 tag insertion. + +echo 1 > /sys/kernel/config/bnxt_re/bnxt_re0/ports/1/cc/disable_prio_vlan_tx + +Guidelines for changing DCB settings +------------------------------------ + +The current software requires the following Traffic Class mapping. + +TC0: L2 traffic +TC1: RoCE Traffic +TC2: CNP Traffic and L2 Traffic +TC3 – TC7: L2 traffic + +Each TC can be mapped to different priority. So while mapping priority to traffic +class, make sure that TC1 is mapped for RoCE priority and TC2 is mapped for CNP priority. 
+RoCE traffic class support only one DSCP value programmed through the DSCP App TLV. Since +the CNP Traffic class (TC2) is shared between CNP and L2 traffic, multiple DSCP values are +supported for this traffic class. The current solution requires that the DSCP App TLV +for CNP should be programmed at the end, after programming other App TLVs. + +Sample programming for multiple DSCP values for TC2. +TC1 RoCE pri – 5 +TC1 RoCE dscp – 59 +TC2 CNP pri – 6 +TC2 CNP dscp – 49 +TC2 L2 dscp – 55 +TC2 L2 dscp – 54 +All other priorities are mapped to remaining traffic classes. + +# Map the priority to Traffic class and enable PFC on priority 5 +bnxtqos -dev=enp37s0f0np0 set_ets tsa=0:ets,1:ets,2:strict,3:ets,4:ets,5:ets,6:ets,7:strict \ + priority2tc=0:0,1:3,2:4,3:5,4:6,5:1,6:2,7:7 tcbw=10,50,2,3,2,33 +bnxtqos -dev=enp37s0f0np0 set_pfc enabled=5 + +#Set up RoCE v2 packet based TLV (dest port 4791) +bnxtqos -dev=enp37s0f0np0 set_apptlv app=5,3,4791 +#Setup RoCE DSCP (59) App TLV +bnxtqos -dev=enp37s0f0np0 set_apptlv app=5,5,59 +#TC2 mapped for priority 6. Setup L2 DSCP values (54,55) first +#and then program CNP DSCP value (49) +bnxtqos -dev=enp37s0f0np0 set_apptlv app=6,5,54 +bnxtqos -dev=enp37s0f0np0 set_apptlv app=6,5,55 +bnxtqos -dev=enp37s0f0np0 set_apptlv app=6,5,49 + +Congestion Control +=================== + +Explicit Congestion Notification (ECN) is a congestion avoidance mechanism. +In this protocol a Congestion Notification Packet(CNP) signals the existence +of congestion to the remote transmitter. Reacting to CNP, the transmitter reduces +the transmit rate on a transmit-flow for a given time quanta. CNP is generated +by the receiver when it detects congestion in the receive processing pipe. + +To export the tuning parameters RoCE driver uses configfs support from linux +kernel. Following are the steps to configure congestion control parameters. + + 1. Pre-requisites + =============== + 1.a Host base lldpad is configured for RoCE-v2 protocol + and a valid priority is assigned to RoCE-v2. + ref: "lldptool" section of this document. + + 2. Mount per-port-configfs interface + =================================== + 2.a Load RoCE driver + 2.b ls /sys/kernel/config should list directory "bnxt_re" + 2.c Create a directory in configfs-path with the RoCE device name. + E.g. for bnxt_re1 use following: + mkdir -p /sys/kernel/config/bnxt_re/bnxt_re1 + 2.d ls /sys/kernel/config/bnxt_re/bnxt_re1 + ls /sys/kernel/config/bnxt_re/bnxt_re1/ports/1/cc/ + cnp_dscp cnp_prio apply cc_mode roce_prio + ecn_enable g inact_cp init_cr init_tr + nph_per_state rtt tcp_cp roce_dscp ecn_marking + 2.e To enable CC, set 1 to ecn_enable, To disable, set 0 + E.g. + echo -n 0x1 > ecn_marking + echo -n 0x0 > ecn_enable + Note: There are other tunables under same directory. Use these fields as + needed. + ... + 2.f Check for "service prof type" is supported, by using the + following command:- + cat /sys/kernel/debug/bnxt_re//info | grep fw_service_prof_type_sup + E.g. for bnxt_re0 use following: + cat /sys/kernel/debug/bnxt_re/bnxt_re0/info | grep fw_service_prof_type_sup + + If "service prof type" is supported, refer to + "DCB settings" section of this document. + If "service prof type" is *not* supported, follow the + steps below. + 2.g Change the value of a specific parameter + echo > roce_prio + echo > cnp_prio + echo > roce_dscp + echo > cnp_dscp + E.g. + echo -n 0x05 > roce_dscp + 2.h Apply the changes to hardware + echo -n 0x01 > apply + Note: Any changes will not take effect unless step 2.h is + carried out. 
+
+ 2.i Read back a specific parameter
+ cat roce_dscp
+ ...
+
+ 3. Unmount per-port-configfs interface
+ ====================================
+ 3.a Remove all per-port-configfs mounts as follows:
+ rmdir /sys/kernel/config/bnxt_re/bnxt_re1
+ rmdir /sys/kernel/config/bnxt_re/bnxt_re0
+ ...
+
+ Note: If configfs is mounted, rmmod bnxt_re will fail.
+ Step 3.a must be performed before issuing
+ rmmod bnxt_re.
+
+SR-IOV and VF Resource Distribution
+===================================
+RDMA SR-IOV is supported on BCM575xx devices only, with NPAR disabled.
+
+Note: Before enabling the VFs, both bnxt_en and bnxt_re drivers should be loaded.
+ Loading the bnxt_re driver after creating VFs is not supported. Removal of the bond
+ interface while VFs are present is also not supported, as removal of the bond
+ interface creates the RoCE base interfaces, which is similar to loading the
+ bnxt_re driver.
+
+ In distros that support auto loading of bnxt_re based on udev rules,
+ (i.e. having an entry ENV{ID_NET_DRIVER}=="bnxt_en", RUN{builtin}+="kmod load bnxt_re"
+ in udev rules file 90-rdma-hw-modules.rules)
+ Note: The location of the file is distro specific.
+ RHEL: /usr/lib/udev/rules.d/90-rdma-hw-modules.rules
+ UBUNTU: /lib/udev/rules.d/90-rdma-hw-modules.rules
+ If the bnxt_re driver is unloaded before creating VFs, VF creation loads the bnxt_re
+ driver. This operation throws errors in dmesg as this is considered loading the
+ driver after creating VFs. Disable RoCE on the adapter if the RoCE feature is not
+ required, or disable this udev rule to prevent auto loading of the bnxt_re driver.
+
+If SR-IOV is supported on the adapter, QPs, SRQs, CQs and MRs are distributed
+across VFs by the bnxt_re driver.
+
+The driver allocates 64K QPs, SRQs and CQs for the PF pool. It creates 256K MRs
+for the PF pool.
+For VFs, the driver restricts the total number of resources as follows:
+
+Max QPs - 6144
+Max MRs - 6144
+Max CQs - 6144
+Max SRQs - 4096
+
+For example, the number of active VFs can be obtained from the following command.
+ $cat /sys/class/net/p6p1/device/sriov_numvfs
+
+If sriov_numvfs is 2, half of the above values will be supported by each
+VF.
+
+Note: Since the PF is in privileged mode, it is allowed to use the
+entire PF pool resources. But VFs are restricted to the maximum configured
+by the above calculation. Users must ensure that the total resources created by the
+PF and its VFs are less than the maximum configured (64K for QPs/SRQs/CQs and 256K for MRs).
+
+Use the following command to get the active resource count.
+$cat /sys/kernel/debug/bnxt_re//info
+
+Presence of active RoCE traffic on the VF undergoing Function Level Reset (FLR)
+or on any other PFs/VFs impacts the function initialization time
+of the VF undergoing FLR. Function initialization time scales linearly as
+the cumulative active QP count across all PFs and VFs increases.
+The increased function initialization time may lead to VF probe failures
+and periodic HWRM timeouts when the cumulative active QP count is greater than 6K QPs.
+
+
+Link Aggregation
+================
+Link aggregation is a common technique that is used to provide
+additional aggregate bandwidth and high availability for logical
+interfaces that aggregate multiple physical interfaces. Additional
+aggregate bandwidth can be achieved by balancing the traffic load
+across multiple physical interfaces. High availability can be achieved
+by reconfiguring the loads across the active links when one of the
+physical links fails.
+The concepts of link aggregation can be applied to RoCE also.
+
+The current solution allows a link aggregation only if all of the
+following conditions are met:
+
+-> The netdev associated with each RDMA interface is a
+ part of an upper level device.
+-> The two netdev interfaces are part of the same bond device.
+-> Two netdevs on the same physical device are added to the bond.
+-> The link aggregate cannot span separate physical devices.
+-> The bond interface has exactly two non-NPAR physical interfaces.
+-> The bond mode is one of the following modes:
+ round-robin (mode 0), active-backup (mode 1), xor (mode 2),
+ or 8023ad (mode 4).
+
+Note: modes 0, 2 and 4 will be handled as active-active mode in HW.
+
+When a LAG is created, a RoCE device interface is visible
+with the name bnxt_re_bond0.
+
+Note: RoCE LAG is not supported on multi-host or multi-root configs.
+Note: If VFs are created on any of the functions of the bond, the RoCE bond device
+ will not be created. If the RoCE bond is created before VF creation, the RoCE bond
+ will continue to work on the PFs. But VF RoCE devices will not be supported.
+Note: If the adapter has more than 2 RoCE enabled functions (4 port adapter, etc.),
+ the RoCE bond device will not be created.
+ There should be exactly two RoCE devices from an adapter when the bond is
+ created. If the L2 bond is enabled on this adapter, RoCE doesn't work on
+ the bnxt_re devices created for the physical interfaces.
+Note: The RoCE bond is created only if there are two ethernet functions added
+ to the bond and the ethernet devices are from the same physical
+ adapter. Multiple adapters are not supported.
+Note: When LAG is enabled, the driver creates all QPs on PF0 and firmware
+ does the load balancing between the 2 LAG ports. In the current
+ algorithm, firmware will do load balancing on a per DPI (application)
+ basis. If we have 100 applications creating 1 QP each then all the
+ QPs will get created on the same port. Similarly, if we have 100
+ applications each creating an odd number of QPs then the QP count
+ difference between the ports can be up to 100. Only when all the
+ applications are creating an even number of QPs does the firmware
+ guarantee that the difference in QP count between both ports
+ is <= 2.
+Note: On BCM9574xx devices, to enable entropy for the RoCE V2 UDP source port,
+ firmware limits the number of GIDs available to 8 across all PFs on
+ Performance NICs and to 128 on Smart NICs. If the host tries to create more
+ GID entries than these limits then firmware will fail the GID add
+ command and as a result QP data traffic will fail.
+Note: The RoCE LAG solution involves a HW pipeline configuration that enables
+ RoCE traffic to be directed to the right port using an internal GID
+ to port mapping logic. However, the HW transmit queues and ring
+ shapers used for RoCE traffic are associated only with port 0.
+ The GID to port mapping enables re-direction to the correct port as
+ port status changes.
+
+ To enable transmit endpoint shaping with RoCE LAG, even for
+ active-backup mode, the transmit endpoint shapers associated with
+ port 0 always need to be enabled.
+
+ For example, in active-backup mode, if the following command was run
+ where port 0 and port 1 were linked at 100Gbps
+
+ bnxtqos -dev= tx_endpoint_ratelimit port_idx=0 ep0 max=40
+
+ The TX traffic out of port 0 would be 40Gbps when port 0 is active.
+ And when port 1 becomes active, the TX traffic out would also be 40Gbps.
+ This is because the shapers are associated with port 0 in active-backup
+ mode.
Please note in the example above if port_idx was set to 1 + in active-backup mode, the setting for port 1 will be set but not used. + + Another example, in active-backup mode when port 0 goes down, port 1 becomes + active, transmit per COS statistics will not reflect the current active + port stats. RoCE statistics available from debugfs interface are updated + accurately and can be used. +Note: When the L2 bond is created and the RoCE LAG is not created by the driver + due to the RoCE LAG not supported in the device, error messages are seen + in the dmesg for GID add/delete. + +=> Instructions to create/Destroy RoCE LAG + + - Load bnxt_en and bnxt_re driver + - Follow the distro specific commands to create L2 bond. RoCE bond will be + created in the background + - ibv_devices shows bnxt_re_bond0 device once the L2 bond is created. + +Note: If stable name is set by udev rule, the RoCE bond device name will point to the +device name of the first child device of the bond. + +Known Issues with Link aggregation: + +-> Supports only on distros RH 7.2 and later, SLES12 and later. +-> bnxt_re and bnxt_en drivers need to be loaded before creating bond interface. +-> Changing bond mode when RoCE driver is in use can cause system hang. + E.g. changing the bonding mode while running a user application, + can cause a system hang. + Please make sure that no reference to bnxt_re is taken while changing the bond mode. + Use the following command to check the module usage count + #lsmod|grep bnxt_re + For proper removal of bnxt_re devices or update the bond state: + 1. Unmount all active NFS RDMA mounts. + 2. Stop the ibacm service (or any similar service) on systems where OFED is + installed using the command: + # service ibacm stop + 3. Stop all user space RoCE applications. + +-> User has to delete the configfs entry created for the bond device before + a slave is removed from the bond. Without that, user would see error messages + on the terminal and may cause a hang. +-> Create / destroy bond in a loop: + Make sure that enough delay is provided (i.e. 5-10 sec) after create and destroy + of the bond. This is to avoid hang and call traces related to the rtnl_lock usage. +-> When there is a link toggle, bnxt_re driver communicates that to the fw to switch over. + If there are parallel outstanding FW cmds, it can take time for the fail over command + to reach the FW. The QP timeout value should be high enough to accommodate this. + It is recommended to use a timeout value 19. + +- If the error recovery process fails for some reasons when the LAG is created, + any subsequent administrative operations like de-slaving interfaces, unloading + the bonding driver and bringing up base interfaces would cause unexpected + behavior (can be a system crash). + +BNXT_RE Driver Statistics +======================= + +The bnxt_re driver supports debugFS which allows statistics and debug parameters be accessed. +To access this information, read the /sys/kernel/debug/bnxt_re/bnxt_re/info file. Each port will be +listed with associated state. The available statistics will vary based on hardware capability, eg: + +# cat /sys/kernel/debug/bnxt_re/bnxt_re0/info + +bnxt_re debug info: +=====[ IBDEV bnxt_re0 ]============================= + link state: UP + Max QP: 0xff7f + Max SRQ: 0xffff + Max CQ: 0xffff + Max MR: 0x10000 + Max MW: 0x10000 + Active QP: 0x2 + Active SRQ: 0x0 + Active CQ: 0x21 + Active MR: 0x4 + Active MW: 0x0 +... 
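+
+For quick monitoring of a single counter, the same debugfs file can be
+filtered with standard shell tools. A minimal sketch (the device name
+bnxt_re0 and the one second interval are only examples):
+
+# watch -n 1 'grep "Active QP" /sys/kernel/debug/bnxt_re/bnxt_re0/info'
+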
+
+
+Field Explanation:
+
+Device resource limits:
+Max QP Max number of QP limit
+Max SRQ Max number of SRQ limit
+Max CQ Max number of CQs limit
+Max MR Max number of memory region limit
+Max MW Max number of memory window limit
+
+Active Resources:
+Active QP Number of active QPs
+Active SRQ Number of active SRQs
+Active CQ Number of active CQs
+Active MR Number of active Memory Regions
+Active MW Number of active Memory Windows
+Active RC QP Number of active RC QPs
+Active UD QP Number of active UD QPs
+
+Note: HW uses the same resource pages for MR and MW.
+ So the total number of Active MR and Active MW should
+ be less than or equal to Max MR/MW.
+
+Resource Watermarks:
+QP Watermark Max QPs active after driver load
+SRQ Watermark Max SRQs active after driver load
+CQ Watermark Max CQs active after driver load
+MR Watermark Max MRs active after driver load
+MW Watermark Max MWs active after driver load
+AH Watermark Max AHs active after driver load
+PD Watermark Max PDs active after driver load
+RC QP Watermark Max RC QPs active after driver load
+UD QP Watermark Max UD QPs active after driver load
+
+Byte and Packet Counters:
+Rx Pkts Number of RoCE packets received
+Rx Bytes Number of RoCE bytes received
+Tx Pkts Number of RoCE packets transmitted
+Tx Bytes Number of RoCE bytes transmitted
+
+Congestion Notification Counters:
+CNP Tx Pkts Number of RoCE CNP packets transmitted
+CNP Tx Bytes Number of RoCE CNP bytes transmitted
+CNP Rx Pkts Number of RoCE CNP packets received
+CNP Rx Bytes Number of RoCE CNP bytes received
+
+RDMA operation Counters:
+tx_atomic_req Number of atomic requests transmitted
+rx_atomic_req Number of atomic requests received
+tx_read_req Number of read requests transmitted
+tx_read_resp Number of read responses transmitted
+rx_read_req Number of read requests received
+rx_read_resp Number of read responses received
+tx_write_req Number of write requests transmitted
+rx_write_req Number of write requests received
+tx_send_req Number of send requests transmitted
+rx_send_req Number of send requests received
+
+Driver Debug counters:
+Resize CQ count Debug counter for CQ resize ops after driver load
+num_irq_started Debug counter for IRQs started after device creation
+num_irq_stopped Debug counter for IRQs stopped after device creation
+poll_in_intr_en Debug counter for indicating control path polling when
+ interrupts are enabled
+poll_in_intr_dis Debug counter for indicating control path polling when
+ interrupts are disabled
+cmdq_full_dbg_cnt Debug counter to indicate control path CMDQ full
+fw_service_prof_type_sup Debug info to indicate the current service profile config
+dbq_int_recv Debug counter to indicate the DBQ interrupt received
+dbq_int_en Debug counter to indicate the number of iterations the dbq
+ interrupt is enabled
+dbq_pacing_resched Debug counter to indicate the number of times the pacing thread
+ was rescheduled
+dbq_pacing_complete Debug counter to indicate the count where the pacing thread
+ completed
+dbq_pacing_alerts Debug counter to indicate the number of times userlibs alerted
+ the driver for onset of congestion
+dbq_dbr_fifo_reg Debug counter to monitor the HW FIFO reg
+dbr_drop_recov_epoch Debug counter to indicate the epoch of the latest DBR drop event
+dbr_drop_recov_events Debug counter to indicate the number of DBR drop events
+dbr_drop_recov_timeouts Debug counter to indicate the DBR drop events scheduled to
+ user space that failed to complete within the timeout.
+dbr_drop_recov_timeout_users Debug counter to indicate the number of user instances that + experienced timeout when driver finishes the recovery thread. +dbr_drop_recov_event_skips Debug counter to indicate the number of DBR drop events ignored + (skipped) by the driver because of one or more outstanding event. +latency_slab Each slab is of 1 second granularity. The Counters of each slab represent + the total number of rcfw commands completed in that range. + Upto 128 seconds latency is tracked. +rx_dcn_payload_cut Number of received DCN payload cut packets. +te_bypassed Number of transmitted packets that bypassed the transmit engine. + +Recoverable Errors: +Recoverable Errors Number of recoverable errors detected. Recoverable errors are + detected by the HW. HW instructs FW to initiate the recovery + process. RC connection does not teardown as a result of these errors. +to_retransmits Number of retransmission requests +rnr_naks_rcvd Number of RNR (Receiver-Not-Ready) NAKs received. +dup_req Number of duplicated requests detected. +missing_resp Number of responses missing +seq_err_naks_rcvd Number of PSN sequencing error NAKs received +res_oob_drop_count Number of packets dropped because of no host buffers +res_oos_drop_count Number of out of sequence packets received +rx_roce_discard_pkts Number of discard packets received +rx_roce_error_pkts Number of error packets received + + +Fatal Errors: +max_retry_exceeded Number of retransmission requests exceeded the max +unrecoverable_err Number of unrecoverable errors detected +bad_resp_err Number of bad response errors detected +local_qp_op_err Number of QP local operation errors detected +local_protection_err Number of local protection errors detected +mem_mgmt_op_err Number of times HW detected an error because of illegal bind/fast + register/invalidate attempted by the driver +remote_invalid_req_err Number of invalid request received from the remote rdma initiator. +remote_access_err Number of times H/W received a REMOTE ACCESS ERROR NAK from the peer. +remote_op_err Number of times HW received a REMOTE OPERATIONAL ERROR NAK from the peer. + +Responder errors: +res_exceed_max Number of times HW detected incoming Send, RDMA write or RDMA read + messages which exceed the maximum transfer length. +res_length_mismatch Number of times HW detected incoming RDMA write message payload + size does not match write length in the RETH. +res_exceeds_wqe Number of times HW detected Send payload exceeds RQ/SRQ RQE buffer capacity. +res_opcode_err Number of times HW detected First, Only, Middle, Last packets for + incoming requests are improperly ordered with respect to the previous packet. +res_rx_invalid_rkey Number of times HW detected a incoming request with an R_KEY that + did not reference a valid MR/MW. +res_rx_domain_err Number of times HW detected a incoming request with an R_KEY that + referenced a MR/MW that was not in the same PD as the QP on which the + request arrived. +res_rx_no_perm Number of times HW detected a incoming RDMA write request with an + R_KEY that referenced a MR/MW which did not have the access permission + needed for the operation. +res_rx_range_err Number of times HW detected an incoming RDMA write request that had + a combination of R_KEY, VA and length that was out of bounds of the + associated MR/MW. +res_tx_invalid_rkey Number of times HW detected a R_KEY that did not reference a valid + MR/MW while processing incoming read request. 
+res_tx_domain_err       Number of times HW detected an incoming request with an R_KEY that
+                        referenced an MR/MW that was not in the same PD as the QP on which
+                        the RDMA read request was received.
+res_tx_no_perm          Number of times HW detected an incoming RDMA read request with an
+                        R_KEY that referenced an MR/MW which did not have the access
+                        permission needed for the operation.
+res_tx_range_err        Number of times HW detected an incoming RDMA read request that had
+                        a combination of R_KEY, VA and length that was out of bounds of the
+                        associated MR/MW.
+res_irrq_oflow          Number of times HW detected that the peer sent more RDMA read or
+                        atomic requests than the negotiated maximum.
+res_unsup_opcode        Number of times HW detected that the peer sent a request with an
+                        opcode for a request type that is not supported on this QP.
+res_unaligned_atomic    Number of times HW detected that the VA of an atomic request is on
+                        a memory boundary that prevents atomic execution.
+res_rem_inv_err         Number of times HW detected an incoming send with invalidate
+                        request in which the R_KEY to invalidate did not reference an
+                        MR/MW that could be invalidated.
+res_mem_error64         Number of times HW detected an RQ/SRQ SGE which points to
+                        inaccessible memory.
+res_srq_err             Number of times HW detected a QP moving to the error state because
+                        the associated SRQ is in error.
+res_cmp_err             Number of times HW detected that there is no CQE space available
+                        on the CQ or the CQ is not in a valid state.
+res_invalid_dup_rkey    Number of times HW detected an invalid R_KEY while re-sending
+                        responses to duplicate read requests.
+res_wqe_format_err      Number of times HW detected an error in the format of the WQE in
+                        the RQ/SRQ.
+res_cq_load_err         Number of times HW detected an error while attempting to load the
+                        CQ context.
+res_srq_load_err        Number of times HW detected an error while attempting to load the
+                        SRQ context.
+
+Note: When a LAG is created, all the statistics are reported on function 0 of the device.
diff --git a/bnxt_re-1.10.3-229.0.139.0/bnxt_re-abi.h b/bnxt_re-1.10.3-229.0.139.0/bnxt_re-abi.h
new file mode 100644
index 0000000..8938e3e
--- /dev/null
+++ b/bnxt_re-1.10.3-229.0.139.0/bnxt_re-abi.h
@@ -0,0 +1,188 @@
+/*
+ * Copyright (c) 2015-2022, Broadcom. All rights reserved. The term
+ * Broadcom refers to Broadcom Inc. and/or its subsidiaries.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Author: Eddie Wai + * + * Description: Uverbs ABI header file + */ + +#ifndef __BNXT_RE_UVERBS_ABI_H__ +#define __BNXT_RE_UVERBS_ABI_H__ + +#define BNXT_RE_ABI_VERSION 6 + +enum { + BNXT_RE_COMP_MASK_UCNTX_WC_DPI_ENABLED = 0x01, + BNXT_RE_COMP_MASK_UCNTX_POW2_DISABLED = 0x02, + BNXT_RE_COMP_MASK_UCNTX_RSVD_WQE_DISABLED = 0x04, + BNXT_RE_COMP_MASK_UCNTX_MQP_EX_SUPPORTED = 0x08, + BNXT_RE_COMP_MASK_UCNTX_DBR_PACING_ENABLED = 0x10, + BNXT_RE_COMP_MASK_UCNTX_DBR_RECOVERY_ENABLED = 0x20, + BNXT_RE_COMP_MASK_UCNTX_HW_RETX_ENABLED = 0x40, + BNXT_RE_COMP_MASK_UCNTX_SMALL_RECV_WQE_DRV_SUP = 0x80, +}; + +enum { + BNXT_RE_COMP_MASK_REQ_UCNTX_POW2_SUPPORT = 0x01, + BNXT_RE_COMP_MASK_REQ_UCNTX_RSVD_WQE = 0x02, + BNXT_RE_COMP_MASK_REQ_UCNTX_SMALL_RECV_WQE_LIB_SUP = 0x04, +}; + +struct bnxt_re_uctx_req { + __aligned_u64 comp_mask; +}; + +#define BNXT_RE_CHIP_ID0_CHIP_NUM_SFT 0x00 +#define BNXT_RE_CHIP_ID0_CHIP_REV_SFT 0x10 +#define BNXT_RE_CHIP_ID0_CHIP_MET_SFT 0x18 +struct bnxt_re_uctx_resp { + __u32 dev_id; + __u32 max_qp; + __u32 pg_size; + __u32 cqe_sz; + __u32 max_cqd; + __u32 chip_id0; + __u32 chip_id1; + __u32 modes; + __aligned_u64 comp_mask; + __u8 db_push_mode; +} __attribute__((packed)); + +enum { + BNXT_RE_COMP_MASK_PD_HAS_WC_DPI = 0x01, + BNXT_RE_COMP_MASK_PD_HAS_DBR_BAR_ADDR = 0x02, +}; + +struct bnxt_re_pd_resp { + __u32 pdid; + __u32 dpi; + __u64 dbr; + __u64 comp_mask; /*FIXME: Not working if __aligned_u64 is used */ + __u32 wcdpi; + __u64 dbr_bar_addr; +} __attribute__((packed)); + +enum { + BNXT_RE_COMP_MASK_CQ_HAS_DB_INFO = 0x01, + BNXT_RE_COMP_MASK_CQ_HAS_WC_DPI = 0x02, + BNXT_RE_COMP_MASK_CQ_HAS_CQ_PAGE = 0x04, + BNXT_RE_COMP_MASK_CQ_HAS_HDBR_KADDR = 0x08 +}; + +enum { + BNXT_RE_COMP_MASK_CQ_REQ_HAS_CAP_MASK = 0x1, + BNXT_RE_COMP_MASK_CQ_REQ_HAS_HDBR_KADDR = 0x2 +}; + +enum { + BNXT_RE_COMP_MASK_CQ_REQ_CAP_DBR_RECOVERY = 0x1 +}; + +#define BNXT_RE_IS_DBR_RECOV_CQ(_req) \ + (_req.comp_mask & BNXT_RE_COMP_MASK_CQ_REQ_HAS_CAP_MASK && \ + _req.cq_capability & BNXT_RE_COMP_MASK_CQ_REQ_CAP_DBR_RECOVERY) + +struct bnxt_re_cq_req { + __u64 cq_va; + __u64 cq_handle; + __aligned_u64 comp_mask; + __u16 cq_capability; +} __attribute__((packed)); + +struct bnxt_re_cq_resp { + __u32 cqid; + __u32 tail; + __u32 phase; + __u32 rsvd; + __aligned_u64 comp_mask; + __u32 dpi; + __u64 dbr; + __u32 wcdpi; + __u64 uctx_cq_page; + __u64 hdbr_kaddr; +} __attribute__((packed)); + +struct bnxt_re_resize_cq_req { + __u64 cq_va; +} __attribute__((packed)); + +struct bnxt_re_qp_req { + __u64 qpsva; + __u64 qprva; + __u64 qp_handle; +} __attribute__((packed)); + +struct bnxt_re_qp_resp { + __u32 qpid; + __u32 hdbr_dt; + __u64 hdbr_kaddr_sq; + __u64 hdbr_kaddr_rq; +} __attribute__((packed)); + +struct bnxt_re_srq_req { + __u64 srqva; + __u64 srq_handle; +} __attribute__((packed)); + +struct bnxt_re_srq_resp { + __u32 srqid; + __u64 hdbr_kaddr; +} __attribute__((packed)); + +/* Modify QP */ +enum { + BNXT_RE_COMP_MASK_MQP_EX_PPP_REQ_EN_MASK = 0x1, + BNXT_RE_COMP_MASK_MQP_EX_PPP_REQ_EN 
= 0x1, + BNXT_RE_COMP_MASK_MQP_EX_PATH_MTU_MASK = 0x2 +}; + +struct bnxt_re_modify_qp_ex_req { + __aligned_u64 comp_mask; + __u32 dpi; + __u32 rsvd; +} __packed; + +struct bnxt_re_modify_qp_ex_resp { + __aligned_u64 comp_mask; + __u32 ppp_st_idx; + __u32 path_mtu; +} __packed; + +enum bnxt_re_shpg_offt { + BNXT_RE_BEG_RESV_OFFT = 0x00, + BNXT_RE_AVID_OFFT = 0x10, + BNXT_RE_AVID_SIZE = 0x04, + BNXT_RE_END_RESV_OFFT = 0xFF0 +}; + +#endif diff --git a/bnxt_re-1.10.3-229.0.139.0/bnxt_re.h b/bnxt_re-1.10.3-229.0.139.0/bnxt_re.h new file mode 100644 index 0000000..d1e2174 --- /dev/null +++ b/bnxt_re-1.10.3-229.0.139.0/bnxt_re.h @@ -0,0 +1,934 @@ +/* + * Copyright (c) 2015-2023, Broadcom. All rights reserved. The term + * Broadcom refers to Broadcom Inc. and/or its subsidiaries. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * Author: Eddie Wai + * + * Description: main (header) + */ + +#ifndef __BNXT_RE_H__ +#define __BNXT_RE_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if defined(HAVE_DISASSOCIATE_UCNTX) && defined(HAVE_SCHED_MM_H) +#include +#endif +#if defined(HAVE_DISASSOCIATE_UCNTX) && defined(HAVE_SCHED_TASK_H) +#include +#endif + +#ifdef LEGACY_BOND_SUPPORT +#ifdef HAVE_NET_BONDING_H +#include +#endif +#endif + +#include +#include +#include +#include +#include +#include +#if defined (HAVE_UVERBS_IOCTL_H) +#include +#endif +#ifdef HAVE_DEVLINK +#include +#endif + +#include "bnxt_ulp.h" +#include "roce_hsi.h" +#include "qplib_res.h" +#include "qplib_sp.h" +#include "qplib_fp.h" +#include "qplib_rcfw.h" +#include "ib_verbs.h" +#include "stats.h" +#include "compat.h" + +#define ROCE_DRV_MODULE_NAME "bnxt_re" +#define ROCE_DRV_MODULE_VERSION "229.0.139.0" +#define ROCE_DRV_MODULE_RELDATE "January 29, 2024" + +#define BNXT_RE_REF_WAIT_COUNT 20 +#define BNXT_RE_ROCE_V1_ETH_TYPE 0x8915 +#define BNXT_RE_ROCE_V2_PORT_NO 4791 +#define BNXT_RE_RES_FREE_WAIT_COUNT 5000 + +#define BNXT_RE_PAGE_SHIFT_4K (12) +#define BNXT_RE_PAGE_SHIFT_8K (13) +#define BNXT_RE_PAGE_SHIFT_64K (16) +#define BNXT_RE_PAGE_SHIFT_2M (21) +#define BNXT_RE_PAGE_SHIFT_8M (23) +#define BNXT_RE_PAGE_SHIFT_1G (30) + +#define BNXT_RE_PAGE_SIZE_4K BIT(BNXT_RE_PAGE_SHIFT_4K) +#define BNXT_RE_PAGE_SIZE_8K BIT(BNXT_RE_PAGE_SHIFT_8K) +#define BNXT_RE_PAGE_SIZE_64K BIT(BNXT_RE_PAGE_SHIFT_64K) +#define BNXT_RE_PAGE_SIZE_2M BIT(BNXT_RE_PAGE_SHIFT_2M) +#define BNXT_RE_PAGE_SIZE_8M BIT(BNXT_RE_PAGE_SHIFT_8M) +#define BNXT_RE_PAGE_SIZE_1G BIT(BNXT_RE_PAGE_SHIFT_1G) + +#define BNXT_RE_MAX_MR_SIZE_LOW BIT(BNXT_RE_PAGE_SHIFT_1G) +#define BNXT_RE_MAX_MR_SIZE_HIGH BIT(39) +#define BNXT_RE_MAX_MR_SIZE BNXT_RE_MAX_MR_SIZE_HIGH + +#define BNXT_RE_MAX_GID_PER_VF 128 + +#define BNXT_RE_RQ_WQE_THRESHOLD 32 +#define BNXT_RE_UD_QP_HW_STALL 0x400000 + +/* + * Setting the default ack delay value to 16, which means + * the default timeout is approx. 
260ms(4 usec * 2 ^(timeout)) + */ + +#define BNXT_RE_DEFAULT_ACK_DELAY 16 +#define BNXT_RE_BOND_PF_MAX 2 + +#define BNXT_RE_STATS_CTX_UPDATE_TIMER 250 + +#define BNXT_RE_CHIP_P7(chip_num) \ + ((chip_num) == CHIP_NUM_58818 ||\ + (chip_num) == CHIP_NUM_57608) + +#define BNXT_RE_MIN_KERNEL_QP_TX_DEPTH 4096 +#define BNXT_RE_STOP_QPS_BUDGET 200 + +#define BNXT_RE_HWRM_CMD_TIMEOUT(rdev) \ + ((rdev)->chip_ctx->hwrm_cmd_max_timeout * 1000) + +extern unsigned int min_tx_depth; +extern struct mutex bnxt_re_mutex; +extern struct list_head bnxt_re_dev_list; + +struct bnxt_re_ring_attr { + dma_addr_t *dma_arr; + int pages; + int type; + u32 depth; + u32 lrid; /* Logical ring id */ + u16 flags; + u8 mode; +}; + +#define BNXT_RE_MAX_MSIX 64 +#define BNXT_RE_MIN_MSIX 2 +struct bnxt_re_nq_record { + struct bnxt_msix_entry msix_entries[BNXT_RE_MAX_MSIX]; + /* FP Notification Queue (CQ & SRQ) */ + struct bnxt_qplib_nq nq[BNXT_RE_MAX_MSIX]; + int num_msix; + int max_init; + /* Serialize access to NQ record */ + struct mutex load_lock; +}; + +struct bnxt_re_work { + struct work_struct work; + unsigned long event; + struct bnxt_re_dev *rdev; + struct net_device *vlan_dev; + struct netdev_bonding_info netdev_binfo; + struct bnxt_re_bond_info *binfo; + /* netdev where we received the event */ + struct net_device *netdev; + struct auxiliary_device *adev; +}; + +struct bnxt_re_bond_info { + struct bnxt_re_dev *rdev; + struct net_device *master; + struct net_device *slave1; + struct net_device *slave2; + struct pci_dev *pdev1; + struct pci_dev *pdev2; + struct netdev_bonding_info nbinfo; + struct auxiliary_device *aux_dev1; /* Corresponds to slave1 */ + struct auxiliary_device *aux_dev2; /* Corresponds to slave2 */ + u8 active_port_map; + u8 aggr_mode; + u8 gsi_qp_mode; + u8 wqe_mode; +}; + +/* + * Data structure and defines to handle + * recovery + */ +#define BNXT_RE_PRE_RECOVERY_REMOVE 0x1 +#define BNXT_RE_COMPLETE_REMOVE 0x2 +#define BNXT_RE_POST_RECOVERY_INIT 0x4 +#define BNXT_RE_COMPLETE_INIT 0x8 + +/* QP1 SQ entry data strucutre */ +struct bnxt_re_sqp_entries { + u64 wrid; + struct bnxt_qplib_sge sge; + /* For storing the actual qp1 cqe */ + struct bnxt_qplib_cqe cqe; + struct bnxt_re_qp *qp1_qp; +}; + +/* GSI QP mode enum */ +enum bnxt_re_gsi_mode { + BNXT_RE_GSI_MODE_INVALID = 0, + BNXT_RE_GSI_MODE_ALL = 1, + BNXT_RE_GSI_MODE_ROCE_V1, + BNXT_RE_GSI_MODE_ROCE_V2_IPV4, + BNXT_RE_GSI_MODE_ROCE_V2_IPV6, + BNXT_RE_GSI_MODE_UD +}; + +enum bnxt_re_roce_cap { + BNXT_RE_FLAG_ROCEV1_CAP = 1, + BNXT_RE_FLAG_ROCEV2_CAP, + BNXT_RE_FLAG_ROCEV1_V2_CAP, +}; + +#define BNXT_RE_MAX_GSI_SQP_ENTRIES 1024 +struct bnxt_re_gsi_context { + u8 gsi_qp_mode; + bool first_cq_created; + /* Start: used only in gsi_mode_all */ + struct bnxt_re_qp *gsi_qp; + struct bnxt_re_qp *gsi_sqp; + struct bnxt_re_ah *gsi_sah; + struct bnxt_re_sqp_entries *sqp_tbl; + /* End: used only in gsi_mode_all */ +}; + +struct bnxt_re_tc_rec { + u8 cos_id_roce; + u8 tc_roce; + u8 cos_id_cnp; + u8 tc_cnp; + u8 tc_def; + u8 cos_id_def; + u8 max_tc; + u8 roce_prio; + u8 cnp_prio; + u8 roce_dscp; + u8 cnp_dscp; + u8 prio_valid; + u8 dscp_valid; + bool ecn_enabled; + bool serv_type_enabled; + u64 cnp_dscp_bv; + u64 roce_dscp_bv; +}; + +struct bnxt_re_dscp2pri { + u8 dscp; + u8 mask; + u8 pri; +}; + +struct bnxt_re_cos2bw_cfg { + u8 pad[3]; + struct_group_attr(cfg, __packed, + u8 queue_id; + __le32 min_bw; + __le32 max_bw; + u8 tsa; + u8 pri_lvl; + u8 bw_weight; + ); + u8 unused; +}; + +#define BNXT_RE_AEQ_IDX 0 +#define BNXT_RE_MAX_SGID_ENTRIES 256 + +#define 
BNXT_RE_DBGFS_FILE_MEM 65536 +enum { + BNXT_RE_STATS_QUERY = 1, + BNXT_RE_QP_QUERY = 2, + BNXT_RE_SERVICE_FN_QUERY = 3, +}; + +struct bnxt_re_en_dev_info { + struct list_head en_list; + struct bnxt_en_dev *en_dev; + struct bnxt_re_dev *rdev; + unsigned long flags; +#define BNXT_RE_FLAG_EN_DEV_NETDEV_REG 0 +#define BNXT_RE_FLAG_EN_DEV_PRIMARY_DEV 1 +#define BNXT_RE_FLAG_EN_DEV_SECONDARY_DEV 2 + u8 wqe_mode; + u8 gsi_mode; + bool te_bypass; + bool binfo_valid; + struct bnxt_re_bond_info binfo; + u32 event_bitmap[3]; +}; + +#define BNXT_RE_MAX_FIFO_DEPTH_P5 0x2c00 +#define BNXT_RE_MAX_FIFO_DEPTH_P7 0x8000 + +#define BNXT_RE_MAX_FIFO_DEPTH(ctx) \ + (_is_chip_p7((ctx)) ? \ + BNXT_RE_MAX_FIFO_DEPTH_P7 :\ + BNXT_RE_MAX_FIFO_DEPTH_P5) + +struct bnxt_dbq_nq_list { + int num_nql_entries; + u16 nq_id[16]; +}; + +#define BNXT_RE_ASYNC_ERR_REP_BASE(_type) \ + (ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_##_type) + +#define BNXT_RE_ASYNC_ERR_DBR_TRESH(_type) \ + (ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_##_type) + +#define BNXT_RE_EVENT_DBR_EPOCH(data) \ + (((data) & \ + BNXT_RE_ASYNC_ERR_DBR_TRESH(EVENT_DATA1_EPOCH_MASK)) >> \ + BNXT_RE_ASYNC_ERR_DBR_TRESH(EVENT_DATA1_EPOCH_SFT)) + +#define BNXT_RE_EVENT_ERROR_REPORT_TYPE(data1) \ + (((data1) & \ + BNXT_RE_ASYNC_ERR_REP_BASE(TYPE_MASK)) >> \ + BNXT_RE_ASYNC_ERR_REP_BASE(TYPE_SFT)) + +#define BNXT_RE_DBR_LIST_ADD(_rdev, _res, _type) \ +{ \ + spin_lock(&(_rdev)->res_list[_type].lock); \ + list_add_tail(&(_res)->dbr_list, \ + &(_rdev)->res_list[_type].head); \ + spin_unlock(&(_rdev)->res_list[_type].lock); \ +} + +#define BNXT_RE_DBR_LIST_DEL(_rdev, _res, _type) \ +{ \ + spin_lock(&(_rdev)->res_list[_type].lock); \ + list_del(&(_res)->dbr_list); \ + spin_unlock(&(_rdev)->res_list[_type].lock); \ +} + +#define BNXT_RE_CQ_PAGE_LIST_ADD(_uctx, _cq) \ +{ \ + mutex_lock(&(_uctx)->cq_lock); \ + list_add_tail(&(_cq)->cq_list, &(_uctx)->cq_list); \ + mutex_unlock(&(_uctx)->cq_lock); \ +} + +#define BNXT_RE_CQ_PAGE_LIST_DEL(_uctx, _cq) \ +{ \ + mutex_lock(&(_uctx)->cq_lock); \ + list_del(&(_cq)->cq_list); \ + mutex_unlock(&(_uctx)->cq_lock); \ +} + +#define BNXT_RE_NETDEV_EVENT(event, x) \ + do { \ + if ((event) == (x)) \ + return #x; \ + } while (0) + +/* Do not change the seq of this enum which is followed by dbr recov */ +enum { + BNXT_RE_RES_TYPE_CQ = 0, + BNXT_RE_RES_TYPE_UCTX, + BNXT_RE_RES_TYPE_QP, + BNXT_RE_RES_TYPE_SRQ, + BNXT_RE_RES_TYPE_MAX +}; + +struct bnxt_re_dbr_res_list { + struct list_head head; + spinlock_t lock; +}; + +struct bnxt_re_dbr_drop_recov_work { + struct work_struct work; + struct bnxt_re_dev *rdev; + u32 curr_epoch; +}; + +struct bnxt_re_aer_work { + struct work_struct work; + struct bnxt_re_dev *rdev; +}; + +struct bnxt_re_dbq_stats { + u64 fifo_occup_slab_1; + u64 fifo_occup_slab_2; + u64 fifo_occup_slab_3; + u64 fifo_occup_slab_4; + u64 fifo_occup_water_mark; + u64 do_pacing_slab_1; + u64 do_pacing_slab_2; + u64 do_pacing_slab_3; + u64 do_pacing_slab_4; + u64 do_pacing_slab_5; + u64 do_pacing_water_mark; +}; + +struct bnxt_re_dbg_mad { + u64 mad_consumed; + u64 mad_processed; +}; + +/* Device debug statistics */ +struct bnxt_re_drv_dbg_stats { + struct bnxt_re_dbq_stats dbq; + struct bnxt_re_dbg_mad mad; +}; + +/* DB pacing counters */ +struct bnxt_re_dbr_sw_stats { + u64 dbq_int_recv; + u64 dbq_int_en; + u64 dbq_pacing_resched; + u64 dbq_pacing_complete; + u64 dbq_pacing_alerts; + u64 dbr_drop_recov_events; + u64 dbr_drop_recov_timeouts; + u64 dbr_drop_recov_timeout_users; + u64 dbr_drop_recov_event_skips; 
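+	/*
+	 * The dbq_* and dbr_drop_recov_* counters above mirror the driver
+	 * debug counters of the same names described in the statistics
+	 * documentation earlier in this patch.
+	 */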
+}; + +/* RoCE push counters */ +struct bnxt_re_ppp_sw_stats { + u32 ppp_enabled_ctxs; + u32 ppp_enabled_qps; +}; + +struct bnxt_re_dev { + struct ib_device ibdev; + struct list_head list; + atomic_t ref_count; + atomic_t sched_count; + unsigned long flags; +#define BNXT_RE_FLAG_NETDEV_REGISTERED 0 +#define BNXT_RE_FLAG_IBDEV_REGISTERED 1 +#define BNXT_RE_FLAG_RECONFIG_SECONDARY_DEV_DCB 3 +#define BNXT_RE_FLAG_ALLOC_RCFW 4 +#define BNXT_RE_FLAG_NET_RING_ALLOC 5 +#define BNXT_RE_FLAG_RCFW_CHANNEL_EN 6 +#define BNXT_RE_FLAG_ALLOC_CTX 7 +#define BNXT_RE_FLAG_STATS_CTX_ALLOC 8 +#define BNXT_RE_FLAG_STATS_CTX2_ALLOC 9 +#define BNXT_RE_FLAG_RCFW_CHANNEL_INIT 10 +#define BNXT_RE_FLAG_WORKER_REG 11 +#define BNXT_RE_FLAG_TBLS_ALLOCINIT 12 +#define BNXT_RE_FLAG_SETUP_NQ 13 +#define BNXT_RE_FLAG_BOND_DEV_REGISTERED 14 +#define BNXT_RE_FLAG_PER_PORT_DEBUG_INFO 15 +#define BNXT_RE_FLAG_DEV_LIST_INITIALIZED 16 +#define BNXT_RE_FLAG_ERR_DEVICE_DETACHED 17 +#define BNXT_RE_FLAG_INIT_DCBX_CC_PARAM 18 +#define BNXT_RE_FLAG_STOP_IN_PROGRESS 20 +#define BNXT_RE_FLAG_ISSUE_ROCE_STATS 29 +#define BNXT_RE_FLAG_ISSUE_CFA_FLOW_STATS 30 + struct net_device *netdev; + struct auxiliary_device *adev; + struct bnxt_qplib_chip_ctx *chip_ctx; + struct bnxt_en_dev *en_dev; + struct bnxt_re_nq_record *nqr; + struct bnxt_re_tc_rec tc_rec[2]; + struct delayed_work worker; + u16 worker_30s; + /* Max of 2 lossless traffic class supported per port */ + u16 cosq[2]; + u8 cur_prio_map; + u8 roce_mode; + /* RCFW Channel */ + struct bnxt_qplib_rcfw rcfw; + /* Device Resources */ + struct bnxt_qplib_dev_attr *dev_attr; + struct bnxt_qplib_res qplib_res; + struct bnxt_qplib_dpi dpi_privileged; + struct bnxt_qplib_cc_param cc_param; + /* serialize update of CC param */ + struct mutex cc_lock; + /* serialize access to active qp list */ + struct mutex qp_lock; + struct list_head qp_list; + + /* Start: QP for handling QP1 packets */ + struct bnxt_re_gsi_context gsi_ctx; + /* End: QP for handling QP1 packets */ + u32 espeed; + /* + * For storing the speed of slave interfaces. + * Same as espeed when bond is not configured + */ + u32 sl_espeed; + /* To be used for a workaround for ISER stack */ + u32 min_tx_depth; + /* To enable qp debug info. Disabled during driver load */ + u32 en_qp_dbg; + struct bnxt_re_bond_info *binfo; +#ifdef RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP + /* Array to handle gid mapping */ + char *gid_map; +#endif /* RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP */ + + struct bnxt_re_device_stats stats; + struct bnxt_re_drv_dbg_stats *dbg_stats; + /* debugfs to expose per port information*/ + struct dentry *port_debug_dir; + struct dentry *info; + struct dentry *drv_dbg_stats; + struct dentry *sp_perf_stats; + struct dentry *pdev_debug_dir; + struct dentry *pdev_qpinfo_dir; + struct workqueue_struct *resolve_wq; + struct list_head mac_wq_list; + struct workqueue_struct *dcb_wq; + struct workqueue_struct *aer_wq; + u64 dbr_throttling_reg_off; + u64 dbr_aeq_arm_reg_off; + u64 dbr_db_fifo_reg_off; + void *dbr_page; + u64 dbr_bar_addr; + u32 pacing_algo_th; + u32 pacing_en_int_th; + u32 do_pacing_save; + struct workqueue_struct *dbq_wq; + struct workqueue_struct *dbr_drop_recov_wq; + struct work_struct dbq_fifo_check_work; + struct delayed_work dbq_pacing_work; + /* protect DB pacing */ + struct mutex dbq_lock; + /* Control DBR pacing feature. Set if enabled */ + bool dbr_pacing; + /* Control DBR recovery feature. Set if enabled */ + bool dbr_drop_recov; + bool user_dbr_drop_recov; + /* DBR recovery feature. 
Set if running */ + bool dbr_recovery_on; + u32 user_dbr_drop_recov_timeout; + /* + * Value used for pacing algo when pacing is active + */ +#define BNXT_RE_MAX_DBR_DO_PACING 0xFFFF + u32 dbr_do_pacing; + u32 dbq_watermark; /* Current watermark set in HW registers */ + u32 dbq_nq_id; /* Current NQ ID for DBQ events */ + u32 dbq_pacing_time; /* ms */ + u32 dbr_def_do_pacing; /* do_pacing when no congestion */ + u32 dbr_evt_curr_epoch; + bool dbq_int_disable; + + bool is_virtfn; + bool unreg_sched; + + atomic_t dbq_intr_running; + + struct bnxt_re_dbr_sw_stats *dbr_sw_stats; + struct bnxt_re_dbr_res_list res_list[BNXT_RE_RES_TYPE_MAX]; + struct bnxt_dbq_nq_list nq_list; +#ifdef IB_PEER_MEM_MOD_SUPPORT + struct ib_peer_mem_device *peer_dev; +#endif + char dev_name[IB_DEVICE_NAME_MAX + 1]; + + struct bnxt_re_ppp_sw_stats ppp_stats; + + /* HW based DB drop recovery feature */ + bool hdbr_enabled; + struct list_head *hdbr_privileged; + struct list_head hdbr_apps; + struct mutex hdbr_lock; /* protect hdbr_apps list */ + volatile u8 *hdbr_dt; + void *hdbr_dbgfs; + struct workqueue_struct *hdbr_wq; + struct list_head hdbr_fpgs; + struct mutex hdbr_fpg_lock; /* protect free page list */ +}; + +#define bnxt_re_dev_pcifn_id(rdev) ((rdev)->en_dev->pdev->devfn) + +#define BNXT_RE_RESOLVE_RETRY_COUNT_US 5000000 /* 5 sec */ +struct bnxt_re_resolve_dmac_work{ + struct work_struct work; + struct list_head list; + struct bnxt_re_dev *rdev; + RDMA_AH_ATTR *ah_attr; + struct bnxt_re_ah_info *ah_info; + atomic_t status_wait; +}; + +static inline u8 bnxt_re_get_prio(u8 prio_map) +{ + u8 prio = 0xFF; + + for (prio = 0; prio < 8; prio++) + if (prio_map & (1UL << prio)) + break; + return prio; +} + +static inline bool bnxt_re_is_rdev_valid(struct bnxt_re_dev *rdev) +{ + struct bnxt_re_dev *tmp_rdev; + + list_for_each_entry(tmp_rdev, &bnxt_re_dev_list, list) { + if (rdev == tmp_rdev) { + return true; + } + } + + pr_debug("bnxt_re: %s : Invalid rdev received rdev = %p\n", + __func__, rdev); + return false; +} + +static inline bool _is_bnxt_re_dev_lag_capable(u16 dev_cap_flags) +{ + /* + * If the FW provides the LAG support valid bit in device capability + * flags, check for the LAG support bit. Otherwise return as LAG + * capable by default. 
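+	 * (The "valid" bit is CREQ_QUERY_FUNC_RESP_SB_LINK_AGGR_SUPPORTED_VALID
+	 * and the capability itself is CREQ_QUERY_FUNC_RESP_SB_LINK_AGGR_SUPPORTED,
+	 * both reported in dev_cap_flags by the firmware QUERY_FUNC response.)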
+ */ + if (!(dev_cap_flags & CREQ_QUERY_FUNC_RESP_SB_LINK_AGGR_SUPPORTED_VALID)) + return true; + if (dev_cap_flags & CREQ_QUERY_FUNC_RESP_SB_LINK_AGGR_SUPPORTED) + return true; + return false; +} + +bool bnxt_re_is_lag_allowed(ifbond *master, ifslave *slave, + struct bnxt_re_dev *rdev); +int bnxt_re_get_port_map(struct netdev_bonding_info *netdev_binfo, + struct bnxt_re_bond_info *binfo, + struct net_device *netdev); +int bnxt_re_send_hwrm_cmd(struct bnxt_re_dev *rdev, void *cmd, + int cmdlen); +void bnxt_re_ib_uninit(struct bnxt_re_dev *rdev); +int bnxt_re_set_hwrm_dscp2pri(struct bnxt_re_dev *rdev, + struct bnxt_re_dscp2pri *d2p, u16 count, + u16 target_id); +int bnxt_re_query_hwrm_dscp2pri(struct bnxt_re_dev *rdev, + struct bnxt_re_dscp2pri *d2p, u16 *count, + u16 target_id); +int bnxt_re_query_hwrm_qportcfg(struct bnxt_re_dev *rdev, + struct bnxt_re_tc_rec *cnprec, u16 tid); +int bnxt_re_hwrm_cos2bw_qcfg(struct bnxt_re_dev *rdev, u16 target_id, + struct bnxt_re_cos2bw_cfg *cfg); +int bnxt_re_hwrm_cos2bw_cfg(struct bnxt_re_dev *rdev, u16 target_id, + struct bnxt_re_cos2bw_cfg *cfg); +int bnxt_re_hwrm_pri2cos_cfg(struct bnxt_re_dev *rdev, + u16 target_id, u16 port_id, + u8 *cos_id_map, u8 pri_map); +int bnxt_re_init_dcbx_cc_param(struct bnxt_re_dev *rdev); + +int bnxt_re_prio_vlan_tx_update(struct bnxt_re_dev *rdev); +int bnxt_re_get_slot_pf_count(struct bnxt_re_dev *rdev); +struct bnxt_re_dev *bnxt_re_get_peer_pf(struct bnxt_re_dev *rdev); +struct bnxt_re_dev *bnxt_re_from_netdev(struct net_device *netdev); +u8 bnxt_re_get_priority_mask(struct bnxt_re_dev *rdev, u8 selector); +struct bnxt_qplib_nq * bnxt_re_get_nq(struct bnxt_re_dev *rdev); +void bnxt_re_put_nq(struct bnxt_re_dev *rdev, struct bnxt_qplib_nq *nq); + +#define to_bnxt_re(ptr, type, member) \ + container_of(ptr, type, member) + +#define to_bnxt_re_dev(ptr, member) \ + container_of((ptr), struct bnxt_re_dev, member) + +/* Even number functions from port 0 and odd number from port 1 */ +#define BNXT_RE_IS_PORT0(rdev) (!(rdev->en_dev->pdev->devfn & 1)) + +#define BNXT_RE_ROCE_V1_PACKET 0 +#define BNXT_RE_ROCEV2_IPV4_PACKET 2 +#define BNXT_RE_ROCEV2_IPV6_PACKET 3 +#define BNXT_RE_ACTIVE_MAP_PORT1 0x1 /*port-1 active */ +#define BNXT_RE_ACTIVE_MAP_PORT2 0x2 /*port-2 active */ + +#define BNXT_RE_MEMBER_PORT_MAP (BNXT_RE_ACTIVE_MAP_PORT1 | \ + BNXT_RE_ACTIVE_MAP_PORT2) + +#define rdev_to_dev(rdev) ((rdev) ? 
(&(rdev)->ibdev.dev) : NULL) + +void bnxt_re_set_dma_device(struct ib_device *ibdev, struct bnxt_re_dev *rdev); +bool bnxt_re_is_rdev_valid(struct bnxt_re_dev *rdev); + +#define bnxt_re_rdev_ready(rdev) (bnxt_re_is_rdev_valid(rdev) && \ + (test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags))) +#define BNXT_RE_SRIOV_CFG_TIMEOUT 6 + +int bnxt_re_get_device_stats(struct bnxt_re_dev *rdev); +void bnxt_re_remove_device(struct bnxt_re_dev *rdev, u8 removal_type, + struct auxiliary_device *aux_dev); +void bnxt_re_destroy_lag(struct bnxt_re_dev **rdev); +int bnxt_re_add_device(struct bnxt_re_dev **rdev, + struct net_device *netdev, + struct bnxt_re_bond_info *info, u8 qp_mode, + u8 op_type, u8 wqe_mode, + struct auxiliary_device *aux_dev); +void bnxt_re_create_base_interface(struct bnxt_re_bond_info *binfo, bool primary); +int bnxt_re_schedule_work(struct bnxt_re_dev *rdev, unsigned long event, + struct net_device *vlan_dev, + struct netdev_bonding_info *netdev_binfo, + struct bnxt_re_bond_info *binfo, + struct net_device *netdev, + struct auxiliary_device *aux_dev); +void bnxt_re_get_link_speed(struct bnxt_re_dev *rdev); +int _bnxt_re_ib_init(struct bnxt_re_dev *rdev); +int _bnxt_re_ib_init2(struct bnxt_re_dev *rdev); +int bnxt_re_create_lag(ifbond *master, ifslave *slave, + struct netdev_bonding_info *netdev_binfo, + struct net_device *netdev, + struct bnxt_re_dev **rdev, + u8 gsi_mode, u8 wqe_mode); +u8 bnxt_re_get_bond_link_status(struct bnxt_re_bond_info *binfo); + +void bnxt_re_init_resolve_wq(struct bnxt_re_dev *rdev); +void bnxt_re_uninit_resolve_wq(struct bnxt_re_dev *rdev); +void bnxt_re_resolve_dmac_task(struct work_struct *work); + +#if defined(HAVE_IB_UMEM_DMABUF) && !defined(HAVE_IB_UMEM_DMABUF_PINNED) +struct ib_umem_dmabuf *ib_umem_dmabuf_get_pinned(struct ib_device *device, + unsigned long offset, + size_t size, int fd, + int access); +void ib_umem_dmabuf_release_pinned(struct ib_umem_dmabuf *umem_dmabuf); +#endif + +/* The rdev ref_count is to protect immature removal of the device */ +static inline void bnxt_re_hold(struct bnxt_re_dev *rdev) +{ + atomic_inc(&rdev->ref_count); + dev_dbg(rdev_to_dev(rdev), + "Hold ref_count = 0x%x", atomic_read(&rdev->ref_count)); +} + +static inline void bnxt_re_put(struct bnxt_re_dev *rdev) +{ + atomic_dec(&rdev->ref_count); + dev_dbg(rdev_to_dev(rdev), + "Put ref_count = 0x%x", atomic_read(&rdev->ref_count)); +} + +void bnxt_re_debugfs_add_port(struct bnxt_re_dev *rdev, char *dev_name); +void bnxt_re_debugfs_rem_port(struct bnxt_re_dev *rdev); +void bnxt_re_add_dbg_files(struct bnxt_re_dev *rdev); +void bnxt_re_rem_dbg_files(struct bnxt_re_dev *rdev); +void bnxt_re_qp_info_add_qpinfo(struct bnxt_re_dev *rdev, + struct bnxt_re_qp *qp); +void bnxt_re_qp_info_rem_qpinfo(struct bnxt_re_dev *rdev, + struct bnxt_re_qp *qp); + +/* Default DCBx and CC values */ +#define BNXT_RE_DEFAULT_CNP_DSCP 48 +#define BNXT_RE_DEFAULT_CNP_PRI 7 +#define BNXT_RE_DEFAULT_ROCE_DSCP 26 +#define BNXT_RE_DEFAULT_ROCE_PRI 3 + +#define BNXT_RE_DEFAULT_L2_BW 50 +#define BNXT_RE_DEFAULT_ROCE_BW 50 + +#define ROCE_PRIO_VALID 0x0 +#define CNP_PRIO_VALID 0x1 +#define ROCE_DSCP_VALID 0x0 +#define CNP_DSCP_VALID 0x1 + +int bnxt_re_get_pri_dscp_settings(struct bnxt_re_dev *rdev, + u16 target_id, + struct bnxt_re_tc_rec *tc_rec); + +int bnxt_re_setup_dscp(struct bnxt_re_dev *rdev); +int bnxt_re_clear_dscp(struct bnxt_re_dev *rdev); +int bnxt_re_setup_cnp_cos(struct bnxt_re_dev *rdev, bool reset); + +static inline enum ib_port_state bnxt_re_get_link_state(struct bnxt_re_dev *rdev) 
+{ + if (netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev)) + return IB_PORT_ACTIVE; + return IB_PORT_DOWN; +} + +static inline char *bnxt_re_link_state_str(struct bnxt_re_dev *rdev) +{ + return bnxt_re_get_link_state(rdev) == IB_PORT_ACTIVE ? "UP" : "DOWN"; +} + +static inline int is_cc_enabled(struct bnxt_re_dev *rdev) +{ + return rdev->cc_param.enable; +} + +static inline bool bnxt_re_rtnl_trylock(void) +{ + u32 lock_retry = 10; + bool locked = false; + + while (lock_retry--) { + if (!rtnl_trylock()) { + usleep_range(1000, 2000); + continue; + } else { + locked = true; + break; + } + } + return locked; +} + +static inline void bnxt_re_init_hwrm_hdr(struct input *hdr, u16 opcd, u16 trid) +{ + hdr->req_type = cpu_to_le16(opcd); + hdr->cmpl_ring = cpu_to_le16(-1); + hdr->target_id = cpu_to_le16(trid); +} + +static inline void bnxt_re_fill_fw_msg(struct bnxt_fw_msg *fw_msg, + void *msg, int msg_len, void *resp, + int resp_max_len, int timeout) +{ + fw_msg->msg = msg; + fw_msg->msg_len = msg_len; + fw_msg->resp = resp; + fw_msg->resp_max_len = resp_max_len; + fw_msg->timeout = timeout; +} + +static inline bool is_qport_service_type_supported(struct bnxt_re_dev *rdev) +{ + return rdev->tc_rec[0].serv_type_enabled; +} + +static inline bool is_bnxt_roce_queue(struct bnxt_re_dev *rdev, u8 ser_prof, u8 prof_type) +{ + if (is_qport_service_type_supported(rdev)) + return (prof_type & QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_TYPE_ROCE); + else + return (ser_prof == QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_ROCE); +} + +static inline bool is_bnxt_cnp_queue(struct bnxt_re_dev *rdev, u8 ser_prof, u8 prof_type) +{ + if (is_qport_service_type_supported(rdev)) + return (prof_type & QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_TYPE_CNP); + else + return (ser_prof == QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY_ROCE_CNP); +} + +#define BNXT_RE_MAP_SH_PAGE 0x0 +#define BNXT_RE_MAP_WC 0x1 +#define BNXT_RE_DBR_PAGE 0x2 +#define BNXT_RE_MAP_DB_RECOVERY_PAGE 0x3 + +#define BNXT_RE_DBR_RECOV_USERLAND_TIMEOUT (20) /* 20 ms */ +#define BNXT_RE_DBR_INT_TIME 5 /* ms */ +#define BNXT_RE_PACING_EN_INT_THRESHOLD 50 /* Entries in DB FIFO */ +#define BNXT_RE_PACING_ALGO_THRESHOLD 250 /* Entries in DB FIFO */ +/* Percentage of DB FIFO depth */ +#define BNXT_RE_PACING_DBQ_THRESHOLD BNXT_RE_PACING_DBQ_HIGH_WATERMARK +/* + * Alarm threshold multiple, number of times pacing algo threshold. + * For Thor2 is set to zero, to disable the auto tuning. + */ +#define BNXT_RE_PACING_ALARM_TH_MULTIPLE(ctx) (_is_chip_p7(ctx) ? 0 : 2) + +/* + * Maximum Percentage of configurable DB FIFO depth. + * The Doorbell FIFO depth is 0x2c00. But the DBR_REG_DB_THROTTLING register has only 12 bits + * to program the high watermark. This means user can configure maximum 36% only(4095/11264). 
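+ * For example: a 12-bit field holds at most 4095 entries, and the FIFO is
+ * 0x2c00 (11264) entries deep, so the largest programmable watermark is
+ * 4095 / 11264 ~= 36% of the FIFO depth, which is the value defined below.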
+ */ +#define BNXT_RE_PACING_DBQ_HIGH_WATERMARK 36 + +/* Default do_pacing value when there is no congestion */ +#define BNXT_RE_DBR_DO_PACING_NO_CONGESTION 0x7F /* 1 in 512 probability */ + +enum { + BNXT_RE_DBQ_EVENT_SCHED = 0, + BNXT_RE_DBR_PACING_EVENT = 1, + BNXT_RE_DBR_NQ_PACING_NOTIFICATION = 2, +}; + +struct bnxt_re_dbq_work { + struct work_struct work; + struct bnxt_re_dev *rdev; + struct hwrm_async_event_cmpl cmpl; + u32 event; +}; + +int bnxt_re_hwrm_qcaps(struct bnxt_re_dev *rdev); +int bnxt_re_enable_dbr_pacing(struct bnxt_re_dev *rdev); +int bnxt_re_disable_dbr_pacing(struct bnxt_re_dev *rdev); +int bnxt_re_set_dbq_throttling_reg(struct bnxt_re_dev *rdev, + u16 nq_id, u32 throttle); +void bnxt_re_pacing_alert(struct bnxt_re_dev *rdev); +int bnxt_re_hwrm_pri2cos_qcfg(struct bnxt_re_dev *rdev, struct bnxt_re_tc_rec *tc_rec, + u16 target_id); +void bnxt_re_rename_debugfs_entry(struct bnxt_re_dev *rdev); +void bnxt_re_debugfs_add_pdev(struct bnxt_re_dev *rdev); +void bnxt_re_debugfs_rem_pdev(struct bnxt_re_dev *rdev); + +static inline unsigned int bnxt_re_get_total_mr_mw_count(struct bnxt_re_dev *rdev) +{ + return (atomic_read(&rdev->stats.rsors.mr_count) + + atomic_read(&rdev->stats.rsors.mw_count)); +} + +static inline void bnxt_re_set_def_pacing_threshold(struct bnxt_re_dev *rdev) +{ + rdev->qplib_res.pacing_data->pacing_th = rdev->pacing_algo_th; + rdev->qplib_res.pacing_data->alarm_th = + rdev->pacing_algo_th * BNXT_RE_PACING_ALARM_TH_MULTIPLE(rdev->chip_ctx); +} + +static inline void bnxt_re_set_def_do_pacing(struct bnxt_re_dev *rdev) +{ + rdev->qplib_res.pacing_data->do_pacing = rdev->dbr_def_do_pacing; +} + +static inline void bnxt_re_set_pacing_dev_state(struct bnxt_re_dev *rdev) +{ + rdev->qplib_res.pacing_data->dev_err_state = + test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags); +} + +#endif diff --git a/bnxt_re-1.10.3-229.0.139.0/bnxt_setupcc.sh b/bnxt_re-1.10.3-229.0.139.0/bnxt_setupcc.sh new file mode 100644 index 0000000..2d950b0 --- /dev/null +++ b/bnxt_re-1.10.3-229.0.139.0/bnxt_setupcc.sh @@ -0,0 +1,528 @@ +#!/bin/bash + +function usage { + +cat </dev/null 2>&1 || + { echo >&2 "bnxtqos utility is not installed. Aborting."; exit 1; } +else + type lldptool >/dev/null 2>&1 || + { echo >&2 "lldptool is not installed. Aborting."; exit 1; } +fi + +EN_ROCE_DSCP=0 +#s p r c needs to be checked for format conversion +scale='^[0-9]+$' +hexscale='^(0[xX])[0-9a-fA-F]+$' + +if [ ! -z "${s}" ] +then + if [[ $s =~ $scale ]] + then + tmp=`printf "0x%x" $s` + s=$tmp + else + if ! [[ $s =~ $hexscale ]] + then + echo Invalid RoCE DSCP value + exit -1 + fi + fi +EN_ROCE_DSCP=1 +fi + +EN_CNP_DSCP=0 +if [ ! -z "${p}" ] +then + if [[ $p =~ $scale ]] + then + tmp=`printf "0x%x" $p` + p=$tmp + else + if ! [[ $p =~ $hexscale ]] + then + echo Invalid RoCE CNP packet DSCP value + exit -1 + fi + fi +EN_CNP_DSCP=1 +fi + + +EN_ROCE_PRI=0 +if [ ! -z "${r}" ] +then + + if [[ $r =~ $scale ]] + then + tmp=`printf "0x%x" $r` + r=$tmp + else + if ! [[ $r =~ $hexscale ]] + then + echo Invalid RoCE packet priority + exit -1 + fi + fi +EN_ROCE_PRI=1 +fi + + +EN_CNP_PRI=0 +if [ ! -z "${c}" ] +then + if [[ $c =~ $scale ]] + then + tmp=`printf "0x%x" $c` + c=$tmp + else + if ! 
[[ $c =~ $hexscale ]] + then + echo Invalid RoCE CNP packet priority + exit -1 + fi + fi +EN_CNP_PRI=1 +fi + + +if_count=0 + +for interface in $i +do +if_count=`expr $if_count + 1` +done + +if [ $if_count -gt $MAX_INTERFACE_COUNT ]; +then + echo "Number of interfaces more than supported" + exit 1; +fi + +INF_NAME1=`echo $i |cut -d ' ' -f1` +INF_NAME2=`echo $i |cut -d ' ' -f2` + +if [[ "$INF_NAME1" == "$INF_NAME2" ]]; +then + INF_NAME2= +fi + +if [ -z $m ] +then + ENABLE_PFC=1 + ENABLE_CC=1 +elif [ "$m" == "1" ] +then + ENABLE_PFC=1 + ENABLE_CC=0 +elif [ "$m" == "2" ] +then + ENABLE_CC=1 + ENABLE_PFC=0 +elif [ "$m" == "3" ] +then + ENABLE_PFC=1 + ENABLE_CC=1 +else + echo "Invalid value for mode (-m option)" + exit 1; +fi + +ENABLE_DSCP=0 +echo ENABLE_PFC = $ENABLE_PFC ENABLE_CC = $ENABLE_CC + +if [ $EN_ROCE_DSCP -eq 1 ] || [ $EN_CNP_DSCP -eq 1 ] +then + ENABLE_DSCP=1 +fi + +ENABLE_PRI=0 +if [ $EN_ROCE_PRI -eq 1 ] || [ $EN_CNP_PRI -eq 1] +then + ENABLE_PRI=1 +fi + +ENABLE_DSCP_BASED_PFC=1 +if [ "$v" == "1" ] +then + ENABLE_DSCP_BASED_PFC=0 +fi + +echo ENABLE_DSCP = $ENABLE_DSCP ENABLE_DSCP_BASED_PFC = $ENABLE_DSCP_BASED_PFC + +DEV_NAME=$d +ROCE_DSCP=$s +ROCE_PRI=$r +ROCE_CNP_DSCP=$p +ROCE_CNP_PRI=$c +ROCE_BW=$b + +if [ -z $ROCE_BW ] +then + ROCE_BW=50 +fi + +if [ $ROCE_BW -lt 100 ] +then + L2_BW=`expr 100 - $ROCE_BW` +else + L2_BW=50 + ROCE_BW=50 +fi + +# Only RoCE v2 is supported +ROCE_MODE=2 + +echo L2 $L2_BW RoCE $ROCE_BW + +echo "Using Ethernet interface $INF_NAME1 $INF_NAME2 and RoCE interface $DEV_NAME" + +CNP_SERVICE_TYPE=0 +if test -f "/sys/kernel/debug/bnxt_re/$DEV_NAME/info"; then + CNP_SERVICE_TYPE=`cat /sys/kernel/debug/bnxt_re/$DEV_NAME/info|grep fw_service_prof_type_sup|awk '{print $3}'` +fi + +# Define priority 2 tc mapping +pri2tc="" + +for i in `seq 0 7`; +do + if [ $EN_ROCE_PRI -eq 1 ] && [ $i -eq `printf "%d" $ROCE_PRI` ]; then + pri2tc+=",$i:1" + elif [ $EN_CNP_PRI -eq 1 ] && [ $i -eq `printf "%d" $ROCE_CNP_PRI` ] && [ $CNP_SERVICE_TYPE -eq 1 ] && [ $ENABLE_CC -eq 1 ]; then + pri2tc+=",$i:2" + else + pri2tc+=",$i:0" + fi +done + +pri2tc=${pri2tc:1} + +ethtool $INF_NAME1 +ethtool -i $INF_NAME1 +ethtool -A $INF_NAME1 rx off tx off +if [ "$INF_NAME2" != "" ]; +then + ethtool $INF_NAME2 + ethtool -i $INF_NAME2 + ethtool -A $INF_NAME2 rx off tx off +fi + +bnxt_qos_rem_app_tlvs() { + INF_NAME=$1 + j=0 + for i in `bnxtqos -dev=$INF_NAME get_qos|grep -e "Priority:" -e "Sel:" -e DSCP -e UDP -e "Ethertype:"|awk -F":" '{ print $2}'` + do + if [ $i == 0x8915 ] + then + i=35093 + fi + + if [ $j -eq 0 ] + then + APP_0=$i + else + APP_0=$APP_0,$i + fi + + j=`expr $j + 1` + + if [ $j -eq 3 ] + then + bnxtqos -dev=$INF_NAME set_apptlv -d app=$APP_0 + j=0 + fi + done +} + +lldptool_rem_app_tlvs() { + INF_NAME=$1 + for i in `lldptool -t -i $INF_NAME -V APP -c app|awk -F"(" '{ print $2}'|awk -F")" '{ print $1}'| sed "1 d"` + do + lldptool -T -i $INF_NAME -V APP -d app=$i + done +} + +bnxt_qos_pgm_pfc_ets() { + INF_NAME=$1 + echo "Setting pfc/ets on $INF_NAME" + bnxt_qos_rem_app_tlvs $INF_NAME + # bnxtqos requires nvm cfg 155,255,269 and 270 to be disabled + if [ $CNP_SERVICE_TYPE -eq 1 ] + then + bnxtqos -dev=$INF_NAME set_ets tsa=0:ets,1:ets,2:strict,3:strict,4:strict,5:strict,6:strict,7:strict priority2tc=$pri2tc tcbw=$L2_BW,$ROCE_BW + else + bnxtqos -dev=$INF_NAME set_ets tsa=0:ets,1:ets priority2tc=$pri2tc tcbw=$L2_BW,$ROCE_BW + fi + + if [ ! 
-z "$ROCE_PRI" ] && [ $ENABLE_PFC -eq 1 ] + then + bnxtqos -dev=$INF_NAME set_pfc enabled=`printf "%d" $ROCE_PRI` + sleep 1 + bnxtqos -dev=$INF_NAME set_apptlv app=`printf "%d" $ROCE_PRI`,3,4791 + sleep 1 + else + bnxtqos -dev=$INF_NAME set_pfc enabled=none + fi + + if [ $ENABLE_DSCP_BASED_PFC -eq 1 ] || [ $EN_ROCE_DSCP -eq 1 ] + then + bnxtqos -dev=$INF_NAME set_apptlv app=`printf "%d" $ROCE_PRI`,5,`printf "%d" $ROCE_DSCP` + sleep 1 + fi + if [ $ENABLE_CC -eq 1 ] && [ $CNP_SERVICE_TYPE -eq 1 ] + then + bnxtqos -dev=$INF_NAME set_apptlv app=`printf "%d" $ROCE_CNP_PRI`,5,`printf "%d" $ROCE_CNP_DSCP` + sleep 1 + fi + sleep 1 + bnxtqos -dev=$INF_NAME get_qos +} + +lldptool_pgm_pfc_ets() { + INF_NAME=$1 + lldptool_rem_app_tlvs $INF_NAME + lldptool -T -i $INF_NAME1 -V ETS-CFG tsa="0:ets,1:ets,2:strict,3:strict,4:strict,5:strict,6:strict,7:strict" up2tc=$pri2tc tcbw=$L2_BW,$ROCE_BW,0,0,0,0,0,0 + if [ ! -z "$ROCE_PRI" ] && [ $ENABLE_PFC -eq 1 ] + then + lldptool -L -i $INF_NAME1 adminStatus=rxtx + lldptool -T -i $INF_NAME1 -V PFC enabled=`printf "%d" $ROCE_PRI` + lldptool -T -i $INF_NAME1 -V APP app="`printf "%d" $ROCE_PRI`,3,4791" + if [ $ENABLE_DSCP_BASED_PFC -eq 1 ] || [ $EN_ROCE_DSCP -eq 1 ] + then + lldptool -T -i $INF_NAME1 -V APP app="`printf "%d" $ROCE_PRI`,5,`printf "%d" $ROCE_DSCP`" + fi + else + lldptool -T -i $INF_NAME1 -V PFC enabled=none + fi + if [ $ENABLE_CC -eq 1 ] && [ $CNP_SERVICE_TYPE -eq 1 ] + then + lldptool -T -i $INF_NAME1 -V APP app="`printf "%d" $ROCE_CNP_PRI`,5,`printf "%d" $ROCE_CNP_DSCP`" + sleep 1 + fi +} + +if [ $USE_BNXTQOS -eq 1 ] +then + SYSTEMCTL_STATUS=`command -v systemctl` + if [ "$SYSTEMCTL_STATUS" == "" ]; + then + echo "systemctl not found, install and re-run the script. exiting..." + exit -1 + fi + + STATUS="$(systemctl is-active lldpad)" + if [ "${STATUS}" = "active" ]; then + #Stop lldpad + echo "Disabling lldpad service, and using bnxtqos tool for configuration" + systemctl stop lldpad.service + else + echo "check if lldpad service is running : no action needed" + fi + + bnxt_qos_pgm_pfc_ets $INF_NAME1 + if [ "$INF_NAME2" != "" ]; + then + bnxt_qos_pgm_pfc_ets $INF_NAME2 + fi +else + IS_RUNNING=`ps -aef | grep lldpad | head -1 | grep "/usr/sbin/lldpad"` + if [ "$IS_RUNNING" != " " ] + then + #Stop lldpad + systemctl stop lldpad.service + fi + systemctl start lldpad.service + sleep 1 + STATUS="$(systemctl is-active lldpad)" + if [ "${STATUS}" != "active" ]; then + echo "Failed to start lldpad service" + exit 1 + fi + + echo "Setting up LLDP" + lldptool_pgm_pfc_ets INF_NAME1 + if [ "$INF_NAME2" != "" ]; + then + lldptool_pgm_pfc_ets INF_NAME2 + fi + systemctl restart lldpad.service +fi + +echo "Settings Default to use RoCE-v$ROCE_MODE" +mkdir -p /sys/kernel/config/rdma_cm/$DEV_NAME +echo "RoCE v2" > /sys/kernel/config/rdma_cm/$DEV_NAME/ports/1/default_roce_mode +if [ ! 
-z "$ROCE_DSCP" ] +then + echo -n $((ROCE_DSCP << 2)) > /sys/kernel/config/rdma_cm/$DEV_NAME/ports/1/default_roce_tos +else + echo -n 0 > /sys/kernel/config/rdma_cm/$DEV_NAME/ports/1/default_roce_tos +fi +PREVDIR=`pwd` +mkdir -p /sys/kernel/config/bnxt_re/$DEV_NAME +cd /sys/kernel/config/bnxt_re/$DEV_NAME/ports/1/cc/ + +#Disabling prio vlan insertion if dscp based pfc is enabled +if [ $ENABLE_DSCP_BASED_PFC -eq 1 ] +then + echo -n 0x1 > disable_prio_vlan_tx +else + echo -n 0x0 > disable_prio_vlan_tx +fi + +if [ $ENABLE_CC -eq 1 ] +then + echo "Setting up CC Settings" + echo -n 0x1 > ecn_marking + echo -n 0x1 > ecn_enable + echo -n 1 > cc_mode +else + echo -n 0x0 > ecn_marking + echo -n 0x0 > ecn_enable +fi + +if [ $CNP_SERVICE_TYPE != 1 ] +then + if [ ! -z "$ROCE_CNP_PRI" ] + then + echo -n $ROCE_CNP_PRI > cnp_prio + fi + if [ ! -z "$ROCE_PRI" ] + then + echo -n $ROCE_PRI > roce_prio + fi +fi + +if [ $ENABLE_DSCP -eq 1 ] +then + echo "Setting up DSCP/PRI" + if [ ! -z "$ROCE_DSCP" ] + then + echo -n $ROCE_DSCP > roce_dscp + fi + if [ ! -z "$ROCE_CNP_DSCP" ] + then + echo -n $ROCE_CNP_DSCP > cnp_dscp + fi +fi +echo -n 0x1 > apply + +cd $PREVDIR + +echo "Complete" diff --git a/bnxt_re-1.10.3-229.0.139.0/compat.c b/bnxt_re-1.10.3-229.0.139.0/compat.c new file mode 100644 index 0000000..fbdb7e9 --- /dev/null +++ b/bnxt_re-1.10.3-229.0.139.0/compat.c @@ -0,0 +1,842 @@ +/* + * Copyright (c) 2015-2023, Broadcom. All rights reserved. The term + * Broadcom refers to Broadcom Inc. and/or its subsidiaries. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * + * Description: Compat file for supporting multiple distros + */ + +#include +#include +#include +#include +#include + +#if defined(HAVE_IB_UMEM_DMABUF) && !defined(HAVE_IB_UMEM_DMABUF_PINNED) +#include +#include +#endif + +#include "bnxt_ulp.h" +#include "bnxt_re.h" +#include "bnxt_re-abi.h" + +#ifndef RHEL_RELEASE_CODE +#define RHEL_RELEASE_CODE 0 +#endif + +#ifndef RHEL_RELEASE_VERSION +#define RHEL_RELEASE_VERSION(a, b) (((a) << 8) + (b)) +#endif + + +int bnxt_re_register_netdevice_notifier(struct notifier_block *nb) +{ + int rc; +#ifdef HAVE_REGISTER_NETDEVICE_NOTIFIER_RH + rc = register_netdevice_notifier_rh(nb); +#else + rc = register_netdevice_notifier(nb); +#endif + return rc; +} + +int bnxt_re_unregister_netdevice_notifier(struct notifier_block *nb) +{ + int rc; +#ifdef HAVE_REGISTER_NETDEVICE_NOTIFIER_RH + rc = unregister_netdevice_notifier_rh(nb); +#else + rc = unregister_netdevice_notifier(nb); +#endif + return rc; +} + +#ifdef HAVE_IB_MW_BIND_INFO +struct ib_mw_bind_info *get_bind_info(struct ib_send_wr *wr) +{ +#ifdef HAVE_IB_BIND_MW_WR + struct ib_bind_mw_wr *bind_mw = bind_mw_wr(wr); + + return &bind_mw->bind_info; +#else + return &wr->wr.bind_mw.bind_info; +#endif +} + +struct ib_mw *get_ib_mw(struct ib_send_wr *wr) +{ +#ifdef HAVE_IB_BIND_MW_WR + struct ib_bind_mw_wr *bind_mw = bind_mw_wr(wr); + + return bind_mw->mw; +#else + return wr->wr.bind_mw.mw; +#endif +} +#endif + +struct scatterlist *get_ib_umem_sgl(struct ib_umem *umem, u32 *nmap) +{ +#if !defined(HAVE_IB_UMEM_SG_TABLE) && !defined(HAVE_IB_UMEM_SG_APPEND_TABLE) + struct ib_umem_chunk *chunk; + struct scatterlist **sg = NULL; + u32 sg_nmap = 0; + int i = 0, j; + size_t n = 0; +#endif + +#if defined HAVE_IB_UMEM_SG_APPEND_TABLE + *nmap = umem->sgt_append.sgt.nents; + return umem->sgt_append.sgt.sgl; +#elif defined HAVE_IB_UMEM_SG_TABLE + *nmap = umem->nmap; + return umem->sg_head.sgl; +#else + list_for_each_entry(chunk, &umem->chunk_list, list) + n += chunk->nmap; + + *sg = kcalloc(n, sizeof(*sg), GFP_KERNEL); + if (!(*sg)) { + *nmap = 0; + return NULL; + } + list_for_each_entry(chunk, &umem->chunk_list, list) { + for (j = 0; j < chunk->nmap; ++j) + sg[i++] = &chunk->page_list[j]; + sg_nmap += chunk->nmap; + } + *nmap = sg_nmap; + return *sg; +#endif +} + +u8 bnxt_re_get_bond_link_status(struct bnxt_re_bond_info *binfo) +{ +#ifdef HAVE_NET_BONDING_H + struct list_head *iter; + struct bonding *bond; + struct slave *slave; + u8 active_port_map = 0; + u32 link; + u8 port; + + if (!binfo) + return 0; + + bond = netdev_priv(binfo->master); + bond_for_each_slave(bond, slave, iter) { + if (slave->dev == binfo->slave1) + port = 0; + else + port = 1; + link = bond_slave_can_tx(slave); + active_port_map |= (link << port); + } + + return active_port_map; +#else + return (netif_carrier_ok(binfo->slave1)) | + (netif_carrier_ok(binfo->slave2) << 1); +#endif +} + +/* Checks if adapter supports LAG. For Thor, LAG support is + * implemented in FW. For Thor2, LAG is implemented in HW. For Wh+ + * and Stratus only active-active LAG is supported. 
+ */ +static bool _is_bnxt_re_dev_lag_supported(const struct bnxt_re_dev *rdev) +{ + return _is_bnxt_re_dev_lag_capable(rdev->dev_attr->dev_cap_flags) ^ + BNXT_EN_HW_LAG(rdev->en_dev); +} + +bool bnxt_re_is_lag_allowed(ifbond *master, ifslave *slave, + struct bnxt_re_dev *rdev) +{ + struct net_device *pf_peer_master; + struct net_device *pf_in_master; + struct bnxt_re_dev *pf_peer; + bool lag_supported = false; + u32 num_vfs, peer_num_vfs; + int pf_cnt; + + rtnl_lock(); + pf_in_master = netdev_master_upper_dev_get(rdev->netdev); + + if (strncmp(rdev->netdev->name, slave->slave_name, IFNAMSIZ)) + goto exit; + + /* Check if fw/hw supports LAG */ + if (!_is_bnxt_re_dev_lag_supported(rdev)) { + dev_info(rdev_to_dev(rdev), "Device is not capable of supporting RoCE LAG\n"); + goto exit; + } + + if (rdev->en_dev->port_count != 2) { + dev_info(rdev_to_dev(rdev), + "RoCE LAG not supported on phy port count %d\n", + rdev->en_dev->port_count); + goto exit; + } + + if (BNXT_EN_MH(rdev->en_dev)) { + dev_info(rdev_to_dev(rdev), "RoCE LAG not supported on multi host\n"); + goto exit; + } + + if (BNXT_EN_MR(rdev->en_dev)) { + dev_info(rdev_to_dev(rdev), "RoCE LAG not supported on multi root\n"); + goto exit; + } + + /* Master must have only 2 slaves */ + if (master->num_slaves != 2) + goto exit; + + /* PF count on our device can't be more than 2 */ + pf_cnt = bnxt_re_get_slot_pf_count(rdev); + if (pf_cnt > BNXT_RE_BOND_PF_MAX) + goto exit; + + /* Get the other PF */ + pf_peer = bnxt_re_get_peer_pf(rdev); + if (!pf_peer) + goto exit; + + /* Check if the PF-peer has a Master netdev */ + pf_peer_master = netdev_master_upper_dev_get(pf_peer->netdev); + if (!pf_peer_master) + goto exit; + + /* Master netdev of PF-peer must be same as ours */ + if (pf_in_master != pf_peer_master) + goto exit; + + num_vfs = pci_num_vf(rdev->en_dev->pdev); + peer_num_vfs = pci_num_vf(pf_peer->en_dev->pdev); + /* For Thor2, LAG is independent of VFs configured */ + if (!BNXT_RE_CHIP_P7(rdev->chip_ctx->chip_num) && + (num_vfs || peer_num_vfs)) + goto exit; + + /* Don't allow LAG on NPAR PFs */ + if (BNXT_EN_NPAR(rdev->en_dev) || BNXT_EN_NPAR(pf_peer->en_dev)) + goto exit; + + /* Bonding mode must be 1, 2 or 4 */ + if ((master->bond_mode != BOND_MODE_ACTIVEBACKUP) && + (master->bond_mode != BOND_MODE_XOR) && + (master->bond_mode != BOND_MODE_ROUNDROBIN) && + (master->bond_mode != BOND_MODE_8023AD)) { + dev_info(rdev_to_dev(rdev), + "RoCE LAG not supported for bond_mode %d\n", + master->bond_mode); + goto exit; + } + + lag_supported = true; + +exit: + rtnl_unlock(); + return lag_supported; +} + +static u32 bnxt_re_get_bond_info_port(struct bnxt_re_bond_info *binfo, + struct net_device *netdev) +{ + u32 port = 0; + + if (!binfo) + return 0; + + if (binfo->slave1 == netdev) + port = 1; + else if (binfo->slave2 == netdev) + port = 2; + return port; +} + +int bnxt_re_get_port_map(struct netdev_bonding_info *netdev_binfo, + struct bnxt_re_bond_info *binfo, + struct net_device *netdev) +{ + u32 port = bnxt_re_get_bond_info_port(binfo, netdev); + + if (!port) + return -ENODEV; + + dev_dbg(rdev_to_dev(binfo->rdev), + "%s: port = %d\n", __func__, port); + if (netdev_binfo->master.bond_mode == BOND_MODE_ACTIVEBACKUP) { + dev_dbg(rdev_to_dev(binfo->rdev), + "%s: active backup mode\n", __func__); + if (netdev_binfo->slave.state == BOND_STATE_BACKUP) { + /* + * If this slave is now in "backup mode", then + * other slave is in "active mode". + */ + binfo->active_port_map = (port == 1) ? 
+ BNXT_RE_ACTIVE_MAP_PORT2: + BNXT_RE_ACTIVE_MAP_PORT1; + } else { /* Slave state is active */ + /* + * If this slave is now in "active mode", then + * other slave is in "backup mode". + */ + binfo->active_port_map = (port == 1) ? + BNXT_RE_ACTIVE_MAP_PORT1 : + BNXT_RE_ACTIVE_MAP_PORT2; + } + binfo->aggr_mode = + CMDQ_SET_LINK_AGGR_MODE_AGGR_MODE_ACTIVE_BACKUP; + } else { /* Active - Active */ + binfo->active_port_map = bnxt_re_get_bond_link_status(binfo); + dev_info(rdev_to_dev(binfo->rdev), + "LAG mode = active-active binfo->active_port_map = 0x%x\n", + binfo->active_port_map); + binfo->aggr_mode = + CMDQ_SET_LINK_AGGR_MODE_AGGR_MODE_ACTIVE_ACTIVE; + } + dev_dbg(rdev_to_dev(binfo->rdev), + "binfo->aggr_mode = 0x%x binfo->active_port_map = 0x%x\n", + binfo->aggr_mode, binfo->active_port_map); + return 0; +} + +#ifdef CONFIG_INFINIBAND_PEER_MEM +void bnxt_re_set_inflight_invalidation_ctx(struct ib_umem *umem) +{ +#ifdef IB_PEER_MEM_MOD_SUPPORT + struct ib_peer_umem *peer_umem = ib_peer_mem_get_data(umem); + + peer_umem->invalidation_ctx->inflight_invalidation = 1; +#else +#ifdef HAVE_IB_UMEM_GET_FLAGS + umem->invalidation_ctx->inflight_invalidation = 1; +#endif +#endif /* IB_PEER_MEM_MOD_SUPPORT */ +} + +void bnxt_re_set_inval_ctx_peer_callback(struct ib_umem *umem) +{ +#ifdef IB_PEER_MEM_MOD_SUPPORT + struct ib_peer_umem *peer_umem = ib_peer_mem_get_data(umem); + peer_umem->invalidation_ctx->peer_callback = 1; +#else +#ifdef HAVE_IB_UMEM_GET_FLAGS + umem->invalidation_ctx->peer_callback = 1; +#endif +#endif +} +void *bnxt_re_get_peer_mem(struct ib_umem *umem) +{ + +#ifdef IB_PEER_MEM_MOD_SUPPORT + struct ib_peer_umem *peer_umem = NULL; + peer_umem = ib_peer_mem_get_data(umem); + dev_dbg(NULL, + "%s: %d peer_umem = %p\n", __func__, __LINE__, peer_umem); + return (void *) peer_umem; +#else +#ifdef HAVE_IB_UMEM_GET_FLAGS + return (void *)umem->ib_peer_mem; +#else + if (umem->is_peer) + return (void *)umem; + return NULL; +#endif +#endif +} +#endif /* CONFIG_INFINIBAND_PEER_MEM */ + +void bnxt_re_peer_mem_release(struct ib_umem *umem) +{ +#if defined(HAVE_IB_UMEM_DMABUF) && !defined(HAVE_IB_UMEM_DMABUF_PINNED) + if (umem && umem->is_dmabuf) { + ib_umem_dmabuf_release_pinned(to_ib_umem_dmabuf(umem)); + return; + } +#endif + +#ifdef CONFIG_INFINIBAND_PEER_MEM + dev_dbg(NULL, "ib_umem_release_flags getting invoked \n"); +#ifdef HAVE_IB_UMEM_GET_FLAGS + ib_umem_release_flags(umem); +#else + ib_umem_release(umem); +#endif +#else + dev_dbg(NULL, "ib_umem_release getting invoked \n"); + ib_umem_release(umem); +#endif +} + +void bnxt_re_set_dma_device(struct ib_device *ibdev, struct bnxt_re_dev *rdev) +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 3) && \ + !(defined(RHEL_RELEASE_CODE) && ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5)))) + /* From 4.11.3 kernel version and RHEL 7.5 onwards, IB HW drivers no longer set + * dma_device directly. 
However,they are expected to set the + * ibdev->dev.parent field before calling ib_register_device() + */ + ibdev->dma_device = &rdev->en_dev->pdev->dev; +#else + ibdev->dev.parent = &rdev->en_dev->pdev->dev; +#endif +} + +void bnxt_re_set_max_gid(u16 *max_sgid) +{ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 2, 0) || \ + (defined(RHEL_RELEASE_CODE) && ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 2)))) + *max_sgid = max_t(u32, 256, *max_sgid); + *max_sgid = min_t(u32, 256, *max_sgid); +#else + *max_sgid = min_t(u32, 256, *max_sgid); +#endif +} + +#if defined(RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP) || defined(ENABLE_ROCEV2_QP1) +int bnxt_re_get_cached_gid(struct ib_device *dev, u8 port_num, int index, + union ib_gid *sgid, struct ib_gid_attr **sgid_attr, + struct ib_global_route *grh, struct ib_ah *ah) +{ + int ret = 0; + +#ifndef HAVE_GID_ATTR_IN_IB_AH +#ifndef HAVE_IB_GET_CACHED_GID + if (grh) + *sgid_attr = grh->sgid_attr; + else if (ah) + *sgid_attr = ah->sgid_attr; + else { + *sgid_attr = NULL; + ret = -EFAULT; + } +#else + ret = ib_get_cached_gid(dev, port_num, index, sgid, *sgid_attr); +#endif +#endif + return ret; +} + +enum rdma_network_type bnxt_re_gid_to_network_type(IB_GID_ATTR *sgid_attr, + union ib_gid *sgid) +{ +#ifdef HAVE_RDMA_GID_ATTR_NETWORK_TYPE + return rdma_gid_attr_network_type(sgid_attr); +#else + return ib_gid_to_network_type(sgid_attr->gid_type, sgid); +#endif +} +#endif + +int ib_register_device_compat(struct bnxt_re_dev *rdev) +{ + struct ib_device *ibdev = &rdev->ibdev; + char name[IB_DEVICE_NAME_MAX]; + + memset(name, 0, IB_DEVICE_NAME_MAX); + if (rdev->binfo) + strlcpy(name, "bnxt_re_bond%d", IB_DEVICE_NAME_MAX); + else + strlcpy(name, "bnxt_re%d", IB_DEVICE_NAME_MAX); + +#ifndef HAVE_NAME_IN_IB_REGISTER_DEVICE + strlcpy(ibdev->name, name, IB_DEVICE_NAME_MAX); + +#ifdef HAVE_DMA_DEVICE_IN_IB_REGISTER_DEVICE + dma_set_max_seg_size(&rdev->en_dev->pdev->dev, SZ_2G); + return ib_register_device(ibdev, name, &rdev->en_dev->pdev->dev); +#else + return ib_register_device(ibdev, NULL); +#endif +#else +#ifndef HAVE_VERB_INIT_PORT + return ib_register_device(ibdev, name, NULL); +#else + return ib_register_device(ibdev,name); +#endif +#endif +} + +bool ib_modify_qp_is_ok_compat(enum ib_qp_state cur_state, enum ib_qp_state next_state, + enum ib_qp_type type, enum ib_qp_attr_mask mask) +{ + return (ib_modify_qp_is_ok(cur_state, next_state, + type, mask +#ifdef HAVE_LL_IN_IB_MODIFY_QP_IS_OK + ,IB_LINK_LAYER_ETHERNET +#endif + )); +} + +void bnxt_re_init_resolve_wq(struct bnxt_re_dev *rdev) +{ + rdev->resolve_wq = create_singlethread_workqueue("bnxt_re_resolve_wq"); + INIT_LIST_HEAD(&rdev->mac_wq_list); +} + +void bnxt_re_uninit_resolve_wq(struct bnxt_re_dev *rdev) +{ + struct bnxt_re_resolve_dmac_work *tmp_work = NULL, *tmp_st; + if (!rdev->resolve_wq) + return; + flush_workqueue(rdev->resolve_wq); + list_for_each_entry_safe(tmp_work, tmp_st, &rdev->mac_wq_list, list) { + list_del(&tmp_work->list); + kfree(tmp_work); + } + destroy_workqueue(rdev->resolve_wq); + rdev->resolve_wq = NULL; +} + +void bnxt_re_resolve_dmac_task(struct work_struct *work) +{ + int rc = -1; + struct bnxt_re_dev *rdev; + RDMA_AH_ATTR *ah_attr; + struct bnxt_re_ah_info *ah_info; + int if_index; + struct bnxt_re_resolve_dmac_work *dmac_work = + container_of(work, struct bnxt_re_resolve_dmac_work, work); + + if_index = 0; + rdev = dmac_work->rdev; + ah_attr = dmac_work->ah_attr; + ah_info = dmac_work->ah_info; +#ifdef RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP +#ifdef HAVE_IB_RESOLVE_ETH_DMAC + rc = 
ib_resolve_eth_dmac(&rdev->ibdev, ah_attr); +#else +#ifndef HAVE_CREATE_USER_AH +#ifndef HAVE_RDMA_ADDR_FIND_L2_ETH_BY_GRH_WITH_NETDEV + if_index = ah_info->sgid_attr.ndev->ifindex; + rc = rdma_addr_find_l2_eth_by_grh(&ah_info->sgid, + &ah_attr->grh.dgid, ROCE_DMAC(ah_attr), + &ah_info->vlan_tag, + &if_index, NULL); +#endif /* HAVE_RDMA_ADDR_FIND_L2_ETH_BY_GRH_WITH_NETDEV */ +#endif /* else dmac is resolved by stack */ +#endif /* HAVE_IB_RESOLVE_ETH_DMAC */ + +#else /* RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP */ +#ifdef HAVE_RDMA_ADDR_FIND_DMAC_BY_GRH_V2 + rc = rdma_addr_find_dmac_by_grh(&ah_info->sgid, + &ah_attr->grh.dgid, + ah_attr->dmac, NULL, 0); +#endif +#endif + if (rc) + dev_err(rdev_to_dev(dmac_work->rdev), + "Failed to resolve dest mac rc = %d\n", rc); + atomic_set(&dmac_work->status_wait, rc << 8); +} + +#ifndef HAS_ENABLE_ATOMIC_OPS +/** + * pci_enable_atomic_ops_to_root - enable AtomicOp requests to root port + * @dev: the PCI device + * @cap_mask: mask of desired AtomicOp sizes, including one or more of: + * PCI_EXP_DEVCAP2_ATOMIC_COMP32 + * PCI_EXP_DEVCAP2_ATOMIC_COMP64 + * PCI_EXP_DEVCAP2_ATOMIC_COMP128 + * + * Return 0 if all upstream bridges support AtomicOp routing, egress + * blocking is disabled on all upstream ports, and the root port supports + * the requested completion capabilities (32-bit, 64-bit and/or 128-bit + * AtomicOp completion), or negative otherwise. + */ +int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask) +{ + struct pci_bus *bus = dev->bus; + struct pci_dev *bridge; + u32 cap; + + if (!pci_is_pcie(dev)) + return -EINVAL; + + /* + * Per PCIe r4.0, sec 6.15, endpoints and root ports may be + * AtomicOp requesters. For now, we only support endpoints as + * requesters and root ports as completers. No endpoints as + * completers, and no peer-to-peer. 
+ */ + + switch (pci_pcie_type(dev)) { + case PCI_EXP_TYPE_ENDPOINT: + case PCI_EXP_TYPE_LEG_END: + case PCI_EXP_TYPE_RC_END: + break; + default: + return -EINVAL; + } + + while (bus->parent) { + bridge = bus->self; + + pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap); + + switch (pci_pcie_type(bridge)) { + /* Ensure switch ports support AtomicOp routing */ + case PCI_EXP_TYPE_UPSTREAM: + case PCI_EXP_TYPE_DOWNSTREAM: + if (!(cap & PCI_EXP_DEVCAP2_ATOMIC_ROUTE)) + return -EINVAL; + break; + + /* Ensure root port supports all the sizes we care about */ + case PCI_EXP_TYPE_ROOT_PORT: + if ((cap & cap_mask) != cap_mask) + return -EINVAL; + break; + } +#if !(defined(KYLIN_MAJOR) && (KYLIN_MAJOR == 10) && (KYLIN_MINOR == 3)) && \ + defined(HAS_PCI_SECONDARY_LINK) + /* TODO: In old kernel not checking may cause crashes */ + /* Ensure upstream ports don't block AtomicOps on egress */ + if (!bridge->has_secondary_link) { + u32 ctl2; + pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2, + &ctl2); + if (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK) + return -EINVAL; + } +#endif + bus = bus->parent; + } + + pcie_capability_set_word(dev, PCI_EXP_DEVCTL2, + PCI_EXP_DEVCTL2_ATOMIC_REQ); + return 0; +} +#endif + +struct ib_umem *ib_umem_get_compat(struct bnxt_re_dev *rdev, + struct ib_ucontext *ucontext, + struct ib_udata *udata, + unsigned long addr, + size_t size, int access, int dmasync) +{ +#ifdef HAVE_IB_DEVICE_IN_IB_UMEM_GET + return ib_umem_get(&rdev->ibdev, addr, size, access); +#else +#ifndef HAVE_UDATA_IN_IB_UMEM_GET + return ib_umem_get(ucontext, addr, size, access, dmasync); +#else + return ib_umem_get(udata, addr, size, access +#ifdef HAVE_DMASYNC_IB_UMEM_GET + , dmasync +#endif + ); +#endif +#endif +} + +struct ib_umem *ib_umem_get_flags_compat(struct bnxt_re_dev *rdev, + struct ib_ucontext *ucontext, + struct ib_udata *udata, + unsigned long addr, + size_t size, int access, int dmasync) +{ +#ifdef HAVE_IB_UMEM_GET_PEER + return ib_umem_get_peer(&rdev->ibdev, addr, size, access, + IB_PEER_MEM_INVAL_SUPP); +#else +#ifdef HAVE_IB_UMEM_GET_FLAGS + return ib_umem_get_flags(&rdev->ibdev, ucontext, udata, addr, size, + access, +#ifdef CONFIG_INFINIBAND_PEER_MEM + IB_UMEM_PEER_ALLOW | IB_UMEM_PEER_INVAL_SUPP | +#endif + 0); +#else + return ib_umem_get_compat(rdev, ucontext, udata, addr, size, + access, 0); +#endif +#endif +} + +int __bnxt_re_set_vma_data(void *bnxt_re_uctx, + struct vm_area_struct *vma) +{ +#ifdef HAVE_DISASSOCIATE_UCNTX +#ifndef HAVE_RDMA_USER_MMAP_IO + return bnxt_re_set_vma_data(bnxt_re_uctx, vma); +#endif +#endif + return 0; +} + +int remap_pfn_compat(struct ib_ucontext *ib_uctx, + struct vm_area_struct *vma, + u64 pfn) +{ + if (vma->vm_pgoff) { +#ifndef HAVE_RDMA_USER_MMAP_IO + return io_remap_pfn_range(vma, vma->vm_start, pfn, PAGE_SIZE, + vma->vm_page_prot); +#else + return rdma_user_mmap_io(ib_uctx, vma, pfn, PAGE_SIZE, + vma->vm_page_prot +#ifdef HAVE_RDMA_USER_MMAP_IO_USE_MMAP_ENTRY + , NULL +#endif + ); +#endif + } else { +#ifndef HAVE_RDMA_USER_MMAP_IO + return remap_pfn_range(vma, vma->vm_start, + pfn, PAGE_SIZE, vma->vm_page_prot); +#else + return rdma_user_mmap_io(ib_uctx, vma, pfn, PAGE_SIZE, + vma->vm_page_prot +#ifdef HAVE_RDMA_USER_MMAP_IO_USE_MMAP_ENTRY + , NULL +#endif + ); +#endif + } +} + +struct bnxt_re_cq *__get_cq_from_cq_in(ALLOC_CQ_IN *cq_in, + struct bnxt_re_dev *rdev) +{ + struct bnxt_re_cq *cq; +#ifndef HAVE_CQ_ALLOC_IN_IB_CORE + cq = kzalloc(sizeof(*cq), GFP_KERNEL); + if (!cq) + dev_err(rdev_to_dev(rdev), "Allocate CQ failed!"); +#else + cq 
= container_of(cq_in, struct bnxt_re_cq, ib_cq); +#endif + return cq; +} + +struct bnxt_re_qp *__get_qp_from_qp_in(ALLOC_QP_IN *qp_in, + struct bnxt_re_dev *rdev) +{ + struct bnxt_re_qp *qp; + +#ifdef HAVE_QP_ALLOC_IN_IB_CORE + qp = container_of(qp_in, struct bnxt_re_qp, ib_qp); +#else + qp = kzalloc(sizeof(*qp), GFP_KERNEL); + if (!qp) + dev_err(rdev_to_dev(rdev), "Allocate QP failed!"); +#endif + return qp; +} + +bool bnxt_re_check_if_vlan_valid(struct bnxt_re_dev *rdev, + u16 vlan_id) +{ + bool ret = true; + /* + * Check if the vlan is configured in the host. + * If not configured, it can be a transparent + * VLAN. So dont report the vlan id. + */ +#ifdef HAVE_VLAN_FIND_DEV_DEEP_RCU + if (!__vlan_find_dev_deep_rcu(rdev->netdev, + htons(ETH_P_8021Q), vlan_id)) + ret = false; +#endif + return ret; +} + +#if defined(HAVE_IB_UMEM_DMABUF) && !defined(HAVE_IB_UMEM_DMABUF_PINNED) +static void +ib_umem_dmabuf_unsupported_move_notify(struct dma_buf_attachment *attach) +{ + struct ib_umem_dmabuf *umem_dmabuf = attach->importer_priv; + + ibdev_warn_ratelimited(umem_dmabuf->umem.ibdev, + "Invalidate callback is called when memory is pinned!\n"); +} + +static struct dma_buf_attach_ops ib_umem_dmabuf_attach_pinned_ops = { + .allow_peer2peer = true, + .move_notify = ib_umem_dmabuf_unsupported_move_notify, +}; + +struct ib_umem_dmabuf *ib_umem_dmabuf_get_pinned(struct ib_device *device, + unsigned long offset, + size_t size, int fd, + int access) +{ + struct ib_umem_dmabuf *umem_dmabuf; + struct dma_buf *dmabuf; + int err; + + umem_dmabuf = ib_umem_dmabuf_get(device, offset, size, fd, access, + &ib_umem_dmabuf_attach_pinned_ops); + if (IS_ERR(umem_dmabuf)) + return umem_dmabuf; + + dmabuf = umem_dmabuf->attach->dmabuf; + dma_resv_lock(dmabuf->resv, NULL); + err = dma_buf_pin(umem_dmabuf->attach); + if (err) + goto err_release; + err = ib_umem_dmabuf_map_pages(umem_dmabuf); + if (err) + goto err_unpin; + dma_resv_unlock(dmabuf->resv); + + return umem_dmabuf; + +err_unpin: + dma_buf_unpin(umem_dmabuf->attach); +err_release: + dma_resv_unlock(dmabuf->resv); + dma_buf_detach(dmabuf, umem_dmabuf->attach); + dma_buf_put(dmabuf); + kfree(umem_dmabuf); + return ERR_PTR(err); +} + +void ib_umem_dmabuf_release_pinned(struct ib_umem_dmabuf *umem_dmabuf) +{ + struct dma_buf *dmabuf = umem_dmabuf->attach->dmabuf; + + dma_resv_lock(dmabuf->resv, NULL); + ib_umem_dmabuf_unmap_pages(umem_dmabuf); + dma_buf_unpin(umem_dmabuf->attach); + dma_resv_unlock(dmabuf->resv); + + dma_buf_detach(dmabuf, umem_dmabuf->attach); + dma_buf_put(dmabuf); + kfree(umem_dmabuf); +} +#endif diff --git a/bnxt_re-1.10.3-229.0.139.0/compat.h b/bnxt_re-1.10.3-229.0.139.0/compat.h new file mode 100644 index 0000000..64a3f9a --- /dev/null +++ b/bnxt_re-1.10.3-229.0.139.0/compat.h @@ -0,0 +1,650 @@ +/* + * Copyright (c) 2015-2022, Broadcom. All rights reserved. The term + * Broadcom refers to Broadcom Inc. and/or its subsidiaries. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Author: Eddie Wai + * + * Description: Compat file for compilation + */ + +#ifndef __BNXT_RE_COMPAT_H__ +#define __BNXT_RE_COMPAT_H__ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#ifdef HAVE_NET_BONDING_H +#include +#endif /* HAVE_NET_BONDING_H */ + +#ifdef IB_PEER_MEM_MOD_SUPPORT +#include "peer_mem.h" +#include "peer_umem.h" +#endif + +#include "roce_hsi.h" + +/* To avoid compilation failures. bnxt_re_dev is defined in bnxt_re.h */ +struct bnxt_re_dev; + +/* Defined in include/linux/kconfig.h */ +#ifndef IS_ENABLED +#define IS_ENABLED(option) defined(option) +#endif + +#if !IS_ENABLED(CONFIG_NET_DEVLINK) +#undef HAVE_DEVLINK +#endif + +#ifndef HAVE_DEVLINK +#undef HAVE_DEVLINK_INFO +#undef HAVE_DEVLINK_PARAM +#undef HAVE_NDO_DEVLINK_PORT +#undef HAVE_DEVLINK_FLASH_UPDATE +#undef HAVE_DEVLINK_HEALTH_REPORT +#undef HAVE_DEVLINK_RELOAD_ACTION +#endif + +/* Reconcile all dependencies for VF reps: + * SRIOV, Devlink, Switchdev and HW port info in metadata_dst + */ +#if defined(CONFIG_BNXT_SRIOV) && defined(HAVE_DEVLINK) && \ + defined(CONFIG_NET_SWITCHDEV) && defined(HAVE_METADATA_HW_PORT_MUX) && \ + (LINUX_VERSION_CODE >= 0x030a00) +#define CONFIG_VF_REPS 1 +#endif +/* DEVLINK code has dependencies on VF reps */ +#ifdef HAVE_DEVLINK_PARAM +#define CONFIG_VF_REPS 1 +#endif +#ifdef CONFIG_VF_REPS +#ifndef SWITCHDEV_SET_OPS +#define SWITCHDEV_SET_OPS(netdev, ops) ((netdev)->switchdev_ops = (ops)) +#endif +#endif + +#if !defined(CONFIG_DCB) +#warning "Data Center Bridging support (CONFIG_DCB) is not enabled in Linux" +#undef CONFIG_BNXT_DCB +#endif + +/* include/rdma/ib_verbs.h */ +#ifndef HAVE_IB_MR_INIT_ATTR +struct ib_mr_init_attr { + int max_reg_descriptors; + u32 flags; +}; +#endif + +#ifndef HAVE_IB_MW_TYPE +enum ib_mw_type { + IB_MW_TYPE_1 = 1, + IB_MW_TYPE_2 = 2 +}; + +#endif + +#ifdef NO_IB_DEVICE +/* Temp workaround to bypass the ib_core vermagic mismatch */ +#define ib_register_device(a, b) 0 +#define ib_unregister_device(a) +#define ib_alloc_device(a) kzalloc(a, GFP_KERNEL) +#define ib_dealloc_device(a) kfree(a) +#endif + +#ifndef HAVE_IB_MEM_WINDOW_TYPE +#define IB_DEVICE_MEM_WINDOW_TYPE_2A (1 << 23) +#define IB_DEVICE_MEM_WINDOW_TYPE_2B (1 << 24) +#endif + +#ifndef HAVE_IP_BASED_GIDS +#define IB_PORT_IP_BASED_GIDS (1 << 26) +#endif + +#ifndef IB_MTU_8192 +#define IB_MTU_8192 8192 +#endif + +#ifndef SPEED_20000 +#define SPEED_20000 20000 +#endif + +#ifndef SPEED_25000 +#define SPEED_25000 25000 +#endif + +#ifndef 
SPEED_40000 +#define SPEED_40000 40000 +#endif + +#ifndef SPEED_50000 +#define SPEED_50000 50000 +#endif + +#ifndef SPEED_100000 +#define SPEED_100000 100000 +#endif + +#ifndef SPEED_200000 +#define SPEED_200000 200000 +#endif + +#ifndef SPEED_400000 +#define SPEED_400000 400000 +#endif + +#ifndef IB_SPEED_HDR +#define IB_SPEED_HDR 64 +#endif + +#ifndef IB_SPEED_NDR +#define IB_SPEED_NDR 128 +#endif + +#ifndef HAVE_GID_TYPE_ROCE_UDP_ENCAP_ROCEV2 +#define RDMA_NETWORK_ROCE_V1 0 +#define RDMA_NETWORK_IPV4 1 +#define RDMA_NETWORK_IPV6 2 +#endif + +#ifndef HAVE_RDMA_ADDR_FIND_L2_ETH_BY_GRH +#define rdma_addr_find_l2_eth_by_grh(sgid, dgid, dmac, vlan_id, if_index, hoplimit )\ + rdma_addr_find_dmac_by_grh(sgid, dgid, dmac, vlan_id, if_index) +#endif + +#ifndef ETHTOOL_GEEE +struct ethtool_eee { + __u32 cmd; + __u32 supported; + __u32 advertised; + __u32 lp_advertised; + __u32 eee_active; + __u32 eee_enabled; + __u32 tx_lpi_enabled; + __u32 tx_lpi_timer; + __u32 reserved[2]; +}; +#endif + +#if !defined(NETDEV_RX_FLOW_STEER) || !defined(HAVE_FLOW_KEYS) || (LINUX_VERSION_CODE < 0x030300) +#undef CONFIG_RFS_ACCEL +#endif + +#ifndef HAVE_IB_GID_ATTR +#define ib_query_gid(device, port_num, index, gid, attr) \ + ib_query_gid(device, port_num, index, gid) +#endif + +#ifndef HAVE_RDMA_ADDR_FIND_DMAC_BY_GRH_V2 +#define rdma_addr_find_dmac_by_grh(sgid, dgid, smac, vlan, if_index) \ + rdma_addr_find_dmac_by_grh(sgid, dgid, smac, vlan) +#endif + +#ifndef smp_mb__before_atomic +#define smp_mb__before_atomic() smp_mb() +#endif + +struct ib_mw_bind_info *get_bind_info(struct ib_send_wr *wr); +struct ib_mw *get_ib_mw(struct ib_send_wr *wr); + +struct scatterlist *get_ib_umem_sgl(struct ib_umem *umem, u32 *nmap); + +int bnxt_re_register_netdevice_notifier(struct notifier_block *nb); +int bnxt_re_unregister_netdevice_notifier(struct notifier_block *nb); +struct bnxt_qplib_swqe; +void bnxt_re_set_fence_flag(struct ib_send_wr *wr, struct bnxt_qplib_swqe *wqe); + +#ifndef HAVE_SKB_HASH_TYPE +enum pkt_hash_types { + PKT_HASH_TYPE_NONE, + PKT_HASH_TYPE_L2, + PKT_HASH_TYPE_L3, + PKT_HASH_TYPE_L4, +}; +#endif + +#ifndef HAVE_ETHER_ADDR_COPY +static inline void ether_addr_copy(u8 *dst, const u8 *src) +{ + memcpy(dst, src, ETH_ALEN); +} +#endif + +#ifdef HAVE_ROCE_AH_ATTR +#define ROCE_DMAC(x) (x)->roce.dmac +#else +#define ROCE_DMAC(x) (x)->dmac +#endif + +#ifdef HAVE_OLD_CONFIGFS_API + +struct configfs_attr { + struct configfs_attribute attr; + ssize_t (*show)(struct config_item *item, + char *buf); + ssize_t (*store)(struct config_item *item, + const char *buf, size_t count); +}; + +#define CONFIGFS_ATTR(_pfx, _name) \ +static struct configfs_attr attr_##_name = \ + __CONFIGFS_ATTR(_name, S_IRUGO | S_IWUSR, _name##_show, _name##_store) + +#define CONFIGFS_ATTR_ADD(_name) &_name.attr +#else +#define CONFIGFS_ATTR_ADD(_name) &_name +#endif /*HAVE_OLD_CONFIGFS_API*/ + +#ifndef dma_rmb +#define dma_rmb() rmb() +#endif + +#ifndef writel_relaxed +#define writel_relaxed(v, a) writel(v, a) +#endif + +#ifndef writeq_relaxed +#define writeq_relaxed(v, a) writeq(v, a) +#endif + +#ifdef CONFIG_INFINIBAND_PEER_MEM +void bnxt_re_set_inflight_invalidation_ctx(struct ib_umem *umem); +void bnxt_re_set_inval_ctx_peer_callback(struct ib_umem *umem); +void *bnxt_re_get_peer_mem(struct ib_umem *umem); +#endif /* CONFIG_INFINIBAND_PEER_MEM */ + +void bnxt_re_peer_mem_release(struct ib_umem *umem); + +#ifndef U16_MAX +#define U16_MAX ((u16)~0U) +#endif + +#ifndef BIT_ULL +#define BIT_ULL(nr) (1ULL << (nr)) +#endif + +#ifndef 
HAVE_GID_ATTR_IN_IB_AH +typedef struct ib_gid_attr IB_GID_ATTR; +#else +typedef const struct ib_gid_attr IB_GID_ATTR; +#endif + +int __bnxt_re_set_vma_data(void *bnxt_re_uctx, + struct vm_area_struct *vma); + +int remap_pfn_compat(struct ib_ucontext *ib_uctx, + struct vm_area_struct *vma, + u64 pfn); + +void bnxt_re_set_max_gid(u16 *max_sgid); + +#if defined(RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP) || defined(ENABLE_ROCEV2_QP1) +int bnxt_re_get_cached_gid(struct ib_device *dev, u8 port_num, int index, + union ib_gid *sgid, struct ib_gid_attr **sgid_attr, + struct ib_global_route *grh, struct ib_ah *ah); +enum rdma_network_type bnxt_re_gid_to_network_type(IB_GID_ATTR *sgid_attr, + union ib_gid *sgid); +#endif + +int ib_register_device_compat(struct bnxt_re_dev *rdev); +bool ib_modify_qp_is_ok_compat(enum ib_qp_state cur_state, enum ib_qp_state next_state, + enum ib_qp_type type, enum ib_qp_attr_mask mask); + +#ifndef HAVE_DMA_ZALLOC_COHERENT +static inline void *dma_zalloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag) +{ + void *ret = dma_alloc_coherent(dev, size, dma_handle, + flag | __GFP_ZERO); + return ret; +} +#endif + +#ifndef PCI_EXP_DEVCAP2_ATOMIC_ROUTE +#define PCI_EXP_DEVCAP2_ATOMIC_ROUTE 0x00000040 /* Atomic Op routing */ +#endif + +#ifndef PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK +#define PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK 0x0080 /* Block atomic egress */ +#endif + +#ifndef PCI_EXP_DEVCTL2_ATOMIC_REQ +#define PCI_EXP_DEVCTL2_ATOMIC_REQ 0x0040 /* Set Atomic requests */ +#endif + +#ifndef HAS_ENABLE_ATOMIC_OPS +#define PCI_EXP_DEVCAP2_ATOMIC_COMP32 0x00000080 /* 32b AtomicOp completion */ +#define PCI_EXP_DEVCAP2_ATOMIC_COMP64 0x00000100 /* 64b AtomicOp completion */ + +int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask); +#endif + +#ifdef HAVE_MEMBER_IN_IB_ALLOC_DEVICE +#define compat_ib_alloc_device(size) ib_alloc_device(bnxt_re_dev, ibdev); +#else +#define compat_ib_alloc_device(size) ib_alloc_device(size); +#endif + +struct ib_umem *ib_umem_get_compat(struct bnxt_re_dev *rdev, + struct ib_ucontext *ucontext, + struct ib_udata *udata, + unsigned long addr, + size_t size, int access, int dmasync); +struct ib_umem *ib_umem_get_flags_compat(struct bnxt_re_dev *rdev, + struct ib_ucontext *ucontext, + struct ib_udata *udata, + unsigned long addr, + size_t size, int access, int dmasync); + +#ifndef HAVE_AH_ALLOC_IN_IB_CORE +typedef struct ib_ah* CREATE_AH_RET; +typedef struct ib_pd CREATE_AH_IN; +#else +typedef int CREATE_AH_RET; +typedef struct ib_ah CREATE_AH_IN; +#endif + +#ifndef HAVE_DESTROY_AH_RET_VOID +typedef int DESTROY_AH_RET; +#else +typedef void DESTROY_AH_RET; +#endif + +#ifndef HAVE_PD_ALLOC_IN_IB_CORE +typedef struct ib_pd* ALLOC_PD_RET; +typedef struct ib_device ALLOC_PD_IN; +#else +typedef int ALLOC_PD_RET; +typedef struct ib_pd ALLOC_PD_IN; +#endif + +#ifndef HAVE_DEALLOC_PD_RET_VOID +typedef int DEALLOC_PD_RET; +#else +typedef void DEALLOC_PD_RET; +#endif + +#ifndef HAVE_CQ_ALLOC_IN_IB_CORE +typedef struct ib_cq* ALLOC_CQ_RET; +typedef struct ib_device ALLOC_CQ_IN; +#else +typedef int ALLOC_CQ_RET; +typedef struct ib_cq ALLOC_CQ_IN; +#endif + +#ifndef HAVE_DESTROY_CQ_RET_VOID +typedef int DESTROY_CQ_RET; +#else +typedef void DESTROY_CQ_RET; +#endif + +#ifndef HAVE_QP_ALLOC_IN_IB_CORE +typedef struct ib_qp *ALLOC_QP_RET; +typedef struct ib_pd ALLOC_QP_IN; +#else +typedef int ALLOC_QP_RET; +typedef struct ib_qp ALLOC_QP_IN; +#endif + +#ifndef HAVE_SRQ_CREATE_IN_IB_CORE +typedef struct ib_srq* CREATE_SRQ_RET; +typedef struct 
ib_pd CREATE_SRQ_IN; +#else +typedef int CREATE_SRQ_RET; +typedef struct ib_srq CREATE_SRQ_IN; +#endif + +#ifndef HAVE_DESTROY_SRQ_RET_VOID +typedef int DESTROY_SRQ_RET; +#else +typedef void DESTROY_SRQ_RET; +#endif + +#ifdef HAVE_ALLOC_MW_RET_INT +typedef int ALLOC_MW_RET; +#else +typedef struct ib_mw *ALLOC_MW_RET; +#endif + +#ifdef HAVE_REREG_USER_MR_RET_PTR +typedef struct ib_mr *REREG_USER_MR_RET; +#else +typedef int REREG_USER_MR_RET; +#endif + +#ifdef HAVE_IB_SUPPORT_MORE_RDMA_PORTS +typedef u32 PORT_NUM; +#else +typedef u8 PORT_NUM; +#endif + +#ifndef HAVE_UCONTEXT_ALLOC_IN_IB_CORE +typedef struct ib_ucontext* ALLOC_UCONTEXT_RET; +typedef struct ib_device ALLOC_UCONTEXT_IN; +typedef int DEALLOC_UCONTEXT_RET; +#else +typedef int ALLOC_UCONTEXT_RET; +typedef struct ib_ucontext ALLOC_UCONTEXT_IN; +typedef void DEALLOC_UCONTEXT_RET; +#endif + +static inline size_t ib_umem_num_pages_compat(struct ib_umem *umem) +{ +#ifdef HAVE_IB_UMEM_NUM_DMA_BLOCKS + return ib_umem_num_dma_blocks(umem, PAGE_SIZE); +#endif +#ifdef HAVE_IB_UMEM_NUM_PAGES + return ib_umem_num_pages(umem); +#else +#ifdef HAVE_NPAGES_IB_UMEM + return umem->npages; +#else +#ifdef HAVE_IB_UMEM_PAGE_COUNT + return ib_umem_page_count(umem); +#endif +#endif +#endif +} +#ifndef PCI_DEVID +#define PCI_DEVID(bus, devfn) ((((u16)(bus)) << 8) | (devfn)) +#endif /* PCI_DEVID */ + +#ifdef HAVE_CQ_ALLOC_IN_IB_CORE +#define rdev_from_cq_in(cq_in) to_bnxt_re_dev(cq_in->device, ibdev) +#else +#define rdev_from_cq_in(cq_in) to_bnxt_re_dev(cq_in, ibdev) +#endif + +struct bnxt_re_cq *__get_cq_from_cq_in(ALLOC_CQ_IN *cq_in, + struct bnxt_re_dev *rdev); +struct bnxt_re_qp *__get_qp_from_qp_in(ALLOC_QP_IN *qp_in, + struct bnxt_re_dev *rdev); +#ifdef HAVE_IB_OWNER_IN_DEVICE_OPS +#define GET_UVERBS_ABI_VERSION(ibdev) (ibdev->ops.uverbs_abi_ver) +#else +#define GET_UVERBS_ABI_VERSION(ibdev) (ibdev->uverbs_abi_ver) +#endif +bool bnxt_re_check_if_vlan_valid(struct bnxt_re_dev *rdev, u16 vlan_id); + +#ifndef IEEE_8021QAZ_APP_SEL_DSCP +#define IEEE_8021QAZ_APP_SEL_DSCP 5 +#endif + +#ifndef HAVE_PHYS_PORT_STATE_ENUM +enum ib_port_phys_state { + IB_PORT_PHYS_STATE_SLEEP = 1, + IB_PORT_PHYS_STATE_POLLING = 2, + IB_PORT_PHYS_STATE_DISABLED = 3, + IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING = 4, + IB_PORT_PHYS_STATE_LINK_UP = 5, + IB_PORT_PHYS_STATE_LINK_ERROR_RECOVERY = 6, + IB_PORT_PHYS_STATE_PHY_TEST = 7, +}; +#endif + +#ifndef CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_256MB +#define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_256MB 0x1cUL +#endif + +#ifdef HAVE_IB_GET_DEV_FW_STR +#ifdef IB_GET_DEV_FW_STR_HAS_STRLEN +#define bnxt_re_compat_qfwstr(void) \ + bnxt_re_query_fw_str(struct ib_device *ibdev, \ + char *str, size_t str_len) +#else +#define bnxt_re_compat_qfwstr(void) \ + bnxt_re_query_fw_str(struct ib_device *ibdev, \ + char *str) +#endif +#endif + +#ifdef HAS_TASKLET_SETUP +typedef void (*tasklet_cb)(struct tasklet_struct *t); +#else +typedef void (*tasklet_cb)(unsigned long data); +#endif + +static inline void compat_tasklet_init(struct tasklet_struct *t, + tasklet_cb cb, + unsigned long cb_data) +{ +#ifndef HAS_TASKLET_SETUP + tasklet_init(t, cb, cb_data); +#else + tasklet_setup(t, cb); +#endif +} + +#ifndef fallthrough +#if defined __has_attribute +#ifndef __GCC4_has_attribute___fallthrough__ +#define __GCC4_has_attribute___fallthrough__ 0 +#endif +#if __has_attribute(__fallthrough__) +#define fallthrough __attribute__((__fallthrough__)) +#else +#define fallthrough do {} while (0) +#endif +#else +#define fallthrough do {} while (0) +#endif +#endif + 
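+/*
+ * Illustrative usage of the fallthrough compat macro defined above. This is
+ * a hedged sketch, not part of the driver: handle_up() and handle_change()
+ * are hypothetical helpers used only to show the idiom.
+ *
+ *	switch (event) {
+ *	case NETDEV_UP:
+ *		handle_up(netdev);
+ *		fallthrough;
+ *	case NETDEV_CHANGE:
+ *		handle_change(netdev);
+ *		break;
+ *	}
+ *
+ * On toolchains without __attribute__((__fallthrough__)) the macro expands
+ * to an empty statement, so annotated switch cases compile unchanged on
+ * older kernels while newer compilers still get the fallthrough hint.
+ */
+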
+#if !defined(HAVE_FLOW_DISSECTOR) +struct bnxt_compat_key_ports { + __be16 src; + __be16 dst; +}; + +struct bnxt_compat_key_ipv4 { + __be32 src; + __be32 dst; +}; + +struct bnxt_compat_key_ipv6 { + struct in6_addr src; + struct in6_addr dst; +}; + +struct bnxt_compat_key_addrs { + union { + struct bnxt_compat_key_ipv4 v4addrs; + struct bnxt_compat_key_ipv6 v6addrs; + }; +}; + +#define flow_dissector_key_ports bnxt_compat_key_ports +#define flow_dissector_key_addrs bnxt_compat_key_addrs +#endif /* HAVE_FLOW_DISSECTOR */ + +#ifndef HAVE_PCI_NUM_VF +static inline int pci_num_vf(struct pci_dev *dev) +{ + if (!dev->is_physfn) + return 0; + + return dev->sriov->nr_virtfn; +} +#endif + +#ifndef __struct_group +#define __struct_group(TAG, NAME, ATTRS, MEMBERS...) \ + union { \ + struct { MEMBERS } ATTRS; \ + struct TAG { MEMBERS } ATTRS NAME; \ + } +#endif /* __struct_group */ +#ifndef struct_group_attr +#define struct_group_attr(NAME, ATTRS, MEMBERS...) \ + __struct_group(/* no tag */, NAME, ATTRS, MEMBERS) +#endif /* struct_group_attr */ + +struct bnxt_compat_dma_pool { + struct dma_pool *pool; + size_t size; +}; + +#define dma_pool bnxt_compat_dma_pool + +#ifndef HAVE_IB_POLL_UNBOUND_WORKQUEUE +#define IB_POLL_UNBOUND_WORKQUEUE IB_POLL_WORKQUEUE +#endif + +#ifndef HAVE_VMALLOC_ARRAY +static inline void *vmalloc_array(u32 n, size_t size) +{ + return vmalloc(n * size); +} +#endif + +#ifndef HAVE_ADDRCONF_ADDR_EUI48 +static inline void addrconf_addr_eui48(u8 *eui, const char *const addr) +{ + memcpy(eui, addr, 3); + eui[3] = 0xFF; + eui[4] = 0xFE; + memcpy(eui + 5, addr + 3, 3); + eui[0] ^= 2; +} +#endif + +#endif /* __BNXT_RE_COMPAT_H__ */ diff --git a/bnxt_re-1.10.3-229.0.139.0/configfs.c b/bnxt_re-1.10.3-229.0.139.0/configfs.c new file mode 100644 index 0000000..b50b57c --- /dev/null +++ b/bnxt_re-1.10.3-229.0.139.0/configfs.c @@ -0,0 +1,4406 @@ +/* + * Copyright (c) 2015-2023, Broadcom. All rights reserved. The term + * Broadcom refers to Broadcom Inc. and/or its subsidiaries. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * + * Description: Enables configfs interface + */ + +#include "configfs.h" +#include "bnxt.h" + +static const char *mode_name[] = {"DCQCN-D", "TCP", "Invalid"}; +static const char *mode_name_p5[] = {"DCQCN-D", "DCQCN-P", "Invalid"}; + +static const char *_get_mode_str(u8 mode, bool is_p5) +{ + return is_p5 ? mode_name_p5[mode] : mode_name[mode]; +} + +static struct bnxt_re_dev *__get_rdev_from_name(const char *name) +{ + struct bnxt_re_dev *rdev; + u8 found = false; + + mutex_lock(&bnxt_re_mutex); + list_for_each_entry(rdev, &bnxt_re_dev_list, list) { + if (!strcmp(name, rdev->ibdev.name)) { + found = true; + break; + } + } + mutex_unlock(&bnxt_re_mutex); + + return found ? rdev : ERR_PTR(-ENODEV); +} + +static struct bnxt_re_dev * bnxt_re_get_valid_rdev(struct bnxt_re_cc_group *ccgrp) +{ + struct bnxt_re_dev *rdev = NULL; + + if (ccgrp->portgrp && ccgrp->portgrp->devgrp) + rdev = __get_rdev_from_name(ccgrp->portgrp->devgrp->name); + + if (!rdev || (PTR_ERR(rdev) == -ENODEV)) + { + pr_err("bnxt_re: %s : Invalid rdev received rdev = %p\n", + __func__, ccgrp->rdev); + return NULL; + } + + + if (ccgrp->rdev != rdev) + ccgrp->rdev = rdev; + + return rdev; +} + +static int bnxt_re_is_dscp_mapping_set(u32 mask) +{ + return (mask & + (CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TOS_DSCP | + CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ALT_TOS_DSCP)); +} + +static int bnxt_re_is_pri_mapping_set(struct bnxt_re_dev *rdev) +{ + return rdev->cc_param.cur_mask & BNXT_QPLIB_CC_PARAM_MASK_ROCE_PRI; +} + +static int bnxt_re_init_d2p_map(struct bnxt_re_dev *rdev, + struct bnxt_re_dscp2pri *d2p) +{ + u32 cc_mask; + int mapcnt = 0; + + cc_mask = rdev->cc_param.mask; + + if (!bnxt_re_is_dscp_mapping_set(rdev->cc_param.mask)) + goto bail; + + if (cc_mask & CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TOS_DSCP || + cc_mask & BNXT_QPLIB_CC_PARAM_MASK_ROCE_PRI) { + d2p->dscp = rdev->cc_param.tos_dscp; + d2p->pri = rdev->cc_param.roce_pri; + d2p->mask = 0x3F; + mapcnt++; + d2p++; + } + + if (cc_mask & CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ALT_TOS_DSCP || + cc_mask & CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ALT_VLAN_PCP) { + d2p->dscp = rdev->cc_param.alt_tos_dscp; + d2p->pri = rdev->cc_param.alt_vlan_pcp; + d2p->mask = 0x3F; + mapcnt++; + } +bail: + + return mapcnt; +} + +static int __bnxt_re_clear_dscp(struct bnxt_re_dev *rdev, u16 portid) +{ + struct bnxt_re_dscp2pri d2p[8] = {}; + u16 count = 8; + int rc = 0; + u16 i; + + /* Get older values to be reseted. 
Set mask to 0 */ + rc = bnxt_re_query_hwrm_dscp2pri(rdev, d2p, &count, portid); + if (rc) { + dev_err(rdev_to_dev(rdev), + "Failed to query dscp on pci function %d\n", + bnxt_re_dev_pcifn_id(rdev)); + goto bail; + } + if (!count) + goto bail; + + /* Clear mask of all d2p mapping in HW */ + for (i = 0; i < count; i++) + d2p[i].mask = 0; + + rc = bnxt_re_set_hwrm_dscp2pri(rdev, d2p, count, portid); + if (rc) { + dev_err(rdev_to_dev(rdev), + "Failed to clear dscp on pci function %d\n", + bnxt_re_dev_pcifn_id(rdev)); + goto bail; + } +bail: + return rc; +} + +int bnxt_re_clear_dscp(struct bnxt_re_dev *rdev) +{ + int rc = 0; + u16 portid; + + /* + * Target ID to be specified. + * 0xFFFF - if issued for the same function + * function_id - if issued for another function + */ + portid = 0xFFFF; + rc = __bnxt_re_clear_dscp(rdev, portid); + if (rc) + goto bail; + + if (rdev->binfo) { + /* Function id of the second function to be + * specified. + */ + portid = (PCI_FUNC(rdev->binfo->pdev2->devfn) + 1); + rc = __bnxt_re_clear_dscp(rdev, portid); + if (rc) + goto bail; + } +bail: + return rc; +} + +static int __bnxt_re_setup_dscp(struct bnxt_re_dev *rdev, u16 portid) +{ + struct bnxt_re_dscp2pri d2p[2] = {}; + int rc = 0, mapcnt = 0; + + /*Init dscp to pri map */ + mapcnt = bnxt_re_init_d2p_map(rdev, d2p); + if (!mapcnt) + goto bail; + rc = bnxt_re_set_hwrm_dscp2pri(rdev, d2p, mapcnt, portid); + if (rc) { + dev_err(rdev_to_dev(rdev), + "Failed to updated dscp on pci function %d\n", + bnxt_re_dev_pcifn_id(rdev)); + goto bail; + } +bail: + return rc; +} + +int bnxt_re_setup_dscp(struct bnxt_re_dev *rdev) +{ + int rc = 0; + u16 portid; + + /* + * Target ID to be specified. + * 0xFFFF - if issued for the same function + * function_id - if issued for another function + */ + portid = 0xFFFF; + rc = __bnxt_re_setup_dscp(rdev, portid); + if (rc) + goto bail; + + if (rdev->binfo) { + /* Function id of the second function to be + * specified. + */ + portid = (PCI_FUNC(rdev->binfo->pdev2->devfn) + 1); + rc = __bnxt_re_setup_dscp(rdev, portid); + if (rc) + goto bail; + } +bail: + return rc; +} + +static struct bnxt_re_cc_group * __get_cc_group(struct config_item *item) +{ + struct config_group *group = container_of(item, struct config_group, + cg_item); + struct bnxt_re_cc_group *ccgrp = + container_of(group, struct bnxt_re_cc_group, group); + return ccgrp; +} + +static bool _is_cc_gen1_plus(struct bnxt_re_dev *rdev) +{ + u16 cc_gen; + + cc_gen = rdev->dev_attr->dev_cap_flags & + CREQ_QUERY_FUNC_RESP_SB_CC_GENERATION_MASK; + return cc_gen >= CREQ_QUERY_FUNC_RESP_SB_CC_GENERATION_CC_GEN1; +} + +static int print_cc_gen1_adv(struct bnxt_qplib_cc_param_ext *cc_ext, char *buf) +{ + int bytes = 0; + + bytes += sprintf(buf+bytes,"extended inactivity threshold\t\t: %#x\n", + cc_ext->inact_th_hi); + bytes += sprintf(buf+bytes,"minimum time between cnps\t\t: %#x usec\n", + cc_ext->min_delta_cnp); + bytes += sprintf(buf+bytes,"initial congestion probability\t\t: %#x\n", + cc_ext->init_cp); + bytes += sprintf(buf + bytes, "target rate update mode\t\t\t: %d\n", + cc_ext->tr_update_mode); + bytes += sprintf(buf+bytes,"target rate update cycle\t\t: %#x\n", + cc_ext->tr_update_cyls); + bytes += sprintf(buf+bytes,"fast recovery rtt\t\t\t: %#x rtts\n", + cc_ext->fr_rtt); + bytes += sprintf(buf+bytes,"active increase time quanta\t\t: %#x\n", + cc_ext->ai_rate_incr); + bytes += sprintf(buf+bytes,"reduc. 
relax rtt threshold\t\t: %#x rtts\n", + cc_ext->rr_rtt_th); + bytes += sprintf(buf+bytes,"additional relax cr rtt \t\t: %#x rtts\n", + cc_ext->ar_cr_th); + bytes += sprintf(buf+bytes,"minimum current rate threshold\t\t: %#x\n", + cc_ext->cr_min_th); + bytes += sprintf(buf+bytes,"bandwidth weight\t\t\t: %#x\n", + cc_ext->bw_avg_weight); + bytes += sprintf(buf+bytes,"actual current rate factor\t\t: %#x\n", + cc_ext->cr_factor); + bytes += sprintf(buf+bytes,"current rate level to max cp\t\t: %#x\n", + cc_ext->cr_th_max_cp); + bytes += sprintf(buf+bytes,"cp bias state\t\t\t\t: %s\n", + cc_ext->cp_bias_en ? "Enabled" : "Disabled"); + bytes += sprintf(buf+bytes,"log of cr fraction added to cp\t\t: %#x\n", + cc_ext->cp_bias); + bytes += sprintf(buf+bytes,"cr threshold to reset cc\t\t: %#x\n", + cc_ext->cc_cr_reset_th); + bytes += sprintf(buf+bytes,"target rate lower bound\t\t\t: %#x\n", + cc_ext->tr_lb); + bytes += sprintf(buf+bytes,"current rate probability factor\t\t: %#x\n", + cc_ext->cr_prob_fac); + bytes += sprintf(buf+bytes,"target rate probability factor\t\t: %#x\n", + cc_ext->tr_prob_fac); + bytes += sprintf(buf+bytes,"current rate fairness threshold\t\t: %#x\n", + cc_ext->fair_cr_th); + bytes += sprintf(buf+bytes,"reduction divider\t\t\t: %#x\n", + cc_ext->red_div); + bytes += sprintf(buf+bytes,"rate reduction threshold\t\t: %#x cnps\n", + cc_ext->cnp_ratio_th); + bytes += sprintf(buf+bytes,"extended no congestion rtts\t\t: %#x rtt\n", + cc_ext->ai_ext_rtt); + bytes += sprintf(buf+bytes,"log of cp to cr ratio\t\t\t: %#x\n", + cc_ext->exp_crcp_ratio); + bytes += sprintf(buf+bytes,"use lower rate table entries\t\t: %s\n", + cc_ext->low_rate_en ? "Enabled" : "Disabled"); + bytes += sprintf(buf+bytes,"rtts to start cp track cr\t\t: %#x rtt\n", + cc_ext->cpcr_update_th); + bytes += sprintf(buf+bytes,"first threshold to rise ai\t\t: %#x rtt\n", + cc_ext->ai_rtt_th1); + bytes += sprintf(buf+bytes,"second threshold to rise ai\t\t: %#x rtt\n", + cc_ext->ai_rtt_th2); + bytes += sprintf(buf+bytes,"actual rate base reduction threshold\t: %#x rtt\n", + cc_ext->cf_rtt_th); + bytes += sprintf(buf+bytes,"first severe cong. cr threshold\t\t: %#x\n", + cc_ext->sc_cr_th1); + bytes += sprintf(buf+bytes,"second severe cong. cr threshold\t: %#x\n", + cc_ext->sc_cr_th2); + bytes += sprintf(buf+bytes,"cc ack bytes\t\t\t\t: %#x\n", + cc_ext->cc_ack_bytes); + bytes += sprintf(buf+bytes,"reduce to init rtts threshold\t\t: %#x rtt\n", + cc_ext->reduce_cf_rtt_th); + return bytes; +} + +static int print_cc_gen1(struct bnxt_qplib_cc_param_ext *cc_ext, char *buf, + u8 show_adv_cc) +{ + int bytes = 0; + + bytes += sprintf(buf+bytes,"cnp header ecn status\t\t\t: %s\n", + !cc_ext->cnp_ecn ? "Not-ECT" : + (cc_ext->cnp_ecn == 0x01) ? "ECT(1)" : + "ECT(0)"); + bytes += sprintf(buf+bytes,"rtt jitter\t\t\t\t: %s\n", + cc_ext->rtt_jitter_en ? 
"Enabled" : "Disabled"); + bytes += sprintf(buf+bytes,"link bytes per usec\t\t\t: %#x byte/usec\n", + cc_ext->bytes_per_usec); + bytes += sprintf(buf+bytes,"current rate width\t\t\t: %#x bits\n", + cc_ext->cr_width); + bytes += sprintf(buf+bytes,"minimum quota period\t\t\t: %#x\n", + cc_ext->min_quota); + bytes += sprintf(buf+bytes,"maximum quota period\t\t\t: %#x\n", + cc_ext->max_quota); + bytes += sprintf(buf+bytes,"absolute maximum quota period\t\t: %#x\n", + cc_ext->abs_max_quota); + bytes += sprintf(buf+bytes,"64B transmitted in one rtt\t\t: %#x\n", + cc_ext->l64B_per_rtt); + /* Print advanced parameters */ + if (show_adv_cc) + bytes += print_cc_gen1_adv(cc_ext, (buf+bytes)); + return bytes; +} + +static int __get_d2p_index(struct bnxt_re_dev *rdev, + struct bnxt_re_dscp2pri *d2p, + u32 num_d2ps, + u32 *roce_index, + u32 *cnp_index) +{ + int i; + + if (!num_d2ps || !d2p || !roce_index || !cnp_index) + return -EINVAL; + + for (i=0; i < num_d2ps; i++) { + if (rdev->cc_param.roce_pri == d2p[i].pri) + *roce_index = i; + if (rdev->cc_param.alt_vlan_pcp == d2p[i].pri) + *cnp_index = i; + } + + return 0; +} + +#define SLAVE_STR(slave) (slave ? "slave " : "") +static int __print_pri_dscp_values_from_query + (struct bnxt_re_dev *rdev, char *buf, + int bytes, struct bnxt_re_dscp2pri *d2p, + u32 roce_index, u32 cnp_index, bool slave, + struct bnxt_qplib_cc_param *cc_param) +{ + bytes += sprintf(buf + bytes, "%sroce prio\t\t\t\t: %d\n", + SLAVE_STR(slave), d2p[roce_index].pri); + /* If CC is disabled, skip displaying the following values */ + if (!bnxt_re_is_dscp_mapping_set(rdev->cc_param.cur_mask)) + return bytes; + + bytes += sprintf(buf + bytes, "%sroce dscp\t\t\t\t: %d\n", + SLAVE_STR(slave), d2p[roce_index].dscp); + if (!cc_param->enable) + return bytes; + bytes += sprintf(buf + bytes, "%scnp prio\t\t\t\t: %d\n", + SLAVE_STR(slave), d2p[cnp_index].pri); + bytes += sprintf(buf + bytes, "%scnp dscp\t\t\t\t: %d\n", + SLAVE_STR(slave), d2p[cnp_index].dscp); + return bytes; +} + +static int __print_pri_dscp_values_from_async_event + (struct bnxt_re_dev *rdev, + bool slave, char *buf, + struct bnxt_re_tc_rec *tc_rec) +{ + struct bnxt_qplib_cc_param *cc_param = &rdev->cc_param; + int bytes = 0; + + bytes += sprintf(buf + bytes, "%sroce prio\t\t\t\t: %d\n", + SLAVE_STR(slave), tc_rec->roce_prio); + bytes += sprintf(buf + bytes, "%sroce dscp\t\t\t\t: %d\n", + SLAVE_STR(slave), tc_rec->roce_dscp); + if (!cc_param->enable) + return bytes; + bytes += sprintf(buf + bytes, "%scnp prio\t\t\t\t: %d\n", + SLAVE_STR(slave), tc_rec->cnp_prio); + bytes += sprintf(buf + bytes, "%scnp dscp\t\t\t\t: %d\n", + SLAVE_STR(slave), tc_rec->cnp_dscp); + return bytes; +} + +int bnxt_re_get_print_dscp_pri_mapping(struct bnxt_re_dev *rdev, + char *buf, + struct bnxt_qplib_cc_param *ccparam) +{ + struct bnxt_re_dscp2pri base_d2p[8] = {}; + u32 roce_index = 0, cnp_index = 0; + struct bnxt_re_tc_rec *tc_rec; + u16 portid = 0xFFFF; + u16 count = 8; + int bytes = 0; + u8 prio_map; + int rc; + + if (is_qport_service_type_supported(rdev)) { + tc_rec = &rdev->tc_rec[0]; + if (bnxt_re_get_pri_dscp_settings(rdev, -1, tc_rec)) + goto out; + bytes += __print_pri_dscp_values_from_async_event + (rdev, false, buf + bytes, tc_rec); + if (rdev->binfo) { + tc_rec = &rdev->tc_rec[1]; + if (bnxt_re_get_pri_dscp_settings(rdev, 2, tc_rec)) + goto out; + bytes += __print_pri_dscp_values_from_async_event + (rdev, true, buf + bytes, tc_rec); + } + } else { + if (bnxt_re_is_dscp_mapping_set(rdev->cc_param.cur_mask)) { + rc = 
bnxt_re_query_hwrm_dscp2pri(rdev, base_d2p, + &count, portid); + if (rc) { + dev_err(rdev_to_dev(rdev), + "Query DSCP paras failed on fn %d\n", + bnxt_re_dev_pcifn_id(rdev)); + bytes = rc; + goto out; + } + } else { + if (bnxt_re_is_pri_mapping_set(rdev)) { + prio_map = bnxt_re_get_priority_mask + (rdev, (IEEE_8021QAZ_APP_SEL_ETHERTYPE | + IEEE_8021QAZ_APP_SEL_DGRAM)); + if (prio_map & (1 << rdev->cc_param.roce_pri)) + base_d2p[0].pri = rdev->cc_param.roce_pri; + } + } + + if (!__get_d2p_index(rdev, base_d2p, 2, &roce_index, &cnp_index)) + bytes = __print_pri_dscp_values_from_query + (rdev, buf, bytes, + base_d2p, roce_index, + cnp_index, false, + ccparam); + if (rdev->binfo) { + struct bnxt_re_dscp2pri slave_d2p[8] = {}; + u16 count = 8; + + portid = (PCI_FUNC(rdev->binfo->pdev2->devfn) + 1); + if (bnxt_re_is_dscp_mapping_set(rdev->cc_param.cur_mask)) { + rc = bnxt_re_query_hwrm_dscp2pri + (rdev, slave_d2p, &count, portid); + if (rc) { + dev_err(rdev_to_dev(rdev), + "Query DSCP paras failed on fn %d\n", + PCI_FUNC(rdev->binfo->pdev2->devfn)); + bytes = rc; + goto out; + } + } else { + if (bnxt_re_is_pri_mapping_set(rdev)) { + prio_map = bnxt_re_get_priority_mask + (rdev, (IEEE_8021QAZ_APP_SEL_ETHERTYPE | + IEEE_8021QAZ_APP_SEL_DGRAM)); + if (prio_map & (1 << rdev->cc_param.roce_pri)) + slave_d2p[0].pri = + rdev->cc_param.roce_pri; + } + } + roce_index = 0; + cnp_index = 0; + if (!__get_d2p_index(rdev, slave_d2p, 2, + &roce_index, &cnp_index)) + bytes = __print_pri_dscp_values_from_query + (rdev, buf, bytes, + slave_d2p, roce_index, + cnp_index, true, + ccparam); + } + } +out: + return bytes; +} + +static int __print_pri_dscp_values(struct bnxt_re_dev *rdev, + bool slave, char *buf, + struct bnxt_re_tc_rec *tc_rec) +{ + int bytes = 0; + + if (tc_rec->prio_valid & 1 << ROCE_PRIO_VALID) { + bytes += sprintf(buf + bytes, "%sroce prio\t\t\t\t: %d\n", + SLAVE_STR(slave), tc_rec->roce_prio); + bytes += sprintf(buf + bytes, "%sroce dscp\t\t\t\t: %d\n", + SLAVE_STR(slave), tc_rec->roce_dscp); + } + if (!tc_rec->ecn_enabled) + return bytes; + + if (!rdev->is_virtfn && (tc_rec->prio_valid & 1 << CNP_PRIO_VALID)) { + bytes += sprintf(buf + bytes, "%scnp prio\t\t\t\t: %d\n", + SLAVE_STR(slave), tc_rec->cnp_prio); + bytes += sprintf(buf + bytes, "%scnp dscp\t\t\t\t: %d\n", + SLAVE_STR(slave), tc_rec->cnp_dscp); + } + return bytes; +} + +int bnxt_re_get_print_dscp_pri(struct bnxt_re_dev *rdev, char *buf, + struct bnxt_qplib_cc_param *ccparam, + bool slave) +{ + struct bnxt_re_tc_rec *tc_rec; + int rc = 0, bytes = 0; + + if (slave) + tc_rec = &rdev->tc_rec[1]; + else + tc_rec = &rdev->tc_rec[0]; + + tc_rec->roce_dscp = ccparam->tos_dscp; + tc_rec->cnp_dscp = ccparam->alt_tos_dscp; + tc_rec->ecn_enabled = ccparam->enable; + + rc = bnxt_re_hwrm_pri2cos_qcfg(rdev, tc_rec, (slave ? 
2 : -1)); + if (rc) { + dev_err(rdev_to_dev(rdev), + "Failed to query pri2cos settings on pci function %d\n", + bnxt_re_dev_pcifn_id(rdev)); + goto end; + } + + bytes += __print_pri_dscp_values(rdev, slave, buf + bytes, tc_rec); +end: + return bytes; +} + +static ssize_t apply_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_qplib_cc_param ccparam = {0}; + struct bnxt_qplib_drv_modes *drv_mode; + struct bnxt_re_dev *rdev; + int rc = 0, bytes = 0; + + if (!ccgrp) + return -EINVAL; + + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + + drv_mode = &rdev->chip_ctx->modes; + rc = bnxt_qplib_query_cc_param(&rdev->qplib_res, &ccparam); + if (rc) { + dev_err(rdev_to_dev(rdev), + "Failed to query CC parameters\n"); + bytes = rc; + goto out; + } + + + bytes += sprintf(buf+bytes, "ecn status\t\t\t\t: %s\n", + ccparam.enable ? "Enabled" : "Disabled"); + bytes += sprintf(buf+bytes, "ecn marking\t\t\t\t: %s\n", + !ccparam.tos_ecn ? "Not-ECT" : + (ccparam.tos_ecn == 0x01) ? "ECT(1)" : + "ECT(0)"); + bytes += sprintf(buf+bytes,"congestion control mode\t\t\t: %s\n", + _get_mode_str(ccparam.cc_mode, _is_cc_gen1_plus(rdev))); + bytes += sprintf(buf+bytes, "send priority vlan (VLAN 0)\t\t: %s\n", + rdev->qplib_res.prio ? "Enabled" : "Disabled"); + + bytes += sprintf(buf+bytes, "running avg. weight(g)\t\t\t: %u\n", + ccparam.g); + bytes += sprintf(buf+bytes,"inactivity threshold\t\t\t: %u usec\n", + ccparam.inact_th); + bytes += sprintf(buf+bytes,"initial current rate\t\t\t: %#x\n", + ccparam.init_cr); + bytes += sprintf(buf+bytes, "initial target rate\t\t\t: %#x\n", + ccparam.init_tr); + + if (drv_mode->cc_pr_mode) { + bytes += sprintf(buf+bytes, "round trip time\t\t\t\t: %u usec\n", + ccparam.rtt); + if (!_is_chip_gen_p5_p7(rdev->chip_ctx)) { + bytes += sprintf(buf+bytes, + "phases in fast recovery\t\t\t: %u\n", + ccparam.nph_per_state); + bytes += sprintf(buf+bytes, + "quanta in recovery phase\t\t: %u\n", + ccparam.time_pph); + bytes += sprintf(buf+bytes, + "packets in recovery phase\t\t: %u\n", + ccparam.pkts_pph); + } + + if (ccparam.cc_mode == 1 && !_is_cc_gen1_plus(rdev)) { + bytes += sprintf(buf+bytes, + "tcp congestion probability\t\t: %#x\n", + ccparam.tcp_cp); + } + } + + if (_is_chip_gen_p5_p7(rdev->chip_ctx)) + bytes += print_cc_gen1(&ccparam.cc_ext, (buf + bytes), + drv_mode->cc_pr_mode); + + bytes += bnxt_re_get_print_dscp_pri(rdev, buf + bytes, &ccparam, false); + + if (rdev->binfo) + bytes += bnxt_re_get_print_dscp_pri(rdev, buf + bytes, &ccparam, true); + +out: + return bytes; +} + +static int bnxt_re_program_cnp_dcb_values(struct bnxt_re_dev *rdev) +{ + int rc; + + rc = bnxt_re_setup_cnp_cos(rdev, !is_cc_enabled(rdev)); + if (rc) { + dev_err(rdev_to_dev(rdev), + "Failed to setup cnp cos\n"); + goto exit; + } + + /* Clear the previous dscp table */ + rc = bnxt_re_clear_dscp(rdev); + if (rc) { + dev_err(rdev_to_dev(rdev), + "Failed to clear the dscp - pri table\n"); + goto exit; + } + if (!is_cc_enabled(rdev)) { + /* + * Reset the CNP pri and dscp masks if + * dscp is not programmed + */ + rdev->cc_param.mask &= + (~CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ALT_VLAN_PCP); + rdev->cc_param.mask &= + (~CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ALT_TOS_DSCP); + rdev->cc_param.alt_tos_dscp = 0; + rdev->cc_param.alt_vlan_pcp = 0; + } + /* Setup cnp and roce dscp */ + rc = bnxt_re_setup_dscp(rdev); + if (rc) { + dev_err(rdev_to_dev(rdev), + "Failed to setup the dscp - pri table\n"); + goto exit; + } + return 0; + +exit: + return 
rc; +} + +static ssize_t apply_store(struct config_item *item, const char *buf, + size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + unsigned int val; + u8 prio_map; + int rc = 0; + + if (!ccgrp) + return -EINVAL; + + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + + sscanf(buf, "%x\n", &val); + + mutex_lock(&rdev->cc_lock); + if (val == BNXT_RE_MODIFY_CC) { + /* Update current priority setting */ + prio_map = bnxt_re_get_priority_mask(rdev, + (IEEE_8021QAZ_APP_SEL_ETHERTYPE | + IEEE_8021QAZ_APP_SEL_DGRAM)); + if (rdev->cur_prio_map != prio_map) + rdev->cur_prio_map = prio_map; + /* For VLAN transmission disablement */ + if (rdev->cc_param.mask & + BNXT_QPLIB_CC_PARAM_MASK_VLAN_TX_DISABLE) { + rdev->cc_param.mask &= + ~BNXT_QPLIB_CC_PARAM_MASK_VLAN_TX_DISABLE; + rc = bnxt_re_prio_vlan_tx_update(rdev); + if (rc) + dev_err(rdev_to_dev(rdev), + "Failed to disable VLAN tx\n"); + } + + if (rdev->cc_param.mask || rdev->cc_param.cc_ext.ext_mask || + rdev->cc_param.cc_ext2.ext2_mask) { + if (!is_qport_service_type_supported(rdev)) { + rc = bnxt_re_program_cnp_dcb_values(rdev); + if (rc) { + dev_err(rdev_to_dev(rdev), + "Failed to set cnp values\n"); + goto exit; + } + } + rc = bnxt_qplib_modify_cc(&rdev->qplib_res, + &rdev->cc_param); + if (rc) + dev_err(rdev_to_dev(rdev), + "Failed to apply cc settings\n"); + } + } +exit: + /* Reset the cc param */ + rdev->cc_param.cur_mask = rdev->cc_param.mask; + rdev->cc_param.mask = 0; + mutex_unlock(&rdev->cc_lock); + return rc ? -EINVAL : strnlen(buf, count); +} +CONFIGFS_ATTR(, apply); + +static ssize_t advanced_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_qplib_drv_modes *drv_mode; + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + + drv_mode = &rdev->chip_ctx->modes; + return sprintf(buf, "%#x\n", drv_mode->cc_pr_mode); +} + +#define BNXT_RE_CONFIGFS_HIDE_ADV_CC_PARAMS 0x0 +#define BNXT_RE_CONFIGFS_SHOW_ADV_CC_PARAMS 0x1 +static ssize_t advanced_store(struct config_item *item, const char *buf, + size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_qplib_drv_modes *drv_mode; + struct bnxt_re_dev *rdev; + int val; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + + drv_mode = &rdev->chip_ctx->modes; + sscanf(buf, "%x\n", &val); + if (val > 0) + drv_mode->cc_pr_mode = BNXT_RE_CONFIGFS_SHOW_ADV_CC_PARAMS; + else + drv_mode->cc_pr_mode = BNXT_RE_CONFIGFS_HIDE_ADV_CC_PARAMS; + return strnlen(buf, count); +} +CONFIGFS_ATTR(, advanced); + +static ssize_t cnp_dscp_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + return sprintf(buf,"%#x\n", rdev->cc_param.alt_tos_dscp); +} + +static ssize_t cnp_dscp_store(struct config_item *item, const char *buf, + size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_tc_rec *tc_rec; + struct bnxt_re_dev *rdev; + unsigned int val; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + sscanf(buf, "%x\n", &val); + if (val > 0x3F) + return -EINVAL; + + mutex_lock(&rdev->cc_lock); + + tc_rec = &rdev->tc_rec[0]; + if 
(bnxt_re_get_pri_dscp_settings(rdev, -1, tc_rec)) + goto fail; + + /* + * When use_profile_type on qportcfg_output is set (indicates + * service_profile will carry either lossy/lossless type), + * Validate the DSCP and reject if it is not configured + * for CNP Traffic + */ + if (is_qport_service_type_supported(rdev) && + (!(tc_rec->cnp_dscp_bv & (1ul << val)))) + goto fail; + + rdev->cc_param.prev_alt_tos_dscp = rdev->cc_param.alt_tos_dscp; + rdev->cc_param.alt_tos_dscp = val; + rdev->cc_param.cnp_dscp_user = val; + rdev->cc_param.cnp_dscp_user |= BNXT_QPLIB_USER_DSCP_VALID; + rdev->cc_param.mask |= CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ALT_TOS_DSCP; + mutex_unlock(&rdev->cc_lock); + + return strnlen(buf, count); +fail: + mutex_unlock(&rdev->cc_lock); + return -EINVAL; +} + +CONFIGFS_ATTR(, cnp_dscp); + +static ssize_t cnp_prio_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + + return sprintf(buf,"%#x\n", rdev->cc_param.alt_vlan_pcp); +} + +static ssize_t cnp_prio_store(struct config_item *item, const char *buf, + size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + unsigned int val; + + if (!ccgrp) + return -EINVAL; + + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + + if (is_qport_service_type_supported(rdev)) + return -EINVAL; + + sscanf(buf, "%x\n", &val); + if (rdev->cc_param.alt_vlan_pcp > 7) + return -EINVAL; + rdev->cc_param.prev_alt_vlan_pcp = rdev->cc_param.alt_vlan_pcp; + rdev->cc_param.alt_vlan_pcp = val & 0x07; + rdev->cc_param.mask |= CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ALT_VLAN_PCP; + + return strnlen(buf, count); +} +CONFIGFS_ATTR(, cnp_prio); + +static ssize_t cc_mode_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + + return sprintf(buf,"%#x\n", rdev->cc_param.cc_mode); +} + +static ssize_t cc_mode_store(struct config_item *item, const char *buf, + size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + unsigned int val; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + + sscanf(buf, "%x\n", &val); + if (val > 1) + return -EINVAL; + rdev->cc_param.cc_mode = val; + rdev->cc_param.mask |= CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_CC_MODE; + + return strnlen(buf, count); +} +CONFIGFS_ATTR(, cc_mode); + +static ssize_t dcn_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_qplib_cc_param_ext2 *cc_ext2; + struct bnxt_re_dev *rdev; + ssize_t cnt = 0; + int i; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + + cc_ext2 = &rdev->cc_param.cc_ext2; + cnt += sprintf(buf + cnt, "index ql_thr(kB) cr tr cnp_inc upd_imm\n"); + for (i = 7; i >= 0; i--) + cnt += sprintf(buf + cnt, "%5d %8u %6lu %6lu %7d %7d\n", + i, cc_ext2->dcn_qlevel_tbl_thr[i], + DCN_GET_CR(cc_ext2->dcn_qlevel_tbl_act[i]), + DCN_GET_TR(cc_ext2->dcn_qlevel_tbl_act[i]), + DCN_GET_INC_CNP(cc_ext2->dcn_qlevel_tbl_act[i]), + DCN_GET_UPD_IMM(cc_ext2->dcn_qlevel_tbl_act[i])); + cnt += sprintf(buf + cnt, "Change pending apply:\n"); + if (cc_ext2->ext2_mask) + cnt += sprintf(buf + 
cnt, "%5d %8u %6u %6u %7d %7d\n", + cc_ext2->idx, cc_ext2->thr, cc_ext2->cr, + cc_ext2->tr, cc_ext2->cnp_inc, cc_ext2->upd_imm); + else + cnt += sprintf(buf + cnt, " None\n"); + + return cnt; +} + +static ssize_t dcn_store(struct config_item *item, const char *buf, + size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + int idx, ql_thr, cr, tr, cnp_inc, upd_imm; + struct bnxt_qplib_cc_param_ext2 *cc_ext2; + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + + if (sscanf(buf, "%d %d %d %d %d %d\n", + &idx, &ql_thr, &cr, &tr, &cnp_inc, &upd_imm) != 6) + return -EINVAL; + + /* Input values range check */ + if (idx < 0 || idx > 7) + return -EINVAL; + if (ql_thr < 0 || ql_thr > 0xFFFF) + return -EINVAL; + if (cr < 0 || cr > (MODIFY_DCN_QT_ACT_CR_MASK >> MODIFY_DCN_QT_ACT_CR_SFT)) + return -EINVAL; + if (tr < 0 || tr > (MODIFY_DCN_QT_ACT_TR_MASK >> MODIFY_DCN_QT_ACT_TR_SFT)) + return -EINVAL; + if (cnp_inc < 0 || cnp_inc > 1) + return -EINVAL; + if (upd_imm < 0 || upd_imm > 1) + return -EINVAL; + + /* Store the values, pending to apply */ + cc_ext2 = &rdev->cc_param.cc_ext2; + cc_ext2->idx = idx; + cc_ext2->ext2_mask = 0; + cc_ext2->thr = ql_thr; + if (cc_ext2->thr != cc_ext2->dcn_qlevel_tbl_thr[idx]) + cc_ext2->ext2_mask |= MODIFY_MASK_DCN_QLEVEL_TBL_THR; + cc_ext2->cr = cr; + if (cc_ext2->cr != DCN_GET_CR(cc_ext2->dcn_qlevel_tbl_act[idx])) + cc_ext2->ext2_mask |= MODIFY_MASK_DCN_QLEVEL_TBL_CR; + cc_ext2->tr = tr; + if (cc_ext2->tr != DCN_GET_TR(cc_ext2->dcn_qlevel_tbl_act[idx])) + cc_ext2->ext2_mask |= MODIFY_MASK_DCN_QLEVEL_TBL_TR; + cc_ext2->cnp_inc = cnp_inc; + if (cc_ext2->cnp_inc != DCN_GET_INC_CNP(cc_ext2->dcn_qlevel_tbl_act[idx])) + cc_ext2->ext2_mask |= MODIFY_MASK_DCN_QLEVEL_TBL_INC_CNP; + cc_ext2->upd_imm = upd_imm; + if (cc_ext2->upd_imm != DCN_GET_UPD_IMM(cc_ext2->dcn_qlevel_tbl_act[idx])) + cc_ext2->ext2_mask |= MODIFY_MASK_DCN_QLEVEL_TBL_UPD_IMM; + if (cc_ext2->ext2_mask) + cc_ext2->ext2_mask |= MODIFY_MASK_DCN_QLEVEL_TBL_IDX; + + return strnlen(buf, count); +} +CONFIGFS_ATTR(, dcn); + +static ssize_t ecn_enable_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + + return sprintf(buf,"%#x\n", rdev->cc_param.enable); +} + +static ssize_t ecn_enable_store(struct config_item *item, const char *buf, + size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + unsigned int val; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + + sscanf(buf, "%x\n", &val); + rdev->cc_param.enable = val & 0xFF; + rdev->cc_param.admin_enable = rdev->cc_param.enable; + rdev->cc_param.mask |= CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ENABLE_CC; + + return strnlen(buf, count); +} +CONFIGFS_ATTR(, ecn_enable); + +static ssize_t g_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + + return sprintf(buf,"%#x\n", rdev->cc_param.g); +} + +static ssize_t g_store(struct config_item *item, const char *buf, + size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + unsigned int val; + + if (!ccgrp) + return -EINVAL; 
+ rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + + sscanf(buf, "%x\n", &val); + rdev->cc_param.g = val & 0xFF; + rdev->cc_param.mask |= CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_G; + + return strnlen(buf, count); +} +CONFIGFS_ATTR(, g); + +static ssize_t init_cr_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + + return sprintf(buf,"%#x\n", rdev->cc_param.init_cr); +} + +static ssize_t init_cr_store(struct config_item *item, const char *buf, + size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + unsigned int val; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + + sscanf(buf, "%x\n", &val); + rdev->cc_param.init_cr = val & 0xFFFF; + rdev->cc_param.mask |= CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_INIT_CR; + + return strnlen(buf, count); +} +CONFIGFS_ATTR(, init_cr); + +static ssize_t inact_th_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + + return sprintf(buf,"%#x\n", rdev->cc_param.inact_th); +} + +static ssize_t inact_th_store(struct config_item *item, const char *buf, + size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + unsigned int val; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + + sscanf(buf, "%x\n", &val); + rdev->cc_param.inact_th = val & 0xFFFF; + rdev->cc_param.mask |= CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_INACTIVITY_CP; + + return strnlen(buf, count); +} +CONFIGFS_ATTR(, inact_th); + +static ssize_t init_tr_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + + return sprintf(buf,"%#x\n", rdev->cc_param.init_tr); +} + +static ssize_t init_tr_store(struct config_item *item, const char *buf, + size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + unsigned int val; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + + sscanf(buf, "%x\n", &val); + rdev->cc_param.init_tr = val & 0xFFFF; + rdev->cc_param.mask |= CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_INIT_TR; + + return strnlen(buf, count); +} +CONFIGFS_ATTR(, init_tr); + +static ssize_t nph_per_state_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + + return sprintf(buf,"%#x\n", rdev->cc_param.nph_per_state); +} + +static ssize_t nph_per_state_store(struct config_item *item, const char *buf, + size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + unsigned int val; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + + sscanf(buf, "%x\n", &val); + rdev->cc_param.nph_per_state = val & 0xFF; + rdev->cc_param.mask |= + CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_NUMPHASEPERSTATE; + + return strnlen(buf, 
count); +} +CONFIGFS_ATTR(, nph_per_state); + +static ssize_t time_pph_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + + return sprintf(buf,"%#x\n", rdev->cc_param.time_pph); +} + +static ssize_t time_pph_store(struct config_item *item, const char *buf, + size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + unsigned int val; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + + sscanf(buf, "%x\n", &val); + if (val < 1 || val > 0xF) + return -EINVAL; + rdev->cc_param.time_pph = val & 0xFF; + rdev->cc_param.mask |= + CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TIME_PER_PHASE; + + return strnlen(buf, count); +} +CONFIGFS_ATTR(, time_pph); + +static ssize_t pkts_pph_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + + return sprintf(buf,"%#x\n", rdev->cc_param.pkts_pph); +} + +static ssize_t pkts_pph_store(struct config_item *item, const char *buf, + size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + unsigned int val; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + + sscanf(buf, "%x\n", &val); + if (val < 1 || val > 0xFF) + return -EINVAL; + rdev->cc_param.pkts_pph = val & 0xFF; + rdev->cc_param.mask |= + CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_PKTS_PER_PHASE; + + return strnlen(buf, count); +} +CONFIGFS_ATTR(, pkts_pph); + +static ssize_t rtt_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + + return sprintf(buf,"%#x\n", rdev->cc_param.rtt); +} + +static ssize_t rtt_store(struct config_item *item, const char *buf, + size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + unsigned int val; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + + sscanf(buf, "%x\n", &val); + rdev->cc_param.rtt = val & 0xFFFF; + rdev->cc_param.mask |= CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_RTT; + + return strnlen(buf, count); +} +CONFIGFS_ATTR(, rtt); + +static ssize_t tcp_cp_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + + return sprintf(buf,"%#x\n", rdev->cc_param.tcp_cp); +} + +static ssize_t tcp_cp_store(struct config_item *item, const char *buf, + size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + unsigned int val; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + + sscanf(buf, "%x\n", &val); + rdev->cc_param.tcp_cp = val & 0xFFFF; + rdev->cc_param.mask |= CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TCP_CP; + + return strnlen(buf, count); +} +CONFIGFS_ATTR(, tcp_cp); + +static ssize_t roce_dscp_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = 
__get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + + return sprintf(buf,"%#x\n", rdev->cc_param.tos_dscp); +} + +static ssize_t roce_dscp_store(struct config_item *item, const char *buf, + size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_tc_rec *tc_rec; + struct bnxt_re_dev *rdev; + unsigned int val; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + + sscanf(buf, "%x\n", &val); + if (val > 0x3F) + return -EINVAL; + + mutex_lock(&rdev->cc_lock); + + tc_rec = &rdev->tc_rec[0]; + if (bnxt_re_get_pri_dscp_settings(rdev, -1, tc_rec)) + goto fail; + + /* + * When use_profile_type on qportcfg_output is set (indicates + * service_profile will carry either lossy/lossless type), + * Validate the DSCP and reject if it is not configured + * for RoCE Traffic + */ + if (is_qport_service_type_supported(rdev) && + (!(tc_rec->roce_dscp_bv & (1ul << val)))) + goto fail; + + rdev->cc_param.prev_tos_dscp = rdev->cc_param.tos_dscp; + rdev->cc_param.tos_dscp = val; + rdev->cc_param.roce_dscp_user = val; + rdev->cc_param.roce_dscp_user |= BNXT_QPLIB_USER_DSCP_VALID; + rdev->cc_param.mask |= CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TOS_DSCP; + mutex_unlock(&rdev->cc_lock); + + return strnlen(buf, count); +fail: + mutex_unlock(&rdev->cc_lock); + return -EINVAL; +} +CONFIGFS_ATTR(, roce_dscp); + +static ssize_t roce_prio_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + + return sprintf(buf,"%#x\n", rdev->cc_param.roce_pri); +} + +static ssize_t roce_prio_store(struct config_item *item, const char *buf, + size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + unsigned int val; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + + if (is_qport_service_type_supported(rdev)) + return -EINVAL; + + sscanf(buf, "%x\n", &val); + if (val > 7) + return -EINVAL; + rdev->cc_param.prev_roce_pri = rdev->cc_param.roce_pri; + rdev->cc_param.roce_pri = val & 0x07; + rdev->cc_param.mask |= BNXT_QPLIB_CC_PARAM_MASK_ROCE_PRI; + return strnlen(buf, count); +} +CONFIGFS_ATTR(, roce_prio); + +static ssize_t ecn_marking_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + + return sprintf(buf,"%#x\n", rdev->cc_param.tos_ecn); +} + +static ssize_t ecn_marking_store(struct config_item *item, const char *buf, + size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + unsigned int val; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + + sscanf(buf, "%x", &val); + if (val >= 0x03) + return -EINVAL; + rdev->cc_param.tos_ecn = val & 0x3; + rdev->cc_param.mask |= CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TOS_ECN; + + return strnlen(buf, count); +} +CONFIGFS_ATTR(, ecn_marking); + +static ssize_t disable_prio_vlan_tx_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return 
-EINVAL; + + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + return sprintf(buf,"%#x\n", rdev->cc_param.disable_prio_vlan_tx); +} + +static ssize_t disable_prio_vlan_tx_store(struct config_item *item, const char *buf, + size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + unsigned int val; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + sscanf(buf, "%x\n", &val); + rdev->cc_param.disable_prio_vlan_tx = val & 0x1; + rdev->cc_param.mask |= BNXT_QPLIB_CC_PARAM_MASK_VLAN_TX_DISABLE; + + return strnlen(buf, count); +} +CONFIGFS_ATTR(, disable_prio_vlan_tx); + +static ssize_t inact_th_hi_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + return sprintf(buf,"%#x\n", rdev->cc_param.cc_ext.inact_th_hi); +} + +static ssize_t inact_th_hi_store(struct config_item *item, const char *buf, + size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + unsigned int val; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + sscanf(buf, "%x\n", &val); + rdev->cc_param.cc_ext.inact_th_hi = val & 0xFFFF; + /* rdev->cc_param.cc_ext.ext_mask |= ;*/ + + return strnlen(buf, count); +} +CONFIGFS_ATTR(, inact_th_hi); + +static ssize_t min_time_bet_cnp_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + return sprintf(buf,"%#x\n", rdev->cc_param.cc_ext.min_delta_cnp); +} + +static ssize_t min_time_bet_cnp_store(struct config_item *item, + const char *buf, size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + unsigned int val; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + sscanf(buf, "%x\n", &val); + rdev->cc_param.cc_ext.min_delta_cnp = val & 0xFFFF; + rdev->cc_param.cc_ext.ext_mask |= + CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_MIN_TIME_BETWEEN_CNPS; + return strnlen(buf, count); +} +CONFIGFS_ATTR(, min_time_bet_cnp); + +static ssize_t init_cp_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + return sprintf(buf,"%#x\n", rdev->cc_param.cc_ext.init_cp); +} + +static ssize_t init_cp_store(struct config_item *item, + const char *buf, size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + unsigned int val; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + sscanf(buf, "%x\n", &val); + rdev->cc_param.cc_ext.init_cp = val & 0xFFFF; + rdev->cc_param.cc_ext.ext_mask |= + CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_INIT_CP; + return strnlen(buf, count); +} +CONFIGFS_ATTR(, init_cp); + +static ssize_t tr_update_mode_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return 
-EINVAL; + return sprintf(buf,"%#x\n", rdev->cc_param.cc_ext.tr_update_mode); +} + +static ssize_t tr_update_mode_store(struct config_item *item, + const char *buf, size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + unsigned int val; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + sscanf(buf, "%x\n", &val); + rdev->cc_param.cc_ext.tr_update_mode = val & 0xFF; + rdev->cc_param.cc_ext.ext_mask |= + CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_TR_UPDATE_MODE; + return strnlen(buf, count); +} +CONFIGFS_ATTR(, tr_update_mode); + +static ssize_t tr_update_cyls_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + return sprintf(buf,"%#x\n", rdev->cc_param.cc_ext.tr_update_cyls); +} + +static ssize_t tr_update_cyls_store(struct config_item *item, + const char *buf, size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + unsigned int val; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + sscanf(buf, "%x\n", &val); + rdev->cc_param.cc_ext.tr_update_cyls = val & 0xFF; + rdev->cc_param.cc_ext.ext_mask |= + CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_TR_UPDATE_CYCLES; + return strnlen(buf, count); +} +CONFIGFS_ATTR(, tr_update_cyls); + +static ssize_t fr_num_rtts_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + return sprintf(buf,"%#x\n", rdev->cc_param.cc_ext.fr_rtt); +} + +static ssize_t fr_num_rtts_store(struct config_item *item, const char *buf, + size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + unsigned int val; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + sscanf(buf, "%x\n", &val); + rdev->cc_param.cc_ext.fr_rtt = val & 0xFF; + rdev->cc_param.cc_ext.ext_mask |= + CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_FR_NUM_RTTS; + return strnlen(buf, count); +} +CONFIGFS_ATTR(, fr_num_rtts); + +static ssize_t ai_rate_incr_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + return sprintf(buf,"%#x\n", rdev->cc_param.cc_ext.ai_rate_incr); +} + +static ssize_t ai_rate_incr_store(struct config_item *item, const char *buf, + size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + unsigned int val; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + sscanf(buf, "%x\n", &val); + rdev->cc_param.cc_ext.ai_rate_incr = val & 0xFF; + rdev->cc_param.cc_ext.ext_mask |= + CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_AI_RATE_INCREASE; + return strnlen(buf, count); +} +CONFIGFS_ATTR(, ai_rate_incr); + +static ssize_t red_rel_rtts_th_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return 
-EINVAL; + return sprintf(buf,"%#x\n", rdev->cc_param.cc_ext.rr_rtt_th); +} + +static ssize_t red_rel_rtts_th_store(struct config_item *item, + const char *buf, size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + unsigned int val; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + sscanf(buf, "%x\n", &val); + rdev->cc_param.cc_ext.rr_rtt_th = val & 0xFFFF; + rdev->cc_param.cc_ext.ext_mask |= + CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_REDUCTION_RELAX_RTTS_TH; + return strnlen(buf, count); +} +CONFIGFS_ATTR(, red_rel_rtts_th); + +static ssize_t act_rel_cr_th_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + return sprintf(buf,"%#x\n", rdev->cc_param.cc_ext.ar_cr_th); +} + +static ssize_t act_rel_cr_th_store(struct config_item *item, const char *buf, + size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + unsigned int val; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + sscanf(buf, "%x\n", &val); + rdev->cc_param.cc_ext.ar_cr_th = val & 0xFFFF; + rdev->cc_param.cc_ext.ext_mask |= + CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_ADDITIONAL_RELAX_CR_TH; + return strnlen(buf, count); +} +CONFIGFS_ATTR(, act_rel_cr_th); + +static ssize_t cr_min_th_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + return sprintf(buf,"%#x\n", rdev->cc_param.cc_ext.cr_min_th); +} + +static ssize_t cr_min_th_store(struct config_item *item, const char *buf, + size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + unsigned int val; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + sscanf(buf, "%x\n", &val); + rdev->cc_param.cc_ext.cr_min_th = val & 0xFFFF; + rdev->cc_param.cc_ext.ext_mask |= + CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_CR_MIN_TH; + return strnlen(buf, count); +} +CONFIGFS_ATTR(, cr_min_th); + +static ssize_t bw_avg_weight_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + return sprintf(buf,"%#x\n", rdev->cc_param.cc_ext.bw_avg_weight); +} + +static ssize_t bw_avg_weight_store(struct config_item *item, const char *buf, + size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + unsigned int val; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + sscanf(buf, "%x\n", &val); + rdev->cc_param.cc_ext.bw_avg_weight = val & 0xFF; + rdev->cc_param.cc_ext.ext_mask |= + CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_BW_AVG_WEIGHT; + return strnlen(buf, count); +} +CONFIGFS_ATTR(, bw_avg_weight); + +static ssize_t act_cr_factor_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return 
-EINVAL; + return sprintf(buf,"%#x\n", rdev->cc_param.cc_ext.cr_factor); +} + +static ssize_t act_cr_factor_store(struct config_item *item, const char *buf, + size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + unsigned int val; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + sscanf(buf, "%x\n", &val); + rdev->cc_param.cc_ext.cr_factor = val & 0xFFFF; + rdev->cc_param.cc_ext.ext_mask |= + CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_ACTUAL_CR_FACTOR; + return strnlen(buf, count); +} +CONFIGFS_ATTR(, act_cr_factor); + +static ssize_t max_cp_cr_th_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + return sprintf(buf,"%#x\n", rdev->cc_param.cc_ext.cr_th_max_cp); +} + +static ssize_t max_cp_cr_th_store(struct config_item *item, const char *buf, + size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + unsigned int val; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + sscanf(buf, "%x\n", &val); + rdev->cc_param.cc_ext.cr_th_max_cp = val & 0xFFFF; + rdev->cc_param.cc_ext.ext_mask |= + CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_MAX_CP_CR_TH; + return strnlen(buf, count); +} +CONFIGFS_ATTR(, max_cp_cr_th); + +static ssize_t cp_bias_en_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + return sprintf(buf,"%#x\n", rdev->cc_param.cc_ext.cp_bias_en); +} + +static ssize_t cp_bias_en_store(struct config_item *item, const char *buf, + size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + unsigned int val; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + sscanf(buf, "%x\n", &val); + rdev->cc_param.cc_ext.cp_bias_en = val & 0xFF; + rdev->cc_param.cc_ext.ext_mask |= + CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_CP_BIAS_EN; + return strnlen(buf, count); +} +CONFIGFS_ATTR(, cp_bias_en); + +static ssize_t cp_bias_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + return sprintf(buf,"%#x\n", rdev->cc_param.cc_ext.cp_bias); +} + +static ssize_t cp_bias_store(struct config_item *item, const char *buf, + size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + unsigned int val; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + sscanf(buf, "%x\n", &val); + rdev->cc_param.cc_ext.cp_bias = val & 0xFF; + rdev->cc_param.cc_ext.ext_mask |= + CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_CP_BIAS; + return strnlen(buf, count); +} +CONFIGFS_ATTR(, cp_bias); + +static ssize_t cnp_ecn_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + return sprintf(buf,"%#x\n", 
rdev->cc_param.cc_ext.cnp_ecn); +} + +static ssize_t cnp_ecn_store(struct config_item *item, const char *buf, + size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + unsigned int val; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + sscanf(buf, "%x\n", &val); + rdev->cc_param.cc_ext.cnp_ecn = val & 0xFF; + rdev->cc_param.cc_ext.ext_mask |= + CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_CNP_ECN; + return strnlen(buf, count); +} +CONFIGFS_ATTR(, cnp_ecn); + +static ssize_t rtt_jitter_en_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + return sprintf(buf,"%#x\n", rdev->cc_param.cc_ext.rtt_jitter_en); +} + +static ssize_t rtt_jitter_en_store(struct config_item *item, const char *buf, + size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + unsigned int val; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + sscanf(buf, "%x\n", &val); + rdev->cc_param.cc_ext.rtt_jitter_en = val & 0xFF; + rdev->cc_param.cc_ext.ext_mask |= + CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_RTT_JITTER_EN; + return strnlen(buf, count); +} +CONFIGFS_ATTR(, rtt_jitter_en); + +static ssize_t lbytes_per_usec_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + return sprintf(buf,"%#x\n", rdev->cc_param.cc_ext.bytes_per_usec); +} + +static ssize_t lbytes_per_usec_store(struct config_item *item, const char *buf, + size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + unsigned int val; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + sscanf(buf, "%x\n", &val); + rdev->cc_param.cc_ext.bytes_per_usec = val & 0xFFFF; + rdev->cc_param.cc_ext.ext_mask |= + CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_LINK_BYTES_PER_USEC; + return strnlen(buf, count); +} + +CONFIGFS_ATTR(, lbytes_per_usec); + +static ssize_t reset_cc_cr_th_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + return sprintf(buf,"%#x\n", rdev->cc_param.cc_ext.cc_cr_reset_th); +} + +static ssize_t reset_cc_cr_th_store(struct config_item *item, const char *buf, + size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + unsigned int val; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + sscanf(buf, "%x\n", &val); + rdev->cc_param.cc_ext.cc_cr_reset_th = val & 0xFFFF; + rdev->cc_param.cc_ext.ext_mask |= + CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_RESET_CC_CR_TH; + return strnlen(buf, count); +} +CONFIGFS_ATTR(, reset_cc_cr_th); + +static ssize_t cr_width_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + return sprintf(buf,"%#x\n", 
rdev->cc_param.cc_ext.cr_width); +} + +static ssize_t cr_width_store(struct config_item *item, const char *buf, + size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + unsigned int val; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + sscanf(buf, "%x\n", &val); + rdev->cc_param.cc_ext.cr_width = val & 0xFF; + rdev->cc_param.cc_ext.ext_mask |= + CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_CR_WIDTH; + return strnlen(buf, count); +} +CONFIGFS_ATTR(, cr_width); + +static ssize_t min_quota_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + return sprintf(buf,"%#x\n", rdev->cc_param.cc_ext.min_quota); +} + +static ssize_t min_quota_store(struct config_item *item, const char *buf, + size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + unsigned int val; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + sscanf(buf, "%x\n", &val); + rdev->cc_param.cc_ext.min_quota = val & 0xFF; + rdev->cc_param.cc_ext.ext_mask |= + CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_QUOTA_PERIOD_MIN; + return strnlen(buf, count); +} +CONFIGFS_ATTR(, min_quota); + +static ssize_t max_quota_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + return sprintf(buf,"%#x\n", rdev->cc_param.cc_ext.max_quota); +} + +static ssize_t max_quota_store(struct config_item *item, const char *buf, + size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + unsigned int val; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + sscanf(buf, "%x\n", &val); + rdev->cc_param.cc_ext.max_quota = val & 0xFF; + rdev->cc_param.cc_ext.ext_mask |= + CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_QUOTA_PERIOD_MAX; + return strnlen(buf, count); +} +CONFIGFS_ATTR(, max_quota); + +static ssize_t abs_max_quota_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + return sprintf(buf,"%#x\n", rdev->cc_param.cc_ext.abs_max_quota); +} + +static ssize_t abs_max_quota_store(struct config_item *item, const char *buf, + size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + unsigned int val; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + sscanf(buf, "%x\n", &val); + rdev->cc_param.cc_ext.abs_max_quota = val & 0xFF; + rdev->cc_param.cc_ext.ext_mask |= + CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_QUOTA_PERIOD_ABS_MAX; + return strnlen(buf, count); +} +CONFIGFS_ATTR(, abs_max_quota); + +static ssize_t tr_lb_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + return sprintf(buf,"%#x\n", rdev->cc_param.cc_ext.tr_lb); +} + +static ssize_t 
tr_lb_store(struct config_item *item, const char *buf, + size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + unsigned int val; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + sscanf(buf, "%x\n", &val); + rdev->cc_param.cc_ext.tr_lb = val & 0xFFFF; + rdev->cc_param.cc_ext.ext_mask |= + CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_TR_LOWER_BOUND; + return strnlen(buf, count); +} +CONFIGFS_ATTR(, tr_lb); + +static ssize_t cr_prob_fac_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + return sprintf(buf,"%#x\n", rdev->cc_param.cc_ext.cr_prob_fac); +} + +static ssize_t cr_prob_fac_store(struct config_item *item, const char *buf, + size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + unsigned int val; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + sscanf(buf, "%x\n", &val); + rdev->cc_param.cc_ext.cr_prob_fac = val & 0xFF; + rdev->cc_param.cc_ext.ext_mask |= + CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_CR_PROB_FACTOR; + return strnlen(buf, count); +} +CONFIGFS_ATTR(, cr_prob_fac); + +static ssize_t tr_prob_fac_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + return sprintf(buf,"%#x\n", rdev->cc_param.cc_ext.tr_prob_fac); +} + +static ssize_t tr_prob_fac_store(struct config_item *item, const char *buf, + size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + unsigned int val; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + sscanf(buf, "%x\n", &val); + rdev->cc_param.cc_ext.tr_prob_fac = val & 0xFF; + rdev->cc_param.cc_ext.ext_mask |= + CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_TR_PROB_FACTOR; + return strnlen(buf, count); +} +CONFIGFS_ATTR(, tr_prob_fac); + +static ssize_t fair_cr_th_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + return sprintf(buf,"%#x\n", rdev->cc_param.cc_ext.fair_cr_th); +} + +static ssize_t fair_cr_th_store(struct config_item *item, const char *buf, + size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + unsigned int val; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + sscanf(buf, "%x\n", &val); + rdev->cc_param.cc_ext.fair_cr_th = val & 0xFFFF; + rdev->cc_param.cc_ext.ext_mask |= + CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_FAIRNESS_CR_TH; + + return strnlen(buf, count); +} +CONFIGFS_ATTR(, fair_cr_th); + +static ssize_t red_div_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + return sprintf(buf,"%#x\n", rdev->cc_param.cc_ext.red_div); +} + +static ssize_t red_div_store(struct config_item *item, const char 
*buf, + size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + unsigned int val; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + sscanf(buf, "%x\n", &val); + rdev->cc_param.cc_ext.red_div = val & 0xFF; + rdev->cc_param.cc_ext.ext_mask |= + CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_RED_DIV; + + return strnlen(buf, count); +} +CONFIGFS_ATTR(, red_div); + +static ssize_t cnp_ratio_th_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + return sprintf(buf,"%#x\n", rdev->cc_param.cc_ext.cnp_ratio_th); +} + +static ssize_t cnp_ratio_th_store(struct config_item *item, const char *buf, + size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + unsigned int val; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + sscanf(buf, "%x\n", &val); + rdev->cc_param.cc_ext.cnp_ratio_th = val & 0xFF; + rdev->cc_param.cc_ext.ext_mask |= + CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_CNP_RATIO_TH; + + return strnlen(buf, count); +} +CONFIGFS_ATTR(, cnp_ratio_th); + +static ssize_t exp_ai_rtts_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + return sprintf(buf,"%#x\n", rdev->cc_param.cc_ext.ai_ext_rtt); +} + +static ssize_t exp_ai_rtts_store(struct config_item *item, const char *buf, + size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + unsigned int val; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + sscanf(buf, "%x\n", &val); + rdev->cc_param.cc_ext.ai_ext_rtt = val & 0xFFFF; + rdev->cc_param.cc_ext.ext_mask |= + CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_EXP_AI_RTTS; + + return strnlen(buf, count); +} +CONFIGFS_ATTR(, exp_ai_rtts); + +static ssize_t exp_crcp_ratio_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + return sprintf(buf,"%#x\n", rdev->cc_param.cc_ext.exp_crcp_ratio); +} + +static ssize_t exp_crcp_ratio_store(struct config_item *item, const char *buf, + size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + unsigned int val; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + sscanf(buf, "%x\n", &val); + rdev->cc_param.cc_ext.exp_crcp_ratio = val & 0xFF; + rdev->cc_param.cc_ext.ext_mask |= + CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_EXP_AI_CR_CP_RATIO; + + return strnlen(buf, count); +} +CONFIGFS_ATTR(, exp_crcp_ratio); + +static ssize_t rt_en_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + return sprintf(buf,"%#x\n", rdev->cc_param.cc_ext.low_rate_en); +} + +static ssize_t rt_en_store(struct config_item *item, const char *buf, + size_t count) +{ + 
struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + unsigned int val; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + sscanf(buf, "%x\n", &val); + rdev->cc_param.cc_ext.low_rate_en = val & 0xFF; + rdev->cc_param.cc_ext.ext_mask |= + CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_USE_RATE_TABLE; + + return strnlen(buf, count); +} +CONFIGFS_ATTR(, rt_en); + +static ssize_t cp_exp_update_th_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + return sprintf(buf,"%#x\n", rdev->cc_param.cc_ext.cpcr_update_th); +} + +static ssize_t cp_exp_update_th_store(struct config_item *item, + const char *buf, size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + unsigned int val; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + sscanf(buf, "%x\n", &val); + rdev->cc_param.cc_ext.cpcr_update_th = val & 0xFFFF; + rdev->cc_param.cc_ext.ext_mask |= + CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_CP_EXP_UPDATE_TH; + + return strnlen(buf, count); +} +CONFIGFS_ATTR(, cp_exp_update_th); + +static ssize_t ai_rtt_th1_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + return sprintf(buf,"%#x\n", rdev->cc_param.cc_ext.ai_rtt_th1); +} + +static ssize_t ai_rtt_th1_store(struct config_item *item, const char *buf, + size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + unsigned int val; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + sscanf(buf, "%x\n", &val); + rdev->cc_param.cc_ext.ai_rtt_th1 = val & 0xFFFF; + rdev->cc_param.cc_ext.ext_mask |= + CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_HIGH_EXP_AI_RTTS_TH1; + + return strnlen(buf, count); +} +CONFIGFS_ATTR(, ai_rtt_th1); + +static ssize_t ai_rtt_th2_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + return sprintf(buf,"%#x\n", rdev->cc_param.cc_ext.ai_rtt_th2); +} + +static ssize_t ai_rtt_th2_store(struct config_item *item, const char *buf, + size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + unsigned int val; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + sscanf(buf, "%x\n", &val); + rdev->cc_param.cc_ext.ai_rtt_th2 = val & 0xFFFF; + rdev->cc_param.cc_ext.ext_mask |= + CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_HIGH_EXP_AI_RTTS_TH2; + + return strnlen(buf, count); +} +CONFIGFS_ATTR(, ai_rtt_th2); + +static ssize_t cf_rtt_th_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + return sprintf(buf,"%#x\n", rdev->cc_param.cc_ext.cf_rtt_th); +} + +static ssize_t cf_rtt_th_store(struct config_item *item, const char *buf, + size_t count) +{ + 
struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + unsigned int val; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + sscanf(buf, "%x\n", &val); + rdev->cc_param.cc_ext.cf_rtt_th = val & 0xFFFF; + rdev->cc_param.cc_ext.ext_mask |= + CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_ACTUAL_CR_CONG_FREE_RTTS_TH; + + return strnlen(buf, count); +} +CONFIGFS_ATTR(, cf_rtt_th); + +static ssize_t reduce_cf_rtt_th_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + return sprintf(buf,"%#x\n", rdev->cc_param.cc_ext.reduce_cf_rtt_th); +} + +static ssize_t reduce_cf_rtt_th_store(struct config_item *item, const char *buf, + size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + unsigned int val; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + sscanf(buf, "%x\n", &val); + rdev->cc_param.cc_ext.reduce_cf_rtt_th = val & 0xFFFF; + rdev->cc_param.cc_ext.ext_mask |= + CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_REDUCE_INIT_CONG_FREE_RTTS_TH; + + return strnlen(buf, count); +} +CONFIGFS_ATTR(, reduce_cf_rtt_th); + +static ssize_t sc_cr_th1_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + return sprintf(buf,"%#x\n", rdev->cc_param.cc_ext.sc_cr_th1); +} + +static ssize_t sc_cr_th1_store(struct config_item *item, const char *buf, + size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + unsigned int val; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + sscanf(buf, "%x\n", &val); + rdev->cc_param.cc_ext.sc_cr_th1 = val & 0xFFFF; + rdev->cc_param.cc_ext.ext_mask |= + CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_SEVERE_CONG_CR_TH1; + + return strnlen(buf, count); +} +CONFIGFS_ATTR(, sc_cr_th1); + +static ssize_t sc_cr_th2_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + return sprintf(buf,"%#x\n", rdev->cc_param.cc_ext.sc_cr_th2); +} + +static ssize_t sc_cr_th2_store(struct config_item *item, const char *buf, + size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + unsigned int val; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + sscanf(buf, "%x\n", &val); + rdev->cc_param.cc_ext.sc_cr_th2 = val & 0xFFFF; + rdev->cc_param.cc_ext.ext_mask |= + CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_SEVERE_CONG_CR_TH2; + + return strnlen(buf, count); +} +CONFIGFS_ATTR(, sc_cr_th2); + +static ssize_t l64B_per_rtt_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + return sprintf(buf,"%#x\n", rdev->cc_param.cc_ext.l64B_per_rtt); +} + +static ssize_t l64B_per_rtt_store(struct config_item *item, const 
char *buf, + size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + unsigned int val; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + sscanf(buf, "%x\n", &val); + rdev->cc_param.cc_ext.l64B_per_rtt = val; + rdev->cc_param.cc_ext.ext_mask |= + CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_LINK64B_PER_RTT; + + return strnlen(buf, count); +} +CONFIGFS_ATTR(, l64B_per_rtt); + +static ssize_t cc_ack_bytes_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + return sprintf(buf,"%#x\n", rdev->cc_param.cc_ext.cc_ack_bytes); +} + +static ssize_t cc_ack_bytes_store(struct config_item *item, const char *buf, + size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + unsigned int val; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + sscanf(buf, "%x\n", &val); + rdev->cc_param.cc_ext.cc_ack_bytes = val & 0xFF; + rdev->cc_param.cc_ext.ext_mask |= + CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_CC_ACK_BYTES; + + return strnlen(buf, count); +} +CONFIGFS_ATTR(, cc_ack_bytes); + +static struct configfs_attribute *bnxt_re_cc_attrs[] = { + CONFIGFS_ATTR_ADD(attr_advanced), + CONFIGFS_ATTR_ADD(attr_apply), + CONFIGFS_ATTR_ADD(attr_cnp_dscp), + CONFIGFS_ATTR_ADD(attr_cnp_prio), + CONFIGFS_ATTR_ADD(attr_cc_mode), + CONFIGFS_ATTR_ADD(attr_ecn_enable), + CONFIGFS_ATTR_ADD(attr_g), + CONFIGFS_ATTR_ADD(attr_init_cr), + CONFIGFS_ATTR_ADD(attr_inact_th), + CONFIGFS_ATTR_ADD(attr_init_tr), + CONFIGFS_ATTR_ADD(attr_nph_per_state), + CONFIGFS_ATTR_ADD(attr_time_pph), + CONFIGFS_ATTR_ADD(attr_pkts_pph), + CONFIGFS_ATTR_ADD(attr_rtt), + CONFIGFS_ATTR_ADD(attr_tcp_cp), + CONFIGFS_ATTR_ADD(attr_roce_dscp), + CONFIGFS_ATTR_ADD(attr_roce_prio), + CONFIGFS_ATTR_ADD(attr_ecn_marking), + CONFIGFS_ATTR_ADD(attr_disable_prio_vlan_tx), + NULL, +}; + +static struct configfs_attribute *bnxt_re_cc_attrs_ext[] = { + CONFIGFS_ATTR_ADD(attr_advanced), + CONFIGFS_ATTR_ADD(attr_apply), + CONFIGFS_ATTR_ADD(attr_cnp_dscp), + CONFIGFS_ATTR_ADD(attr_cnp_prio), + CONFIGFS_ATTR_ADD(attr_cc_mode), + CONFIGFS_ATTR_ADD(attr_ecn_enable), + CONFIGFS_ATTR_ADD(attr_g), + CONFIGFS_ATTR_ADD(attr_init_cr), + CONFIGFS_ATTR_ADD(attr_inact_th), + CONFIGFS_ATTR_ADD(attr_init_tr), + CONFIGFS_ATTR_ADD(attr_rtt), + CONFIGFS_ATTR_ADD(attr_roce_dscp), + CONFIGFS_ATTR_ADD(attr_roce_prio), + CONFIGFS_ATTR_ADD(attr_ecn_marking), + CONFIGFS_ATTR_ADD(attr_disable_prio_vlan_tx), + CONFIGFS_ATTR_ADD(attr_inact_th_hi), + CONFIGFS_ATTR_ADD(attr_min_time_bet_cnp), + CONFIGFS_ATTR_ADD(attr_init_cp), + CONFIGFS_ATTR_ADD(attr_tr_update_mode), + CONFIGFS_ATTR_ADD(attr_tr_update_cyls), + CONFIGFS_ATTR_ADD(attr_fr_num_rtts), + CONFIGFS_ATTR_ADD(attr_ai_rate_incr), + CONFIGFS_ATTR_ADD(attr_red_rel_rtts_th), + CONFIGFS_ATTR_ADD(attr_act_rel_cr_th), + CONFIGFS_ATTR_ADD(attr_cr_min_th), + CONFIGFS_ATTR_ADD(attr_bw_avg_weight), + CONFIGFS_ATTR_ADD(attr_act_cr_factor), + CONFIGFS_ATTR_ADD(attr_max_cp_cr_th), + CONFIGFS_ATTR_ADD(attr_cp_bias_en), + CONFIGFS_ATTR_ADD(attr_cp_bias), + CONFIGFS_ATTR_ADD(attr_cnp_ecn), + CONFIGFS_ATTR_ADD(attr_rtt_jitter_en), + CONFIGFS_ATTR_ADD(attr_lbytes_per_usec), + CONFIGFS_ATTR_ADD(attr_reset_cc_cr_th), + CONFIGFS_ATTR_ADD(attr_cr_width), + 
CONFIGFS_ATTR_ADD(attr_min_quota), + CONFIGFS_ATTR_ADD(attr_max_quota), + CONFIGFS_ATTR_ADD(attr_abs_max_quota), + CONFIGFS_ATTR_ADD(attr_tr_lb), + CONFIGFS_ATTR_ADD(attr_cr_prob_fac), + CONFIGFS_ATTR_ADD(attr_tr_prob_fac), + CONFIGFS_ATTR_ADD(attr_fair_cr_th), + CONFIGFS_ATTR_ADD(attr_red_div), + CONFIGFS_ATTR_ADD(attr_cnp_ratio_th), + CONFIGFS_ATTR_ADD(attr_exp_ai_rtts), + CONFIGFS_ATTR_ADD(attr_exp_crcp_ratio), + CONFIGFS_ATTR_ADD(attr_rt_en), + CONFIGFS_ATTR_ADD(attr_cp_exp_update_th), + CONFIGFS_ATTR_ADD(attr_ai_rtt_th1), + CONFIGFS_ATTR_ADD(attr_ai_rtt_th2), + CONFIGFS_ATTR_ADD(attr_cf_rtt_th), + CONFIGFS_ATTR_ADD(attr_sc_cr_th1), + CONFIGFS_ATTR_ADD(attr_sc_cr_th2), + CONFIGFS_ATTR_ADD(attr_l64B_per_rtt), + CONFIGFS_ATTR_ADD(attr_cc_ack_bytes), + CONFIGFS_ATTR_ADD(attr_reduce_cf_rtt_th), + NULL, +}; + +static struct configfs_attribute *bnxt_re_cc_attrs_ext2[] = { + CONFIGFS_ATTR_ADD(attr_advanced), + CONFIGFS_ATTR_ADD(attr_apply), + CONFIGFS_ATTR_ADD(attr_cnp_dscp), + CONFIGFS_ATTR_ADD(attr_cnp_prio), + CONFIGFS_ATTR_ADD(attr_cc_mode), + CONFIGFS_ATTR_ADD(attr_dcn), + CONFIGFS_ATTR_ADD(attr_ecn_enable), + CONFIGFS_ATTR_ADD(attr_g), + CONFIGFS_ATTR_ADD(attr_init_cr), + CONFIGFS_ATTR_ADD(attr_inact_th), + CONFIGFS_ATTR_ADD(attr_init_tr), + CONFIGFS_ATTR_ADD(attr_rtt), + CONFIGFS_ATTR_ADD(attr_roce_dscp), + CONFIGFS_ATTR_ADD(attr_roce_prio), + CONFIGFS_ATTR_ADD(attr_ecn_marking), + CONFIGFS_ATTR_ADD(attr_disable_prio_vlan_tx), + CONFIGFS_ATTR_ADD(attr_inact_th_hi), + CONFIGFS_ATTR_ADD(attr_min_time_bet_cnp), + CONFIGFS_ATTR_ADD(attr_init_cp), + CONFIGFS_ATTR_ADD(attr_tr_update_mode), + CONFIGFS_ATTR_ADD(attr_tr_update_cyls), + CONFIGFS_ATTR_ADD(attr_fr_num_rtts), + CONFIGFS_ATTR_ADD(attr_ai_rate_incr), + CONFIGFS_ATTR_ADD(attr_red_rel_rtts_th), + CONFIGFS_ATTR_ADD(attr_act_rel_cr_th), + CONFIGFS_ATTR_ADD(attr_cr_min_th), + CONFIGFS_ATTR_ADD(attr_bw_avg_weight), + CONFIGFS_ATTR_ADD(attr_act_cr_factor), + CONFIGFS_ATTR_ADD(attr_max_cp_cr_th), + CONFIGFS_ATTR_ADD(attr_cp_bias_en), + CONFIGFS_ATTR_ADD(attr_cp_bias), + CONFIGFS_ATTR_ADD(attr_cnp_ecn), + CONFIGFS_ATTR_ADD(attr_rtt_jitter_en), + CONFIGFS_ATTR_ADD(attr_lbytes_per_usec), + CONFIGFS_ATTR_ADD(attr_reset_cc_cr_th), + CONFIGFS_ATTR_ADD(attr_cr_width), + CONFIGFS_ATTR_ADD(attr_min_quota), + CONFIGFS_ATTR_ADD(attr_max_quota), + CONFIGFS_ATTR_ADD(attr_abs_max_quota), + CONFIGFS_ATTR_ADD(attr_tr_lb), + CONFIGFS_ATTR_ADD(attr_cr_prob_fac), + CONFIGFS_ATTR_ADD(attr_tr_prob_fac), + CONFIGFS_ATTR_ADD(attr_fair_cr_th), + CONFIGFS_ATTR_ADD(attr_red_div), + CONFIGFS_ATTR_ADD(attr_cnp_ratio_th), + CONFIGFS_ATTR_ADD(attr_exp_ai_rtts), + CONFIGFS_ATTR_ADD(attr_exp_crcp_ratio), + CONFIGFS_ATTR_ADD(attr_rt_en), + CONFIGFS_ATTR_ADD(attr_cp_exp_update_th), + CONFIGFS_ATTR_ADD(attr_ai_rtt_th1), + CONFIGFS_ATTR_ADD(attr_ai_rtt_th2), + CONFIGFS_ATTR_ADD(attr_cf_rtt_th), + CONFIGFS_ATTR_ADD(attr_sc_cr_th1), + CONFIGFS_ATTR_ADD(attr_sc_cr_th2), + CONFIGFS_ATTR_ADD(attr_l64B_per_rtt), + CONFIGFS_ATTR_ADD(attr_cc_ack_bytes), + CONFIGFS_ATTR_ADD(attr_reduce_cf_rtt_th), + NULL, +}; + +static struct bnxt_re_dev *cfgfs_update_auxbus_re(struct bnxt_re_dev *rdev, + u32 gsi_mode, u8 wqe_mode) +{ + struct bnxt_re_dev *new_rdev = NULL; + struct net_device *netdev; + struct bnxt_en_dev *en_dev; + struct auxiliary_device *adev; + int rc = 0; + + /* check again if context is changed by roce driver */ + if (!rdev) + return NULL; + + mutex_lock(&bnxt_re_mutex); + en_dev = rdev->en_dev; + netdev = en_dev->net; + adev = rdev->adev; + + /* Remove and add the device. 
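+	 * The requested GSI QP mode or WQE mode only takes effect on a fresh
+	 * probe, so the auxiliary device is torn down and re-created here
+	 * with the new settings.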
+ * Before removing unregister with IB. + */ + bnxt_re_ib_uninit(rdev); + bnxt_re_remove_device(rdev, BNXT_RE_COMPLETE_REMOVE, adev); + rc = bnxt_re_add_device(&new_rdev, netdev, NULL, gsi_mode, + BNXT_RE_COMPLETE_INIT, wqe_mode, + adev); + if (rc) + goto clean_dev; + _bnxt_re_ib_init(new_rdev); + _bnxt_re_ib_init2(new_rdev); + + /* update the auxdev container */ + rdev = new_rdev; + + /* Don't crash for usermodes. + * Return gracefully they will retry + */ + if (rtnl_trylock()) { + bnxt_re_get_link_speed(rdev); + rtnl_unlock(); + } else { + pr_err("Setting link speed failed, retry config again"); + goto clean_dev; + } + mutex_unlock(&bnxt_re_mutex); + return rdev; +clean_dev: + mutex_unlock(&bnxt_re_mutex); + if (new_rdev) { + bnxt_re_ib_uninit(rdev); + bnxt_re_remove_device(rdev, BNXT_RE_COMPLETE_REMOVE, adev); + } + return NULL; +} + +#ifdef HAVE_OLD_CONFIGFS_API +static ssize_t bnxt_re_ccgrp_attr_show(struct config_item *item, + struct configfs_attribute *attr, + char *page) +{ + struct configfs_attr *ccgrp_attr = + container_of(attr, struct configfs_attr, attr); + ssize_t rc = -EINVAL; + + if (!ccgrp_attr) + goto out; + + if (ccgrp_attr->show) + rc = ccgrp_attr->show(item, page); +out: + return rc; +} + +static ssize_t bnxt_re_ccgrp_attr_store(struct config_item *item, + struct configfs_attribute *attr, + const char *page, size_t count) +{ + struct configfs_attr *ccgrp_attr = + container_of(attr, struct configfs_attr, attr); + ssize_t rc = -EINVAL; + + if (!ccgrp_attr) + goto out; + if (ccgrp_attr->store) + rc = ccgrp_attr->store(item, page, count); +out: + return rc; +} + +static struct configfs_item_operations bnxt_re_ccgrp_ops = { + .show_attribute = bnxt_re_ccgrp_attr_show, + .store_attribute = bnxt_re_ccgrp_attr_store, +}; + +#else +static struct configfs_item_operations bnxt_re_ccgrp_ops = { +}; +#endif + +static struct config_item_type bnxt_re_ccgrp_type = { + .ct_attrs = bnxt_re_cc_attrs, + .ct_item_ops = &bnxt_re_ccgrp_ops, + .ct_owner = THIS_MODULE, +}; + +static struct config_item_type bnxt_re_ccgrp_type_ext = { + .ct_attrs = bnxt_re_cc_attrs_ext, + .ct_item_ops = &bnxt_re_ccgrp_ops, + .ct_owner = THIS_MODULE, +}; + +static struct config_item_type bnxt_re_ccgrp_type_ext2 = { + .ct_attrs = bnxt_re_cc_attrs_ext2, + .ct_item_ops = &bnxt_re_ccgrp_ops, + .ct_owner = THIS_MODULE, +}; + +static int make_bnxt_re_cc(struct bnxt_re_port_group *portgrp, + struct bnxt_re_dev *rdev, u32 gidx) +{ + struct config_item_type *grp_type; + struct bnxt_re_cc_group *ccgrp; + int rc; + + /* + * TODO: If there is confirmed use case that users need to read cc + * params from VF instance, we would enable cc node for VF with + * selected params. 
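+	 * For now the cc configfs node is created only for PFs; VFs return
+	 * early without one.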
+ */ + if (rdev->is_virtfn) + return 0; + + ccgrp = kzalloc(sizeof(*ccgrp), GFP_KERNEL); + if (!ccgrp) { + rc = -ENOMEM; + goto out; + } + + ccgrp->rdev = rdev; + grp_type = &bnxt_re_ccgrp_type; + if (_is_chip_gen_p5_p7(rdev->chip_ctx)) { + if (BNXT_RE_DCN_ENABLED(rdev->rcfw.res)) + grp_type = &bnxt_re_ccgrp_type_ext2; + else + grp_type = &bnxt_re_ccgrp_type_ext; + } + + config_group_init_type_name(&ccgrp->group, "cc", grp_type); +#ifndef HAVE_CFGFS_ADD_DEF_GRP + portgrp->nportgrp.default_groups = portgrp->default_grp; + portgrp->default_grp[gidx] = &ccgrp->group; + portgrp->default_grp[gidx + 1] = NULL; +#else + configfs_add_default_group(&ccgrp->group, &portgrp->nportgrp); +#endif + portgrp->ccgrp = ccgrp; + ccgrp->portgrp = portgrp; + + return 0; +out: + kfree(ccgrp); + return rc; +} + +static ssize_t min_tx_depth_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + return sprintf(buf, "%u\n", rdev->min_tx_depth); +} + +static ssize_t min_tx_depth_store(struct config_item *item, const char *buf, + size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + unsigned int val; + int rc; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + rc = sscanf(buf, "%u\n", &val); + if (val > rdev->dev_attr->max_qp_wqes || rc <= 0) { + dev_err(rdev_to_dev(rdev), + "min_tx_depth %u cannot be greater than max_qp_wqes %u", + val, rdev->dev_attr->max_qp_wqes); + return -EINVAL; + } + + rdev->min_tx_depth = val; + + return strnlen(buf, count); +} + +CONFIGFS_ATTR(, min_tx_depth); + +static ssize_t stats_query_sec_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + return sprintf(buf, "%#x\n", rdev->stats.stats_query_sec); +} + +static ssize_t stats_query_sec_store(struct config_item *item, const char *buf, + size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + unsigned int val; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + + if (sscanf(buf, "%x\n", &val) != 1) + return -EINVAL; + /* Valid values are 0 - 8 now. default value is 1 + * 0 means disable periodic query. 
+ * 1 means bnxt_re_worker queries every sec, 2 - every 2 sec and so on + */ + + if (val > 8) + return -EINVAL; + + rdev->stats.stats_query_sec = val; + + return strnlen(buf, count); +} + +CONFIGFS_ATTR(, stats_query_sec); + +static ssize_t gsi_qp_mode_store(struct config_item *item, + const char *buf, size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev, *new_rdev = NULL; + struct mutex *mutexp; /* Aquire subsys mutex */ + u32 gsi_mode; + u8 wqe_mode; + int rc; + + if (!ccgrp) + return -EINVAL; + + /* Hold the subsytem lock to serialize */ + mutexp = &item->ci_group->cg_subsys->su_mutex; + mutex_lock(mutexp); + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + goto ret_err; + if (_is_chip_gen_p5_p7(rdev->chip_ctx)) + goto ret_err; + sscanf(buf, "%x\n", (unsigned int *)&gsi_mode); + if (!gsi_mode || gsi_mode > BNXT_RE_GSI_MODE_ROCE_V2_IPV6) + goto ret_err; + if (gsi_mode == rdev->gsi_ctx.gsi_qp_mode) + goto done; + wqe_mode = rdev->chip_ctx->modes.wqe_mode; + + if (rdev->binfo) { + struct bnxt_re_bond_info binfo; + struct netdev_bonding_info *nbinfo; + + memcpy(&binfo, rdev->binfo, sizeof(*(rdev->binfo))); + nbinfo = &binfo.nbinfo; + bnxt_re_destroy_lag(&rdev); + bnxt_re_create_base_interface(&binfo, true); + bnxt_re_create_base_interface(&binfo, false); + + /* TODO: Wait for sched count to become 0 on both rdevs */ + msleep(10000); + + /* Recreate lag. */ + rc = bnxt_re_create_lag(&nbinfo->master, &nbinfo->slave, + nbinfo, binfo.slave2, &new_rdev, + gsi_mode, wqe_mode); + if (rc) + dev_warn(rdev_to_dev(rdev), "%s: failed to create lag %d\n", + __func__, rc); + } else { + /* driver functions takes care of locking */ + new_rdev = cfgfs_update_auxbus_re(rdev, gsi_mode, wqe_mode); + if (!new_rdev) + goto ret_err; + } + + if (new_rdev) + ccgrp->rdev = new_rdev; +done: + mutex_unlock(mutexp); + return strnlen(buf, count); +ret_err: + mutex_unlock(mutexp); + return -EINVAL; +} + +static const char *bnxt_re_mode_to_str [] = { + "GSI Mode Invalid", + "GSI Mode All", + "GSI Mode RoCE_v1 Only", + "GSI Mode RoCE_v2 IPv4 Only", + "GSI Mode RoCE_v2 IPv6 Only", + "GSI Mode UD" +}; + +static inline const char * mode_to_str(u8 gsi_mode) +{ + return bnxt_re_mode_to_str[gsi_mode]; +} + +static ssize_t gsi_qp_mode_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + int bytes = 0; + u8 gsi_mode; + + if (!ccgrp) + return -EINVAL; + + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + /* Little too much? */ + gsi_mode = rdev->gsi_ctx.gsi_qp_mode; + bytes += sprintf(buf + bytes, "%s (%#x): %s\n", + mode_to_str(BNXT_RE_GSI_MODE_ALL), + (int)BNXT_RE_GSI_MODE_ALL, + gsi_mode == BNXT_RE_GSI_MODE_ALL ? + "Enabled" : "Disabled"); + bytes += sprintf(buf + bytes, "%s (%#x): %s\n", + mode_to_str(BNXT_RE_GSI_MODE_ROCE_V1), + (int)BNXT_RE_GSI_MODE_ROCE_V1, + gsi_mode == BNXT_RE_GSI_MODE_ROCE_V1 ? + "Enabled" : "Disabled"); + bytes += sprintf(buf + bytes, "%s (%#x): %s\n", + mode_to_str(BNXT_RE_GSI_MODE_ROCE_V2_IPV4), + (int)BNXT_RE_GSI_MODE_ROCE_V2_IPV4, + gsi_mode == BNXT_RE_GSI_MODE_ROCE_V2_IPV4 ? + "Enabled" : "Disabled"); + bytes += sprintf(buf + bytes, "%s (%#x): %s\n", + mode_to_str(BNXT_RE_GSI_MODE_ROCE_V2_IPV6), + (int)BNXT_RE_GSI_MODE_ROCE_V2_IPV6, + gsi_mode == BNXT_RE_GSI_MODE_ROCE_V2_IPV6 ? 
+ "Enabled" : "Disabled"); + bytes += sprintf(buf + bytes, "%s (%#x): %s\n", + mode_to_str(BNXT_RE_GSI_MODE_UD), + (int)BNXT_RE_GSI_MODE_UD, + gsi_mode == BNXT_RE_GSI_MODE_UD ? + "Enabled" : "Disabled"); + return bytes; +} +CONFIGFS_ATTR(, gsi_qp_mode); + +static const char *bnxt_re_wqe_mode_to_str [] = { + "STATIC", "VARIABLE" +}; + +static ssize_t wqe_mode_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_qplib_drv_modes *drv_mode; + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + + drv_mode = &rdev->chip_ctx->modes; + return sprintf(buf, "sq wqe mode: %s (%#x)\n", + bnxt_re_wqe_mode_to_str[drv_mode->wqe_mode], + drv_mode->wqe_mode); +} + +static ssize_t wqe_mode_store(struct config_item *item, const char *buf, + size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev, *new_rdev = NULL; + struct bnxt_qplib_drv_modes *drv_mode; + struct mutex *mutexp; /* subsys lock */ + int mode, rc; + u8 gsi_mode; + + if (!ccgrp) + return -EINVAL; + /* Hold the subsys lock */ + mutexp = &item->ci_group->cg_subsys->su_mutex; + mutex_lock(mutexp); + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + goto ret_err; + rc = sscanf(buf, "%d\n", &mode); + if (mode < 0 || mode > BNXT_QPLIB_WQE_MODE_VARIABLE || rc <= 0) + goto ret_err; + if (mode == BNXT_QPLIB_WQE_MODE_VARIABLE && + !_is_chip_gen_p5_p7(rdev->chip_ctx)) + goto ret_err; + + drv_mode = &rdev->chip_ctx->modes; + if (drv_mode->wqe_mode == mode) + goto done; + + gsi_mode = rdev->gsi_ctx.gsi_qp_mode; + new_rdev = cfgfs_update_auxbus_re(rdev, gsi_mode, mode); + if (!new_rdev) + goto ret_err; + ccgrp->rdev = new_rdev; +done: + mutex_unlock(mutexp); + return strnlen(buf, count); +ret_err: + mutex_unlock(mutexp); + return -EINVAL; +} +CONFIGFS_ATTR(, wqe_mode); + +static ssize_t acc_tx_path_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_qplib_drv_modes *drv_mode; + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + + drv_mode = &rdev->chip_ctx->modes; + return sprintf(buf, "Accelerated transmit path: %s\n", + drv_mode->te_bypass ? 
"Enabled" : "Disabled"); +} + +static ssize_t acc_tx_path_store(struct config_item *item, const char *buf, + size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_en_dev_info *en_info; + struct bnxt_qplib_drv_modes *drv_mode; + struct bnxt_re_dev *rdev; + unsigned int mode; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + sscanf(buf, "%x\n", &mode); + if (mode >= 2) + return -EINVAL; + if (mode) { + if (!_is_chip_gen_p5_p7(rdev->chip_ctx)) + return -EINVAL; + + if (_is_chip_p7(rdev->chip_ctx) && BNXT_EN_HW_LAG(rdev->en_dev)) + return -EINVAL; + } + + drv_mode = &rdev->chip_ctx->modes; + drv_mode->te_bypass = mode; + + /* Update the container */ + en_info = auxiliary_get_drvdata(rdev->adev); + if (en_info) + en_info->te_bypass = (mode == 0x1); + + return strnlen(buf, count); +} +CONFIGFS_ATTR(, acc_tx_path); + +static ssize_t en_qp_dbg_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + return sprintf(buf, "%#x\n", rdev->en_qp_dbg); +} + +static ssize_t en_qp_dbg_store(struct config_item *item, const char *buf, + size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + unsigned int val = 0; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + if (sscanf(buf, "%x\n", &val) != 1) + return -EINVAL; + if (val > 1) + return -EINVAL; + + if (rdev->en_qp_dbg && val == 0) + bnxt_re_rem_dbg_files(rdev); + else if (!rdev->en_qp_dbg && val) + bnxt_re_add_dbg_files(rdev); + + rdev->en_qp_dbg = val; + + return strnlen(buf, count); +} + +CONFIGFS_ATTR(, en_qp_dbg); + +static ssize_t user_dbr_drop_recov_timeout_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + return sprintf(buf, "%d\n", rdev->user_dbr_drop_recov_timeout); +} + +static ssize_t user_dbr_drop_recov_timeout_store(struct config_item *item, const char *buf, + size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + unsigned int val = 0; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + if (sscanf(buf, "%d\n", &val) != 1) + return -EINVAL; + if ((val < BNXT_DBR_DROP_MIN_TIMEOUT) || (val > BNXT_DBR_DROP_MAX_TIMEOUT)) + return -EINVAL; + + rdev->user_dbr_drop_recov_timeout = val; + + return strnlen(buf, count); +} + +CONFIGFS_ATTR(, user_dbr_drop_recov_timeout); + +static ssize_t user_dbr_drop_recov_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + return sprintf(buf, "%#x\n", rdev->user_dbr_drop_recov); +} + +static ssize_t user_dbr_drop_recov_store(struct config_item *item, const char *buf, size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + unsigned int val = 0; + int rc = 0; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + if (sscanf(buf, "%x\n", &val) != 1) + return -EINVAL; + if (val 
> 1) + return -EINVAL; + + if (!val) { + /* Disable DBR drop recovery */ + if (!rdev->user_dbr_drop_recov) { + dev_info(rdev_to_dev(rdev), + "User DBR drop recovery already disabled. Returning\n"); + goto exit; + } + rdev->user_dbr_drop_recov = false; + } else { + if (rdev->user_dbr_drop_recov) { + dev_info(rdev_to_dev(rdev), + "User DBR drop recovery already enabled. Returning\n"); + goto exit; + } + + if (!rdev->dbr_drop_recov) { + dev_info(rdev_to_dev(rdev), + "Can not enable User DBR drop recovery as FW doesn't support\n"); + rdev->user_dbr_drop_recov = false; + rc = -EINVAL; + goto exit; + } + + rdev->user_dbr_drop_recov = true; + } +exit: + return (rc ? -EINVAL : strnlen(buf, count)); +} + +CONFIGFS_ATTR(, user_dbr_drop_recov); + +static ssize_t dbr_pacing_enable_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + + if (!rdev->dbr_bar_addr) + dev_info(rdev_to_dev(rdev), + "DBR pacing is not supported on this device\n"); + return sprintf(buf, "%#x\n", rdev->dbr_pacing); +} + +static ssize_t dbr_pacing_enable_store(struct config_item *item, const char *buf, + size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + struct bnxt_qplib_nq *nq; + unsigned int val = 0; + int rc = 0; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + if (sscanf(buf, "%x\n", &val) != 1) + return -EINVAL; + if (val > 1) + return -EINVAL; + + if (!rdev->dbr_bar_addr) { + dev_info(rdev_to_dev(rdev), + "DBR pacing is not supported on this device\n"); + return -EINVAL; + } + + nq = &rdev->nqr->nq[0]; + if (!val) { + /* Disable DBR Pacing */ + if (!rdev->dbr_pacing) { + dev_info(rdev_to_dev(rdev), + "DBR pacing already disabled. Returning\n"); + goto exit; + } + if (!bnxt_qplib_dbr_pacing_ext_en(rdev->chip_ctx)) + rc = bnxt_re_disable_dbr_pacing(rdev); + } else { + if (rdev->dbr_pacing) { + dev_info(rdev_to_dev(rdev), + "DBR pacing already enabled. Returning\n"); + goto exit; + } + if (!bnxt_qplib_dbr_pacing_ext_en(rdev->chip_ctx)) + rc = bnxt_re_enable_dbr_pacing(rdev); + else + bnxt_re_set_dbq_throttling_reg(rdev, nq->ring_id, + rdev->dbq_watermark); + } + rdev->dbr_pacing = !!val; +exit: + return (rc ? 
-EINVAL : strnlen(buf, count)); +} + +CONFIGFS_ATTR(, dbr_pacing_enable); + +static ssize_t dbr_pacing_dbq_watermark_show(struct config_item *item, + char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + return sprintf(buf, "%#x\n", rdev->dbq_watermark); +} + +static ssize_t dbr_pacing_dbq_watermark_store(struct config_item *item, + const char *buf, size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + struct bnxt_qplib_nq *nq; + unsigned int val = 0; + int rc = 0; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + if (sscanf(buf, "%x\n", &val) != 1) + return -EINVAL; + if (val > BNXT_RE_PACING_DBQ_HIGH_WATERMARK) + return -EINVAL; + + rdev->dbq_watermark = val; + + if (bnxt_qplib_dbr_pacing_ext_en(rdev->chip_ctx)) { + nq = &rdev->nqr->nq[0]; + bnxt_re_set_dbq_throttling_reg(rdev, nq->ring_id, rdev->dbq_watermark); + } else { + if (bnxt_re_enable_dbr_pacing(rdev)) { + dev_err(rdev_to_dev(rdev), + "Failed to set dbr pacing config\n"); + rc = -EIO; + } + } + return rc ? rc : strnlen(buf, count); +} + +CONFIGFS_ATTR(, dbr_pacing_dbq_watermark); + +static ssize_t dbr_pacing_time_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + return sprintf(buf, "%#x\n", rdev->dbq_pacing_time); +} + +static ssize_t dbr_pacing_time_store(struct config_item *item, const char *buf, + size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + unsigned int val = 0; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + if (sscanf(buf, "%x\n", &val) != 1) + return -EINVAL; + + rdev->dbq_pacing_time = val; + return strnlen(buf, count); +} + +CONFIGFS_ATTR(, dbr_pacing_time); + +static ssize_t dbr_pacing_primary_fn_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + return sprintf(buf, "%#x\n", + bnxt_qplib_dbr_pacing_is_primary_pf(rdev->chip_ctx)); +} + +static ssize_t dbr_pacing_primary_fn_store(struct config_item *item, const char *buf, + size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + unsigned int val = 0; + int rc = 0; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + if (sscanf(buf, "%x\n", &val) != 1) + return -EINVAL; + + if (bnxt_qplib_dbr_pacing_ext_en(rdev->chip_ctx)) { + dev_info(rdev_to_dev(rdev), + "FW is responsible for picking the primary function\n"); + return -EOPNOTSUPP; + } + if (!val) { + if (bnxt_qplib_dbr_pacing_is_primary_pf(rdev->chip_ctx)) { + rc = bnxt_re_disable_dbr_pacing(rdev); + bnxt_qplib_dbr_pacing_set_primary_pf(rdev->chip_ctx, 0); + } + } else { + rc = bnxt_re_enable_dbr_pacing(rdev); + bnxt_qplib_dbr_pacing_set_primary_pf(rdev->chip_ctx, 1); + } + return rc ? 
rc : strnlen(buf, count); +} + +CONFIGFS_ATTR(, dbr_pacing_primary_fn); + +static ssize_t dbr_pacing_algo_threshold_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + return sprintf(buf, "%#x\n", rdev->pacing_algo_th); +} + +static ssize_t dbr_pacing_algo_threshold_store(struct config_item *item, + const char *buf, size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + unsigned int val = 0; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + if (sscanf(buf, "%x\n", &val) != 1) + return -EINVAL; + if (val > rdev->qplib_res.pacing_data->fifo_max_depth) + return -EINVAL; + + rdev->pacing_algo_th = val; + bnxt_re_set_def_pacing_threshold(rdev); + + return strnlen(buf, count); +} + +CONFIGFS_ATTR(, dbr_pacing_algo_threshold); + +static ssize_t dbr_pacing_en_int_threshold_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + return sprintf(buf, "%#x\n", rdev->pacing_en_int_th); +} + +static ssize_t dbr_pacing_en_int_threshold_store(struct config_item *item, + const char *buf, size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + unsigned int val = 0; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + if (sscanf(buf, "%x\n", &val) != 1) + return -EINVAL; + if (val > rdev->qplib_res.pacing_data->fifo_max_depth) + return -EINVAL; + + rdev->pacing_en_int_th = val; + + return strnlen(buf, count); +} + +CONFIGFS_ATTR(, dbr_pacing_en_int_threshold); + +static ssize_t dbr_def_do_pacing_show(struct config_item *item, char *buf) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + + if (!ccgrp) + return -EINVAL; + + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + return sprintf(buf, "%#x\n", rdev->dbr_def_do_pacing); +} + +static ssize_t dbr_def_do_pacing_store(struct config_item *item, const char *buf, + size_t count) +{ + struct bnxt_re_cc_group *ccgrp = __get_cc_group(item); + struct bnxt_re_dev *rdev; + unsigned int val = 0; + + if (!ccgrp) + return -EINVAL; + rdev = bnxt_re_get_valid_rdev(ccgrp); + if (!rdev) + return -EINVAL; + if (sscanf(buf, "%x\n", &val) != 1) + return -EINVAL; + if (val > BNXT_RE_MAX_DBR_DO_PACING) + return -EINVAL; + rdev->dbr_def_do_pacing = val; + bnxt_re_set_def_do_pacing(rdev); + return strnlen(buf, count); +} + +CONFIGFS_ATTR(, dbr_def_do_pacing); + +static struct configfs_attribute *bnxt_re_tun_attrs[] = { + CONFIGFS_ATTR_ADD(attr_min_tx_depth), + CONFIGFS_ATTR_ADD(attr_stats_query_sec), + CONFIGFS_ATTR_ADD(attr_gsi_qp_mode), + CONFIGFS_ATTR_ADD(attr_wqe_mode), + CONFIGFS_ATTR_ADD(attr_acc_tx_path), + CONFIGFS_ATTR_ADD(attr_en_qp_dbg), + CONFIGFS_ATTR_ADD(attr_dbr_pacing_enable), + CONFIGFS_ATTR_ADD(attr_dbr_pacing_dbq_watermark), + CONFIGFS_ATTR_ADD(attr_dbr_pacing_time), + CONFIGFS_ATTR_ADD(attr_dbr_pacing_primary_fn), + CONFIGFS_ATTR_ADD(attr_dbr_pacing_algo_threshold), + CONFIGFS_ATTR_ADD(attr_dbr_pacing_en_int_threshold), + CONFIGFS_ATTR_ADD(attr_dbr_def_do_pacing), + CONFIGFS_ATTR_ADD(attr_user_dbr_drop_recov), + 
CONFIGFS_ATTR_ADD(attr_user_dbr_drop_recov_timeout), + NULL, +}; + +static struct configfs_attribute *bnxt_re_p7_tun_attrs[] = { + CONFIGFS_ATTR_ADD(attr_min_tx_depth), + CONFIGFS_ATTR_ADD(attr_stats_query_sec), + CONFIGFS_ATTR_ADD(attr_gsi_qp_mode), + CONFIGFS_ATTR_ADD(attr_acc_tx_path), + CONFIGFS_ATTR_ADD(attr_en_qp_dbg), + CONFIGFS_ATTR_ADD(attr_dbr_pacing_enable), + CONFIGFS_ATTR_ADD(attr_dbr_pacing_time), + CONFIGFS_ATTR_ADD(attr_dbr_pacing_algo_threshold), + CONFIGFS_ATTR_ADD(attr_dbr_pacing_en_int_threshold), + CONFIGFS_ATTR_ADD(attr_dbr_def_do_pacing), + CONFIGFS_ATTR_ADD(attr_user_dbr_drop_recov), + CONFIGFS_ATTR_ADD(attr_user_dbr_drop_recov_timeout), + NULL, +}; + +#ifdef HAVE_OLD_CONFIGFS_API +static ssize_t bnxt_re_tungrp_attr_show(struct config_item *item, + struct configfs_attribute *attr, + char *page) +{ + struct configfs_attr *tungrp_attr = + container_of(attr, struct configfs_attr, attr); + ssize_t rc = -EINVAL; + + if (!tungrp_attr) + goto out; + + if (tungrp_attr->show) + rc = tungrp_attr->show(item, page); +out: + return rc; +} + +static ssize_t bnxt_re_tungrp_attr_store(struct config_item *item, + struct configfs_attribute *attr, + const char *page, size_t count) +{ + struct configfs_attr *tungrp_attr = + container_of(attr, struct configfs_attr, attr); + ssize_t rc = -EINVAL; + + if (!tungrp_attr) + goto out; + if (tungrp_attr->store) + rc = tungrp_attr->store(item, page, count); +out: + return rc; +} + +static struct configfs_item_operations bnxt_re_tungrp_ops = { + .show_attribute = bnxt_re_tungrp_attr_show, + .store_attribute = bnxt_re_tungrp_attr_store, +}; + +#else +static struct configfs_item_operations bnxt_re_tungrp_ops = { +}; +#endif + +static struct config_item_type bnxt_re_tungrp_type = { + .ct_attrs = bnxt_re_tun_attrs, + .ct_item_ops = &bnxt_re_tungrp_ops, + .ct_owner = THIS_MODULE, +}; + +static struct config_item_type bnxt_re_p7_tungrp_type = { + .ct_attrs = bnxt_re_p7_tun_attrs, + .ct_item_ops = &bnxt_re_tungrp_ops, + .ct_owner = THIS_MODULE, +}; + +static int make_bnxt_re_tunables(struct bnxt_re_port_group *portgrp, + struct bnxt_re_dev *rdev, u32 gidx) +{ + struct bnxt_re_tunable_group *tungrp; + int rc; + + tungrp = kzalloc(sizeof(*tungrp), GFP_KERNEL); + if (!tungrp) { + rc = -ENOMEM; + goto out; + } + + tungrp->rdev = rdev; + if (_is_chip_p7(rdev->chip_ctx)) + config_group_init_type_name(&tungrp->group, "tunables", + &bnxt_re_p7_tungrp_type); + else + config_group_init_type_name(&tungrp->group, "tunables", + &bnxt_re_tungrp_type); +#ifndef HAVE_CFGFS_ADD_DEF_GRP + portgrp->nportgrp.default_groups = portgrp->default_grp; + portgrp->default_grp[gidx] = &tungrp->group; + portgrp->default_grp[gidx + 1] = NULL; +#else + configfs_add_default_group(&tungrp->group, &portgrp->nportgrp); +#endif + portgrp->tungrp = tungrp; + tungrp->portgrp = portgrp; + + return 0; +out: + kfree(tungrp); + return rc; +} + + +static void bnxt_re_release_nport_group(struct bnxt_re_port_group *portgrp) +{ + kfree(portgrp->ccgrp); + kfree(portgrp->tungrp); +} + +static struct config_item_type bnxt_re_nportgrp_type = { + .ct_owner = THIS_MODULE, +}; + +static int make_bnxt_re_ports(struct bnxt_re_dev_group *devgrp, + struct bnxt_re_dev *rdev) +{ +#ifndef HAVE_CFGFS_ADD_DEF_GRP + struct config_group **portsgrp = NULL; +#endif + struct bnxt_re_port_group *ports; + struct ib_device *ibdev; + int nports, rc, indx; + + if (!rdev) + return -ENODEV; + ibdev = &rdev->ibdev; + devgrp->nports = ibdev->phys_port_cnt; + nports = devgrp->nports; + ports = kcalloc(nports, sizeof(*ports), 
GFP_KERNEL); + if (!ports) { + rc = -ENOMEM; + goto out; + } + +#ifndef HAVE_CFGFS_ADD_DEF_GRP + portsgrp = kcalloc(nports + 1, sizeof(*portsgrp), GFP_KERNEL); + if (!portsgrp) { + rc = -ENOMEM; + goto out; + } +#endif + for (indx = 0; indx < nports; indx++) { + char port_name[10]; + ports[indx].port_num = indx + 1; + snprintf(port_name, sizeof(port_name), "%u", indx + 1); + ports[indx].devgrp = devgrp; + config_group_init_type_name(&ports[indx].nportgrp, + port_name, &bnxt_re_nportgrp_type); + rc = make_bnxt_re_cc(&ports[indx], rdev, 0); + if (rc) + goto out; + rc = make_bnxt_re_tunables(&ports[indx], rdev, 1); + if (rc) + goto out; +#ifndef HAVE_CFGFS_ADD_DEF_GRP + portsgrp[indx] = &ports[indx].nportgrp; +#else + configfs_add_default_group(&ports[indx].nportgrp, + &devgrp->port_group); +#endif + } + +#ifndef HAVE_CFGFS_ADD_DEF_GRP + portsgrp[indx] = NULL; + devgrp->default_portsgrp = portsgrp; +#endif + devgrp->ports = ports; + + return 0; +out: +#ifndef HAVE_CFGFS_ADD_DEF_GRP + kfree(portsgrp); +#endif + kfree(ports); + return rc; +} + +static void bnxt_re_release_ports_group(struct bnxt_re_dev_group *devgrp) +{ + int i; + + /* + * nport group is dynamically created along with ports creation, so + * that it should also be released along with ports group release. + */ + for (i = 0; i < devgrp->nports; i++) + bnxt_re_release_nport_group(&devgrp->ports[i]); + +#ifndef HAVE_CFGFS_ADD_DEF_GRP + kfree(devgrp->default_portsgrp); + devgrp->default_portsgrp = NULL; +#endif + kfree(devgrp->ports); + devgrp->ports = NULL; +} + +static void bnxt_re_release_device_group(struct config_item *item) +{ + struct config_group *group = container_of(item, struct config_group, + cg_item); + struct bnxt_re_dev_group *devgrp = + container_of(group, struct bnxt_re_dev_group, + dev_group); + + /* + * ports group is dynamically created along dev group creation, so that + * it should also be released along with dev group release. 
+ */ + bnxt_re_release_ports_group(devgrp); + + kfree(devgrp); +} + +static struct config_item_type bnxt_re_ports_group_type = { + .ct_owner = THIS_MODULE, +}; + +static struct configfs_item_operations bnxt_re_dev_item_ops = { + .release = bnxt_re_release_device_group +}; + +static struct config_item_type bnxt_re_dev_group_type = { + .ct_item_ops = &bnxt_re_dev_item_ops, + .ct_owner = THIS_MODULE, +}; + +static struct config_group *make_bnxt_re_dev(struct config_group *group, + const char *name) +{ + struct bnxt_re_dev_group *devgrp = NULL; + struct bnxt_re_dev *rdev; + int rc = -ENODEV; + + rdev = __get_rdev_from_name(name); + if (PTR_ERR(rdev) == -ENODEV) + goto out; + + devgrp = kzalloc(sizeof(*devgrp), GFP_KERNEL); + if (!devgrp) { + rc = -ENOMEM; + goto out; + } + + if (strlen(name) >= sizeof(devgrp->name)) { + rc = -EINVAL; + goto out; + } + strcpy(devgrp->name, name); + config_group_init_type_name(&devgrp->port_group, "ports", + &bnxt_re_ports_group_type); + rc = make_bnxt_re_ports(devgrp, rdev); + if (rc) + goto out; + config_group_init_type_name(&devgrp->dev_group, name, + &bnxt_re_dev_group_type); +#ifndef HAVE_CFGFS_ADD_DEF_GRP + devgrp->port_group.default_groups = devgrp->default_portsgrp; + devgrp->dev_group.default_groups = devgrp->default_devgrp; + devgrp->default_devgrp[0] = &devgrp->port_group; + devgrp->default_devgrp[1] = NULL; +#else + configfs_add_default_group(&devgrp->port_group, + &devgrp->dev_group); +#endif + + return &devgrp->dev_group; +out: + kfree(devgrp); + return ERR_PTR(rc); +} + +static void drop_bnxt_re_dev(struct config_group *group, struct config_item *item) +{ + config_item_put(item); +} + +static struct configfs_group_operations bnxt_re_group_ops = { + .make_group = &make_bnxt_re_dev, + .drop_item = &drop_bnxt_re_dev +}; + +static struct config_item_type bnxt_re_subsys_type = { + .ct_group_ops = &bnxt_re_group_ops, + .ct_owner = THIS_MODULE, +}; + +static struct configfs_subsystem bnxt_re_subsys = { + .su_group = { + .cg_item = { + .ci_namebuf = "bnxt_re", + .ci_type = &bnxt_re_subsys_type, + }, + }, +}; + +int bnxt_re_configfs_init(void) +{ + config_group_init(&bnxt_re_subsys.su_group); + mutex_init(&bnxt_re_subsys.su_mutex); + return configfs_register_subsystem(&bnxt_re_subsys); +} + +void bnxt_re_configfs_exit(void) +{ + configfs_unregister_subsystem(&bnxt_re_subsys); +} diff --git a/bnxt_re-1.10.3-229.0.139.0/configfs.h b/bnxt_re-1.10.3-229.0.139.0/configfs.h new file mode 100644 index 0000000..afd624e --- /dev/null +++ b/bnxt_re-1.10.3-229.0.139.0/configfs.h @@ -0,0 +1,112 @@ +/* + * Copyright (c) 2015-2023, Broadcom. All rights reserved. The term + * Broadcom refers to Broadcom Inc. and/or its subsidiaries. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * + * Description: defines data-structure for configfs interface + */ +#ifndef __CONFIGFS_H__ +#define __CONFIGFS_H__ + +#include +#include + +#include +#include +#include +#include + +#include "compat.h" +#include "bnxt_ulp.h" +#include "roce_hsi.h" +#include "qplib_res.h" +#include "qplib_sp.h" +#include "qplib_fp.h" +#include "qplib_rcfw.h" +#include "bnxt_re.h" + +#define BNXT_RE_MAX_CONFIGFS_ENTRIES 3 +#define BNXT_DBR_DROP_MIN_TIMEOUT 1 /* 1 ms */ +#define BNXT_DBR_DROP_MAX_TIMEOUT 1000 /* 1000 ms */ + +extern struct list_head bnxt_re_dev_list; +extern struct mutex bnxt_re_mutex; + +enum bnxt_re_configfs_cmd { + BNXT_RE_MODIFY_CC = 0x01, +}; + +struct bnxt_re_cc_group; +struct bnxt_re_port_group; +struct bnxt_re_dev_group; + +struct bnxt_re_cc_group +{ + struct bnxt_re_dev *rdev; + struct bnxt_re_port_group *portgrp; + struct config_group group; +}; + +struct bnxt_re_tunable_group { + struct bnxt_re_dev *rdev; + struct bnxt_re_port_group *portgrp; + struct config_group group; +}; + +struct bnxt_re_port_group +{ + unsigned int port_num; + struct bnxt_re_dev_group *devgrp; + struct bnxt_re_cc_group *ccgrp; + struct bnxt_re_tunable_group *tungrp; + struct config_group nportgrp; +#ifndef HAVE_CFGFS_ADD_DEF_GRP + struct config_group *default_grp[BNXT_RE_MAX_CONFIGFS_ENTRIES]; +#endif +}; + +struct bnxt_re_dev_group +{ + char name[IB_DEVICE_NAME_MAX]; + struct config_group dev_group; + struct config_group port_group; +#ifndef HAVE_CFGFS_ADD_DEF_GRP + struct config_group *default_devgrp[2]; + struct config_group **default_portsgrp; +#endif + struct bnxt_re_port_group *ports; + int nports; +}; + +int bnxt_re_configfs_init(void); +void bnxt_re_configfs_exit(void); +#endif diff --git a/bnxt_re-1.10.3-229.0.139.0/dcb.c b/bnxt_re-1.10.3-229.0.139.0/dcb.c new file mode 100644 index 0000000..351fb83 --- /dev/null +++ b/bnxt_re-1.10.3-229.0.139.0/dcb.c @@ -0,0 +1,289 @@ +/* + * Copyright (c) 2015-2022, Broadcom. All rights reserved. The term + * Broadcom refers to Broadcom Inc. and/or its subsidiaries. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Description: Enables netlink Data center bridging (DCB) + */ + +#include +#include +#include +#include +#include + +#include "bnxt_re.h" +#include "qplib_sp.h" + +#ifdef CONFIG_BNXT_DCB +u8 bnxt_re_get_priority_mask(struct bnxt_re_dev *rdev, u8 selector) +{ + struct net_device *netdev; + struct dcb_app app; + u8 prio_map = 0, tmp_map = 0; + + netdev = rdev->en_dev->net; + memset(&app, 0, sizeof(app)); + if (selector & IEEE_8021QAZ_APP_SEL_ETHERTYPE) { + app.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE; + app.protocol = BNXT_RE_ROCE_V1_ETH_TYPE; + tmp_map = dcb_ieee_getapp_mask(netdev, &app); + prio_map = tmp_map; + } + + if (selector & IEEE_8021QAZ_APP_SEL_DGRAM) { + app.selector = IEEE_8021QAZ_APP_SEL_DGRAM; + app.protocol = BNXT_RE_ROCE_V2_PORT_NO; + tmp_map = dcb_ieee_getapp_mask(netdev, &app); + prio_map |= tmp_map; + } + + return prio_map; +} + +static int __bnxt_re_del_app(struct net_device *dev, + u8 sel, u8 prio, u16 protocol) +{ + struct dcb_app app; + + app.selector = sel; + app.priority = prio; + app.protocol = protocol; + return dev->dcbnl_ops->ieee_delapp(dev, &app); +} + +static int bnxt_re_del_app(struct bnxt_re_dev *rdev, + struct net_device *dev) +{ + if (is_qport_service_type_supported(rdev)) + __bnxt_re_del_app(dev, IEEE_8021QAZ_APP_SEL_DSCP, + BNXT_RE_DEFAULT_CNP_PRI, + BNXT_RE_DEFAULT_CNP_DSCP); + + + __bnxt_re_del_app(dev, IEEE_8021QAZ_APP_SEL_DSCP, + BNXT_RE_DEFAULT_ROCE_PRI, + BNXT_RE_DEFAULT_ROCE_DSCP); + __bnxt_re_del_app(dev, IEEE_8021QAZ_APP_SEL_DGRAM, + BNXT_RE_DEFAULT_ROCE_PRI, + BNXT_RE_ROCE_V2_PORT_NO); + return 0; +} + +static int __bnxt_re_set_app(struct net_device *dev, + u8 sel, u8 prio, u16 protocol) +{ + struct dcb_app app; + + app.selector = sel; + app.priority = prio; + app.protocol = protocol; + return dev->dcbnl_ops->ieee_setapp(dev, &app); +} + +static int bnxt_re_set_default_app(struct bnxt_re_dev *rdev, + struct net_device *dev) +{ + __bnxt_re_set_app(dev, IEEE_8021QAZ_APP_SEL_DGRAM, + BNXT_RE_DEFAULT_ROCE_PRI, + BNXT_RE_ROCE_V2_PORT_NO); + __bnxt_re_set_app(dev, IEEE_8021QAZ_APP_SEL_DSCP, + BNXT_RE_DEFAULT_ROCE_PRI, + BNXT_RE_DEFAULT_ROCE_DSCP); + + if (is_qport_service_type_supported(rdev)) + __bnxt_re_set_app(dev, IEEE_8021QAZ_APP_SEL_DSCP, + BNXT_RE_DEFAULT_CNP_PRI, + BNXT_RE_DEFAULT_CNP_DSCP); + return 0; +} + +void bnxt_re_clear_dcb(struct bnxt_re_dev *rdev, + struct net_device *dev, + struct bnxt_re_tc_rec *tc_rec) +{ + struct bnxt_qplib_cc_param *cc_param = &rdev->cc_param; + struct ieee_ets ets = {}; + struct ieee_pfc pfc = {}; + u8 roce_prio, 
cnp_prio; + + if (!dev->dcbnl_ops) + return; + + cnp_prio = cc_param->alt_vlan_pcp; + roce_prio = cc_param->roce_pri; + + if (dev->dcbnl_ops->ieee_getets) + dev->dcbnl_ops->ieee_getets(dev, &ets); + if (dev->dcbnl_ops->ieee_getpfc) + dev->dcbnl_ops->ieee_getpfc(dev, &pfc); + + if (dev->dcbnl_ops->ieee_delapp) + bnxt_re_del_app(rdev, dev); + + if (dev->dcbnl_ops->ieee_setpfc) { + if (pfc.pfc_en & (1 << BNXT_RE_DEFAULT_ROCE_PRI)) { + pfc.pfc_en &= ~(1 << BNXT_RE_DEFAULT_ROCE_PRI); + rtnl_lock(); + dev->dcbnl_ops->ieee_setpfc(dev, &pfc); + rtnl_unlock(); + } + } + + if (dev->dcbnl_ops->ieee_setets) { + ets.tc_tx_bw[tc_rec->tc_roce] = 0; + ets.tc_tsa[tc_rec->tc_roce] = IEEE_8021QAZ_TSA_STRICT; + ets.prio_tc[roce_prio] = 0; + if (is_qport_service_type_supported(rdev)) + ets.prio_tc[cnp_prio] = 0; + + rtnl_lock(); + dev->dcbnl_ops->ieee_setets(dev, &ets); + rtnl_unlock(); + if (!is_qport_service_type_supported(rdev)) + (void)bnxt_re_setup_cnp_cos(rdev, true); + } +} + +int bnxt_re_setup_dcb(struct bnxt_re_dev *rdev, + struct net_device *dev, + struct bnxt_re_tc_rec *tc_rec, + u16 port_id) +{ + struct ieee_ets ets = {}; + struct ieee_pfc pfc = {}; + int rc; + + if (!dev->dcbnl_ops) + return -EOPNOTSUPP; + + rc = bnxt_re_query_hwrm_qportcfg(rdev, tc_rec, port_id); + if (rc) { + dev_err(rdev_to_dev(rdev), "Failed to query port config rc:%d", + rc); + return rc; + } + + if (dev->dcbnl_ops->ieee_getets) { + rc = dev->dcbnl_ops->ieee_getets(dev, &ets); + if (rc) { + dev_err(rdev_to_dev(rdev), "Failed to getets rc:%d", + rc); + return rc; + } + } + + if (dev->dcbnl_ops->ieee_getpfc) { + rc = dev->dcbnl_ops->ieee_getpfc(dev, &pfc); + if (rc) { + dev_err(rdev_to_dev(rdev), "Failed to getpfc rc:%d", + rc); + return rc; + } + } + + if (dev->dcbnl_ops->ieee_setets) { + ets.tc_tx_bw[0] = BNXT_RE_DEFAULT_L2_BW; + ets.tc_tx_bw[tc_rec->tc_roce] = BNXT_RE_DEFAULT_ROCE_BW; + + ets.tc_tsa[0] = IEEE_8021QAZ_TSA_ETS; + ets.tc_tsa[tc_rec->tc_roce] = IEEE_8021QAZ_TSA_ETS; + + ets.prio_tc[BNXT_RE_DEFAULT_ROCE_PRI] = tc_rec->tc_roce; + if (is_qport_service_type_supported(rdev)) + ets.prio_tc[BNXT_RE_DEFAULT_CNP_PRI] = tc_rec->tc_cnp; + + rtnl_lock(); + rc = dev->dcbnl_ops->ieee_setets(dev, &ets); + rtnl_unlock(); + if (rc) { + if (rc != -EBUSY) + dev_err(rdev_to_dev(rdev), "Fail to setets rc:%d", rc); + return rc; + } + if (!is_qport_service_type_supported(rdev)) { + /* Setup CNP COS queue using an HWRM for older HWRM */ + rc = bnxt_re_setup_cnp_cos(rdev, false); + if (rc) { + dev_err(rdev_to_dev(rdev), + "Failed to set cnp cos rc:%d", rc); + goto clear; + } + } + } + + if (dev->dcbnl_ops->ieee_setpfc) { + /* Default RoCE priority to be enabled = 0x3 */ + pfc.pfc_en = 1 << BNXT_RE_DEFAULT_ROCE_PRI; + rtnl_lock(); + rc = dev->dcbnl_ops->ieee_setpfc(dev, &pfc); + rtnl_unlock(); + if (rc) { + dev_err(rdev_to_dev(rdev), "Fail to setpfc rc:%d", rc); + goto clear; + } + } + + if (dev->dcbnl_ops->ieee_setapp) { + rc = bnxt_re_set_default_app(rdev, dev); + if (rc) { + dev_err(rdev_to_dev(rdev), "Fail to setapp tlvs rc:%d", + rc); + goto clear; + } + } + return 0; +clear: + bnxt_re_clear_dcb(rdev, rdev->en_dev->net, tc_rec); + return rc; +} +#else +u8 bnxt_re_get_priority_mask(struct bnxt_re_dev *rdev, u8 selector) +{ + return 0; +} + +int bnxt_re_setup_dcb(struct bnxt_re_dev *rdev, + struct net_device *dev, + struct bnxt_re_tc_rec *tc_rec, + u16 port_id) +{ + dev_warn(rdev_to_dev(rdev), "CONFIG_DCB is not enabled in Linux\n"); + return 0; +} + +void bnxt_re_clear_dcb(struct bnxt_re_dev *rdev, + struct net_device *dev, + struct 
bnxt_re_tc_rec *tc_rec) +{ +} +#endif /* CONFIG_BNXT_DCB */ + diff --git a/bnxt_re-1.10.3-229.0.139.0/dcb.h b/bnxt_re-1.10.3-229.0.139.0/dcb.h new file mode 100644 index 0000000..2a21397 --- /dev/null +++ b/bnxt_re-1.10.3-229.0.139.0/dcb.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2015-2021, Broadcom. All rights reserved. The term + * Broadcom refers to Broadcom Inc. and/or its subsidiaries. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef __BNXT_RE_DCB_H__ +#define __BNXT_RE_DCB_H__ + +u8 bnxt_re_get_priority_mask(struct bnxt_re_dev *rdev, u8 selector); +int bnxt_re_setup_dcb(struct bnxt_re_dev *rdev, + struct net_device *dev, + struct bnxt_re_tc_rec *tc_rec, + u16 port_id); + +void bnxt_re_clear_dcb(struct bnxt_re_dev *rdev, + struct net_device *dev, + struct bnxt_re_tc_rec *tc_rec); +#endif diff --git a/bnxt_re-1.10.3-229.0.139.0/debugfs.c b/bnxt_re-1.10.3-229.0.139.0/debugfs.c new file mode 100644 index 0000000..a17643a --- /dev/null +++ b/bnxt_re-1.10.3-229.0.139.0/debugfs.c @@ -0,0 +1,1238 @@ +/* + * Copyright (c) 2015-2023, Broadcom. All rights reserved. The term + * Broadcom refers to Broadcom Inc. and/or its subsidiaries. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Author: Eddie Wai + * + * Description: DebugFS specifics + */ + +#include "bnxt_re.h" +#include "bnxt.h" +#include "debugfs.h" +#include "ib_verbs.h" +#include "hdbr.h" + +#ifdef ENABLE_DEBUGFS + +#define BNXT_RE_DEBUGFS_NAME_BUF_SIZE 128 + +static struct dentry *bnxt_re_debugfs_root; +extern unsigned int restrict_stats; + +static void bnxt_re_print_ext_stat(struct bnxt_re_dev *rdev, + struct seq_file *s); + +static const char *qp_type_str[] = { + "IB_QPT_SMI", + "IB_QPT_GSI", + "IB_QPT_RC", + "IB_QPT_UC", + "IB_QPT_UD", + "IB_QPT_RAW_IPV6", + "IB_QPT_RAW_ETHERTYPE", + "IB_QPT_UNKNOWN", + "IB_QPT_RAW_PACKET", + "IB_QPT_XRC_INI", + "IB_QPT_XRC_TGT", + "IB_QPT_MAX" +}; + +static const char *qp_state_str[] = { + "IB_QPS_RESET", + "IB_QPS_INIT", + "IB_QPS_RTR", + "IB_QPS_RTS", + "IB_QPS_SQD", + "IB_QPS_SQE", + "IB_QPS_ERR" +}; + + +static void bnxt_re_fill_qp_info(struct bnxt_re_qp *qp) +{ + struct bnxt_re_dev *rdev = qp->rdev; + struct bnxt_qplib_qp *qplib_qp; + u16 type, state; + u8 *cur_ptr; + int rc; + + cur_ptr = qp->qp_data; + if (!cur_ptr) + return; + + qplib_qp = kcalloc(1, sizeof(*qplib_qp), GFP_KERNEL); + if (!qplib_qp) + return; + + qplib_qp->id = qp->qplib_qp.id; + rc = bnxt_qplib_query_qp(&rdev->qplib_res, qplib_qp); + if (rc) + goto bail; + type = __from_hw_to_ib_qp_type(qp->qplib_qp.type); + cur_ptr += sprintf(cur_ptr, "type \t = %s(%d)\n", + (type > IB_QPT_MAX) ? + "IB_QPT_UNKNOWN" : qp_type_str[type], + type); + state = __to_ib_qp_state(qplib_qp->state); + cur_ptr += sprintf(cur_ptr, "state \t = %s(%d)\n", + (state > IB_QPS_ERR) ? 
+ "IB_QPS_UNKNOWN" : qp_state_str[state], + state); + cur_ptr += sprintf(cur_ptr, "source qpn \t = %d\n", qplib_qp->id); + + if (type != IB_QPT_UD) { + cur_ptr += sprintf(cur_ptr, "dest qpn \t = %d\n", qplib_qp->dest_qpn); + cur_ptr += sprintf(cur_ptr, "source port \t = %d\n", qp->qp_info_entry.s_port); + } + + cur_ptr += sprintf(cur_ptr, "dest port \t = %d\n", qp->qp_info_entry.d_port); + cur_ptr += sprintf(cur_ptr, "port \t = %d\n", qplib_qp->port_id); + + if (type != IB_QPT_UD) { + if (qp->qplib_qp.nw_type == CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4) { + cur_ptr += sprintf(cur_ptr, "source_ipaddr \t = %pI4\n", + &qp->qp_info_entry.s_ip.ipv4_addr); + cur_ptr += sprintf(cur_ptr, "destination_ipaddr \t = %pI4\n", + &qp->qp_info_entry.d_ip.ipv4_addr); + } else { + cur_ptr += sprintf(cur_ptr, "source_ipaddr \t = %pI6\n", + qp->qp_info_entry.s_ip.ipv6_addr); + cur_ptr += sprintf(cur_ptr, "destination_ipaddr \t = %pI6\n", + qp->qp_info_entry.d_ip.ipv6_addr); + } + } +bail: + kfree(qplib_qp); +} + +static ssize_t bnxt_re_qp_info_qp_read(struct file *filp, char __user *buffer, + size_t usr_buf_len, loff_t *ppos) +{ + struct bnxt_re_qp *qp = filp->private_data; + + if (usr_buf_len < BNXT_RE_DEBUGFS_QP_INFO_MAX_SIZE) + return -ENOSPC; + + if (!qp->qp_data) + return -ENOMEM; + + if (*ppos >= BNXT_RE_DEBUGFS_QP_INFO_MAX_SIZE) + return 0; + + bnxt_re_fill_qp_info(qp); + + return simple_read_from_buffer(buffer, usr_buf_len, ppos, + (u8 *)(qp->qp_data), + strlen((char *)qp->qp_data)); +} + +static const struct file_operations bnxt_re_qp_info_ops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = bnxt_re_qp_info_qp_read, +}; + +void bnxt_re_qp_info_add_qpinfo(struct bnxt_re_dev *rdev, struct bnxt_re_qp *qp) +{ + char qp_name[32]; + + if (IS_ERR_OR_NULL(rdev->pdev_qpinfo_dir)) + return; + + qp->qp_data = kzalloc(BNXT_RE_DEBUGFS_QP_INFO_MAX_SIZE, GFP_KERNEL); + if (!qp->qp_data) + return; + + sprintf(qp_name, "0x%x", qp->qplib_qp.id); + qp->qp_info_pdev_dentry = debugfs_create_file(qp_name, 0400, + rdev->pdev_qpinfo_dir, + qp, + &bnxt_re_qp_info_ops); + if (IS_ERR_OR_NULL(qp->qp_info_pdev_dentry)) + dev_dbg(rdev_to_dev(rdev), "Unable to create debugfs file for QP%d", + qp->qplib_qp.id); +} + +void bnxt_re_qp_info_rem_qpinfo(struct bnxt_re_dev *rdev, struct bnxt_re_qp *qp) +{ + debugfs_remove(qp->qp_info_pdev_dentry); + qp->qp_info_pdev_dentry = NULL; + + kfree(qp->qp_data); + qp->qp_data = NULL; +} + +/* Clear the driver statistics maintained in the info file */ +static ssize_t bnxt_re_info_debugfs_clear(struct file *fil, const char __user *u, + size_t size, loff_t *off) +{ + struct seq_file *m = fil->private_data; + struct bnxt_re_dev *rdev = m->private; + struct bnxt_re_res_cntrs *rsors; + + if (!rdev) + return 0; + + rsors = &rdev->stats.rsors; + + /* Clear the driver statistics only */ + atomic_set(&rsors->max_qp_count, atomic_read(&rsors->qp_count)); + atomic_set(&rsors->max_rc_qp_count, atomic_read(&rsors->rc_qp_count)); + atomic_set(&rsors->max_ud_qp_count, atomic_read(&rsors->ud_qp_count)); + atomic_set(&rsors->max_srq_count, atomic_read(&rsors->srq_count)); + atomic_set(&rsors->max_cq_count, atomic_read(&rsors->cq_count)); + atomic_set(&rsors->max_mr_count, atomic_read(&rsors->mr_count)); + atomic_set(&rsors->max_mw_count, atomic_read(&rsors->mw_count)); + atomic_set(&rsors->max_ah_count, atomic_read(&rsors->ah_count)); + atomic_set(&rsors->max_pd_count, atomic_read(&rsors->pd_count)); + atomic_set(&rsors->resize_count, 0); + + if (rdev->dbr_sw_stats) { + rdev->dbr_sw_stats->dbq_int_recv 
= 0; + rdev->dbr_sw_stats->dbq_int_en = 0; + rdev->dbr_sw_stats->dbq_pacing_resched = 0; + rdev->dbr_sw_stats->dbq_pacing_complete = 0; + rdev->dbr_sw_stats->dbq_pacing_alerts = 0; + + rdev->dbr_evt_curr_epoch = 0; + rdev->dbr_sw_stats->dbr_drop_recov_events = 0; + rdev->dbr_sw_stats->dbr_drop_recov_timeouts = 0; + rdev->dbr_sw_stats->dbr_drop_recov_timeout_users = 0; + rdev->dbr_sw_stats->dbr_drop_recov_event_skips = 0; + } + + return size; +} + +/* Clear perf state irrespective value passed. + * Any value written to debugfs entry will clear the stats + */ +static ssize_t bnxt_re_perf_debugfs_clear(struct file *fil, const char __user *u, + size_t size, loff_t *off) +{ + struct seq_file *m = fil->private_data; + struct bnxt_re_dev *rdev = m->private; + int i; + + if (!rdev->rcfw.sp_perf_stats_enabled) + return size; + + for (i = 0; i < RCFW_MAX_STAT_INDEX; i++) { + rdev->rcfw.qp_create_stats[i] = 0; + rdev->rcfw.qp_destroy_stats[i] = 0; + rdev->rcfw.mr_create_stats[i] = 0; + rdev->rcfw.mr_destroy_stats[i] = 0; + rdev->rcfw.qp_modify_stats[i] = 0; + } + + rdev->rcfw.qp_create_stats_id = 0; + rdev->rcfw.qp_destroy_stats_id = 0; + rdev->rcfw.mr_create_stats_id = 0; + rdev->rcfw.mr_destroy_stats_id = 0; + rdev->rcfw.qp_modify_stats_id = 0; + + for (i = 0; i < RCFW_MAX_LATENCY_MSEC_SLAB_INDEX; i++) + rdev->rcfw.rcfw_lat_slab_msec[i] = 0; + + return size; +} + +/* Clear the driver debug statistics */ +static ssize_t bnxt_re_drv_stats_debugfs_clear(struct file *fil, const char __user *u, + size_t size, loff_t *off) +{ + struct seq_file *m = fil->private_data; + struct bnxt_re_dev *rdev = m->private; + + if (!rdev) + return 0; + + rdev->dbg_stats->dbq.fifo_occup_slab_1 = 0; + rdev->dbg_stats->dbq.fifo_occup_slab_2 = 0; + rdev->dbg_stats->dbq.fifo_occup_slab_3 = 0; + rdev->dbg_stats->dbq.fifo_occup_slab_4 = 0; + rdev->dbg_stats->dbq.fifo_occup_water_mark = 0; + rdev->dbg_stats->dbq.do_pacing_slab_1 = 0; + rdev->dbg_stats->dbq.do_pacing_slab_2 = 0; + rdev->dbg_stats->dbq.do_pacing_slab_3 = 0; + rdev->dbg_stats->dbq.do_pacing_slab_4 = 0; + rdev->dbg_stats->dbq.do_pacing_slab_5 = 0; + rdev->dbg_stats->dbq.do_pacing_water_mark = 0; + + return size; +} + +static void bnxt_re_print_roce_only_counters(struct bnxt_re_dev *rdev, + struct seq_file *s) +{ + struct bnxt_re_ro_counters *roce_only = &rdev->stats.dstat.cur[0]; + + /* Do not polulate RoCE Only stats for VF from Thor onwards */ + if (_is_chip_gen_p5_p7(rdev->chip_ctx) && rdev->is_virtfn) + return; + + seq_printf(s, "\tRoCE Only Rx Pkts: %llu\n", roce_only->rx_pkts); + seq_printf(s, "\tRoCE Only Rx Bytes: %llu\n", roce_only->rx_bytes); + seq_printf(s, "\tRoCE Only Tx Pkts: %llu\n", roce_only->tx_pkts); + seq_printf(s, "\tRoCE Only Tx Bytes: %llu\n", roce_only->tx_bytes); +} + +static void bnxt_re_print_normal_total_counters(struct bnxt_re_dev *rdev, + struct seq_file *s) +{ + + if (_is_chip_gen_p5_p7(rdev->chip_ctx) && rdev->is_virtfn) { + struct bnxt_re_rdata_counters *rstat = &rdev->stats.dstat.rstat[0]; + + /* Only for VF from Thor onwards */ + seq_printf(s, "\tRx Pkts: %llu\n", rstat->rx_ucast_pkts); + seq_printf(s, "\tRx Bytes: %llu\n", rstat->rx_ucast_bytes); + seq_printf(s, "\tTx Pkts: %llu\n", rstat->tx_ucast_pkts); + seq_printf(s, "\tTx Bytes: %llu\n", rstat->tx_ucast_bytes); + } else { + struct bnxt_re_ro_counters *roce_only; + struct bnxt_re_cc_stat *cnps; + + cnps = &rdev->stats.cnps; + roce_only = &rdev->stats.dstat.cur[0]; + + seq_printf(s, "\tRx Pkts: %llu\n", cnps->cur[0].cnp_rx_pkts + + roce_only->rx_pkts); + seq_printf(s, "\tRx 
Bytes: %llu\n", + cnps->cur[0].cnp_rx_bytes + roce_only->rx_bytes); + seq_printf(s, "\tTx Pkts: %llu\n", + cnps->cur[0].cnp_tx_pkts + roce_only->tx_pkts); + seq_printf(s, "\tTx Bytes: %llu\n", + cnps->cur[0].cnp_tx_bytes + roce_only->tx_bytes); + } +} + +static void bnxt_re_print_bond_total_counters(struct bnxt_re_dev *rdev, + struct seq_file *s) +{ + struct bnxt_re_ro_counters *roce_only; + struct bnxt_re_cc_stat *cnps; + + cnps = &rdev->stats.cnps; + roce_only = &rdev->stats.dstat.cur[0]; + + seq_printf(s, "\tRx Pkts: %llu\n", + cnps->cur[0].cnp_rx_pkts + + cnps->cur[1].cnp_rx_pkts + + roce_only[0].rx_pkts + + roce_only[1].rx_pkts); + + seq_printf(s, "\tRx Bytes: %llu\n", + cnps->cur[0].cnp_rx_bytes + + cnps->cur[1].cnp_rx_bytes + + roce_only[0].rx_bytes + + roce_only[1].rx_bytes); + + seq_printf(s, "\tTx Pkts: %llu\n", + cnps->cur[0].cnp_tx_pkts + + cnps->cur[1].cnp_tx_pkts + + roce_only[0].tx_pkts + + roce_only[1].tx_pkts); + + seq_printf(s, "\tTx Bytes: %llu\n", + cnps->cur[0].cnp_tx_bytes + + cnps->cur[1].cnp_tx_bytes + + roce_only[0].tx_bytes + + roce_only[1].tx_bytes); + + /* Disable per port stat display for gen-p5 */ + if (_is_chip_gen_p5_p7(rdev->chip_ctx)) + return; + seq_printf(s, "\tRx Pkts P0: %llu\n", + cnps->cur[0].cnp_rx_pkts + roce_only[0].rx_pkts); + seq_printf(s, "\tRx Bytes P0: %llu\n", + cnps->cur[0].cnp_rx_bytes + roce_only[0].rx_bytes); + seq_printf(s, "\tTx Pkts P0: %llu\n", + cnps->cur[0].cnp_tx_pkts + roce_only[0].tx_pkts); + seq_printf(s, "\tTx Bytes P0: %llu\n", + cnps->cur[0].cnp_tx_bytes + roce_only[0].tx_bytes); + + seq_printf(s, "\tRx Pkts P1: %llu\n", + cnps->cur[1].cnp_rx_pkts + roce_only[1].rx_pkts); + seq_printf(s, "\tRx Bytes P1: %llu\n", + cnps->cur[1].cnp_rx_bytes + roce_only[1].rx_bytes); + seq_printf(s, "\tTx Pkts P1: %llu\n", + cnps->cur[1].cnp_tx_pkts + roce_only[1].tx_pkts); + seq_printf(s, "\tTx Bytes P1: %llu\n", + cnps->cur[1].cnp_tx_bytes + roce_only[1].tx_bytes); +} + +static void bnxt_re_print_bond_roce_only_counters(struct bnxt_re_dev *rdev, + struct seq_file *s) +{ + struct bnxt_re_ro_counters *roce_only; + + roce_only = rdev->stats.dstat.cur; + seq_printf(s, "\tRoCE Only Rx Pkts: %llu\n" ,roce_only[0].rx_pkts + + roce_only[1].rx_pkts); + seq_printf(s, "\tRoCE Only Rx Bytes: %llu\n", roce_only[0].rx_bytes + + roce_only[1].rx_bytes); + seq_printf(s, "\tRoCE Only Tx Pkts: %llu\n", roce_only[0].tx_pkts + + roce_only[1].tx_pkts); + seq_printf(s, "\tRoCE Only Tx Bytes: %llu\n", roce_only[0].tx_bytes + + roce_only[1].tx_bytes); + + /* Disable per port stat display for gen-p5 onwards. 
*/ + if (_is_chip_gen_p5_p7(rdev->chip_ctx)) + return; + seq_printf(s, "\tRoCE Only Rx Pkts P0: %llu\n", roce_only[0].rx_pkts); + seq_printf(s, "\tRoCE Only Rx Bytes P0: %llu\n", roce_only[0].rx_bytes); + seq_printf(s, "\tRoCE Only Tx Pkts P0: %llu\n", roce_only[0].tx_pkts); + seq_printf(s, "\tRoCE Only Tx Bytes P0: %llu\n", roce_only[0].tx_bytes); + + seq_printf(s, "\tRoCE Only Rx Pkts P1: %llu\n", roce_only[1].rx_pkts); + seq_printf(s, "\tRoCE Only Rx Bytes P1: %llu\n", roce_only[1].rx_bytes); + seq_printf(s, "\tRoCE Only Tx Pkts P1: %llu\n", roce_only[1].tx_pkts); + seq_printf(s, "\tRoCE Only Tx Bytes P1: %llu\n", roce_only[1].tx_bytes); +} + +static void bnxt_re_print_bond_counters(struct bnxt_re_dev *rdev, + struct seq_file *s) +{ + struct bnxt_qplib_roce_stats *roce_stats; + struct bnxt_re_rdata_counters *stats1; + struct bnxt_re_rdata_counters *stats2; + struct bnxt_re_cc_stat *cnps; + long long oob_cnt = 0; + bool en_disp; + + roce_stats = &rdev->stats.dstat.errs; + stats1 = &rdev->stats.dstat.rstat[0]; + stats2 = &rdev->stats.dstat.rstat[1]; + cnps = &rdev->stats.cnps; + en_disp = !_is_chip_gen_p5_p7(rdev->chip_ctx); + + seq_printf(s, "\tActive QPs P0: %lld\n", roce_stats->active_qp_count_p0); + seq_printf(s, "\tActive QPs P1: %lld\n", roce_stats->active_qp_count_p1); + + bnxt_re_print_bond_total_counters(rdev, s); + + seq_printf(s, "\tCNP Tx Pkts: %llu\n", + cnps->cur[0].cnp_tx_pkts + cnps->cur[1].cnp_tx_pkts); + if (en_disp) + seq_printf(s, "\tCNP Tx Bytes: %llu\n", + cnps->cur[0].cnp_tx_bytes + + cnps->cur[1].cnp_tx_bytes); + seq_printf(s, "\tCNP Rx Pkts: %llu\n", + cnps->cur[0].cnp_rx_pkts + cnps->cur[1].cnp_rx_pkts); + if (en_disp) + seq_printf(s, "\tCNP Rx Bytes: %llu\n", + cnps->cur[0].cnp_rx_bytes + + cnps->cur[1].cnp_rx_bytes); + + seq_printf(s, "\tCNP Tx Pkts P0: %llu\n", cnps->cur[0].cnp_tx_pkts); + if (en_disp) + seq_printf(s, "\tCNP Tx Bytes P0: %llu\n", + cnps->cur[0].cnp_tx_bytes); + seq_printf(s, "\tCNP Rx Pkts P0: %llu\n", cnps->cur[0].cnp_rx_pkts); + if (en_disp) + seq_printf(s, "\tCNP Rx Bytes P0: %llu\n", + cnps->cur[0].cnp_rx_bytes); + seq_printf(s, "\tCNP Tx Pkts P1: %llu\n", cnps->cur[1].cnp_tx_pkts); + if (en_disp) + seq_printf(s, "\tCNP Tx Bytes P1: %llu\n", + cnps->cur[1].cnp_tx_bytes); + seq_printf(s, "\tCNP Rx Pkts P1: %llu\n", cnps->cur[1].cnp_rx_pkts); + if (en_disp) + seq_printf(s, "\tCNP Rx Bytes P1: %llu\n", + cnps->cur[1].cnp_rx_bytes); + /* Print RoCE only bytes.. CNP counters include RoCE packets also */ + bnxt_re_print_bond_roce_only_counters(rdev, s); + + + seq_printf(s, "\trx_roce_error_pkts: %lld\n", + (stats1 ? stats1->rx_error_pkts : 0) + + (stats2 ? stats2->rx_error_pkts : 0)); + seq_printf(s, "\trx_roce_discard_pkts: %lld\n", + (stats1 ? stats1->rx_discard_pkts : 0) + + (stats2 ? stats2->rx_discard_pkts : 0)); + if (!en_disp) { + /* show only for Gen P5 or higher */ + seq_printf(s, "\ttx_roce_error_pkts: %lld\n", + (stats1 ? stats1->tx_error_pkts : 0) + + (stats2 ? stats2->tx_error_pkts : 0)); + seq_printf(s, "\ttx_roce_discard_pkts: %lld\n", + (stats1 ? stats1->tx_discard_pkts : 0) + + (stats2 ? stats2->tx_discard_pkts : 0)); + } + /* No need to sum-up both port stat counts in bond mode */ + if (bnxt_ext_stats_supported(rdev->chip_ctx, rdev->dev_attr->dev_cap_flags, + rdev->is_virtfn)) { + seq_printf(s, "\tres_oob_drop_count: %lld\n", + rdev->stats.dstat.e_errs.oob); + bnxt_re_print_ext_stat(rdev, s); + } else { + oob_cnt = (stats1 ? stats1->rx_discard_pkts : 0) + + (stats2 ? 
stats2->rx_discard_pkts : 0) - + rdev->stats.dstat.errs.res_oos_drop_count; + + /* + * oob count is calculated from the output of two seperate + * HWRM commands. To avoid reporting inconsistent values + * due to the time delta between two different queries, + * report newly calculated value only if it is more than the + * previously reported OOB value. + */ + if (oob_cnt < rdev->stats.dstat.prev_oob) + oob_cnt = rdev->stats.dstat.prev_oob; + seq_printf(s, "\tres_oob_drop_count: %lld\n", oob_cnt); + rdev->stats.dstat.prev_oob = oob_cnt; + } +} + +static void bnxt_re_print_ext_stat(struct bnxt_re_dev *rdev, + struct seq_file *s) +{ + struct bnxt_re_ext_rstat *ext_s; + struct bnxt_re_cc_stat *cnps; + + ext_s = &rdev->stats.dstat.ext_rstat[0]; + cnps = &rdev->stats.cnps; + + seq_printf(s, "\ttx_atomic_req: %llu\n", ext_s->tx.atomic_req); + seq_printf(s, "\trx_atomic_req: %llu\n", ext_s->rx.atomic_req); + seq_printf(s, "\ttx_read_req: %llu\n", ext_s->tx.read_req); + seq_printf(s, "\ttx_read_resp: %llu\n", ext_s->tx.read_resp); + seq_printf(s, "\trx_read_req: %llu\n", ext_s->rx.read_req); + seq_printf(s, "\trx_read_resp: %llu\n", ext_s->rx.read_resp); + seq_printf(s, "\ttx_write_req: %llu\n", ext_s->tx.write_req); + seq_printf(s, "\trx_write_req: %llu\n", ext_s->rx.write_req); + seq_printf(s, "\ttx_send_req: %llu\n", ext_s->tx.send_req); + seq_printf(s, "\trx_send_req: %llu\n", ext_s->rx.send_req); + seq_printf(s, "\trx_good_pkts: %llu\n", ext_s->grx.rx_pkts); + seq_printf(s, "\trx_good_bytes: %llu\n", ext_s->grx.rx_bytes); + if (_is_chip_p7(rdev->chip_ctx)) { + seq_printf(s, "\trx_dcn_payload_cut: %llu\n", ext_s->rx_dcn_payload_cut); + seq_printf(s, "\tte_bypassed: %llu\n", ext_s->te_bypassed); + } + + if (rdev->binfo) { + seq_printf(s, "\trx_ecn_marked_pkts: %llu\n", + cnps->cur[0].ecn_marked + cnps->cur[1].ecn_marked); + seq_printf(s, "\trx_ecn_marked_pkts P0: %llu\n", cnps->cur[0].ecn_marked); + seq_printf(s, "\trx_ecn_marked_pkts P1: %llu\n", cnps->cur[1].ecn_marked); + } else { + seq_printf(s, "\trx_ecn_marked_pkts: %llu\n", cnps->cur[0].ecn_marked); + } +} + +static void bnxt_re_print_normal_counters(struct bnxt_re_dev *rdev, + struct seq_file *s) +{ + struct bnxt_re_rdata_counters *stats; + struct bnxt_re_cc_stat *cnps; + bool en_disp; + + stats = &rdev->stats.dstat.rstat[0]; + cnps = &rdev->stats.cnps; + en_disp = !_is_chip_gen_p5_p7(rdev->chip_ctx); + + bnxt_re_print_normal_total_counters(rdev, s); + if (!rdev->is_virtfn) { + seq_printf(s, "\tCNP Tx Pkts: %llu\n", + cnps->cur[0].cnp_tx_pkts); + if (en_disp) + seq_printf(s, "\tCNP Tx Bytes: %llu\n", + cnps->cur[0].cnp_tx_bytes); + seq_printf(s, "\tCNP Rx Pkts: %llu\n", + cnps->cur[0].cnp_rx_pkts); + if (en_disp) + seq_printf(s, "\tCNP Rx Bytes: %llu\n", + cnps->cur[0].cnp_rx_bytes); + } + /* Print RoCE only bytes.. CNP counters include RoCE packets also */ + bnxt_re_print_roce_only_counters(rdev, s); + + seq_printf(s, "\trx_roce_error_pkts: %lld\n", + stats ? stats->rx_error_pkts : 0); + seq_printf(s, "\trx_roce_discard_pkts: %lld\n", + stats ? stats->rx_discard_pkts : 0); + if (!en_disp) { + seq_printf(s, "\ttx_roce_error_pkts: %lld\n", + stats ? stats->tx_error_pkts : 0); + seq_printf(s, "\ttx_roce_discards_pkts: %lld\n", + stats ? 
stats->tx_discard_pkts : 0); + } + + if (bnxt_ext_stats_supported(rdev->chip_ctx, rdev->dev_attr->dev_cap_flags, + rdev->is_virtfn)) { + seq_printf(s, "\tres_oob_drop_count: %lld\n", + rdev->stats.dstat.e_errs.oob); + bnxt_re_print_ext_stat(rdev, s); + } +} + +static int bnxt_re_info_debugfs_show(struct seq_file *s, void *unused) +{ + struct bnxt_re_dev *rdev = s->private; + struct bnxt_re_ext_roce_stats *e_errs; + struct bnxt_re_rdata_counters *rstat; + struct bnxt_qplib_roce_stats *errs; + unsigned long tstamp_diff; + struct pci_dev *pdev; + int sched_msec, i; + int rc = 0; + + seq_printf(s, "bnxt_re debug info:\n"); + if (!rdev) + return -ENODEV; + + if (!mutex_trylock(&bnxt_re_mutex)) + return restart_syscall(); + + if (!bnxt_re_is_rdev_valid(rdev)) { + rc = -ENODEV; + goto err; + } + + pdev = rdev->en_dev->pdev; + + errs = &rdev->stats.dstat.errs; + rstat = &rdev->stats.dstat.rstat[0]; + e_errs = &rdev->stats.dstat.e_errs; + sched_msec = BNXT_RE_STATS_CTX_UPDATE_TIMER; + tstamp_diff = jiffies - rdev->stats.read_tstamp; + if (test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags)) { + if (restrict_stats && tstamp_diff < + msecs_to_jiffies(sched_msec)) + goto skip_query; + rc = bnxt_re_get_device_stats(rdev); + if (rc) + dev_err(rdev_to_dev(rdev), + "Failed to query device stats\n"); + rdev->stats.read_tstamp = jiffies; + } +skip_query: + seq_printf(s, "=====[ IBDEV %s ]=============================\n", + rdev->ibdev.name); + if (rdev->netdev) + seq_printf(s, "\tlink state: %s\n", + bnxt_re_link_state_str(rdev)); + seq_printf(s, "\tMax QP:\t\t%d\n", rdev->dev_attr->max_qp); + seq_printf(s, "\tMax SRQ:\t%d\n", rdev->dev_attr->max_srq); + seq_printf(s, "\tMax CQ:\t\t%d\n", rdev->dev_attr->max_cq); + seq_printf(s, "\tMax MR:\t\t%d\n", rdev->dev_attr->max_mr); + seq_printf(s, "\tMax MW:\t\t%d\n", rdev->dev_attr->max_mw); + seq_printf(s, "\tMax AH:\t\t%d\n", rdev->dev_attr->max_ah); + seq_printf(s, "\tMax PD:\t\t%d\n", rdev->dev_attr->max_pd); + seq_printf(s, "\tActive QP:\t%d\n", + atomic_read(&rdev->stats.rsors.qp_count)); + seq_printf(s, "\tActive RC QP:\t%d\n", + atomic_read(&rdev->stats.rsors.rc_qp_count)); + seq_printf(s, "\tActive UD QP:\t%d\n", + atomic_read(&rdev->stats.rsors.ud_qp_count)); + seq_printf(s, "\tActive SRQ:\t%d\n", + atomic_read(&rdev->stats.rsors.srq_count)); + seq_printf(s, "\tActive CQ:\t%d\n", + atomic_read(&rdev->stats.rsors.cq_count)); + seq_printf(s, "\tActive MR:\t%d\n", + atomic_read(&rdev->stats.rsors.mr_count)); + seq_printf(s, "\tActive MW:\t%d\n", + atomic_read(&rdev->stats.rsors.mw_count)); + seq_printf(s, "\tActive AH:\t%d\n", + atomic_read(&rdev->stats.rsors.ah_count)); + seq_printf(s, "\tActive PD:\t%d\n", + atomic_read(&rdev->stats.rsors.pd_count)); + seq_printf(s, "\tQP Watermark:\t%d\n", + atomic_read(&rdev->stats.rsors.max_qp_count)); + seq_printf(s, "\tRC QP Watermark: %d\n", + atomic_read(&rdev->stats.rsors.max_rc_qp_count)); + seq_printf(s, "\tUD QP Watermark: %d\n", + atomic_read(&rdev->stats.rsors.max_ud_qp_count)); + seq_printf(s, "\tSRQ Watermark:\t%d\n", + atomic_read(&rdev->stats.rsors.max_srq_count)); + seq_printf(s, "\tCQ Watermark:\t%d\n", + atomic_read(&rdev->stats.rsors.max_cq_count)); + seq_printf(s, "\tMR Watermark:\t%d\n", + atomic_read(&rdev->stats.rsors.max_mr_count)); + seq_printf(s, "\tMW Watermark:\t%d\n", + atomic_read(&rdev->stats.rsors.max_mw_count)); + seq_printf(s, "\tAH Watermark:\t%d\n", + atomic_read(&rdev->stats.rsors.max_ah_count)); + seq_printf(s, "\tPD Watermark:\t%d\n", + 
atomic_read(&rdev->stats.rsors.max_pd_count)); + seq_printf(s, "\tResize CQ count: %d\n", + atomic_read(&rdev->stats.rsors.resize_count)); + seq_printf(s, "\tHW retransmission: %d\n", + BNXT_RE_HW_RETX(rdev->dev_attr->dev_cap_flags) ? + 1 : 0); + seq_printf(s, "\tRecoverable Errors: %lld\n", + rstat ? rstat->tx_bcast_pkts : 0); + if (rdev->binfo) + bnxt_re_print_bond_counters(rdev, s); + else + bnxt_re_print_normal_counters(rdev, s); + + seq_printf(s, "\tmax_retry_exceeded: %llu\n", errs->max_retry_exceeded); + /* handle Thor2 & ext attr stats supporting nics here */ + if (bnxt_ext_stats_supported(rdev->chip_ctx, rdev->dev_attr->dev_cap_flags, + rdev->is_virtfn) && + _is_hw_retx_supported(rdev->dev_attr->dev_cap_flags)) { + seq_printf(s, "\tto_retransmits: %llu\n", e_errs->to_retransmits); + seq_printf(s, "\tseq_err_naks_rcvd: %llu\n", e_errs->seq_err_naks_rcvd); + seq_printf(s, "\trnr_naks_rcvd: %llu\n", e_errs->rnr_naks_rcvd); + seq_printf(s, "\tmissing_resp: %llu\n", e_errs->missing_resp); + seq_printf(s, "\tdup_reqs: %llu\n", e_errs->dup_req); + } else { + seq_printf(s, "\tto_retransmits: %llu\n", errs->to_retransmits); + seq_printf(s, "\tseq_err_naks_rcvd: %llu\n", errs->seq_err_naks_rcvd); + seq_printf(s, "\trnr_naks_rcvd: %llu\n", errs->rnr_naks_rcvd); + seq_printf(s, "\tmissing_resp: %llu\n", errs->missing_resp); + seq_printf(s, "\tdup_req: %llu\n", errs->dup_req); + } + seq_printf(s, "\tunrecoverable_err: %llu\n", errs->unrecoverable_err); + seq_printf(s, "\tbad_resp_err: %llu\n", errs->bad_resp_err); + seq_printf(s, "\tlocal_qp_op_err: %llu\n", errs->local_qp_op_err); + seq_printf(s, "\tlocal_protection_err: %llu\n", errs->local_protection_err); + seq_printf(s, "\tmem_mgmt_op_err: %llu\n", errs->mem_mgmt_op_err); + seq_printf(s, "\tremote_invalid_req_err: %llu\n", errs->remote_invalid_req_err); + seq_printf(s, "\tremote_access_err: %llu\n", errs->remote_access_err); + seq_printf(s, "\tremote_op_err: %llu\n", errs->remote_op_err); + seq_printf(s, "\tres_exceed_max: %llu\n", errs->res_exceed_max); + seq_printf(s, "\tres_length_mismatch: %llu\n", errs->res_length_mismatch); + seq_printf(s, "\tres_exceeds_wqe: %llu\n", errs->res_exceeds_wqe); + seq_printf(s, "\tres_opcode_err: %llu\n", errs->res_opcode_err); + seq_printf(s, "\tres_rx_invalid_rkey: %llu\n", errs->res_rx_invalid_rkey); + seq_printf(s, "\tres_rx_domain_err: %llu\n", errs->res_rx_domain_err); + seq_printf(s, "\tres_rx_no_perm: %llu\n", errs->res_rx_no_perm); + seq_printf(s, "\tres_rx_range_err: %llu\n", errs->res_rx_range_err); + seq_printf(s, "\tres_tx_invalid_rkey: %llu\n", errs->res_tx_invalid_rkey); + seq_printf(s, "\tres_tx_domain_err: %llu\n", errs->res_tx_domain_err); + seq_printf(s, "\tres_tx_no_perm: %llu\n", errs->res_tx_no_perm); + seq_printf(s, "\tres_tx_range_err: %llu\n", errs->res_tx_range_err); + seq_printf(s, "\tres_irrq_oflow: %llu\n", errs->res_irrq_oflow); + seq_printf(s, "\tres_unsup_opcode: %llu\n", errs->res_unsup_opcode); + seq_printf(s, "\tres_unaligned_atomic: %llu\n", errs->res_unaligned_atomic); + seq_printf(s, "\tres_rem_inv_err: %llu\n", errs->res_rem_inv_err); + seq_printf(s, "\tres_mem_error64: %llu\n", errs->res_mem_error); + seq_printf(s, "\tres_srq_err: %llu\n", errs->res_srq_err); + seq_printf(s, "\tres_cmp_err: %llu\n", errs->res_cmp_err); + seq_printf(s, "\tres_invalid_dup_rkey: %llu\n", errs->res_invalid_dup_rkey); + seq_printf(s, "\tres_wqe_format_err: %llu\n", errs->res_wqe_format_err); + seq_printf(s, "\tres_cq_load_err: %llu\n", errs->res_cq_load_err); + seq_printf(s, 
"\tres_srq_load_err: %llu\n", errs->res_srq_load_err); + seq_printf(s, "\tres_tx_pci_err: %llu\n", errs->res_tx_pci_err); + seq_printf(s, "\tres_rx_pci_err: %llu\n", errs->res_rx_pci_err); + if (bnxt_ext_stats_supported(rdev->chip_ctx, rdev->dev_attr->dev_cap_flags, + rdev->is_virtfn)) { + seq_printf(s, "\tres_oos_drop_count: %llu\n", + e_errs->oos); + } else { + /* Display on function 0 as OOS counters are chip-wide */ + if (PCI_FUNC(pdev->devfn) == 0) + seq_printf(s, "\tres_oos_drop_count: %llu\n", + errs->res_oos_drop_count); + } + + seq_printf(s, "\tnum_irq_started : %u\n", rdev->rcfw.num_irq_started); + seq_printf(s, "\tnum_irq_stopped : %u\n", rdev->rcfw.num_irq_stopped); + seq_printf(s, "\tpoll_in_intr_en : %u\n", rdev->rcfw.poll_in_intr_en); + seq_printf(s, "\tpoll_in_intr_dis : %u\n", rdev->rcfw.poll_in_intr_dis); + seq_printf(s, "\tcmdq_full_dbg_cnt : %u\n", rdev->rcfw.cmdq_full_dbg); + if (!rdev->is_virtfn) + seq_printf(s, "\tfw_service_prof_type_sup : %u\n", + is_qport_service_type_supported(rdev)); + if (rdev->dbr_pacing) { + seq_printf(s, "\tdbq_int_recv: %llu\n", rdev->dbr_sw_stats->dbq_int_recv); + if (!_is_chip_p7(rdev->chip_ctx)) + seq_printf(s, "\tdbq_int_en: %llu\n", rdev->dbr_sw_stats->dbq_int_en); + seq_printf(s, "\tdbq_pacing_resched: %llu\n", + rdev->dbr_sw_stats->dbq_pacing_resched); + seq_printf(s, "\tdbq_pacing_complete: %llu\n", + rdev->dbr_sw_stats->dbq_pacing_complete); + seq_printf(s, "\tdbq_pacing_alerts: %llu\n", + rdev->dbr_sw_stats->dbq_pacing_alerts); + seq_printf(s, "\tdbq_dbr_fifo_reg: 0x%x\n", + readl(rdev->en_dev->bar0 + rdev->dbr_db_fifo_reg_off)); + } + + if (rdev->dbr_drop_recov) { + seq_printf(s, "\tdbr_drop_recov_epoch: %d\n", + rdev->dbr_evt_curr_epoch); + seq_printf(s, "\tdbr_drop_recov_events: %lld\n", + rdev->dbr_sw_stats->dbr_drop_recov_events); + seq_printf(s, "\tdbr_drop_recov_timeouts: %lld\n", + rdev->dbr_sw_stats->dbr_drop_recov_timeouts); + seq_printf(s, "\tdbr_drop_recov_timeout_users: %lld\n", + rdev->dbr_sw_stats->dbr_drop_recov_timeout_users); + seq_printf(s, "\tdbr_drop_recov_event_skips: %lld\n", + rdev->dbr_sw_stats->dbr_drop_recov_event_skips); + } + + if (BNXT_RE_PPP_ENABLED(rdev->chip_ctx)) { + seq_printf(s, "\tppp_enabled_contexts: %d\n", + rdev->ppp_stats.ppp_enabled_ctxs); + seq_printf(s, "\tppp_enabled_qps: %d\n", + rdev->ppp_stats.ppp_enabled_qps); + } + + for (i = 0; i < RCFW_MAX_LATENCY_SEC_SLAB_INDEX; i++) { + if (rdev->rcfw.rcfw_lat_slab_sec[i]) + seq_printf(s, "\tlatency_slab [%d - %d] sec = %d\n", + i, i + 1, rdev->rcfw.rcfw_lat_slab_sec[i]); + } + + /* show wqe mode */ + seq_printf(s, "\tsq wqe mode: %d\n", rdev->chip_ctx->modes.wqe_mode); + + seq_printf(s, "\n"); +err: + mutex_unlock(&bnxt_re_mutex); + return rc; +} + +static int bnxt_re_perf_debugfs_show(struct seq_file *s, void *unused) +{ + u64 qp_create_total_msec = 0, qp_destroy_total_msec = 0; + u64 mr_create_total_msec = 0, mr_destroy_total_msec = 0; + int qp_create_total = 0, qp_destroy_total = 0; + int mr_create_total = 0, mr_destroy_total = 0; + u64 qp_modify_err_total_msec = 0; + int qp_modify_err_total = 0; + struct bnxt_re_dev *rdev; + bool add_entry = false; + int i; + + rdev = s->private; + seq_printf(s, "bnxt_re perf stats: %s shadow qd %d Driver Version - %s\n", + rdev->rcfw.sp_perf_stats_enabled ? 
"Enabled" : "Disabled", + rdev->rcfw.curr_shadow_qd, + ROCE_DRV_MODULE_VERSION); + + if (!rdev->rcfw.sp_perf_stats_enabled) + return -ENOMEM; + + for (i = 0; i < RCFW_MAX_LATENCY_MSEC_SLAB_INDEX; i++) { + if (rdev->rcfw.rcfw_lat_slab_msec[i]) + seq_printf(s, "\tlatency_slab [%d - %d] msec = %d\n", + i, i + 1, rdev->rcfw.rcfw_lat_slab_msec[i]); + } + + if (!mutex_trylock(&bnxt_re_mutex)) + return restart_syscall(); + + if (!bnxt_re_is_rdev_valid(rdev)) { + mutex_unlock(&bnxt_re_mutex); + return -ENODEV; + } + + for (i = 0; i < RCFW_MAX_STAT_INDEX; i++) { + if (rdev->rcfw.qp_create_stats[i] > 0) { + qp_create_total++; + qp_create_total_msec += rdev->rcfw.qp_create_stats[i]; + add_entry = true; + } + if (rdev->rcfw.qp_destroy_stats[i] > 0) { + qp_destroy_total++; + qp_destroy_total_msec += rdev->rcfw.qp_destroy_stats[i]; + add_entry = true; + } + if (rdev->rcfw.mr_create_stats[i] > 0) { + mr_create_total++; + mr_create_total_msec += rdev->rcfw.mr_create_stats[i]; + add_entry = true; + } + if (rdev->rcfw.mr_destroy_stats[i] > 0) { + mr_destroy_total++; + mr_destroy_total_msec += rdev->rcfw.mr_destroy_stats[i]; + add_entry = true; + } + if (rdev->rcfw.qp_modify_stats[i] > 0) { + qp_modify_err_total++; + qp_modify_err_total_msec += rdev->rcfw.qp_modify_stats[i]; + add_entry = true; + } + + if (add_entry) + seq_printf(s, " %lld %lld %lld " + " %lld %lld\n", + rdev->rcfw.qp_create_stats[i], + rdev->rcfw.qp_destroy_stats[i], + rdev->rcfw.mr_create_stats[i], + rdev->rcfw.mr_destroy_stats[i], + rdev->rcfw.qp_modify_stats[i]); + + add_entry = false; + } + + seq_printf(s, "Total qp_create %d in msec %lld\n", + qp_create_total, qp_create_total_msec); + seq_printf(s, "Total qp_destroy %d in msec %lld\n", + qp_destroy_total, qp_destroy_total_msec); + seq_printf(s, "Total mr_create %d in msec %lld\n", + mr_create_total, mr_create_total_msec); + seq_printf(s, "Total mr_destroy %d in msec %lld\n", + mr_destroy_total, mr_destroy_total_msec); + seq_printf(s, "Total qp_modify_err_total %d in msec %lld\n", + qp_modify_err_total, qp_modify_err_total_msec); + seq_puts(s, "\n"); + + mutex_unlock(&bnxt_re_mutex); + return 0; +} + +static int bnxt_re_drv_stats_debugfs_show(struct seq_file *s, void *unused) +{ + struct bnxt_re_dev *rdev = s->private; + int rc = 0; + + seq_puts(s, "bnxt_re debug stats:\n"); + + if (!mutex_trylock(&bnxt_re_mutex)) + return restart_syscall(); + + seq_printf(s, "=====[ IBDEV %s ]=============================\n", + rdev->ibdev.name); + if (rdev->dbr_pacing) { + seq_printf(s, "\tdbq_fifo_occup_slab_1: %llu\n", + rdev->dbg_stats->dbq.fifo_occup_slab_1); + seq_printf(s, "\tdbq_fifo_occup_slab_2: %llu\n", + rdev->dbg_stats->dbq.fifo_occup_slab_2); + seq_printf(s, "\tdbq_fifo_occup_slab_3: %llu\n", + rdev->dbg_stats->dbq.fifo_occup_slab_3); + seq_printf(s, "\tdbq_fifo_occup_slab_4: %llu\n", + rdev->dbg_stats->dbq.fifo_occup_slab_4); + seq_printf(s, "\tdbq_fifo_occup_water_mark: %llu\n", + rdev->dbg_stats->dbq.fifo_occup_water_mark); + seq_printf(s, "\tdbq_do_pacing_slab_1: %llu\n", + rdev->dbg_stats->dbq.do_pacing_slab_1); + seq_printf(s, "\tdbq_do_pacing_slab_2: %llu\n", + rdev->dbg_stats->dbq.do_pacing_slab_2); + seq_printf(s, "\tdbq_do_pacing_slab_3: %llu\n", + rdev->dbg_stats->dbq.do_pacing_slab_3); + seq_printf(s, "\tdbq_do_pacing_slab_4: %llu\n", + rdev->dbg_stats->dbq.do_pacing_slab_4); + seq_printf(s, "\tdbq_do_pacing_slab_5: %llu\n", + rdev->dbg_stats->dbq.do_pacing_slab_5); + seq_printf(s, "\tdbq_do_pacing_water_mark: %llu\n", + rdev->dbg_stats->dbq.do_pacing_water_mark); + 
seq_printf(s, "\tmad_consumed: %llu\n", + rdev->dbg_stats->mad.mad_consumed); + seq_printf(s, "\tmad_processed: %llu\n", + rdev->dbg_stats->mad.mad_processed); + } + seq_puts(s, "\n"); + + mutex_unlock(&bnxt_re_mutex); + return rc; +} + +static int bnxt_re_info_debugfs_open(struct inode *inode, struct file *file) +{ + struct bnxt_re_dev *rdev = inode->i_private; + + return single_open(file, bnxt_re_info_debugfs_show, rdev); +} + +static int bnxt_re_perf_debugfs_open(struct inode *inode, struct file *file) +{ + struct bnxt_re_dev *rdev = inode->i_private; + + return single_open(file, bnxt_re_perf_debugfs_show, rdev); +} + +static int bnxt_re_drv_stats_debugfs_open(struct inode *inode, struct file *file) +{ + struct bnxt_re_dev *rdev = inode->i_private; + + return single_open(file, bnxt_re_drv_stats_debugfs_show, rdev); +} + +static int bnxt_re_debugfs_release(struct inode *inode, struct file *file) +{ + return single_release(inode, file); +} + +static const struct file_operations bnxt_re_info_dbg_ops = { + .owner = THIS_MODULE, + .open = bnxt_re_info_debugfs_open, + .read = seq_read, + .write = bnxt_re_info_debugfs_clear, + .llseek = seq_lseek, + .release = bnxt_re_debugfs_release, +}; + +static const struct file_operations bnxt_re_perf_dbg_ops = { + .owner = THIS_MODULE, + .open = bnxt_re_perf_debugfs_open, + .read = seq_read, + .write = bnxt_re_perf_debugfs_clear, + .llseek = seq_lseek, + .release = bnxt_re_debugfs_release, +}; + +static const struct file_operations bnxt_re_drv_stats_dbg_ops = { + .owner = THIS_MODULE, + .open = bnxt_re_drv_stats_debugfs_open, + .read = seq_read, + .write = bnxt_re_drv_stats_debugfs_clear, + .llseek = seq_lseek, + .release = bnxt_re_debugfs_release, +}; + +void bnxt_re_add_dbg_files(struct bnxt_re_dev *rdev) +{ + if (IS_ERR_OR_NULL(rdev->pdev_debug_dir)) + return; + + rdev->pdev_qpinfo_dir = debugfs_create_dir("qp_info", + rdev->pdev_debug_dir); + if (IS_ERR_OR_NULL(rdev->pdev_qpinfo_dir)) + dev_dbg(rdev_to_dev(rdev), "Unable to create debugfs info for qp_info"); +} + +static ssize_t bnxt_re_hdbr_dfs_read(struct file *filp, char __user *buffer, + size_t usr_buf_len, loff_t *ppos) +{ + struct bnxt_re_hdbr_dbgfs_file_data *data = filp->private_data; + size_t len; + char *buf; + + if (*ppos) + return 0; + if (!data) + return -ENODEV; + + buf = bnxt_re_hdbr_dump(data->rdev, data->group, data->user); + if (!buf) + return -ENOMEM; + len = strlen(buf); + if (usr_buf_len < len) { + kfree(buf); + return -ENOSPC; + } + len = simple_read_from_buffer(buffer, usr_buf_len, ppos, buf, len); + kfree(buf); + return len; +} + +static const struct file_operations bnxt_re_hdbr_dfs_ops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = bnxt_re_hdbr_dfs_read, +}; + +#define HDBR_DEBUGFS_SUB_TYPES 2 +void bnxt_re_add_hdbr_knobs(struct bnxt_re_dev *rdev) +{ + char *dirs[HDBR_DEBUGFS_SUB_TYPES] = {"driver", "apps"}; + char *names[DBC_GROUP_MAX] = {"sq", "rq", "srq", "cq"}; + struct bnxt_re_hdbr_dfs_data *data = rdev->hdbr_dbgfs; + struct dentry *sub_dir, *f; + int i, j; + + if (!rdev->hdbr_enabled) + return; + + if (data || IS_ERR_OR_NULL(rdev->pdev_debug_dir)) + return; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return; + data->hdbr_dir = debugfs_create_dir("hdbr", rdev->pdev_debug_dir); + if (IS_ERR_OR_NULL(data->hdbr_dir)) { + dev_dbg(rdev_to_dev(rdev), "Unable to create debugfs hdbr"); + kfree(data); + return; + } + rdev->hdbr_dbgfs = data; + for (i = 0; i < HDBR_DEBUGFS_SUB_TYPES; i++) { + sub_dir = debugfs_create_dir(dirs[i], data->hdbr_dir); + 
if (IS_ERR_OR_NULL(sub_dir)) { + dev_dbg(rdev_to_dev(rdev), "Unable to create debugfs %s", dirs[i]); + return; + } + for (j = 0; j < DBC_GROUP_MAX; j++) { + data->file_data[i][j].rdev = rdev; + data->file_data[i][j].group = j; + data->file_data[i][j].user = !!i; + f = debugfs_create_file(names[j], 0600, sub_dir, &data->file_data[i][j], + &bnxt_re_hdbr_dfs_ops); + if (IS_ERR_OR_NULL(f)) { + dev_dbg(rdev_to_dev(rdev), "Unable to create hdbr debugfs file"); + return; + } + } + } +} + +void bnxt_re_rem_hdbr_knobs(struct bnxt_re_dev *rdev) +{ + struct bnxt_re_hdbr_dfs_data *data = rdev->hdbr_dbgfs; + + if (!data) + return; + debugfs_remove_recursive(data->hdbr_dir); + kfree(data); + rdev->hdbr_dbgfs = NULL; +} + +void bnxt_re_rename_debugfs_entry(struct bnxt_re_dev *rdev) +{ + struct dentry *port_debug_dir; + + if (IS_ERR_OR_NULL(bnxt_re_debugfs_root)) + return; + + if (!test_bit(BNXT_RE_FLAG_PER_PORT_DEBUG_INFO, &rdev->flags)) { + strncpy(rdev->dev_name, dev_name(&rdev->ibdev.dev), IB_DEVICE_NAME_MAX); + bnxt_re_debugfs_add_port(rdev, rdev->dev_name); + set_bit(BNXT_RE_FLAG_PER_PORT_DEBUG_INFO, &rdev->flags); + dev_info(rdev_to_dev(rdev), "Device %s registered successfully", + rdev->dev_name); + } else if (strncmp(rdev->dev_name, dev_name(&rdev->ibdev.dev), IB_DEVICE_NAME_MAX)) { + if (IS_ERR_OR_NULL(rdev->port_debug_dir)) + return; + strncpy(rdev->dev_name, dev_name(&rdev->ibdev.dev), IB_DEVICE_NAME_MAX); + port_debug_dir = debugfs_rename(bnxt_re_debugfs_root, + rdev->port_debug_dir, + bnxt_re_debugfs_root, + rdev->dev_name); + if (IS_ERR(port_debug_dir)) { + dev_warn(rdev_to_dev(rdev), "Unable to rename debugfs %s", + rdev->dev_name); + return; + } + rdev->port_debug_dir = port_debug_dir; + dev_info(rdev_to_dev(rdev), "Device renamed to %s successfully", + rdev->dev_name); + } +} + +void bnxt_re_debugfs_add_pdev(struct bnxt_re_dev *rdev) +{ + const char *pdev_name; + + if (IS_ERR_OR_NULL(bnxt_re_debugfs_root)) + return; + + pdev_name = pci_name(rdev->en_dev->pdev); + rdev->pdev_debug_dir = debugfs_create_dir(pdev_name, + bnxt_re_debugfs_root); + if (IS_ERR_OR_NULL(rdev->pdev_debug_dir)) { + dev_dbg(rdev_to_dev(rdev), "Unable to create debugfs %s", + pdev_name); + return; + } + rdev->en_qp_dbg = 1; + bnxt_re_add_dbg_files(rdev); + bnxt_re_add_hdbr_knobs(rdev); +} + +void bnxt_re_debugfs_rem_pdev(struct bnxt_re_dev *rdev) +{ + bnxt_re_rem_hdbr_knobs(rdev); + debugfs_remove_recursive(rdev->pdev_debug_dir); + rdev->pdev_debug_dir = NULL; +} + +void bnxt_re_debugfs_add_port(struct bnxt_re_dev *rdev, char *dev_name) +{ + if (IS_ERR_OR_NULL(bnxt_re_debugfs_root)) + return; + + if (rdev->en_dev) { + rdev->port_debug_dir = debugfs_create_dir(dev_name, + bnxt_re_debugfs_root); + if (IS_ERR_OR_NULL(rdev->port_debug_dir)) { + dev_dbg(rdev_to_dev(rdev), + "Unable to create debugfs %s", + dev_name); + return; + } + + rdev->info = debugfs_create_file("info", 00400, + rdev->port_debug_dir, rdev, + &bnxt_re_info_dbg_ops); + if (IS_ERR_OR_NULL(rdev->info)) { + dev_dbg(rdev_to_dev(rdev), + "Unable to create debugfs info"); + return; + } + + rdev->sp_perf_stats = debugfs_create_file("sp_perf_stats", 0644, + rdev->port_debug_dir, rdev, + &bnxt_re_perf_dbg_ops); + if (IS_ERR_OR_NULL(rdev->sp_perf_stats)) { + dev_dbg(rdev_to_dev(rdev), + "Unable to create debugfs info"); + return; + } + + rdev->drv_dbg_stats = debugfs_create_file("drv_dbg_stats", 0644, + rdev->port_debug_dir, rdev, + &bnxt_re_drv_stats_dbg_ops); + if (IS_ERR_OR_NULL(rdev->drv_dbg_stats)) { + dev_dbg(rdev_to_dev(rdev), + "Unable to create debugfs 
driver stats"); + return; + } + } +} + +void bnxt_re_rem_dbg_files(struct bnxt_re_dev *rdev) +{ + debugfs_remove_recursive(rdev->pdev_qpinfo_dir); + rdev->pdev_qpinfo_dir = NULL; +} + +void bnxt_re_debugfs_rem_port(struct bnxt_re_dev *rdev) +{ + debugfs_remove_recursive(rdev->port_debug_dir); + rdev->port_debug_dir = NULL; + rdev->info = NULL; +} + +void bnxt_re_debugfs_remove(void) +{ + debugfs_remove_recursive(bnxt_re_debugfs_root); + bnxt_re_debugfs_root = NULL; +} + +void bnxt_re_debugfs_init(void) +{ + bnxt_re_debugfs_root = debugfs_create_dir(ROCE_DRV_MODULE_NAME, NULL); + if (IS_ERR_OR_NULL(bnxt_re_debugfs_root)) { + dev_dbg(NULL, "%s: Unable to create debugfs root directory ", + ROCE_DRV_MODULE_NAME); + dev_dbg(NULL, "with err 0x%lx", PTR_ERR(bnxt_re_debugfs_root)); + return; + } +} +#endif diff --git a/bnxt_re-1.10.3-229.0.139.0/debugfs.h b/bnxt_re-1.10.3-229.0.139.0/debugfs.h new file mode 100644 index 0000000..4686fb3 --- /dev/null +++ b/bnxt_re-1.10.3-229.0.139.0/debugfs.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2015-2023, Broadcom. All rights reserved. The term + * Broadcom refers to Broadcom Inc. and/or its subsidiaries. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Author: Eddie Wai + * + * Description: DebugFS header + */ + +#ifndef __BNXT_RE_DEBUGFS__ +#define __BNXT_RE_DEBUGFS__ + +#define BNXT_RE_DEBUGFS_QP_INFO_MAX_SIZE 512 + +extern struct list_head bnxt_re_dev_list; + +void bnxt_re_debugfs_init(void); +void bnxt_re_debugfs_remove(void); + +#endif diff --git a/bnxt_re-1.10.3-229.0.139.0/hdbr.c b/bnxt_re-1.10.3-229.0.139.0/hdbr.c new file mode 100644 index 0000000..8df7254 --- /dev/null +++ b/bnxt_re-1.10.3-229.0.139.0/hdbr.c @@ -0,0 +1,642 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2022-2023 Broadcom Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ */ + +#include +#include +#include +#include + +#include "bnxt_re.h" +#include "bnxt.h" +#include "bnxt_hdbr.h" +#include "hdbr.h" + +static void bnxt_re_hdbr_wait_hw_read_complete(struct bnxt_re_dev *rdev, int group) +{ + /* + * TODO: We need a deterministic signal/event/operation here to make sure + * HW doesn't read host memory of DB copy region. Then we could go ahead + * to free the page safely. + */ +} + +static void bnxt_re_hdbr_free_pg_task(struct work_struct *work) +{ + struct bnxt_re_hdbr_free_pg_work *wk = + container_of(work, struct bnxt_re_hdbr_free_pg_work, work); + + bnxt_re_hdbr_wait_hw_read_complete(wk->rdev, wk->group); + + /* + * TODO: As a temporary solution of preventing HW access a freed page, we'll + * not free the page. Instead, we put it into reusable free page list. + * dma_free_coherent(&wk->rdev->en_dev->pdev->dev, wk->size, wk->pg->kptr, + * wk->pg->da); + * kfree(wk->pg); + */ + mutex_lock(&wk->rdev->hdbr_fpg_lock); + list_add_tail(&wk->pg->pg_node, &wk->rdev->hdbr_fpgs); + mutex_unlock(&wk->rdev->hdbr_fpg_lock); + + kfree(wk); +} + +static struct hdbr_pg *hdbr_reuse_page(struct bnxt_re_dev *rdev) +{ + struct hdbr_pg *pg = NULL; + + mutex_lock(&rdev->hdbr_fpg_lock); + if (!list_empty(&rdev->hdbr_fpgs)) { + pg = list_first_entry(&rdev->hdbr_fpgs, struct hdbr_pg, pg_node); + list_del(&pg->pg_node); + } + mutex_unlock(&rdev->hdbr_fpg_lock); + + return pg; +} + +/* + * This function allocates a 4K page as DB copy app page, and link it to the + * main kernel table which is managed by L2 driver. + * + * Inside RoCE DB copy app page, DBs are grouped by group type. + * DBC_GROUP_SQ : grp_size = 1, + * offset 0: SQ producer index doorbell + * DBC_GROUP_RQ : grp_size = 1, + * offset 0: RQ producer index doorbell + * DBC_GROUP_SRQ : grp_size = 3, + * offset 0: SRQ producer index doorbell + * offset 1: SRQ_ARMENA (must before SRQ_ARM) + * offset 2: SRQ_ARM + * DBC_GROUP_CQ : grp_size = 4, + * offset 0: CQ consumer index doorbell + * offset 1: CQ_ARMENA (must before CQ_ARMALL/SE) + * offset 2: CQ_ARMALL/CQ_ARMASE (share slot) + * offset 3: CUTOFF_ACK + */ +static struct hdbr_pg *hdbr_alloc_page(struct bnxt_re_dev *rdev, int group, u16 pi) +{ + struct bnxt_hdbr_ktbl *ktbl; + struct hdbr_pg *pg; + int rc; + + ktbl = rdev->en_dev->hdbr_info->ktbl[group]; + if (!ktbl) + return NULL; + pg = hdbr_reuse_page(rdev); + if (pg) { + u64 *kptr = pg->kptr; + dma_addr_t da = pg->da; + + memset(pg, 0, sizeof(*pg)); + memset(kptr, 0, PAGE_SIZE_4K); + pg->kptr = kptr; + pg->da = da; + } else { + pg = kzalloc(sizeof(*pg), GFP_KERNEL); + if (!pg) + return NULL; + pg->kptr = dma_alloc_coherent(&rdev->en_dev->pdev->dev, PAGE_SIZE_4K, &pg->da, + GFP_KERNEL | __GFP_ZERO); + if (!pg->kptr) + goto alloc_err; + } + pg->grp_size = bnxt_re_hdbr_group_size(group); + pg->first_avail = 0; + pg->first_empty = 0; + pg->size = PAGE_SIZE_4K / HDBR_DB_SIZE / pg->grp_size; + pg->blk_avail = pg->size; + /* Register this page to main kernel table in L2 driver */ + rc = bnxt_hdbr_reg_apg(ktbl, pg->da, &pg->ktbl_idx, pi); + if (rc) + goto reg_page_err; + + return pg; + +reg_page_err: + dma_free_coherent(&rdev->en_dev->pdev->dev, PAGE_SIZE_4K, pg->kptr, pg->da); + +alloc_err: + kfree(pg); + + return NULL; +} + +static void hdbr_dealloc_page(struct bnxt_re_dev *rdev, struct hdbr_pg *pg, int group) +{ + struct bnxt_hdbr_ktbl *ktbl = rdev->en_dev->hdbr_info->ktbl[group]; + struct bnxt_re_hdbr_free_pg_work *wk; + + if (!ktbl) { + dev_err(rdev_to_dev(rdev), "L2 driver has no support for unreg page!"); + return; + } + 
/* Unregister this page from main kernel table in L2 driver */ + bnxt_hdbr_unreg_apg(ktbl, pg->ktbl_idx); + + /* Free page and structure memory in background */ + wk = kzalloc(sizeof(*wk), GFP_ATOMIC); + if (!wk) { + dev_err(rdev_to_dev(rdev), "Failed to allocate wq for freeing page!"); + return; + } + wk->rdev = rdev; + wk->pg = pg; + wk->size = PAGE_SIZE_4K; + wk->group = group; + INIT_WORK(&wk->work, bnxt_re_hdbr_free_pg_task); + queue_work(rdev->hdbr_wq, &wk->work); +} + +static __le64 *hdbr_claim_slot(struct hdbr_pg *pg) +{ + int i, n, idx; + + n = pg->grp_size; + idx = pg->first_avail * n; + for (i = 0; i < n; i++) + pg->kptr[idx + i] = cpu_to_le64(DBC_VALUE_INIT); + pg->blk_avail--; + + /* Update indices for the next allocation */ + if (pg->first_avail == pg->first_empty) { + pg->first_avail++; + pg->first_empty++; + if (pg->first_empty < pg->size) + pg->kptr[pg->first_empty * n] = cpu_to_le64(DBC_VALUE_LAST); + } else { + while (++pg->first_avail < pg->first_empty) { + if (!pg->kptr[pg->first_avail * n]) + break; + } + } + return pg->kptr + idx; +} + +static void hdbr_clear_slot(struct hdbr_pg *pg, int pos) +{ + int i; + + for (i = 0; i < pg->grp_size; i++) + pg->kptr[pos * pg->grp_size + i] = 0; + pg->blk_avail++; + if (pos < pg->first_avail) + pg->first_avail = pos; +} + +static void bnxt_re_hdbr_db_unreg(struct bnxt_re_dev *rdev, int group, + struct bnxt_qplib_db_info *dbinfo) +{ + struct bnxt_re_hdbr_app *app; + struct hdbr_pg_lst *plst; + struct hdbr_pg *pg; + bool found = false; + int ktbl_idx; + __le64 *dbc; + + if (group >= DBC_GROUP_MAX) + return; + app = dbinfo->app; + ktbl_idx = dbinfo->ktbl_idx; + dbc = dbinfo->dbc; + if (!app || !dbc) { + dev_err(rdev_to_dev(rdev), "Invalid unreg db params, app=0x%px, ktbl_idx=%d," + " dbc=0x%px\n", app, ktbl_idx, dbc); + return; + } + + plst = &app->pg_lst[group]; + mutex_lock(&plst->lst_lock); + list_for_each_entry(pg, &plst->pg_head, pg_node) { + if (pg->ktbl_idx == ktbl_idx) { + int pos; + + pos = ((u64)dbc - (u64)pg->kptr) / HDBR_DB_SIZE / pg->grp_size; + hdbr_clear_slot(pg, pos); + plst->blk_avail++; + found = true; + break; + } + } + + /* Additionally, free the page if it is empty.
*/ + if (found && pg->blk_avail == pg->size) { + plst->blk_avail -= pg->blk_avail; + list_del(&pg->pg_node); + hdbr_dealloc_page(rdev, pg, group); + } + + mutex_unlock(&plst->lst_lock); + + dbinfo->app = NULL; + dbinfo->ktbl_idx = 0; + dbinfo->dbc = NULL; + + if (!found) + dev_err(rdev_to_dev(rdev), "Fatal: DB copy not found\n"); +} + +void bnxt_re_hdbr_db_unreg_srq(struct bnxt_re_dev *rdev, struct bnxt_re_srq *srq) +{ + struct bnxt_qplib_db_info *dbinfo = &srq->qplib_srq.dbinfo; + + bnxt_re_hdbr_db_unreg(rdev, DBC_GROUP_SRQ, dbinfo); +} + +void bnxt_re_hdbr_db_unreg_qp(struct bnxt_re_dev *rdev, struct bnxt_re_qp *qp) +{ + struct bnxt_qplib_db_info *dbinfo; + + dbinfo = &qp->qplib_qp.sq.dbinfo; + bnxt_re_hdbr_db_unreg(rdev, DBC_GROUP_SQ, dbinfo); + if (qp->qplib_qp.srq) + return; + dbinfo = &qp->qplib_qp.rq.dbinfo; + bnxt_re_hdbr_db_unreg(rdev, DBC_GROUP_RQ, dbinfo); +} + +void bnxt_re_hdbr_db_unreg_cq(struct bnxt_re_dev *rdev, struct bnxt_re_cq *cq) +{ + struct bnxt_qplib_db_info *dbinfo = &cq->qplib_cq.dbinfo; + + bnxt_re_hdbr_db_unreg(rdev, DBC_GROUP_CQ, dbinfo); +} + +static __le64 *bnxt_re_hdbr_db_reg(struct bnxt_re_dev *rdev, struct bnxt_re_hdbr_app *app, + int group, int *ktbl_idx, u16 pi) +{ + struct hdbr_pg_lst *plst; + struct hdbr_pg *pg; + __le64 *dbc = NULL; + + if (group >= DBC_GROUP_MAX) + return NULL; + + plst = &app->pg_lst[group]; + mutex_lock(&plst->lst_lock); + if (plst->blk_avail == 0) { + pg = hdbr_alloc_page(rdev, group, pi); + if (!pg) + goto exit; + list_add(&pg->pg_node, &plst->pg_head); + plst->blk_avail += pg->blk_avail; + } + list_for_each_entry(pg, &plst->pg_head, pg_node) { + if (pg->blk_avail > 0) { + dbc = hdbr_claim_slot(pg); + *ktbl_idx = pg->ktbl_idx; + plst->blk_avail--; + break; + } + } + +exit: + mutex_unlock(&plst->lst_lock); + return dbc; +} + +int bnxt_re_hdbr_db_reg_srq(struct bnxt_re_dev *rdev, struct bnxt_re_srq *srq, + struct bnxt_re_ucontext *cntx, struct bnxt_re_srq_resp *resp) +{ + struct bnxt_qplib_db_info *dbinfo; + struct bnxt_re_hdbr_app *app; + u16 pi = 0; + + dbinfo = &srq->qplib_srq.dbinfo; + if (cntx) { + app = cntx->hdbr_app; + pi = (u16)cntx->dpi.dpi; + } else { + app = container_of(rdev->hdbr_privileged, struct bnxt_re_hdbr_app, lst); + } + + dbinfo->dbc = bnxt_re_hdbr_db_reg(rdev, app, DBC_GROUP_SRQ, &dbinfo->ktbl_idx, pi); + if (!dbinfo->dbc) + return -ENOMEM; + dbinfo->app = app; + dbinfo->dbc_dt = 0; + if (resp) + resp->hdbr_kaddr = (__u64)dbinfo->dbc; + return 0; +} + +int bnxt_re_hdbr_db_reg_qp(struct bnxt_re_dev *rdev, struct bnxt_re_qp *qp, + struct bnxt_re_pd *pd, struct bnxt_re_qp_resp *resp) +{ + struct bnxt_re_ucontext *cntx = NULL; + struct ib_ucontext *context = NULL; + struct bnxt_qplib_db_info *dbinfo; + struct bnxt_re_hdbr_app *app; + u16 pi = 0; + + if (pd) { + context = pd->ib_pd.uobject->context; + cntx = to_bnxt_re(context, struct bnxt_re_ucontext, ib_uctx); + } + if (cntx) { + app = cntx->hdbr_app; + pi = (u16)cntx->dpi.dpi; + } else { + app = container_of(rdev->hdbr_privileged, struct bnxt_re_hdbr_app, lst); + } + + /* sq */ + dbinfo = &qp->qplib_qp.sq.dbinfo; + dbinfo->dbc = bnxt_re_hdbr_db_reg(rdev, app, DBC_GROUP_SQ, &dbinfo->ktbl_idx, pi); + if (!dbinfo->dbc) + return -ENOMEM; + dbinfo->app = app; + if (*rdev->hdbr_dt) + dbinfo->dbc_dt = 1; + else + dbinfo->dbc_dt = 0; + if (resp) { + resp->hdbr_kaddr_sq = (__u64)dbinfo->dbc; + resp->hdbr_dt = (__u32)dbinfo->dbc_dt; + } + + if (qp->qplib_qp.srq) + return 0; + + /* rq */ + dbinfo = &qp->qplib_qp.rq.dbinfo; + dbinfo->dbc = bnxt_re_hdbr_db_reg(rdev, app, 
DBC_GROUP_RQ, &dbinfo->ktbl_idx, pi); + if (!dbinfo->dbc) { + bnxt_re_hdbr_db_unreg_qp(rdev, qp); + return -ENOMEM; + } + dbinfo->app = app; + dbinfo->dbc_dt = 0; + if (resp) + resp->hdbr_kaddr_rq = (__u64)dbinfo->dbc; + + return 0; +} + +int bnxt_re_hdbr_db_reg_cq(struct bnxt_re_dev *rdev, struct bnxt_re_cq *cq, + struct bnxt_re_ucontext *cntx, struct bnxt_re_cq_resp *resp, + struct bnxt_re_cq_req *ureq) +{ + struct bnxt_qplib_db_info *dbinfo; + struct bnxt_re_hdbr_app *app; + u16 pi = 0; + + dbinfo = &cq->qplib_cq.dbinfo; + if (cntx) { + app = cntx->hdbr_app; + pi = (u16)cntx->dpi.dpi; + } else { + app = container_of(rdev->hdbr_privileged, struct bnxt_re_hdbr_app, lst); + } + + dbinfo->dbc = bnxt_re_hdbr_db_reg(rdev, app, DBC_GROUP_CQ, &dbinfo->ktbl_idx, pi); + if (!dbinfo->dbc) + return -ENOMEM; + dbinfo->app = app; + dbinfo->dbc_dt = 0; + if (resp && ureq && ureq->comp_mask & BNXT_RE_COMP_MASK_CQ_REQ_HAS_HDBR_KADDR) { + resp->hdbr_kaddr = (__u64)dbinfo->dbc; + resp->comp_mask |= BNXT_RE_COMP_MASK_CQ_HAS_HDBR_KADDR; + } + return 0; +} + +struct bnxt_re_hdbr_app *bnxt_re_hdbr_alloc_app(struct bnxt_re_dev *rdev, bool user) +{ + struct bnxt_re_hdbr_app *app; + int group; + + app = kzalloc(sizeof(*app), GFP_KERNEL); + if (!app) { + dev_err(rdev_to_dev(rdev), "hdbr app alloc failed!"); + return NULL; + } + INIT_LIST_HEAD(&app->lst); + for (group = DBC_GROUP_SQ; group < DBC_GROUP_MAX; group++) { + app->pg_lst[group].group = group; + INIT_LIST_HEAD(&app->pg_lst[group].pg_head); + app->pg_lst[group].blk_avail = 0; + mutex_init(&app->pg_lst[group].lst_lock); + } + + if (user) { + mutex_lock(&rdev->hdbr_lock); + list_add(&app->lst, &rdev->hdbr_apps); + mutex_unlock(&rdev->hdbr_lock); + } + + return app; +} + +void bnxt_re_hdbr_dealloc_app(struct bnxt_re_dev *rdev, struct bnxt_re_hdbr_app *app) +{ + struct list_head *head; + struct hdbr_pg *pg; + int group; + + for (group = DBC_GROUP_SQ; group < DBC_GROUP_MAX; group++) { + head = &app->pg_lst[group].pg_head; + while (!list_empty(head)) { + pg = list_first_entry(head, struct hdbr_pg, pg_node); + list_del(&pg->pg_node); + hdbr_dealloc_page(rdev, pg, group); + } + } + + kfree(app); +} + +int bnxt_re_hdbr_init(struct bnxt_re_dev *rdev) +{ + struct bnxt_re_hdbr_app *drv; + + /* HDBR init for normal apps */ + INIT_LIST_HEAD(&rdev->hdbr_apps); + + if (rdev->en_dev->hdbr_info->hdbr_enabled) { + rdev->hdbr_enabled = true; + rdev->chip_ctx->modes.hdbr_enabled = true; + } else { + rdev->hdbr_enabled = false; + return 0; + } + + /* Init free page list */ + mutex_init(&rdev->hdbr_fpg_lock); + INIT_LIST_HEAD(&rdev->hdbr_fpgs); + + rdev->hdbr_wq = create_singlethread_workqueue("bnxt_re_hdbr_wq"); + if (!rdev->hdbr_wq) + return -ENOMEM; + + mutex_init(&rdev->hdbr_lock); + rdev->hdbr_dt = &rdev->en_dev->hdbr_info->debug_trace; + + /* HDBR init for driver app */ + drv = bnxt_re_hdbr_alloc_app(rdev, false); + if (!drv) { + destroy_workqueue(rdev->hdbr_wq); + rdev->hdbr_wq = NULL; + return -ENOMEM; + } + rdev->hdbr_privileged = &drv->lst; + + return 0; +} + +void bnxt_re_hdbr_uninit(struct bnxt_re_dev *rdev) +{ + struct bnxt_re_hdbr_app *app; + struct list_head *head; + struct hdbr_pg *pg; + + if (!rdev->hdbr_enabled) + return; + + /* Uninitialize normal apps */ + mutex_lock(&rdev->hdbr_lock); + head = &rdev->hdbr_apps; + while (!list_empty(head)) { + app = list_first_entry(head, struct bnxt_re_hdbr_app, lst); + list_del(&app->lst); + bnxt_re_hdbr_dealloc_app(rdev, app); + } + mutex_unlock(&rdev->hdbr_lock); + + /* Uninitialize driver app */ + if 
(rdev->hdbr_privileged) { + app = container_of(rdev->hdbr_privileged, struct bnxt_re_hdbr_app, lst); + bnxt_re_hdbr_dealloc_app(rdev, app); + rdev->hdbr_privileged = NULL; + } + + if (rdev->hdbr_wq) { + flush_workqueue(rdev->hdbr_wq); + destroy_workqueue(rdev->hdbr_wq); + rdev->hdbr_wq = NULL; + } + + /* + * At this point, all app pages are flushed into free page list. + * Dealloc all free pages. + */ + mutex_lock(&rdev->hdbr_fpg_lock); + head = &rdev->hdbr_fpgs; + while (!list_empty(head)) { + pg = list_first_entry(head, struct hdbr_pg, pg_node); + list_del(&pg->pg_node); + dma_free_coherent(&rdev->en_dev->pdev->dev, PAGE_SIZE_4K, pg->kptr, pg->da); + kfree(pg); + } + mutex_unlock(&rdev->hdbr_fpg_lock); +} + +static void bnxt_re_hdbr_pages_dump(struct hdbr_pg_lst *plst) +{ + struct hdbr_pg *pg; + __le64 *dbc_ptr; + int i, cnt = 0; + + mutex_lock(&plst->lst_lock); + list_for_each_entry(pg, &plst->pg_head, pg_node) { + pr_info("page cnt = %d\n", cnt); + pr_info("kptr = 0x%016llX\n", (u64)pg->kptr); + pr_info("dma = 0x%016llX\n", pg->da); + pr_info("grp_size = %d\n", pg->grp_size); + pr_info("first_avail = %d\n", pg->first_avail); + pr_info("first_empty = %d\n", pg->first_empty); + pr_info("blk_avail = %d\n", pg->blk_avail); + pr_info("ktbl_idx = %d\n", pg->ktbl_idx); + dbc_ptr = pg->kptr; + if (!dbc_ptr) { + pr_info("Page content not available\n"); + break; + } + for (i = 0; i < 512; i++) { + if (i > 0 && i < 511 && !dbc_ptr[i]) + continue; + pr_info("page[%d][%3d] 0x%016llX\n", cnt, i, le64_to_cpu(dbc_ptr[i])); + } + cnt++; + } + mutex_unlock(&plst->lst_lock); +} + +char *bnxt_re_hdbr_user_dump(struct bnxt_re_dev *rdev, int group) +{ + struct list_head *head = &rdev->hdbr_apps; + struct bnxt_re_hdbr_app *app; + int cnt = 0; + char *buf; + + mutex_lock(&rdev->hdbr_lock); + list_for_each_entry(app, head, lst) + cnt++; + buf = kasprintf(GFP_KERNEL, "Total apps = %d\n", cnt); + + /* Page content dump to dmesg console */ + pr_info("====== Dumping %s user apps DB copy page info ======\n%s", rdev->dev_name, buf); + cnt = 0; + list_for_each_entry(app, head, lst) { + struct hdbr_pg_lst *plst; + + plst = &app->pg_lst[group]; + pr_info("App cnt = %d\n", cnt); + pr_info("group = %d\n", plst->group); + pr_info("blk_avail = %d\n", plst->blk_avail); + bnxt_re_hdbr_pages_dump(plst); + cnt++; + } + mutex_unlock(&rdev->hdbr_lock); + + return buf; +} + +char *bnxt_re_hdbr_driver_dump(char *dev_name, struct list_head *head, int group) +{ + struct bnxt_re_hdbr_app *app; + struct hdbr_pg_lst *plst; + char *buf; + + app = container_of(head, struct bnxt_re_hdbr_app, lst); + plst = &app->pg_lst[group]; + + /* Structure data to debugfs console */ + buf = kasprintf(GFP_KERNEL, + "group = %d\n" + "blk_avail = %d\n", + plst->group, + plst->blk_avail); + + /* Page content dump to dmesg console */ + pr_info("====== Dumping %s driver DB copy page info ======\n%s", dev_name, buf); + bnxt_re_hdbr_pages_dump(plst); + + return buf; +} + +char *bnxt_re_hdbr_dump(struct bnxt_re_dev *rdev, int group, bool user) +{ + struct list_head *lst; + + if (user) { + lst = &rdev->hdbr_apps; + if (list_empty(lst)) + goto no_data; + return bnxt_re_hdbr_user_dump(rdev, group); + } + + lst = rdev->hdbr_privileged; + if (!lst) + goto no_data; + return bnxt_re_hdbr_driver_dump(rdev->dev_name, lst, group); + +no_data: + return kasprintf(GFP_KERNEL, "No data available!\n"); +} diff --git a/bnxt_re-1.10.3-229.0.139.0/hdbr.h b/bnxt_re-1.10.3-229.0.139.0/hdbr.h new file mode 100644 index 0000000..0878d1e --- /dev/null +++ 
b/bnxt_re-1.10.3-229.0.139.0/hdbr.h @@ -0,0 +1,88 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2022-2023 Broadcom Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#ifndef __HDBR_H__ +#define __HDBR_H__ + +#include "bnxt_hdbr.h" + +/* RoCE HW based doorbell drop recovery definition */ +struct hdbr_pg { + struct list_head pg_node; + __le64 *kptr; + dma_addr_t da; + int grp_size; + int first_avail; + int first_empty; + int size; + int blk_avail; + int ktbl_idx; +}; + +struct hdbr_pg_lst { + int group; + int blk_avail; + struct list_head pg_head; + struct mutex lst_lock; /* protect the pg_head list */ +}; + +struct bnxt_re_hdbr_app { + struct list_head lst; + struct hdbr_pg_lst pg_lst[DBC_GROUP_MAX]; +}; + +struct bnxt_re_hdbr_dbgfs_file_data { + struct bnxt_re_dev *rdev; + int group; + bool user; +}; + +struct bnxt_re_hdbr_dfs_data { + struct dentry *hdbr_dir; + struct bnxt_re_hdbr_dbgfs_file_data file_data[2][DBC_GROUP_MAX]; +}; + +struct bnxt_re_hdbr_free_pg_work { + struct work_struct work; + struct bnxt_re_dev *rdev; + struct hdbr_pg *pg; + size_t size; + int group; +}; + +#define DBC_GROUP_SIZE_SQ_RQ 1 +#define DBC_GROUP_SIZE_SRQ 3 +#define DBC_GROUP_SIZE_CQ 4 + +static inline int bnxt_re_hdbr_group_size(int group) +{ + if (group == DBC_GROUP_SRQ) + return DBC_GROUP_SIZE_SRQ; + if (group == DBC_GROUP_CQ) + return DBC_GROUP_SIZE_CQ; + return DBC_GROUP_SIZE_SQ_RQ; +} + +int bnxt_re_hdbr_init(struct bnxt_re_dev *rdev); +void bnxt_re_hdbr_uninit(struct bnxt_re_dev *rdev); +struct bnxt_re_hdbr_app *bnxt_re_hdbr_alloc_app(struct bnxt_re_dev *rdev, bool user); +void bnxt_re_hdbr_dealloc_app(struct bnxt_re_dev *rdev, struct bnxt_re_hdbr_app *app); +void bnxt_re_hdbr_db_unreg_srq(struct bnxt_re_dev *rdev, struct bnxt_re_srq *srq); +void bnxt_re_hdbr_db_unreg_qp(struct bnxt_re_dev *rdev, struct bnxt_re_qp *qp); +void bnxt_re_hdbr_db_unreg_cq(struct bnxt_re_dev *rdev, struct bnxt_re_cq *cq); +int bnxt_re_hdbr_db_reg_srq(struct bnxt_re_dev *rdev, struct bnxt_re_srq *srq, + struct bnxt_re_ucontext *cntx, struct bnxt_re_srq_resp *resp); +int bnxt_re_hdbr_db_reg_qp(struct bnxt_re_dev *rdev, struct bnxt_re_qp *qp, + struct bnxt_re_pd *pd, struct bnxt_re_qp_resp *resp); +int bnxt_re_hdbr_db_reg_cq(struct bnxt_re_dev *rdev, struct bnxt_re_cq *cq, + struct bnxt_re_ucontext *cntx, struct bnxt_re_cq_resp *resp, + struct bnxt_re_cq_req *ureq); +char *bnxt_re_hdbr_dump(struct bnxt_re_dev *rdev, int group, bool user); + +#endif diff --git a/bnxt_re-1.10.3-229.0.139.0/hw_counters.c b/bnxt_re-1.10.3-229.0.139.0/hw_counters.c new file mode 100644 index 0000000..ba449ae --- /dev/null +++ b/bnxt_re-1.10.3-229.0.139.0/hw_counters.c @@ -0,0 +1,511 @@ +/* + * Copyright (c) 2023, Broadcom. All rights reserved. The term + * Broadcom refers to Broadcom Inc. and/or its subsidiaries. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1.
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Description: Statistics + * + */ + +#include + +#include "roce_hsi.h" +#include "qplib_res.h" +#include "qplib_rcfw.h" +#include "bnxt_re.h" +#include "hw_counters.h" + +#ifdef HAVE_RDMA_STAT_DESC +static const struct rdma_stat_desc bnxt_re_stat_descs[] = { + [BNXT_RE_ACTIVE_PD].name = "active_pds", + [BNXT_RE_ACTIVE_AH].name = "active_ahs", + [BNXT_RE_ACTIVE_QP].name = "active_qps", + [BNXT_RE_ACTIVE_RC_QP].name = "active_rc_qps", + [BNXT_RE_ACTIVE_UD_QP].name = "active_ud_qps", + [BNXT_RE_ACTIVE_SRQ].name = "active_srqs", + [BNXT_RE_ACTIVE_CQ].name = "active_cqs", + [BNXT_RE_ACTIVE_MR].name = "active_mrs", + [BNXT_RE_ACTIVE_MW].name = "active_mws", + [BNXT_RE_WATERMARK_PD].name = "watermark_pds", + [BNXT_RE_WATERMARK_AH].name = "watermark_ahs", + [BNXT_RE_WATERMARK_QP].name = "watermark_qps", + [BNXT_RE_WATERMARK_RC_QP].name = "watermark_rc_qps", + [BNXT_RE_WATERMARK_UD_QP].name = "watermark_ud_qps", + [BNXT_RE_WATERMARK_SRQ].name = "watermark_srqs", + [BNXT_RE_WATERMARK_CQ].name = "watermark_cqs", + [BNXT_RE_WATERMARK_MR].name = "watermark_mrs", + [BNXT_RE_WATERMARK_MW].name = "watermark_mws", + [BNXT_RE_RESIZE_CQ_CNT].name = "resize_cq_cnt", + [BNXT_RE_RX_PKTS].name = "rx_pkts", + [BNXT_RE_RX_BYTES].name = "rx_bytes", + [BNXT_RE_TX_PKTS].name = "tx_pkts", + [BNXT_RE_TX_BYTES].name = "tx_bytes", + [BNXT_RE_RECOVERABLE_ERRORS].name = "recoverable_errors", + [BNXT_RE_TX_ERRORS].name = "tx_roce_errors", + [BNXT_RE_TX_DISCARDS].name = "tx_roce_discards", + [BNXT_RE_RX_ERRORS].name = "rx_roce_errors", + [BNXT_RE_RX_DISCARDS].name = "rx_roce_discards", + [BNXT_RE_TO_RETRANSMITS].name = "to_retransmits", + [BNXT_RE_SEQ_ERR_NAKS_RCVD].name = "seq_err_naks_rcvd", + [BNXT_RE_MAX_RETRY_EXCEEDED].name = "max_retry_exceeded", + [BNXT_RE_RNR_NAKS_RCVD].name = "rnr_naks_rcvd", + [BNXT_RE_MISSING_RESP].name = "missing_resp", + [BNXT_RE_UNRECOVERABLE_ERR].name = "unrecoverable_err", + [BNXT_RE_BAD_RESP_ERR].name = "bad_resp_err", + [BNXT_RE_LOCAL_QP_OP_ERR].name = "local_qp_op_err", + [BNXT_RE_LOCAL_PROTECTION_ERR].name = "local_protection_err", + [BNXT_RE_MEM_MGMT_OP_ERR].name = "mem_mgmt_op_err", + [BNXT_RE_REMOTE_INVALID_REQ_ERR].name = "remote_invalid_req_err", + [BNXT_RE_REMOTE_ACCESS_ERR].name = "remote_access_err", + [BNXT_RE_REMOTE_OP_ERR].name = "remote_op_err", + [BNXT_RE_DUP_REQ].name = "dup_req", + [BNXT_RE_RES_EXCEED_MAX].name = "res_exceed_max", + 
[BNXT_RE_RES_LENGTH_MISMATCH].name = "res_length_mismatch", + [BNXT_RE_RES_EXCEEDS_WQE].name = "res_exceeds_wqe", + [BNXT_RE_RES_OPCODE_ERR].name = "res_opcode_err", + [BNXT_RE_RES_RX_INVALID_RKEY].name = "res_rx_invalid_rkey", + [BNXT_RE_RES_RX_DOMAIN_ERR].name = "res_rx_domain_err", + [BNXT_RE_RES_RX_NO_PERM].name = "res_rx_no_perm", + [BNXT_RE_RES_RX_RANGE_ERR].name = "res_rx_range_err", + [BNXT_RE_RES_TX_INVALID_RKEY].name = "res_tx_invalid_rkey", + [BNXT_RE_RES_TX_DOMAIN_ERR].name = "res_tx_domain_err", + [BNXT_RE_RES_TX_NO_PERM].name = "res_tx_no_perm", + [BNXT_RE_RES_TX_RANGE_ERR].name = "res_tx_range_err", + [BNXT_RE_RES_IRRQ_OFLOW].name = "res_irrq_oflow", + [BNXT_RE_RES_UNSUP_OPCODE].name = "res_unsup_opcode", + [BNXT_RE_RES_UNALIGNED_ATOMIC].name = "res_unaligned_atomic", + [BNXT_RE_RES_REM_INV_ERR].name = "res_rem_inv_err", + [BNXT_RE_RES_MEM_ERROR].name = "res_mem_err", + [BNXT_RE_RES_SRQ_ERR].name = "res_srq_err", + [BNXT_RE_RES_CMP_ERR].name = "res_cmp_err", + [BNXT_RE_RES_INVALID_DUP_RKEY].name = "res_invalid_dup_rkey", + [BNXT_RE_RES_WQE_FORMAT_ERR].name = "res_wqe_format_err", + [BNXT_RE_RES_CQ_LOAD_ERR].name = "res_cq_load_err", + [BNXT_RE_RES_SRQ_LOAD_ERR].name = "res_srq_load_err", + [BNXT_RE_RES_TX_PCI_ERR].name = "res_tx_pci_err", + [BNXT_RE_RES_RX_PCI_ERR].name = "res_rx_pci_err", + [BNXT_RE_OUT_OF_SEQ_ERR].name = "oos_drop_count", + [BNXT_RE_TX_ATOMIC_REQ].name = "tx_atomic_req", + [BNXT_RE_TX_READ_REQ].name = "tx_read_req", + [BNXT_RE_TX_READ_RES].name = "tx_read_resp", + [BNXT_RE_TX_WRITE_REQ].name = "tx_write_req", + [BNXT_RE_TX_SEND_REQ].name = "tx_send_req", + [BNXT_RE_TX_ROCE_PKTS].name = "tx_roce_only_pkts", + [BNXT_RE_TX_ROCE_BYTES].name = "tx_roce_only_bytes", + [BNXT_RE_RX_ATOMIC_REQ].name = "rx_atomic_req", + [BNXT_RE_RX_READ_REQ].name = "rx_read_req", + [BNXT_RE_RX_READ_RESP].name = "rx_read_resp", + [BNXT_RE_RX_WRITE_REQ].name = "rx_write_req", + [BNXT_RE_RX_SEND_REQ].name = "rx_send_req", + [BNXT_RE_RX_ROCE_PKTS].name = "rx_roce_only_pkts", + [BNXT_RE_RX_ROCE_BYTES].name = "rx_roce_only_bytes", + [BNXT_RE_RX_ROCE_GOOD_PKTS].name = "rx_roce_good_pkts", + [BNXT_RE_RX_ROCE_GOOD_BYTES].name = "rx_roce_good_bytes", + [BNXT_RE_OOB].name = "rx_out_of_buffer", + [BNXT_RE_TX_CNP].name = "tx_cnp_pkts", + [BNXT_RE_RX_CNP].name = "rx_cnp_pkts", + [BNXT_RE_RX_ECN].name = "rx_ecn_marked_pkts", + [BNXT_RE_PACING_RESCHED].name = "pacing_reschedule", + [BNXT_RE_PACING_CMPL].name = "pacing_complete", + [BNXT_RE_PACING_ALERT].name = "pacing_alerts", + [BNXT_RE_DB_FIFO_REG].name = "db_fifo_register", +}; +#else +static const char *const bnxt_re_stat_name[] = { + [BNXT_RE_ACTIVE_PD] = "active_pds", + [BNXT_RE_ACTIVE_AH] = "active_ahs", + [BNXT_RE_ACTIVE_QP] = "active_qps", + [BNXT_RE_ACTIVE_RC_QP] = "active_rc_qps", + [BNXT_RE_ACTIVE_UD_QP] = "active_ud_qps", + [BNXT_RE_ACTIVE_SRQ] = "active_srqs", + [BNXT_RE_ACTIVE_CQ] = "active_cqs", + [BNXT_RE_ACTIVE_MR] = "active_mrs", + [BNXT_RE_ACTIVE_MW] = "active_mws", + [BNXT_RE_WATERMARK_PD] = "watermark_pds", + [BNXT_RE_WATERMARK_AH] = "watermark_ahs", + [BNXT_RE_WATERMARK_QP] = "watermark_qps", + [BNXT_RE_WATERMARK_RC_QP] = "watermark_rc_qps", + [BNXT_RE_WATERMARK_UD_QP] = "watermark_ud_qps", + [BNXT_RE_WATERMARK_SRQ] = "watermark_srqs", + [BNXT_RE_WATERMARK_CQ] = "watermark_cqs", + [BNXT_RE_WATERMARK_MR] = "watermark_mrs", + [BNXT_RE_WATERMARK_MW] = "watermark_mws", + [BNXT_RE_RESIZE_CQ_CNT] = "resize_cq_cnt", + [BNXT_RE_RX_PKTS] = "rx_pkts", + [BNXT_RE_RX_BYTES] = "rx_bytes", + [BNXT_RE_TX_PKTS] = "tx_pkts", + 
[BNXT_RE_TX_BYTES] = "tx_bytes", + [BNXT_RE_RECOVERABLE_ERRORS] = "recoverable_errors", + [BNXT_RE_TX_ERRORS] = "tx_roce_errors", + [BNXT_RE_TX_DISCARDS] = "tx_roce_discards", + [BNXT_RE_RX_ERRORS] = "rx_roce_errors", + [BNXT_RE_RX_DISCARDS] = "rx_roce_discards", + [BNXT_RE_TO_RETRANSMITS] = "to_retransmits", + [BNXT_RE_SEQ_ERR_NAKS_RCVD] = "seq_err_naks_rcvd", + [BNXT_RE_MAX_RETRY_EXCEEDED] = "max_retry_exceeded", + [BNXT_RE_RNR_NAKS_RCVD] = "rnr_naks_rcvd", + [BNXT_RE_MISSING_RESP] = "missing_resp", + [BNXT_RE_UNRECOVERABLE_ERR] = "unrecoverable_err", + [BNXT_RE_BAD_RESP_ERR] = "bad_resp_err", + [BNXT_RE_LOCAL_QP_OP_ERR] = "local_qp_op_err", + [BNXT_RE_LOCAL_PROTECTION_ERR] = "local_protection_err", + [BNXT_RE_MEM_MGMT_OP_ERR] = "mem_mgmt_op_err", + [BNXT_RE_REMOTE_INVALID_REQ_ERR] = "remote_invalid_req_err", + [BNXT_RE_REMOTE_ACCESS_ERR] = "remote_access_err", + [BNXT_RE_REMOTE_OP_ERR] = "remote_op_err", + [BNXT_RE_DUP_REQ] = "dup_req", + [BNXT_RE_RES_EXCEED_MAX] = "res_exceed_max", + [BNXT_RE_RES_LENGTH_MISMATCH] = "res_length_mismatch", + [BNXT_RE_RES_EXCEEDS_WQE] = "res_exceeds_wqe", + [BNXT_RE_RES_OPCODE_ERR] = "res_opcode_err", + [BNXT_RE_RES_RX_INVALID_RKEY] = "res_rx_invalid_rkey", + [BNXT_RE_RES_RX_DOMAIN_ERR] = "res_rx_domain_err", + [BNXT_RE_RES_RX_NO_PERM] = "res_rx_no_perm", + [BNXT_RE_RES_RX_RANGE_ERR] = "res_rx_range_err", + [BNXT_RE_RES_TX_INVALID_RKEY] = "res_tx_invalid_rkey", + [BNXT_RE_RES_TX_DOMAIN_ERR] = "res_tx_domain_err", + [BNXT_RE_RES_TX_NO_PERM] = "res_tx_no_perm", + [BNXT_RE_RES_TX_RANGE_ERR] = "res_tx_range_err", + [BNXT_RE_RES_IRRQ_OFLOW] = "res_irrq_oflow", + [BNXT_RE_RES_UNSUP_OPCODE] = "res_unsup_opcode", + [BNXT_RE_RES_UNALIGNED_ATOMIC] = "res_unaligned_atomic", + [BNXT_RE_RES_REM_INV_ERR] = "res_rem_inv_err", + [BNXT_RE_RES_MEM_ERROR] = "res_mem_err", + [BNXT_RE_RES_SRQ_ERR] = "res_srq_err", + [BNXT_RE_RES_CMP_ERR] = "res_cmp_err", + [BNXT_RE_RES_INVALID_DUP_RKEY] = "res_invalid_dup_rkey", + [BNXT_RE_RES_WQE_FORMAT_ERR] = "res_wqe_format_err", + [BNXT_RE_RES_CQ_LOAD_ERR] = "res_cq_load_err", + [BNXT_RE_RES_SRQ_LOAD_ERR] = "res_srq_load_err", + [BNXT_RE_RES_TX_PCI_ERR] = "res_tx_pci_err", + [BNXT_RE_RES_RX_PCI_ERR] = "res_rx_pci_err", + [BNXT_RE_OUT_OF_SEQ_ERR] = "oos_drop_count", + [BNXT_RE_TX_ATOMIC_REQ] = "tx_atomic_req", + [BNXT_RE_TX_READ_REQ] = "tx_read_req", + [BNXT_RE_TX_READ_RES] = "tx_read_resp", + [BNXT_RE_TX_WRITE_REQ] = "tx_write_req", + [BNXT_RE_TX_SEND_REQ] = "tx_send_req", + [BNXT_RE_TX_ROCE_PKTS] = "tx_roce_only_pkts", + [BNXT_RE_TX_ROCE_BYTES] = "tx_roce_only_bytes", + [BNXT_RE_RX_ATOMIC_REQ] = "rx_atomic_req", + [BNXT_RE_RX_READ_REQ] = "rx_read_req", + [BNXT_RE_RX_READ_RESP] = "rx_read_resp", + [BNXT_RE_RX_WRITE_REQ] = "rx_write_req", + [BNXT_RE_RX_SEND_REQ] = "rx_send_req", + [BNXT_RE_RX_ROCE_PKTS] = "rx_roce_only_pkts", + [BNXT_RE_RX_ROCE_BYTES] = "rx_roce_only_bytes", + [BNXT_RE_RX_ROCE_GOOD_PKTS] = "rx_roce_good_pkts", + [BNXT_RE_RX_ROCE_GOOD_BYTES] = "rx_roce_good_bytes", + [BNXT_RE_OOB] = "rx_out_of_buffer", + [BNXT_RE_TX_CNP] = "tx_cnp_pkts", + [BNXT_RE_RX_CNP] = "rx_cnp_pkts", + [BNXT_RE_RX_ECN] = "rx_ecn_marked_pkts", + [BNXT_RE_PACING_RESCHED] = "pacing_reschedule", + [BNXT_RE_PACING_CMPL] = "pacing_complete", + [BNXT_RE_PACING_ALERT] = "pacing_alerts", + [BNXT_RE_DB_FIFO_REG] = "db_fifo_register", +}; +#endif /* HAVE_RDMA_STAT_DESC */ + +static void bnxt_re_copy_ext_stats(struct bnxt_re_dev *rdev, + struct rdma_hw_stats *stats, + struct bnxt_qplib_ext_stat *s) +{ + stats->value[BNXT_RE_TX_ATOMIC_REQ] = s->tx_atomic_req; + 
stats->value[BNXT_RE_TX_READ_REQ] = s->tx_read_req; + stats->value[BNXT_RE_TX_READ_RES] = s->tx_read_res; + stats->value[BNXT_RE_TX_WRITE_REQ] = s->tx_write_req; + stats->value[BNXT_RE_TX_SEND_REQ] = s->tx_send_req; + stats->value[BNXT_RE_TX_ROCE_PKTS] = s->tx_roce_pkts; + stats->value[BNXT_RE_TX_ROCE_BYTES] = s->tx_roce_bytes; + stats->value[BNXT_RE_RX_ATOMIC_REQ] = s->rx_atomic_req; + stats->value[BNXT_RE_RX_READ_REQ] = s->rx_read_req; + stats->value[BNXT_RE_RX_READ_RESP] = s->rx_read_res; + stats->value[BNXT_RE_RX_WRITE_REQ] = s->rx_write_req; + stats->value[BNXT_RE_RX_SEND_REQ] = s->rx_send_req; + stats->value[BNXT_RE_RX_ROCE_PKTS] = s->rx_roce_pkts; + stats->value[BNXT_RE_RX_ROCE_BYTES] = s->rx_roce_bytes; + stats->value[BNXT_RE_RX_ROCE_GOOD_PKTS] = s->rx_roce_good_pkts; + stats->value[BNXT_RE_RX_ROCE_GOOD_BYTES] = s->rx_roce_good_bytes; + stats->value[BNXT_RE_OOB] = s->rx_out_of_buffer; + stats->value[BNXT_RE_TX_CNP] = s->tx_cnp; + stats->value[BNXT_RE_RX_CNP] = s->rx_cnp; + stats->value[BNXT_RE_RX_ECN] = s->rx_ecn_marked; + stats->value[BNXT_RE_OUT_OF_SEQ_ERR] = s->rx_out_of_sequence; +} + +static int bnxt_re_get_ext_stat(struct bnxt_re_dev *rdev, + struct rdma_hw_stats *stats) +{ + struct bnxt_qplib_query_stats_info sinfo; + struct bnxt_qplib_ext_stat estat = {}; + u32 fid; + int rc; + + fid = PCI_FUNC(rdev->en_dev->pdev->devfn); + /* Set default values for sinfo */ + sinfo.function_id = 0xFFFFFFFF; + sinfo.collection_id = 0xFF; + sinfo.vf_valid = false; + rc = bnxt_qplib_qext_stat(&rdev->rcfw, fid, &estat, &sinfo); + if (rc) + return rc; + bnxt_re_copy_ext_stats(rdev, stats, &estat); + + return rc; +} + +static void bnxt_re_copy_err_stats(struct bnxt_re_dev *rdev, + struct rdma_hw_stats *stats, + struct bnxt_qplib_roce_stats *err_s) +{ + stats->value[BNXT_RE_TO_RETRANSMITS] = + err_s->to_retransmits; + stats->value[BNXT_RE_SEQ_ERR_NAKS_RCVD] = + err_s->seq_err_naks_rcvd; + stats->value[BNXT_RE_MAX_RETRY_EXCEEDED] = + err_s->max_retry_exceeded; + stats->value[BNXT_RE_RNR_NAKS_RCVD] = + err_s->rnr_naks_rcvd; + stats->value[BNXT_RE_MISSING_RESP] = + err_s->missing_resp; + stats->value[BNXT_RE_UNRECOVERABLE_ERR] = + err_s->unrecoverable_err; + stats->value[BNXT_RE_BAD_RESP_ERR] = + err_s->bad_resp_err; + stats->value[BNXT_RE_LOCAL_QP_OP_ERR] = + err_s->local_qp_op_err; + stats->value[BNXT_RE_LOCAL_PROTECTION_ERR] = + err_s->local_protection_err; + stats->value[BNXT_RE_MEM_MGMT_OP_ERR] = + err_s->mem_mgmt_op_err; + stats->value[BNXT_RE_REMOTE_INVALID_REQ_ERR] = + err_s->remote_invalid_req_err; + stats->value[BNXT_RE_REMOTE_ACCESS_ERR] = + err_s->remote_access_err; + stats->value[BNXT_RE_REMOTE_OP_ERR] = + err_s->remote_op_err; + stats->value[BNXT_RE_DUP_REQ] = + err_s->dup_req; + stats->value[BNXT_RE_RES_EXCEED_MAX] = + err_s->res_exceed_max; + stats->value[BNXT_RE_RES_LENGTH_MISMATCH] = + err_s->res_length_mismatch; + stats->value[BNXT_RE_RES_EXCEEDS_WQE] = + err_s->res_exceeds_wqe; + stats->value[BNXT_RE_RES_OPCODE_ERR] = + err_s->res_opcode_err; + stats->value[BNXT_RE_RES_RX_INVALID_RKEY] = + err_s->res_rx_invalid_rkey; + stats->value[BNXT_RE_RES_RX_DOMAIN_ERR] = + err_s->res_rx_domain_err; + stats->value[BNXT_RE_RES_RX_NO_PERM] = + err_s->res_rx_no_perm; + stats->value[BNXT_RE_RES_RX_RANGE_ERR] = + err_s->res_rx_range_err; + stats->value[BNXT_RE_RES_TX_INVALID_RKEY] = + err_s->res_tx_invalid_rkey; + stats->value[BNXT_RE_RES_TX_DOMAIN_ERR] = + err_s->res_tx_domain_err; + stats->value[BNXT_RE_RES_TX_NO_PERM] = + err_s->res_tx_no_perm; + stats->value[BNXT_RE_RES_TX_RANGE_ERR] = + 
err_s->res_tx_range_err; + stats->value[BNXT_RE_RES_IRRQ_OFLOW] = + err_s->res_irrq_oflow; + stats->value[BNXT_RE_RES_UNSUP_OPCODE] = + err_s->res_unsup_opcode; + stats->value[BNXT_RE_RES_UNALIGNED_ATOMIC] = + err_s->res_unaligned_atomic; + stats->value[BNXT_RE_RES_REM_INV_ERR] = + err_s->res_rem_inv_err; + stats->value[BNXT_RE_RES_MEM_ERROR] = + err_s->res_mem_error; + stats->value[BNXT_RE_RES_SRQ_ERR] = + err_s->res_srq_err; + stats->value[BNXT_RE_RES_CMP_ERR] = + err_s->res_cmp_err; + stats->value[BNXT_RE_RES_INVALID_DUP_RKEY] = + err_s->res_invalid_dup_rkey; + stats->value[BNXT_RE_RES_WQE_FORMAT_ERR] = + err_s->res_wqe_format_err; + stats->value[BNXT_RE_RES_CQ_LOAD_ERR] = + err_s->res_cq_load_err; + stats->value[BNXT_RE_RES_SRQ_LOAD_ERR] = + err_s->res_srq_load_err; + stats->value[BNXT_RE_RES_TX_PCI_ERR] = + err_s->res_tx_pci_err; + stats->value[BNXT_RE_RES_RX_PCI_ERR] = + err_s->res_rx_pci_err; + stats->value[BNXT_RE_OUT_OF_SEQ_ERR] = + err_s->res_oos_drop_count; +} + +static void bnxt_re_copy_db_pacing_stats(struct bnxt_re_dev *rdev, + struct rdma_hw_stats *stats) +{ + struct bnxt_re_dbr_sw_stats *dbr_sw_stats = rdev->dbr_sw_stats; + + stats->value[BNXT_RE_PACING_RESCHED] = dbr_sw_stats->dbq_pacing_resched; + stats->value[BNXT_RE_PACING_CMPL] = dbr_sw_stats->dbq_pacing_complete; + stats->value[BNXT_RE_PACING_ALERT] = dbr_sw_stats->dbq_pacing_alerts; + stats->value[BNXT_RE_DB_FIFO_REG] = + readl(rdev->en_dev->bar0 + rdev->dbr_db_fifo_reg_off); +} + +int bnxt_re_get_hw_stats(struct ib_device *ibdev, + struct rdma_hw_stats *stats, + PORT_NUM port, int index) +{ + struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); + struct bnxt_re_res_cntrs *res_s = &rdev->stats.rsors; + struct bnxt_qplib_roce_stats *err_s = NULL; + struct bnxt_qplib_query_stats_info sinfo; + struct ctx_hw_stats *hw_stats = NULL; + int rc; + + err_s = &rdev->stats.dstat.errs; + hw_stats = rdev->qplib_res.hctx->stats.dma; + if (!port || !stats) + return -EINVAL; + + stats->value[BNXT_RE_ACTIVE_QP] = atomic_read(&res_s->qp_count); + stats->value[BNXT_RE_ACTIVE_RC_QP] = atomic_read(&res_s->rc_qp_count); + stats->value[BNXT_RE_ACTIVE_UD_QP] = atomic_read(&res_s->ud_qp_count); + stats->value[BNXT_RE_ACTIVE_SRQ] = atomic_read(&res_s->srq_count); + stats->value[BNXT_RE_ACTIVE_CQ] = atomic_read(&res_s->cq_count); + stats->value[BNXT_RE_ACTIVE_MR] = atomic_read(&res_s->mr_count); + stats->value[BNXT_RE_ACTIVE_MW] = atomic_read(&res_s->mw_count); + stats->value[BNXT_RE_ACTIVE_PD] = atomic_read(&res_s->pd_count); + stats->value[BNXT_RE_ACTIVE_AH] = atomic_read(&res_s->ah_count); + stats->value[BNXT_RE_WATERMARK_QP] = atomic_read(&res_s->max_qp_count); + stats->value[BNXT_RE_WATERMARK_RC_QP] = atomic_read(&res_s->max_rc_qp_count); + stats->value[BNXT_RE_WATERMARK_UD_QP] = atomic_read(&res_s->max_ud_qp_count); + stats->value[BNXT_RE_WATERMARK_SRQ] = atomic_read(&res_s->max_srq_count); + stats->value[BNXT_RE_WATERMARK_CQ] = atomic_read(&res_s->max_cq_count); + stats->value[BNXT_RE_WATERMARK_MR] = atomic_read(&res_s->max_mr_count); + stats->value[BNXT_RE_WATERMARK_MW] = atomic_read(&res_s->max_mw_count); + stats->value[BNXT_RE_WATERMARK_PD] = atomic_read(&res_s->max_pd_count); + stats->value[BNXT_RE_WATERMARK_AH] = atomic_read(&res_s->max_ah_count); + stats->value[BNXT_RE_RESIZE_CQ_CNT] = atomic_read(&res_s->resize_count); + + if (hw_stats) { + stats->value[BNXT_RE_RECOVERABLE_ERRORS] = + le64_to_cpu(hw_stats->tx_bcast_pkts); + stats->value[BNXT_RE_TX_DISCARDS] = + le64_to_cpu(hw_stats->tx_discard_pkts); + 
stats->value[BNXT_RE_TX_ERRORS] = + le64_to_cpu(hw_stats->tx_error_pkts); + stats->value[BNXT_RE_RX_ERRORS] = + le64_to_cpu(hw_stats->rx_error_pkts); + stats->value[BNXT_RE_RX_DISCARDS] = + le64_to_cpu(hw_stats->rx_discard_pkts); + stats->value[BNXT_RE_RX_PKTS] = + le64_to_cpu(hw_stats->rx_ucast_pkts); + stats->value[BNXT_RE_RX_BYTES] = + le64_to_cpu(hw_stats->rx_ucast_bytes); + stats->value[BNXT_RE_TX_PKTS] = + le64_to_cpu(hw_stats->tx_ucast_pkts); + stats->value[BNXT_RE_TX_BYTES] = + le64_to_cpu(hw_stats->tx_ucast_bytes); + } + + err_s = &rdev->stats.dstat.errs; + if (test_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags)) { + /* Set default values for sinfo */ + sinfo.function_id = 0xFFFFFFFF; + sinfo.collection_id = 0xFF; + sinfo.vf_valid = false; + rc = bnxt_qplib_get_roce_error_stats(&rdev->rcfw, err_s, &sinfo); + if (rc) { + clear_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags); + return rc; + } + bnxt_re_copy_err_stats(rdev, stats, err_s); + + if (_is_ext_stats_supported(rdev->dev_attr->dev_cap_flags) && + !rdev->is_virtfn) { + rc = bnxt_re_get_ext_stat(rdev, stats); + if (rc) { + clear_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags); + return rc; + } + } + + if (rdev->dbr_pacing) + bnxt_re_copy_db_pacing_stats(rdev, stats); + } + + return _is_chip_gen_p5_p7(rdev->chip_ctx) ? + BNXT_RE_NUM_EXT_COUNTERS : BNXT_RE_NUM_STD_COUNTERS; +} + +#ifdef HAVE_ALLOC_HW_PORT_STATS +struct rdma_hw_stats *bnxt_re_alloc_hw_port_stats(struct ib_device *ibdev, + PORT_NUM port_num) +{ + struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); + int num_counters; + + if (_is_chip_gen_p5_p7(rdev->chip_ctx)) + num_counters = BNXT_RE_NUM_EXT_COUNTERS; + else + num_counters = BNXT_RE_NUM_STD_COUNTERS; + +#ifdef HAVE_RDMA_STAT_DESC + return rdma_alloc_hw_stats_struct(bnxt_re_stat_descs, num_counters, + RDMA_HW_STATS_DEFAULT_LIFESPAN); +#else + return rdma_alloc_hw_stats_struct(bnxt_re_stat_name, num_counters, + RDMA_HW_STATS_DEFAULT_LIFESPAN); +#endif +} +#else +struct rdma_hw_stats *bnxt_re_alloc_hw_stats(struct ib_device *ibdev, + u8 port_num) +{ + struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); + int num_counters; + + /* We support only per port stats */ + if (!port_num) + return NULL; + + if (_is_chip_gen_p5_p7(rdev->chip_ctx)) + num_counters = BNXT_RE_NUM_EXT_COUNTERS; + else + num_counters = BNXT_RE_NUM_STD_COUNTERS; + + return rdma_alloc_hw_stats_struct(bnxt_re_stat_name, num_counters, + RDMA_HW_STATS_DEFAULT_LIFESPAN); +} +#endif /* HAVE_ALLOC_HW_PORT_STATS */ + diff --git a/bnxt_re-1.10.3-229.0.139.0/hw_counters.h b/bnxt_re-1.10.3-229.0.139.0/hw_counters.h new file mode 100644 index 0000000..8731e97 --- /dev/null +++ b/bnxt_re-1.10.3-229.0.139.0/hw_counters.h @@ -0,0 +1,149 @@ +/* + * Copyright (c) 2023, Broadcom. All rights reserved. The term + * Broadcom refers to Broadcom Inc. and/or its subsidiaries. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Description: Statistics (header)
+ *
+ */
+
+#ifndef __HW_COUNTERS_H__
+#define __HW_COUNTERS_H__
+
+enum bnxt_re_hw_stats {
+	BNXT_RE_ACTIVE_PD,
+	BNXT_RE_ACTIVE_AH,
+	BNXT_RE_ACTIVE_QP,
+	BNXT_RE_ACTIVE_RC_QP,
+	BNXT_RE_ACTIVE_UD_QP,
+	BNXT_RE_ACTIVE_SRQ,
+	BNXT_RE_ACTIVE_CQ,
+	BNXT_RE_ACTIVE_MR,
+	BNXT_RE_ACTIVE_MW,
+	BNXT_RE_WATERMARK_PD,
+	BNXT_RE_WATERMARK_AH,
+	BNXT_RE_WATERMARK_QP,
+	BNXT_RE_WATERMARK_RC_QP,
+	BNXT_RE_WATERMARK_UD_QP,
+	BNXT_RE_WATERMARK_SRQ,
+	BNXT_RE_WATERMARK_CQ,
+	BNXT_RE_WATERMARK_MR,
+	BNXT_RE_WATERMARK_MW,
+	BNXT_RE_RESIZE_CQ_CNT,
+	BNXT_RE_RX_PKTS,
+	BNXT_RE_RX_BYTES,
+	BNXT_RE_TX_PKTS,
+	BNXT_RE_TX_BYTES,
+	BNXT_RE_RECOVERABLE_ERRORS,
+	BNXT_RE_TX_ERRORS,
+	BNXT_RE_TX_DISCARDS,
+	BNXT_RE_RX_ERRORS,
+	BNXT_RE_RX_DISCARDS,
+	BNXT_RE_TO_RETRANSMITS,
+	BNXT_RE_SEQ_ERR_NAKS_RCVD,
+	BNXT_RE_MAX_RETRY_EXCEEDED,
+	BNXT_RE_RNR_NAKS_RCVD,
+	BNXT_RE_MISSING_RESP,
+	BNXT_RE_UNRECOVERABLE_ERR,
+	BNXT_RE_BAD_RESP_ERR,
+	BNXT_RE_LOCAL_QP_OP_ERR,
+	BNXT_RE_LOCAL_PROTECTION_ERR,
+	BNXT_RE_MEM_MGMT_OP_ERR,
+	BNXT_RE_REMOTE_INVALID_REQ_ERR,
+	BNXT_RE_REMOTE_ACCESS_ERR,
+	BNXT_RE_REMOTE_OP_ERR,
+	BNXT_RE_DUP_REQ,
+	BNXT_RE_RES_EXCEED_MAX,
+	BNXT_RE_RES_LENGTH_MISMATCH,
+	BNXT_RE_RES_EXCEEDS_WQE,
+	BNXT_RE_RES_OPCODE_ERR,
+	BNXT_RE_RES_RX_INVALID_RKEY,
+	BNXT_RE_RES_RX_DOMAIN_ERR,
+	BNXT_RE_RES_RX_NO_PERM,
+	BNXT_RE_RES_RX_RANGE_ERR,
+	BNXT_RE_RES_TX_INVALID_RKEY,
+	BNXT_RE_RES_TX_DOMAIN_ERR,
+	BNXT_RE_RES_TX_NO_PERM,
+	BNXT_RE_RES_TX_RANGE_ERR,
+	BNXT_RE_RES_IRRQ_OFLOW,
+	BNXT_RE_RES_UNSUP_OPCODE,
+	BNXT_RE_RES_UNALIGNED_ATOMIC,
+	BNXT_RE_RES_REM_INV_ERR,
+	BNXT_RE_RES_MEM_ERROR,
+	BNXT_RE_RES_SRQ_ERR,
+	BNXT_RE_RES_CMP_ERR,
+	BNXT_RE_RES_INVALID_DUP_RKEY,
+	BNXT_RE_RES_WQE_FORMAT_ERR,
+	BNXT_RE_RES_CQ_LOAD_ERR,
+	BNXT_RE_RES_SRQ_LOAD_ERR,
+	BNXT_RE_RES_TX_PCI_ERR,
+	BNXT_RE_RES_RX_PCI_ERR,
+	BNXT_RE_OUT_OF_SEQ_ERR,
+	BNXT_RE_TX_ATOMIC_REQ,
+	BNXT_RE_TX_READ_REQ,
+	BNXT_RE_TX_READ_RES,
+	BNXT_RE_TX_WRITE_REQ,
+	BNXT_RE_TX_SEND_REQ,
+	BNXT_RE_TX_ROCE_PKTS,
+	BNXT_RE_TX_ROCE_BYTES,
+	BNXT_RE_RX_ATOMIC_REQ,
+	BNXT_RE_RX_READ_REQ,
+	BNXT_RE_RX_READ_RESP,
+	BNXT_RE_RX_WRITE_REQ,
+	BNXT_RE_RX_SEND_REQ,
+	BNXT_RE_RX_ROCE_PKTS,
+	BNXT_RE_RX_ROCE_BYTES,
+	BNXT_RE_RX_ROCE_GOOD_PKTS,
+	BNXT_RE_RX_ROCE_GOOD_BYTES,
+	BNXT_RE_OOB,
+	BNXT_RE_TX_CNP,
+	BNXT_RE_RX_CNP,
+	BNXT_RE_RX_ECN,
+	BNXT_RE_PACING_RESCHED,
+	BNXT_RE_PACING_CMPL,
+	BNXT_RE_PACING_ALERT,
+	BNXT_RE_DB_FIFO_REG,
+	BNXT_RE_NUM_EXT_COUNTERS
+};
+
+#define BNXT_RE_NUM_STD_COUNTERS (BNXT_RE_OUT_OF_SEQ_ERR + 1)
+
+int bnxt_re_get_hw_stats(struct ib_device *ibdev,
+			 struct rdma_hw_stats *stats,
+			 PORT_NUM port, int index);
+#ifdef HAVE_ALLOC_HW_PORT_STATS
+struct rdma_hw_stats *bnxt_re_alloc_hw_port_stats(struct ib_device *ibdev,
+						  PORT_NUM port_num);
+#else
+struct rdma_hw_stats *bnxt_re_alloc_hw_stats(struct ib_device *ibdev,
+					     PORT_NUM port_num);
+#endif
+#endif /* __HW_COUNTERS_H__ */
diff --git a/bnxt_re-1.10.3-229.0.139.0/ib_verbs.c b/bnxt_re-1.10.3-229.0.139.0/ib_verbs.c
new file mode 100644
index 0000000..c9eb608
--- /dev/null
+++ b/bnxt_re-1.10.3-229.0.139.0/ib_verbs.c
@@ -0,0 +1,7585 @@
+/*
+ * Copyright (c) 2015-2023, Broadcom. All rights reserved. The term
+ * Broadcom refers to Broadcom Inc. and/or its subsidiaries.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * + * Author: Eddie Wai + * + * Description: IB Verbs interpreter + */ + +#include +#include + +#include "bnxt_re.h" +#include "ib_verbs.h" +#include "bnxt_re-abi.h" +#include "compat.h" +#include "bnxt.h" +#include "bnxt_hdbr.h" +#include "hdbr.h" + +static int __from_ib_access_flags(int iflags) +{ + int qflags = 0; + + if (iflags & IB_ACCESS_LOCAL_WRITE) + qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE; + if (iflags & IB_ACCESS_REMOTE_READ) + qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ; + if (iflags & IB_ACCESS_REMOTE_WRITE) + qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE; + if (iflags & IB_ACCESS_REMOTE_ATOMIC) + qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC; + if (iflags & IB_ACCESS_MW_BIND) + qflags |= BNXT_QPLIB_ACCESS_MW_BIND; +#ifdef HAVE_IB_ZERO_BASED + if (iflags & IB_ZERO_BASED) + qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED; +#endif +#ifdef HAVE_IB_ACCESS_ON_DEMAND + if (iflags & IB_ACCESS_ON_DEMAND) + qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND; +#endif + return qflags; +}; + +static enum ib_access_flags __to_ib_access_flags(int qflags) +{ + enum ib_access_flags iflags = 0; + + if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE) + iflags |= IB_ACCESS_LOCAL_WRITE; + if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE) + iflags |= IB_ACCESS_REMOTE_WRITE; + if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ) + iflags |= IB_ACCESS_REMOTE_READ; + if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC) + iflags |= IB_ACCESS_REMOTE_ATOMIC; + if (qflags & BNXT_QPLIB_ACCESS_MW_BIND) + iflags |= IB_ACCESS_MW_BIND; +#ifdef HAVE_IB_ZERO_BASED + if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED) + iflags |= IB_ZERO_BASED; +#endif +#ifdef HAVE_IB_ACCESS_ON_DEMAND + if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND) + iflags |= IB_ACCESS_ON_DEMAND; +#endif + return iflags; +}; + +static int bnxt_re_copy_to_udata(struct bnxt_re_dev *rdev, void *data, + int len, struct ib_udata *udata) +{ + int rc; + + rc = ib_copy_to_udata(udata, data, len); + if (rc) + dev_err(rdev_to_dev(rdev), + "ucontext copy failed from %ps rc %d", + __builtin_return_address(0), rc); + + return rc; +} + +#ifdef HAVE_IB_GET_NETDEV +struct net_device *bnxt_re_get_netdev(struct ib_device *ibdev, + PORT_NUM port_num) +{ + struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); + struct net_device *netdev = NULL; + struct bonding *bond = NULL; + u8 active_port_map; + + rcu_read_lock(); + + if (!rdev || !rdev->netdev) + goto end; + + netdev = rdev->netdev; + + /* In case of active-backup bond mode, return active slave */ + if (rdev->binfo) { + bond = netdev_priv(netdev); + + if (bond && (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)) { + active_port_map = bnxt_re_get_bond_link_status(rdev->binfo); + + if (active_port_map & BNXT_RE_ACTIVE_MAP_PORT1) + netdev = rdev->binfo->slave1; + else if (active_port_map & BNXT_RE_ACTIVE_MAP_PORT2) + netdev = rdev->binfo->slave2; + } + } + + if (netdev) + dev_hold(netdev); + +end: + rcu_read_unlock(); + return netdev; +} +#endif + +#ifdef HAVE_IB_QUERY_DEVICE_UDATA +int bnxt_re_query_device(struct ib_device *ibdev, + struct ib_device_attr *ib_attr, + struct ib_udata *udata) +#else +int bnxt_re_query_device(struct ib_device *ibdev, + struct ib_device_attr *ib_attr) +#endif +{ + struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); + struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr; + + memset(ib_attr, 0, sizeof(*ib_attr)); + + memcpy(&ib_attr->fw_ver, dev_attr->fw_ver, 4); + addrconf_addr_eui48((u8 *)&ib_attr->sys_image_guid, + rdev->netdev->dev_addr); + ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE; + ib_attr->page_size_cap = dev_attr->page_size_cap; + 
ib_attr->vendor_id = rdev->en_dev->pdev->vendor; + ib_attr->vendor_part_id = rdev->en_dev->pdev->device; + ib_attr->hw_ver = rdev->en_dev->pdev->subsystem_device; + ib_attr->max_qp = dev_attr->max_qp; + ib_attr->max_qp_wr = dev_attr->max_qp_wqes; + /* + * Read and set from the module param 'min_tx_depth' + * only once after the driver load + */ + if (rdev->min_tx_depth == 1 && + min_tx_depth < dev_attr->max_qp_wqes) + rdev->min_tx_depth = min_tx_depth; + ib_attr->device_cap_flags = + IB_DEVICE_CURR_QP_STATE_MOD + | IB_DEVICE_RC_RNR_NAK_GEN + | IB_DEVICE_SHUTDOWN_PORT + | IB_DEVICE_SYS_IMAGE_GUID +#ifndef HAVE_IB_KERNEL_CAP_FLAGS + | IB_DEVICE_LOCAL_DMA_LKEY +#endif + | IB_DEVICE_RESIZE_MAX_WR + | IB_DEVICE_PORT_ACTIVE_EVENT + | IB_DEVICE_N_NOTIFY_CQ + | IB_DEVICE_MEM_WINDOW + | IB_DEVICE_MEM_WINDOW_TYPE_2B +#ifdef USE_SIGNATURE_HANDOVER + | IB_DEVICE_SIGNATURE_HANDOVER +#endif +#ifdef HAVE_IB_UMEM_GET_FLAGS + | IB_DEVICE_PEER_MEMORY +#endif + | IB_DEVICE_MEM_MGT_EXTENSIONS; +#ifdef HAVE_SEPARATE_SEND_RECV_SGE + ib_attr->max_send_sge = dev_attr->max_qp_sges; + ib_attr->max_recv_sge = dev_attr->max_qp_sges; +#else + ib_attr->max_sge = dev_attr->max_qp_sges; +#endif + ib_attr->max_sge_rd = dev_attr->max_qp_sges; + ib_attr->max_cq = dev_attr->max_cq; + ib_attr->max_cqe = dev_attr->max_cq_wqes; + ib_attr->max_mr = dev_attr->max_mr; + ib_attr->max_pd = dev_attr->max_pd; + ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom; + ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom; + if (dev_attr->is_atomic) { + ib_attr->atomic_cap = IB_ATOMIC_GLOB; + ib_attr->masked_atomic_cap = IB_ATOMIC_GLOB; + } + + ib_attr->max_ee_rd_atom = 0; + ib_attr->max_res_rd_atom = 0; + ib_attr->max_ee_init_rd_atom = 0; + ib_attr->max_ee = 0; + ib_attr->max_rdd = 0; + ib_attr->max_mw = dev_attr->max_mw; + ib_attr->max_raw_ipv6_qp = 0; + ib_attr->max_raw_ethy_qp = dev_attr->max_raw_ethy_qp; + ib_attr->max_mcast_grp = 0; + ib_attr->max_mcast_qp_attach = 0; + ib_attr->max_total_mcast_qp_attach = 0; + ib_attr->max_ah = dev_attr->max_ah; +#ifdef USE_IB_FMR + ib_attr->max_fmr = dev_attr->max_fmr; + ib_attr->max_map_per_fmr = 1; +#endif + + ib_attr->max_srq = dev_attr->max_srq; + ib_attr->max_srq_wr = dev_attr->max_srq_wqes; + ib_attr->max_srq_sge = dev_attr->max_srq_sges; + + ib_attr->max_fast_reg_page_list_len = MAX_PBL_LVL_1_PGS; + ib_attr->max_pkeys = 1; + ib_attr->local_ca_ack_delay = BNXT_RE_DEFAULT_ACK_DELAY; +#ifdef HAVE_IB_ODP_CAPS + ib_attr->sig_prot_cap = 0; + ib_attr->sig_guard_cap = 0; + ib_attr->odp_caps.general_caps = 0; +#endif +#ifdef HAVE_IB_KERNEL_CAP_FLAGS + ib_attr->kernel_cap_flags = IBK_LOCAL_DMA_LKEY; +#endif + return 0; +} + +int bnxt_re_modify_device(struct ib_device *ibdev, + int device_modify_mask, + struct ib_device_modify *device_modify) +{ + struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); + + dev_dbg(rdev_to_dev(rdev), "Modify device with mask 0x%x", + device_modify_mask); + + switch (device_modify_mask) { + case IB_DEVICE_MODIFY_SYS_IMAGE_GUID: + /* Modify the GUID requires the modification of the GID table */ + /* GUID should be made as READ-ONLY */ + break; + case IB_DEVICE_MODIFY_NODE_DESC: + /* Node Desc should be made as READ-ONLY */ + break; + default: + break; + } + return 0; +} + +static void __to_ib_speed_width(u32 espeed, u8 *speed, u8 *width) +{ + switch (espeed) { + case SPEED_1000: + *speed = IB_SPEED_SDR; + *width = IB_WIDTH_1X; + break; + case SPEED_10000: + *speed = IB_SPEED_QDR; + *width = IB_WIDTH_1X; + break; + case SPEED_20000: + *speed = IB_SPEED_DDR; + 
*width = IB_WIDTH_4X; + break; + case SPEED_25000: + *speed = IB_SPEED_EDR; + *width = IB_WIDTH_1X; + break; + case SPEED_40000: + *speed = IB_SPEED_QDR; + *width = IB_WIDTH_4X; + break; + case SPEED_50000: +#ifdef HAVE_IB_WIDTH_2X + *speed = IB_SPEED_EDR; + *width = IB_WIDTH_2X; +#else + *speed = IB_SPEED_HDR; + *width = IB_WIDTH_1X; +#endif + break; + case SPEED_100000: + *speed = IB_SPEED_EDR; + *width = IB_WIDTH_4X; + break; + case SPEED_200000: + *speed = IB_SPEED_HDR; + *width = IB_WIDTH_4X; + break; + case SPEED_400000: + *speed = IB_SPEED_NDR; + *width = IB_WIDTH_4X; + break; + default: + *speed = IB_SPEED_SDR; + *width = IB_WIDTH_1X; + break; + } +} + +/* Port */ +int bnxt_re_query_port(struct ib_device *ibdev, PORT_NUM port_num, + struct ib_port_attr *port_attr) +{ + struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); + struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr; + u8 active_speed = 0, active_width = 0; + + dev_dbg(rdev_to_dev(rdev), "QUERY PORT with port_num 0x%x", port_num); + memset(port_attr, 0, sizeof(*port_attr)); + + port_attr->phys_state = IB_PORT_PHYS_STATE_DISABLED; + port_attr->state = bnxt_re_get_link_state(rdev); + if (port_attr->state == IB_PORT_ACTIVE) + port_attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP; + port_attr->max_mtu = IB_MTU_4096; + port_attr->active_mtu = iboe_get_mtu(rdev->netdev->mtu); + port_attr->gid_tbl_len = dev_attr->max_sgid; + /* TODO: port_cap_flags needs to be revisited */ + port_attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP | + IB_PORT_DEVICE_MGMT_SUP | + IB_PORT_VENDOR_CLASS_SUP | + IB_PORT_IP_BASED_GIDS; + + port_attr->max_msg_sz = (u32)BNXT_RE_MAX_MR_SIZE_LOW; + port_attr->bad_pkey_cntr = 0; + port_attr->qkey_viol_cntr = 0; + port_attr->pkey_tbl_len = dev_attr->max_pkey; + port_attr->lid = 0; + port_attr->sm_lid = 0; + port_attr->lmc = 0; + port_attr->max_vl_num = 4; + port_attr->sm_sl = 0; + port_attr->subnet_timeout = 0; + port_attr->init_type_reply = 0; + + if (test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags)) + __to_ib_speed_width(rdev->espeed, &active_speed, + &active_width); + + port_attr->active_speed = active_speed; + port_attr->active_width = active_width; + + return 0; +} + +int bnxt_re_modify_port(struct ib_device *ibdev, PORT_NUM port_num, + int port_modify_mask, + struct ib_port_modify *port_modify) +{ + struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); + + dev_dbg(rdev_to_dev(rdev), "Modify port with mask 0x%x", + port_modify_mask); + + switch (port_modify_mask) { + case IB_PORT_SHUTDOWN: + break; + case IB_PORT_INIT_TYPE: + break; + case IB_PORT_RESET_QKEY_CNTR: + break; + default: + break; + } + return 0; +} + +#ifdef HAVE_IB_GET_PORT_IMMUTABLE +int bnxt_re_get_port_immutable(struct ib_device *ibdev, PORT_NUM port_num, + struct ib_port_immutable *immutable) +{ + struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); + struct ib_port_attr port_attr; + + if (bnxt_re_query_port(ibdev, port_num, &port_attr)) + return -EINVAL; + + immutable->pkey_tbl_len = port_attr.pkey_tbl_len; + immutable->gid_tbl_len = port_attr.gid_tbl_len; +#ifdef RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP + if (rdev->roce_mode == BNXT_RE_FLAG_ROCEV1_CAP) + immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE; + else if (rdev->roce_mode == BNXT_RE_FLAG_ROCEV2_CAP) + immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP; + else + immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE | + RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP; +#else + immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE; +#endif + immutable->max_mad_size = 
IB_MGMT_MAD_SIZE; + return 0; +} +#endif + +#ifdef HAVE_IB_GET_DEV_FW_STR +void bnxt_re_compat_qfwstr(void) +{ + struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); + + sprintf(str, "%d.%d.%d.%d", rdev->dev_attr->fw_ver[0], + rdev->dev_attr->fw_ver[1], rdev->dev_attr->fw_ver[2], + rdev->dev_attr->fw_ver[3]); +} +#endif + +int bnxt_re_query_pkey(struct ib_device *ibdev, PORT_NUM port_num, + u16 index, u16 *pkey) +{ + if (index > 0) + return -EINVAL; + + *pkey = IB_DEFAULT_PKEY_FULL; + + return 0; +} + +int bnxt_re_query_gid(struct ib_device *ibdev, PORT_NUM port_num, + int index, union ib_gid *gid) +{ + struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); + int rc = 0; + + /* Ignore port_num */ + memset(gid, 0, sizeof(*gid)); +#ifdef USE_ROCE_GID_CACHE + if (ib_cache_use_roce_gid_cache(ibdev, port_num)) { + rc = bnxt_qplib_get_sgid(&rdev->qplib_res, + &rdev->qplib_res.sgid_tbl, index, + (struct bnxt_qplib_gid *)gid); + goto out; + } + rc = ib_get_cached_gid(ibdev, port_num, index, gid, NULL); + if (rc == -EAGAIN) { + dev_err(rdev_to_dev(rdev), + "GID not found in the gid cache table!"); + memcpy(gid, &zgid, sizeof(*gid)); + rc = 0; + } +out: +#else + rc = bnxt_qplib_get_sgid(&rdev->qplib_res, + &rdev->qplib_res.sgid_tbl, index, + (struct bnxt_qplib_gid *)gid); +#endif + return rc; +} + +#ifdef HAVE_IB_ADD_DEL_GID +#ifdef HAVE_SIMPLIFIED_ADD_DEL_GID +int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context) +#else +int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num, + unsigned int index, void **context) +#endif + +{ + int rc = 0; + struct bnxt_re_gid_ctx *ctx, **ctx_tbl; +#ifdef HAVE_SIMPLIFIED_ADD_DEL_GID + struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev); + unsigned int index = attr->index; +#else + struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); +#endif + struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl; + struct bnxt_qplib_gid *gid_to_del; + u16 vlan_id = 0xFFFF; + + /* Delete the entry from the hardware */ + ctx = *context; + if (!ctx) { + dev_err(rdev_to_dev(rdev), "GID entry has no ctx?!"); + return -EINVAL; + } + if (sgid_tbl->active) { + if (ctx->idx >= sgid_tbl->max) { + dev_dbg(rdev_to_dev(rdev), "GID index out of range?!"); + return -EINVAL; + } + gid_to_del = &sgid_tbl->tbl[ctx->idx].gid; + vlan_id = sgid_tbl->tbl[ctx->idx].vlan_id; + ctx->refcnt--; + /* DEL_GID is called via WQ context(netdevice_event_work_handler) + * or via the ib_unregister_device path. In the former case QP1 + * may not be destroyed yet, in which case just return as FW + * needs that entry to be present and will fail it's deletion. 
+ * We could get invoked again after QP1 is destroyed OR get an + * ADD_GID call with a different GID value for the same index + * where we issue MODIFY_GID cmd to update the GID entry -- TBD + */ + if (ctx->idx == 0 && + rdma_link_local_addr((struct in6_addr *)gid_to_del) && + (rdev->gsi_ctx.gsi_sqp || + rdev->gsi_ctx.gsi_qp_mode == BNXT_RE_GSI_MODE_UD)) { + dev_dbg(rdev_to_dev(rdev), + "Trying to delete GID0 while QP1 is alive\n"); + if (!ctx->refcnt) { +#ifdef RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP + rdev->gid_map[index] = -1; +#endif /* RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP */ + ctx_tbl = sgid_tbl->ctx; + ctx_tbl[ctx->idx] = NULL; + kfree(ctx); + } + return 0; + } +#ifdef RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP + rdev->gid_map[index] = -1; +#endif /* RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP */ + if (!ctx->refcnt) { + rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del, + vlan_id, true); + if (!rc) { + dev_dbg(rdev_to_dev(rdev), "GID remove success"); + ctx_tbl = sgid_tbl->ctx; + ctx_tbl[ctx->idx] = NULL; + kfree(ctx); + } else { + dev_err(rdev_to_dev(rdev), + "Remove GID failed rc = 0x%x", rc); + } + } + } else { + dev_dbg(rdev_to_dev(rdev), "GID sgid_tbl does not exist!"); + return -EINVAL; + } + return rc; +} + +#ifdef HAVE_SIMPLIFIED_ADD_DEL_GID +#ifdef HAVE_SIMPLER_ADD_GID +int bnxt_re_add_gid(const struct ib_gid_attr *attr, void **context) +#else +int bnxt_re_add_gid(const union ib_gid *gid, + const struct ib_gid_attr *attr, void **context) +#endif +#else +int bnxt_re_add_gid(struct ib_device *ibdev, u8 port_num, + unsigned int index, const union ib_gid *gid, + const struct ib_gid_attr *attr, void **context) +#endif +{ + int rc; + u32 tbl_idx = 0; + u16 vlan_id = 0xFFFF; + struct bnxt_re_gid_ctx *ctx, **ctx_tbl; +#ifdef HAVE_SIMPLIFIED_ADD_DEL_GID + struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev); + unsigned int index = attr->index; +#ifdef HAVE_SIMPLER_ADD_GID + struct bnxt_qplib_gid *gid = (struct bnxt_qplib_gid *)&attr->gid; +#endif +#else + struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); +#endif + struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl; + + if ((attr->ndev) && is_vlan_dev(attr->ndev)) + vlan_id = vlan_dev_vlan_id(attr->ndev); + + rc = bnxt_qplib_add_sgid(sgid_tbl, (struct bnxt_qplib_gid *)gid, + rdev->qplib_res.netdev->dev_addr, + vlan_id, true, &tbl_idx); + if (rc == -EALREADY) { + dev_dbg(rdev_to_dev(rdev), "GID %pI6 is already present", gid); + ctx_tbl = sgid_tbl->ctx; + if (!ctx_tbl[tbl_idx]) { + ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + ctx->idx = tbl_idx; + ctx->refcnt = 1; + ctx_tbl[tbl_idx] = ctx; + } else { + ctx_tbl[tbl_idx]->refcnt++; + } + *context = ctx_tbl[tbl_idx]; +#ifdef RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP + /* tbl_idx is the HW table index and index is the stack index */ + rdev->gid_map[index] = tbl_idx; +#endif /* RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP */ + return 0; + } else if (rc < 0) { + dev_err(rdev_to_dev(rdev), "Add GID failed rc = 0x%x", rc); + return rc; + } else { + ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) { + dev_err(rdev_to_dev(rdev), "Add GID ctx failed"); + return -ENOMEM; + } + ctx_tbl = sgid_tbl->ctx; + ctx->idx = tbl_idx; + ctx->refcnt = 1; + ctx_tbl[tbl_idx] = ctx; +#ifdef RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP + /* tbl_idx is the HW table index and index is the stack index */ + rdev->gid_map[index] = tbl_idx; +#endif /* RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP */ + *context = ctx; + } + return rc; +} +#endif + +#ifdef HAVE_IB_MODIFY_GID +int bnxt_re_modify_gid(struct ib_device *ibdev, u8 
port_num, + unsigned int index, const union ib_gid *gid, + const struct ib_gid_attr *attr, void **context) +{ + int rc = 0; + u16 vlan_id = 0xFFFF; + +#ifdef USE_ROCE_GID_CACHE + struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); + struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl; + struct bnxt_re_gid_ctx *ctx, **ctx_tbl; + + if (ib_cache_use_roce_gid_cache(ibdev, port_num)) + return -EINVAL; + if (!memcmp(&zgid, gid, sizeof(*gid))) { + /* Delete the entry from the hardware */ + ctx = *context; + if (!ctx) { + dev_err(rdev_to_dev(rdev), "GID entry has no ctx?!"); + return -EINVAL; + } + if (sgid_tbl->active) { + if (ctx->idx >= sgid_tbl->max) { + dev_dbg(rdev_to_dev(rdev), + "GID index out of range?!"); + return -EINVAL; + } + rc = bnxt_qplib_del_sgid(sgid_tbl, + &sgid_tbl->tbl[ctx->idx], + vlan_id, true); + if (!rc) + dev_dbg(rdev_to_dev(rdev), + "GID removed successfully"); + else + dev_err(rdev_to_dev(rdev), + "Remove GID failed rc = 0x%x", rc); + ctx_tbl = sgid_tbl->ctx; + ctx_tbl[ctx->idx] = NULL; + kfree(ctx); + } else { + dev_dbg(rdev_to_dev(rdev), + "GID sgid_tbl does not exist!"); + return -EINVAL; + } + } else { + ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) { + dev_err(rdev_to_dev(rdev), "Add GID ctx failed"); + return -ENOMEM; + } + rc = bnxt_qplib_add_sgid(sgid_tbl, (struct bnxt_qplib_gid *)gid, + rdev->qplib_res.netdev->dev_addr, + vlan_id, true, &ctx->idx); + if (rc == -EALREADY) { + dev_dbg(rdev_to_dev(rdev), + "GID is already present at index %d", ctx->idx); + ctx_tbl = sgid_tbl->ctx; + *context = ctx_tbl[ctx->idx]; + kfree(ctx); + rc = 0; + } else if (rc < 0) { + dev_err(rdev_to_dev(rdev), "Add GID failed rc = 0x%x", + rc); + kfree(ctx); + } else { + dev_dbg(rdev_to_dev(rdev), + "GID added to index sgid_idx %d", ctx->idx); + ctx_tbl = sgid_tbl->ctx; + ctx_tbl[ctx->idx] = ctx; + *context = ctx; + } + } +#endif + return rc; +} +#endif + +enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev, + PORT_NUM port_num) +{ + return IB_LINK_LAYER_ETHERNET; +} + +#define BNXT_RE_LEGACY_FENCE_BYTES 64 +#define BNXT_RE_LEGACY_FENCE_PBL_SIZE DIV_ROUND_UP(BNXT_RE_LEGACY_FENCE_BYTES, PAGE_SIZE) + +static void bnxt_re_legacy_create_fence_wqe(struct bnxt_re_pd *pd) +{ + struct bnxt_re_legacy_fence_data *fence = &pd->fence; + struct ib_mr *ib_mr = &fence->mr->ib_mr; + struct bnxt_qplib_swqe *wqe = &fence->bind_wqe; + struct bnxt_re_dev *rdev = pd->rdev; + + if (_is_chip_gen_p5_p7(rdev->chip_ctx)) + return; + + memset(wqe, 0, sizeof(*wqe)); + wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW; + wqe->wr_id = BNXT_QPLIB_FENCE_WRID; + wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; + wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; + wqe->bind.zero_based = false; + wqe->bind.parent_l_key = ib_mr->lkey; + wqe->bind.va = (u64)fence->va; + wqe->bind.length = fence->size; + wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ); + wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1; + + /* Save the initial rkey in fence structure for now; + * wqe->bind.r_key will be set at (re)bind time. 
+ */ + fence->bind_rkey = ib_inc_rkey(fence->mw->rkey); +} + +static int bnxt_re_legacy_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp) +{ + struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp, + qplib_qp); + struct ib_pd *ib_pd = qp->ib_qp.pd; + struct bnxt_re_pd *pd = to_bnxt_re(ib_pd, struct bnxt_re_pd, ib_pd); + struct bnxt_re_legacy_fence_data *fence = &pd->fence; + struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe; + struct bnxt_qplib_swqe wqe; + int rc; + + /* TODO: Need SQ locking here when Fence WQE + * posting moves up into bnxt_re from bnxt_qplib. + */ + memcpy(&wqe, fence_wqe, sizeof(wqe)); + wqe.bind.r_key = fence->bind_rkey; + fence->bind_rkey = ib_inc_rkey(fence->bind_rkey); + + dev_dbg(rdev_to_dev(qp->rdev), + "Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n", + wqe.bind.r_key, qp->qplib_qp.id, pd); + rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe); + if (rc) { + dev_err(rdev_to_dev(qp->rdev), "Failed to bind fence-WQE\n"); + return rc; + } + bnxt_qplib_post_send_db(&qp->qplib_qp); + + return rc; +} + +static int bnxt_re_legacy_create_fence_mr(struct bnxt_re_pd *pd) +{ + int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND; + struct bnxt_re_legacy_fence_data *fence = &pd->fence; + struct bnxt_re_dev *rdev = pd->rdev; + struct bnxt_qplib_mrinfo mrinfo; + struct bnxt_re_mr *mr = NULL; + struct ib_mw *ib_mw = NULL; + dma_addr_t dma_addr = 0; +#ifdef HAVE_ALLOC_MW_IN_IB_CORE + struct bnxt_re_mw *mw = NULL; +#endif + u32 max_mr_count; + u64 pbl_tbl; + int rc; + + if (_is_chip_gen_p5_p7(rdev->chip_ctx)) + return 0; + + if (bnxt_re_get_total_mr_mw_count(rdev) >= rdev->dev_attr->max_mr) + return -ENOMEM; + + memset(&mrinfo, 0, sizeof(mrinfo)); + /* Allocate a small chunk of memory and dma-map it */ + fence->va = kzalloc(BNXT_RE_LEGACY_FENCE_BYTES, GFP_KERNEL); + if (!fence->va) + return -ENOMEM; + dma_addr = ib_dma_map_single(&rdev->ibdev, fence->va, + BNXT_RE_LEGACY_FENCE_BYTES, + DMA_BIDIRECTIONAL); + rc = ib_dma_mapping_error(&rdev->ibdev, dma_addr); + if (rc) { + dev_err(rdev_to_dev(rdev), "Failed to dma-map fence-MR-mem\n"); + rc = -EIO; + fence->dma_addr = 0; + goto free_va; + } + fence->dma_addr = dma_addr; + + /* Allocate a MR */ + mr = kzalloc(sizeof(*mr), GFP_KERNEL); + if (!mr) { + rc = -ENOMEM; + goto free_dma_addr; + } + fence->mr = mr; + mr->rdev = rdev; + mr->qplib_mr.pd = &pd->qplib_pd; + mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR; + mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags); + if (!_is_alloc_mr_unified(rdev->qplib_res.dattr)) { + rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr); + if (rc) { + dev_err(rdev_to_dev(rdev), "Failed to alloc fence-HW-MR\n"); + goto free_mr; + } + /* Register MR */ + mr->ib_mr.lkey = mr->qplib_mr.lkey; + } + mr->qplib_mr.va = (u64)fence->va; + mr->qplib_mr.total_size = BNXT_RE_LEGACY_FENCE_BYTES; + pbl_tbl = dma_addr; + + mrinfo.mrw = &mr->qplib_mr; + mrinfo.ptes = &pbl_tbl; + mrinfo.sg.npages = BNXT_RE_LEGACY_FENCE_PBL_SIZE; + +#ifndef HAVE_RDMA_UMEM_FOR_EACH_DMA_BLOCK + mrinfo.sg.nmap = 0; + mrinfo.sg.sghead = 0; +#else + mrinfo.sg.umem = NULL; +#endif + mrinfo.sg.pgshft = PAGE_SHIFT; + mrinfo.sg.pgsize = PAGE_SIZE; + rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mrinfo, false); + if (rc) { + dev_err(rdev_to_dev(rdev), "Failed to register fence-MR\n"); + goto free_mr; + } + mr->ib_mr.lkey = mr->qplib_mr.lkey; + mr->ib_mr.rkey = mr->qplib_mr.rkey; + atomic_inc(&rdev->stats.rsors.mr_count); + max_mr_count = atomic_read(&rdev->stats.rsors.mr_count); + if (max_mr_count > 
(atomic_read(&rdev->stats.rsors.max_mr_count))) + atomic_set(&rdev->stats.rsors.max_mr_count, max_mr_count); + /* Create a fence MW only for kernel consumers */ +#ifdef HAVE_ALLOC_MW_IN_IB_CORE + mw = kzalloc(sizeof(*mw), GFP_KERNEL); + if (!mw) + goto free_mr; + mw->ib_mw.device = &rdev->ibdev; + mw->ib_mw.pd = &pd->ib_pd; + mw->ib_mw.type = IB_MW_TYPE_1; + ib_mw = &mw->ib_mw; + rc = bnxt_re_alloc_mw(ib_mw, NULL); + if (rc) + goto free_mr; +#else +#ifdef HAVE_IB_MW_TYPE + ib_mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1 +#ifdef HAVE_ALLOW_MW_WITH_UDATA + , NULL +#endif + ); +#else + ib_mw = bnxt_re_alloc_mw(&pd->ib_pd); +#endif +#endif /* HAVE_ALLOC_MW_IN_IB_CORE */ + if (IS_ERR(ib_mw)) { + dev_err(rdev_to_dev(rdev), + "Failed to create fence-MW for PD: %p\n", pd); + rc = PTR_ERR(ib_mw); + goto free_mr; + } + fence->mw = ib_mw; + + bnxt_re_legacy_create_fence_wqe(pd); + return 0; + +free_mr: + #ifdef HAVE_ALLOC_MW_IN_IB_CORE + kfree(mw); + #endif + if (mr->ib_mr.lkey) { + bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr); + atomic_dec(&rdev->stats.rsors.mr_count); + } + kfree(mr); + fence->mr = NULL; + +free_dma_addr: + ib_dma_unmap_single(&rdev->ibdev, fence->dma_addr, + BNXT_RE_LEGACY_FENCE_BYTES, DMA_BIDIRECTIONAL); + fence->dma_addr = 0; + +free_va: + kfree(fence->va); + fence->va = NULL; + return rc; +} + +static void bnxt_re_legacy_destroy_fence_mr(struct bnxt_re_pd *pd) +{ + struct bnxt_re_legacy_fence_data *fence = &pd->fence; + struct bnxt_re_dev *rdev = pd->rdev; + struct bnxt_re_mr *mr = fence->mr; +#ifdef HAVE_ALLOC_MW_IN_IB_CORE + struct bnxt_re_mw *mw = NULL; +#endif + + if (_is_chip_gen_p5_p7(rdev->chip_ctx)) + return; + + if (fence->mw) { +#ifdef HAVE_ALLOC_MW_IN_IB_CORE + mw = to_bnxt_re(fence->mw, struct bnxt_re_mw, ib_mw); +#endif + bnxt_re_dealloc_mw(fence->mw); +#ifdef HAVE_ALLOC_MW_IN_IB_CORE + kfree(mw); +#endif + fence->mw = NULL; + } + if (mr) { + if (mr->ib_mr.rkey) + bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr, + false); + if (mr->ib_mr.lkey) + bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr); + kfree(mr); + fence->mr = NULL; + atomic_dec(&rdev->stats.rsors.mr_count); + } + if (fence->dma_addr) { + ib_dma_unmap_single(&rdev->ibdev, fence->dma_addr, + BNXT_RE_LEGACY_FENCE_BYTES, + DMA_BIDIRECTIONAL); + fence->dma_addr = 0; + } + kfree(fence->va); + fence->va = NULL; +} + + +static int bnxt_re_get_user_dpi(struct bnxt_re_dev *rdev, + struct bnxt_re_ucontext *cntx) +{ + struct bnxt_qplib_chip_ctx *cctx = rdev->chip_ctx; + int ret = 0; + u8 type; + /* Allocate DPI in alloc_pd or in create_cq to avoid failing of + * ibv_devinfo and family of application when DPIs are depleted. 
+ */ + type = BNXT_QPLIB_DPI_TYPE_UC; + ret = bnxt_qplib_alloc_dpi(&rdev->qplib_res, &cntx->dpi, cntx, type); + if (ret) { + dev_err(rdev_to_dev(rdev), "Alloc doorbell page failed!"); + goto out; + } + + if (BNXT_RE_PUSH_ENABLED(cctx->modes.db_push_mode)) { + type = BNXT_QPLIB_DPI_TYPE_WC; + ret = bnxt_qplib_alloc_dpi(&rdev->qplib_res, &cntx->wcdpi, + cntx, type); + if (ret) { + dev_err(rdev_to_dev(rdev), "push dp alloc failed"); + goto out; + } + if (BNXT_RE_PPP_ENABLED(cctx)) + rdev->ppp_stats.ppp_enabled_ctxs++; + } +out: + return ret; +} + +/* Protection Domains */ +#ifdef HAVE_DEALLOC_PD_UDATA +DEALLOC_PD_RET bnxt_re_dealloc_pd(struct ib_pd *ib_pd, struct ib_udata *udata) +#else +DEALLOC_PD_RET bnxt_re_dealloc_pd(struct ib_pd *ib_pd) +#endif +{ + struct bnxt_re_pd *pd = to_bnxt_re(ib_pd, struct bnxt_re_pd, ib_pd); + struct bnxt_re_dev *rdev = pd->rdev; + u32 resv_pdid; + int rc; + + bnxt_re_legacy_destroy_fence_mr(pd); + + resv_pdid = rdev->qplib_res.pd_tbl.max - 1; +#ifndef HAVE_PD_ALLOC_IN_IB_CORE + if (pd->qplib_pd.id != resv_pdid) { +#endif + rc = bnxt_qplib_dealloc_pd(&rdev->qplib_res, + &rdev->qplib_res.pd_tbl, + &pd->qplib_pd); + if (rc) + dev_err_ratelimited(rdev_to_dev(rdev), + "%s failed rc = %d", __func__, rc); +#ifndef HAVE_PD_ALLOC_IN_IB_CORE + } +#endif + atomic_dec(&rdev->stats.rsors.pd_count); + +#ifndef HAVE_PD_ALLOC_IN_IB_CORE + kfree(pd); +#endif + +#ifndef HAVE_DEALLOC_PD_RET_VOID + /* return success for destroy resources */ + return 0; +#endif +} + +ALLOC_PD_RET bnxt_re_alloc_pd(ALLOC_PD_IN *pd_in, +#ifdef HAVE_UCONTEXT_IN_ALLOC_PD + struct ib_ucontext *ucontext, +#endif + struct ib_udata *udata) +{ +#ifdef HAVE_PD_ALLOC_IN_IB_CORE + struct ib_pd *ibpd = pd_in; + struct ib_device *ibdev = ibpd->device; +#else + struct ib_device *ibdev = pd_in; +#endif + struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); +#ifdef HAVE_RDMA_UDATA_TO_DRV_CONTEXT + struct bnxt_re_ucontext *ucntx = + rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, + ib_uctx); +#else + struct bnxt_re_ucontext *ucntx = to_bnxt_re(ucontext, + struct bnxt_re_ucontext, + ib_uctx); +#endif + u32 max_pd_count; + int rc; +#ifdef HAVE_PD_ALLOC_IN_IB_CORE + struct bnxt_re_pd *pd = container_of(ibpd, struct bnxt_re_pd, ib_pd); +#else + struct bnxt_re_pd *pd; + pd = kzalloc(sizeof(*pd), GFP_KERNEL); + if (!pd) + return ERR_PTR(-ENOMEM); +#endif /* HAVE_PD_ALLOC_IN_IB_CORE */ + + pd->rdev = rdev; + if (bnxt_qplib_alloc_pd(&rdev->qplib_res, &pd->qplib_pd)) { + dev_err(rdev_to_dev(rdev), + "Allocate HW Protection Domain failed!"); + rc = -ENOMEM; + goto fail; + } + + if (udata) { + struct bnxt_re_pd_resp resp = {}; + + if (!ucntx->dpi.dbr) { + rc = bnxt_re_get_user_dpi(rdev, ucntx); + if (rc) + goto dbfail; + } + + resp.pdid = pd->qplib_pd.id; + /* Still allow mapping this DBR to the new user PD. 
*/ + resp.dpi = ucntx->dpi.dpi; + resp.dbr = (u64)ucntx->dpi.umdbr; + /* Copy only on a valid wcpdi */ + if (ucntx->wcdpi.dpi) { + resp.wcdpi = ucntx->wcdpi.dpi; + resp.comp_mask = BNXT_RE_COMP_MASK_PD_HAS_WC_DPI; + } + if (rdev->dbr_pacing) { + WARN_ON(!rdev->dbr_bar_addr); + resp.dbr_bar_addr = (u64)rdev->dbr_bar_addr; + resp.comp_mask |= BNXT_RE_COMP_MASK_PD_HAS_DBR_BAR_ADDR; + } + + rc = bnxt_re_copy_to_udata(rdev, &resp, + min(udata->outlen, sizeof(resp)), + udata); + if (rc) + goto dbfail; + } + + if (!udata) + if (bnxt_re_legacy_create_fence_mr(pd)) + dev_warn(rdev_to_dev(rdev), + "Failed to create Fence-MR\n"); + + atomic_inc(&rdev->stats.rsors.pd_count); + max_pd_count = atomic_read(&rdev->stats.rsors.pd_count); + if (max_pd_count > atomic_read(&rdev->stats.rsors.max_pd_count)) + atomic_set(&rdev->stats.rsors.max_pd_count, max_pd_count); + +#ifndef HAVE_PD_ALLOC_IN_IB_CORE + return &pd->ib_pd; +#else + return 0; +#endif /* HAVE_PD_ALLOC_IN_IB_CORE */ +dbfail: + (void)bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl, + &pd->qplib_pd); +#ifndef HAVE_PD_ALLOC_IN_IB_CORE +fail: + kfree(pd); + return ERR_PTR(rc); +#else +fail: + return rc; +#endif /* HAVE_PD_ALLOC_IN_IB_CORE */ +} + +/* Address Handles */ +#ifdef HAVE_SLEEPABLE_AH +DESTROY_AH_RET bnxt_re_destroy_ah(struct ib_ah *ib_ah, u32 flags) +#else +int bnxt_re_destroy_ah(struct ib_ah *ib_ah) +#endif +{ + struct bnxt_re_ah *ah = to_bnxt_re(ib_ah, struct bnxt_re_ah, ib_ah); + struct bnxt_re_dev *rdev = ah->rdev; + int rc = 0; + bool block = true; + +#ifdef HAVE_SLEEPABLE_AH + block = !(flags & RDMA_DESTROY_AH_SLEEPABLE); +#endif + + rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah, block); + if (rc) + dev_err_ratelimited(rdev_to_dev(rdev), + "%s id = %d blocking %d failed rc = %d", + __func__, ah->qplib_ah.id, block, rc); + atomic_dec(&rdev->stats.rsors.ah_count); + +#ifndef HAVE_AH_ALLOC_IN_IB_CORE + kfree(ah); +#endif + +#ifndef HAVE_DESTROY_AH_RET_VOID + /* return success for destroy resources */ + return 0; +#endif +} + +#ifndef HAVE_IB_AH_DMAC +static void bnxt_re_resolve_dmac(struct bnxt_re_dev *rdev, u8 *dmac, + struct bnxt_qplib_gid *dgid) +{ + struct in6_addr in6; + + memcpy(&in6, dgid->data, sizeof(in6)); + if (rdma_is_multicast_addr(&in6)) + rdma_get_mcast_mac(&in6, dmac); + else if (rdma_link_local_addr(&in6)) + rdma_get_ll_mac(&in6, dmac); + else + dev_err(rdev_to_dev(rdev), + "Unable to resolve Dest MAC from the provided dgid"); +} +#endif + +#ifdef RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP +static u8 _to_bnxt_re_nw_type(enum rdma_network_type ntype) +{ + u8 nw_type; + switch (ntype) { + case RDMA_NETWORK_IPV4: + nw_type = CMDQ_CREATE_AH_TYPE_V2IPV4; + break; + case RDMA_NETWORK_IPV6: + nw_type = CMDQ_CREATE_AH_TYPE_V2IPV6; + break; + default: + nw_type = CMDQ_CREATE_AH_TYPE_V1; + break; + } + return nw_type; +} +#endif + +static int bnxt_re_get_ah_info(struct bnxt_re_dev *rdev, + RDMA_AH_ATTR *ah_attr, + struct bnxt_re_ah_info *ah_info) +{ +#ifdef RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP +#ifdef HAVE_GID_ATTR_IN_IB_AH + const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr); +#endif + IB_GID_ATTR *gattr; + enum rdma_network_type ib_ntype; +#endif + u8 ntype; + union ib_gid *gid; + int rc = 0; + + gid = &ah_info->sgid; +#ifdef RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP +#ifndef HAVE_GID_ATTR_IN_IB_AH + gattr = &ah_info->sgid_attr; + + rc = bnxt_re_get_cached_gid(&rdev->ibdev, 1, ah_attr->grh.sgid_index, + gid, &gattr, &ah_attr->grh, NULL); + if (rc) + return rc; + + /* Get vlan tag */ + if (gattr->ndev) { + if 
(is_vlan_dev(gattr->ndev)) + ah_info->vlan_tag = vlan_dev_vlan_id(gattr->ndev); + dev_put(gattr->ndev); + } + + /* Get network header type for this GID */ +#else + gattr = grh->sgid_attr; + memcpy(&ah_info->sgid_attr, gattr, sizeof(*gattr)); + /* Get vlan tag */ + if ((gattr->ndev) && is_vlan_dev(gattr->ndev)) + ah_info->vlan_tag = vlan_dev_vlan_id(gattr->ndev); +#endif /* HAVE_GID_ATTR_IN_IB_AH */ + + ib_ntype = bnxt_re_gid_to_network_type(gattr, gid); + ntype = _to_bnxt_re_nw_type(ib_ntype); +#else + rc = ib_query_gid(&rdev->ibdev, 1, ah_attr->grh.sgid_index, gid, 0); + if (rc) { + dev_err(rdev_to_dev(rdev), "Failed to query gid at index %d", + ah_attr->grh.sgid_index); + return rc; + } + + ntype = CMDQ_CREATE_AH_TYPE_V1; +#endif /* RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP */ + ah_info->nw_type = ntype; + + return rc; +} + +static u8 _get_sgid_index(struct bnxt_re_dev *rdev, u8 gindx) +{ +#ifdef RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP + gindx = rdev->gid_map[gindx]; +#endif + return gindx; +} + +static int bnxt_re_init_dmac(struct bnxt_re_dev *rdev, RDMA_AH_ATTR *ah_attr, + struct bnxt_re_ah_info *ah_info, bool is_user, + struct bnxt_re_ah *ah) +{ +#ifndef HAVE_IB_AH_DMAC + u8 dstmac[ETH_ALEN]; +#endif + int rc = 0; + u8 *dmac; + +#ifdef HAVE_IB_AH_DMAC + if (is_user && !rdma_is_multicast_addr((struct in6_addr *) + ah_attr->grh.dgid.raw) && + !rdma_link_local_addr((struct in6_addr *)ah_attr->grh.dgid.raw)) { + +#ifdef RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP +#if defined (HAVE_IB_RESOLVE_ETH_DMAC) || !defined (HAVE_CREATE_USER_AH) + + u32 retry_count = BNXT_RE_RESOLVE_RETRY_COUNT_US; + struct bnxt_re_resolve_dmac_work *resolve_dmac_work; + + + resolve_dmac_work = kzalloc(sizeof(*resolve_dmac_work), GFP_ATOMIC); + if (!resolve_dmac_work) + return -ENOMEM; + + resolve_dmac_work->rdev = rdev; + resolve_dmac_work->ah_attr = ah_attr; + resolve_dmac_work->ah_info = ah_info; + + atomic_set(&resolve_dmac_work->status_wait, 1); + INIT_WORK(&resolve_dmac_work->work, bnxt_re_resolve_dmac_task); + queue_work(rdev->resolve_wq, &resolve_dmac_work->work); + + do { + rc = atomic_read(&resolve_dmac_work->status_wait) & 0xFF; + if (!rc) + break; + udelay(1); + } while (--retry_count); + if (atomic_read(&resolve_dmac_work->status_wait)) { + INIT_LIST_HEAD(&resolve_dmac_work->list); + list_add_tail(&resolve_dmac_work->list, + &rdev->mac_wq_list); + return -EFAULT; + } + kfree(resolve_dmac_work); +#endif +#endif + } + dmac = ROCE_DMAC(ah_attr); +#else /* HAVE_IB_AH_DMAC */ + bnxt_re_resolve_dmac(rdev, dstmac, &ah->qplib_ah.dgid); + dmac = dstmac; +#endif + if (dmac) + memcpy(ah->qplib_ah.dmac, dmac, ETH_ALEN); + return rc; +} + +#ifdef HAVE_IB_CREATE_AH_UDATA +CREATE_AH_RET bnxt_re_create_ah(CREATE_AH_IN *ah_in, + RDMA_AH_ATTR_IN *attr, +#ifndef HAVE_RDMA_AH_INIT_ATTR +#ifdef HAVE_SLEEPABLE_AH + u32 flags, +#endif +#endif + struct ib_udata *udata) +#else +struct ib_ah *bnxt_re_create_ah(CREATE_AH_IN *ah_in, + RDMA_AH_ATTR_IN *attr) +#endif +{ + +#ifndef HAVE_AH_ALLOC_IN_IB_CORE + struct ib_pd *ib_pd = ah_in; + struct bnxt_re_pd *pd = to_bnxt_re(ib_pd, struct bnxt_re_pd, ib_pd); + struct bnxt_re_ah *ah; +#else + struct ib_ah *ib_ah = ah_in; + struct ib_pd *ib_pd = ib_ah->pd; + struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah); + struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); +#endif + struct bnxt_qplib_dev_attr *dev_attr; + struct bnxt_re_dev *rdev = pd->rdev; + struct bnxt_re_ah_info ah_info; + u32 max_ah_count; + bool is_user; + int rc; + bool block = true; +#ifdef 
HAVE_RDMA_AH_INIT_ATTR + struct rdma_ah_attr *ah_attr = attr->ah_attr; + + block = !(attr->flags & RDMA_CREATE_AH_SLEEPABLE); +#else + RDMA_AH_ATTR *ah_attr = attr; +#ifdef HAVE_SLEEPABLE_AH + block = !(flags & RDMA_CREATE_AH_SLEEPABLE); +#endif +#endif + + if (!(ah_attr->ah_flags & IB_AH_GRH)) + dev_err(rdev_to_dev(rdev), "ah_attr->ah_flags GRH is not set"); + + dev_attr = rdev->dev_attr; + if (atomic_read(&rdev->stats.rsors.ah_count) >= dev_attr->max_ah) { + dev_err_ratelimited(rdev_to_dev(rdev), + "Max AH limit %d reached!", dev_attr->max_ah); +#ifndef HAVE_AH_ALLOC_IN_IB_CORE + return ERR_PTR(-EINVAL); +#else + return -EINVAL; +#endif + } + +#ifndef HAVE_AH_ALLOC_IN_IB_CORE + ah = kzalloc(sizeof(*ah), GFP_ATOMIC); + if (!ah) { + rc = -ENOMEM; + goto fail; + } +#endif + ah->rdev = rdev; + ah->qplib_ah.pd = &pd->qplib_pd; + is_user = ib_pd->uobject ? true : false; + + /* Supply the configuration for the HW */ + memcpy(ah->qplib_ah.dgid.data, ah_attr->grh.dgid.raw, + sizeof(union ib_gid)); + ah->qplib_ah.sgid_index = _get_sgid_index(rdev, ah_attr->grh.sgid_index); + if (ah->qplib_ah.sgid_index == 0xFF) { + rc = -EINVAL; + goto fail; + } + ah->qplib_ah.host_sgid_index = ah_attr->grh.sgid_index; + ah->qplib_ah.traffic_class = ah_attr->grh.traffic_class; + ah->qplib_ah.flow_label = ah_attr->grh.flow_label; + ah->qplib_ah.hop_limit = ah_attr->grh.hop_limit; + ah->qplib_ah.sl = ah_attr->sl; + rc = bnxt_re_get_ah_info(rdev, ah_attr, &ah_info); + if (rc) + goto fail; + ah->qplib_ah.nw_type = ah_info.nw_type; + + rc = bnxt_re_init_dmac(rdev, ah_attr, &ah_info, is_user, ah); + if (rc) + goto fail; + + rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah, block); + if (rc) { + dev_err_ratelimited(rdev_to_dev(rdev), + "Create HW Address Handle failed!"); + goto fail; + } + + /* Write AVID to shared page. */ + if (ib_pd->uobject) { + struct ib_ucontext *ib_uctx = ib_pd->uobject->context; + struct bnxt_re_ucontext *uctx; + unsigned long flag; + u32 *wrptr; + + uctx = to_bnxt_re(ib_uctx, struct bnxt_re_ucontext, ib_uctx); + spin_lock_irqsave(&uctx->sh_lock, flag); + wrptr = (u32 *)(uctx->shpg + BNXT_RE_AVID_OFFT); + *wrptr = ah->qplib_ah.id; + wmb(); /* make sure cache is updated. 
*/ + spin_unlock_irqrestore(&uctx->sh_lock, flag); + } + atomic_inc(&rdev->stats.rsors.ah_count); + max_ah_count = atomic_read(&rdev->stats.rsors.ah_count); + if (max_ah_count > atomic_read(&rdev->stats.rsors.max_ah_count)) + atomic_set(&rdev->stats.rsors.max_ah_count, max_ah_count); + +#ifndef HAVE_AH_ALLOC_IN_IB_CORE + return &ah->ib_ah; +fail: + kfree(ah); + return ERR_PTR(rc); +#else + return 0; +fail: + return rc; +#endif +} + +int bnxt_re_query_ah(struct ib_ah *ib_ah, RDMA_AH_ATTR *ah_attr) +{ + struct bnxt_re_ah *ah = to_bnxt_re(ib_ah, struct bnxt_re_ah, ib_ah); + +#ifdef HAVE_ROCE_AH_ATTR + ah_attr->type = ib_ah->type; +#endif + memcpy(ah_attr->grh.dgid.raw, ah->qplib_ah.dgid.data, + sizeof(union ib_gid)); + ah_attr->grh.sgid_index = ah->qplib_ah.host_sgid_index; + ah_attr->grh.traffic_class = ah->qplib_ah.traffic_class; + ah_attr->sl = ah->qplib_ah.sl; +#ifdef HAVE_IB_AH_DMAC + memcpy(ROCE_DMAC(ah_attr), ah->qplib_ah.dmac, ETH_ALEN); +#endif + ah_attr->ah_flags = IB_AH_GRH; + ah_attr->port_num = 1; + ah_attr->static_rate = 0; + return 0; +} + +/* Shared Receive Queues */ +DESTROY_SRQ_RET bnxt_re_destroy_srq(struct ib_srq *ib_srq +#ifdef HAVE_DESTROY_SRQ_UDATA + , struct ib_udata *udata +#endif + ) +{ + struct bnxt_re_srq *srq = to_bnxt_re(ib_srq, struct bnxt_re_srq, ib_srq); + struct bnxt_re_dev *rdev = srq->rdev; + struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq; + int rc = 0; + + BNXT_RE_DBR_LIST_DEL(rdev, srq, BNXT_RE_RES_TYPE_SRQ); + + if (rdev->hdbr_enabled) + bnxt_re_hdbr_db_unreg_srq(rdev, srq); + + rc = bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq); + if (rc) + dev_err_ratelimited(rdev_to_dev(rdev), + "%s id = %d failed rc = %d", + __func__, qplib_srq->id, rc); + + if (srq->umem && !IS_ERR(srq->umem)) + ib_umem_release(srq->umem); + /* TODO: Must free the actual SRQ DMA memory */ + + atomic_dec(&rdev->stats.rsors.srq_count); + +#ifndef HAVE_SRQ_CREATE_IN_IB_CORE + kfree(srq); +#endif + +#ifndef HAVE_DESTROY_SRQ_RET_VOID + /* return success for destroy resources */ + return 0; +#endif +} + +static u16 _max_rwqe_sz(int nsge) +{ + return sizeof(struct rq_wqe_hdr) + (nsge * sizeof(struct sq_sge)); +} + +static u16 bnxt_re_get_rwqe_size(struct bnxt_re_dev *rdev, + struct bnxt_qplib_qp *qplqp, + int rsge, int max) +{ + /* For Static wqe mode, use max wqe size only if + * FW doesn't support smaller recv wqes + */ + if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC && + !qplqp->small_recv_wqe_sup) + rsge = max; + return _max_rwqe_sz(rsge); +} + +static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev, + struct bnxt_re_pd *pd, + struct bnxt_re_srq *srq, + struct ib_udata *udata) +{ + struct bnxt_qplib_sg_info *sginfo; + struct bnxt_qplib_srq *qplib_srq; + struct bnxt_re_ucontext *cntx; + struct ib_ucontext *context; + struct bnxt_re_srq_req ureq; + struct ib_umem *umem; + int rc, bytes = 0; + + context = pd->ib_pd.uobject->context; + cntx = to_bnxt_re(context, struct bnxt_re_ucontext, ib_uctx); + qplib_srq = &srq->qplib_srq; + sginfo = &qplib_srq->sginfo; + + if (udata->inlen < sizeof(ureq)) + dev_warn_once(rdev_to_dev(rdev), + "Update the library ulen %d klen %d", + (unsigned int)udata->inlen, + (unsigned int)sizeof(ureq)); + + rc = ib_copy_from_udata(&ureq, udata, + min(udata->inlen, sizeof(ureq))); + if (rc) + return rc; + + bytes = (qplib_srq->max_wqe * qplib_srq->wqe_size); + bytes = PAGE_ALIGN(bytes); + umem = ib_umem_get_compat(rdev, context, udata, ureq.srqva, bytes, + IB_ACCESS_LOCAL_WRITE, 1); + if (IS_ERR(umem)) { + dev_err(rdev_to_dev(rdev), "%s: ib_umem_get 
failed with %ld\n", + __func__, PTR_ERR(umem)); + return PTR_ERR(umem); + } + + srq->umem = umem; + sginfo->npages = ib_umem_num_pages_compat(umem); +#ifndef HAVE_RDMA_UMEM_FOR_EACH_DMA_BLOCK + sginfo->sghead = get_ib_umem_sgl(umem, &sginfo->nmap); +#else + sginfo->umem = umem; +#endif + qplib_srq->srq_handle = ureq.srq_handle; + qplib_srq->dpi = &cntx->dpi; + qplib_srq->is_user = true; + + return 0; +} + +CREATE_SRQ_RET bnxt_re_create_srq(CREATE_SRQ_IN *srq_in, + struct ib_srq_init_attr *srq_init_attr, + struct ib_udata *udata) +{ + struct bnxt_qplib_dev_attr *dev_attr; + struct bnxt_re_ucontext *cntx = NULL; + struct ib_ucontext *context; + struct bnxt_re_dev *rdev; + struct bnxt_re_pd *pd; + int rc, entries; + int num_sge; +#ifdef HAVE_SRQ_CREATE_IN_IB_CORE + struct ib_srq *ib_srq = srq_in; + struct ib_pd *ib_pd = ib_srq->pd; + struct bnxt_re_srq *srq = + container_of(ib_srq, struct bnxt_re_srq, ib_srq); +#else + struct ib_pd *ib_pd = srq_in; + struct bnxt_re_srq *srq; +#endif + u32 max_srq_count; + + pd = to_bnxt_re(ib_pd, struct bnxt_re_pd, ib_pd); + rdev = pd->rdev; + dev_attr = rdev->dev_attr; + + if (srq_init_attr->srq_type != IB_SRQT_BASIC) { + dev_err(rdev_to_dev(rdev), "SRQ type not supported"); + rc = -ENOTSUPP; + goto exit; + } + + if (udata) { + context = pd->ib_pd.uobject->context; + cntx = to_bnxt_re(context, struct bnxt_re_ucontext, ib_uctx); + } + + if (atomic_read(&rdev->stats.rsors.srq_count) >= dev_attr->max_srq) { + dev_err(rdev_to_dev(rdev), "Create SRQ failed - max exceeded(SRQs)"); + rc = -EINVAL; + goto exit; + } + + if (srq_init_attr->attr.max_wr >= dev_attr->max_srq_wqes) { + dev_err(rdev_to_dev(rdev), "Create SRQ failed - max exceeded(SRQ_WQs)"); + rc = -EINVAL; + goto exit; + } +#ifndef HAVE_SRQ_CREATE_IN_IB_CORE + srq = kzalloc(sizeof(*srq), GFP_KERNEL); + if (!srq) { + rc = -ENOMEM; + goto exit; + } +#endif + srq->rdev = rdev; + srq->qplib_srq.pd = &pd->qplib_pd; + srq->qplib_srq.dpi = &rdev->dpi_privileged; + + /* Allocate 1 more than what's provided so posting max doesn't + mean empty */ + entries = srq_init_attr->attr.max_wr + 1; + entries = bnxt_re_init_depth(entries, cntx); + if (entries > dev_attr->max_srq_wqes + 1) + entries = dev_attr->max_srq_wqes + 1; + if (cntx) + srq->qplib_srq.small_recv_wqe_sup = cntx->small_recv_wqe_sup; + num_sge = srq->qplib_srq.small_recv_wqe_sup ? 
+ srq_init_attr->attr.max_sge : 6; + srq->qplib_srq.wqe_size = _max_rwqe_sz(num_sge); + srq->qplib_srq.max_wqe = entries; + srq->qplib_srq.max_sge = srq_init_attr->attr.max_sge; + srq->qplib_srq.threshold = srq_init_attr->attr.srq_limit; + srq->srq_limit = srq_init_attr->attr.srq_limit; + srq->qplib_srq.eventq_hw_ring_id = rdev->nqr->nq[0].ring_id; + srq->qplib_srq.sginfo.pgsize = PAGE_SIZE; + srq->qplib_srq.sginfo.pgshft = PAGE_SHIFT; + + if (udata) { + rc = bnxt_re_init_user_srq(rdev, pd, srq, udata); + if (rc) + goto fail; + } + + rc = bnxt_qplib_create_srq(&rdev->qplib_res, &srq->qplib_srq); + if (rc) { + dev_err(rdev_to_dev(rdev), "Create HW SRQ failed!"); + goto fail; + } + + if (udata) { + struct bnxt_re_srq_resp resp = {}; + + if (rdev->hdbr_enabled) { + rc = bnxt_re_hdbr_db_reg_srq(rdev, srq, cntx, &resp); + if (rc) + goto db_reg_fail; + } + resp.srqid = srq->qplib_srq.id; + rc = bnxt_re_copy_to_udata(rdev, &resp, + min(udata->outlen, sizeof(resp)), + udata); + if (rc) + goto cp_usr_fail; + } else { + if (rdev->hdbr_enabled) { + rc = bnxt_re_hdbr_db_reg_srq(rdev, srq, NULL, NULL); + if (rc) + goto db_reg_fail; + } + } + + atomic_inc(&rdev->stats.rsors.srq_count); + max_srq_count = atomic_read(&rdev->stats.rsors.srq_count); + if (max_srq_count > atomic_read(&rdev->stats.rsors.max_srq_count)) + atomic_set(&rdev->stats.rsors.max_srq_count, max_srq_count); + spin_lock_init(&srq->lock); + + BNXT_RE_DBR_LIST_ADD(rdev, srq, BNXT_RE_RES_TYPE_SRQ); +#ifndef HAVE_SRQ_CREATE_IN_IB_CORE + return &srq->ib_srq; +#else + return 0; +#endif +cp_usr_fail: + if (rdev->hdbr_enabled) + bnxt_re_hdbr_db_unreg_srq(rdev, srq); +db_reg_fail: + bnxt_qplib_destroy_srq(&rdev->qplib_res, &srq->qplib_srq); +fail: + if (udata && srq->umem && !IS_ERR(srq->umem)) { + ib_umem_release(srq->umem); + srq->umem = NULL; + } +#ifndef HAVE_SRQ_CREATE_IN_IB_CORE + kfree(srq); +exit: + return ERR_PTR(rc); +#else +exit: + return rc; +#endif +} + +int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr, + enum ib_srq_attr_mask srq_attr_mask, + struct ib_udata *udata) +{ + struct bnxt_re_srq *srq = to_bnxt_re(ib_srq, struct bnxt_re_srq, + ib_srq); + struct bnxt_re_dev *rdev = srq->rdev; + int rc; + + switch (srq_attr_mask) { + case IB_SRQ_MAX_WR: + /* SRQ resize is not supported */ + break; + case IB_SRQ_LIMIT: + /* Change the SRQ threshold */ + if (srq_attr->srq_limit > srq->qplib_srq.max_wqe) + return -EINVAL; + + srq->qplib_srq.threshold = srq_attr->srq_limit; + rc = bnxt_qplib_modify_srq(&rdev->qplib_res, &srq->qplib_srq); + if (rc) { + dev_err(rdev_to_dev(rdev), "Modify HW SRQ failed!"); + return rc; + } + /* On success, update the shadow */ + srq->srq_limit = srq_attr->srq_limit; + + if (udata) { + /* Build and send response back to udata */ + rc = bnxt_re_copy_to_udata(rdev, srq, 0, udata); + if (rc) + return rc; + } + break; + default: + dev_err(rdev_to_dev(rdev), + "Unsupported srq_attr_mask 0x%x", srq_attr_mask); + return -EINVAL; + } + return 0; +} + +int bnxt_re_query_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr) +{ + struct bnxt_re_srq *srq = to_bnxt_re(ib_srq, struct bnxt_re_srq, + ib_srq); + struct bnxt_re_dev *rdev = srq->rdev; + int rc; + + /* Get live SRQ attr */ + /*TODO: qplib query_srq is incomplete. */ + rc = bnxt_qplib_query_srq(&rdev->qplib_res, &srq->qplib_srq); + if (rc) { + dev_err(rdev_to_dev(rdev), "Query HW SRQ (0x%x) failed! 
rc = %d", + srq->qplib_srq.id, rc); + return rc; + } + srq_attr->max_wr = srq->qplib_srq.max_wqe; + srq_attr->max_sge = srq->qplib_srq.max_sge; + srq_attr->srq_limit = srq->qplib_srq.threshold; + + return 0; +} + +int bnxt_re_post_srq_recv(struct ib_srq *ib_srq, CONST_STRUCT ib_recv_wr *wr, + CONST_STRUCT ib_recv_wr **bad_wr) +{ + struct bnxt_re_srq *srq = to_bnxt_re(ib_srq, struct bnxt_re_srq, + ib_srq); + struct bnxt_qplib_swqe wqe = {}; + unsigned long flags; + int rc = 0; + + spin_lock_irqsave(&srq->lock, flags); + while (wr) { + /* Transcribe each ib_recv_wr to qplib_swqe */ + wqe.num_sge = wr->num_sge; + wqe.sg_list = (struct bnxt_qplib_sge *)wr->sg_list; + wqe.wr_id = wr->wr_id; + wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV; + rc = bnxt_qplib_post_srq_recv(&srq->qplib_srq, &wqe); + if (rc) { + *bad_wr = wr; + break; + } + wr = wr->next; + } + spin_unlock_irqrestore(&srq->lock, flags); + + return rc; +} + +unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp) +{ + unsigned long flags; + + spin_lock_irqsave(&qp->scq->cq_lock, flags); + if (qp->rcq && qp->rcq != qp->scq) + spin_lock(&qp->rcq->cq_lock); + + return flags; +} + +void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp, + unsigned long flags) +{ + if (qp->rcq && qp->rcq != qp->scq) + spin_unlock(&qp->rcq->cq_lock); + spin_unlock_irqrestore(&qp->scq->cq_lock, flags); +} + +/* Queue Pairs */ +static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp) +{ + struct bnxt_re_qp *gsi_sqp; + struct bnxt_re_ah *gsi_sah; + struct bnxt_re_dev *rdev; + unsigned long flags; + int rc = 0; + + rdev = qp->rdev; + gsi_sqp = rdev->gsi_ctx.gsi_sqp; + gsi_sah = rdev->gsi_ctx.gsi_sah; + + /* remove from active qp list */ + mutex_lock(&rdev->qp_lock); + list_del(&gsi_sqp->list); + mutex_unlock(&rdev->qp_lock); + + if (gsi_sah) { + dev_dbg(rdev_to_dev(rdev), "Destroy the shadow AH\n"); + rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, &gsi_sah->qplib_ah, + true); + if (rc) + dev_err(rdev_to_dev(rdev), + "Destroy HW AH for shadow QP failed!"); + atomic_dec(&rdev->stats.rsors.ah_count); + } + + dev_dbg(rdev_to_dev(rdev), "Destroy the shadow QP\n"); + rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &gsi_sqp->qplib_qp); + if (rc) + dev_err(rdev_to_dev(rdev), "Destroy Shadow QP failed"); + + /* Clean the CQ for shadow QP completions */ + flags = bnxt_re_lock_cqs(gsi_sqp); + bnxt_qplib_clean_qp(&gsi_sqp->qplib_qp); + bnxt_re_unlock_cqs(gsi_sqp, flags); + + bnxt_qplib_free_qp_res(&rdev->qplib_res, &gsi_sqp->qplib_qp); + bnxt_qplib_free_hdr_buf(&rdev->qplib_res, &gsi_sqp->qplib_qp); + kfree(rdev->gsi_ctx.sqp_tbl); + kfree(gsi_sah); + kfree(gsi_sqp); + rdev->gsi_ctx.gsi_sqp = NULL; + rdev->gsi_ctx.gsi_sah = NULL; + rdev->gsi_ctx.sqp_tbl = NULL; + atomic_dec(&rdev->stats.rsors.qp_count); + + return 0; +} + +static void bnxt_re_dump_debug_stats(struct bnxt_re_dev *rdev, u32 active_qps) +{ + u32 total_qp = 0; + u64 avg_time = 0; + int i; + + if (!rdev->rcfw.sp_perf_stats_enabled) + return; + + switch (active_qps) { + case 1: + /* Potential hint for Test Stop */ + for (i = 0; i < RCFW_MAX_STAT_INDEX; i++) { + if (rdev->rcfw.qp_destroy_stats[i]) { + total_qp++; + avg_time += rdev->rcfw.qp_destroy_stats[i]; + } + } + dev_dbg(rdev_to_dev(rdev), + "Perf Debug: %ps Total (%d) QP destroyed in (%d) msec", + __builtin_return_address(0), total_qp, + jiffies_to_msecs(avg_time)); + break; + case 2: + /* Potential hint for Test Start */ + dev_dbg(rdev_to_dev(rdev), + "Perf Debug: %ps active_qps = %d\n", + __builtin_return_address(0), active_qps); + break; + default: + /* Potential hint to know 
latency of QP destroy. + * Average time taken for 1K QP Destroy. + */ + if (active_qps > 1024 && !(active_qps % 1024)) + dev_dbg(rdev_to_dev(rdev), + "Perf Debug: %ps Active QP (%d) Watermark (%d)", + __builtin_return_address(0), active_qps, + atomic_read(&rdev->stats.rsors.max_qp_count)); + break; + } +} + +int bnxt_re_destroy_qp(struct ib_qp *ib_qp +#ifdef HAVE_DESTROY_QP_UDATA + , struct ib_udata *udata +#endif + ) +{ + struct bnxt_re_qp *qp = to_bnxt_re(ib_qp, struct bnxt_re_qp, ib_qp); + struct bnxt_qplib_qp *qplib_qp = &qp->qplib_qp; + struct bnxt_re_dev *rdev = qp->rdev; + struct bnxt_qplib_nq *scq_nq = NULL; + struct bnxt_qplib_nq *rcq_nq = NULL; + unsigned long flags; + u32 active_qps; + int rc; + + mutex_lock(&rdev->qp_lock); + list_del(&qp->list); + BNXT_RE_DBR_LIST_DEL(rdev, qp, BNXT_RE_RES_TYPE_QP); + active_qps = atomic_dec_return(&rdev->stats.rsors.qp_count); + if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_RC) + atomic_dec(&rdev->stats.rsors.rc_qp_count); + else if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD) + atomic_dec(&rdev->stats.rsors.ud_qp_count); + if (qp->qplib_qp.ppp.st_idx_en & CREQ_MODIFY_QP_RESP_PINGPONG_PUSH_ENABLED) + rdev->ppp_stats.ppp_enabled_qps--; + mutex_unlock(&rdev->qp_lock); + + if (rdev->hdbr_enabled) + bnxt_re_hdbr_db_unreg_qp(rdev, qp); + + bnxt_re_qp_info_rem_qpinfo(rdev, qp); + + if (!ib_qp->uobject) + bnxt_qplib_flush_cqn_wq(&qp->qplib_qp); + + rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp); + if (rc) + dev_err_ratelimited(rdev_to_dev(rdev), + "%s id = %d failed rc = %d", + __func__, qp->qplib_qp.id, rc); + + if (!ib_qp->uobject) { + flags = bnxt_re_lock_cqs(qp); + bnxt_qplib_clean_qp(&qp->qplib_qp); + bnxt_re_unlock_cqs(qp, flags); + } + + bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp); + if (ib_qp->qp_type == IB_QPT_GSI && + rdev->gsi_ctx.gsi_qp_mode != BNXT_RE_GSI_MODE_UD) { + if (rdev->gsi_ctx.gsi_qp_mode == BNXT_RE_GSI_MODE_ALL && + rdev->gsi_ctx.gsi_sqp) { + bnxt_re_destroy_gsi_sqp(qp); + } + bnxt_qplib_free_hdr_buf(&rdev->qplib_res, &qp->qplib_qp); + } + + if (qp->rumem && !IS_ERR(qp->rumem)) + ib_umem_release(qp->rumem); + if (qp->sumem && !IS_ERR(qp->sumem)) + ib_umem_release(qp->sumem); + + /* Flush all the entries of notification queue associated with + * given qp. 
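+	 * Both the send-CQ and recv-CQ notification queues are
+	 * synchronized below; when the two CQs share an NQ it is
+	 * drained only once, so no pending NQ work can still
+	 * reference this QP once it is released.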
+ */ + scq_nq = qplib_qp->scq->nq; + rcq_nq = qplib_qp->rcq->nq; + bnxt_re_synchronize_nq(scq_nq); + if (scq_nq != rcq_nq) + bnxt_re_synchronize_nq(rcq_nq); + +#ifndef HAVE_QP_ALLOC_IN_IB_CORE + kfree(qp); +#endif + + bnxt_re_dump_debug_stats(rdev, active_qps); + + /* return success for destroy resources */ + return 0; +} + +static u8 __from_ib_qp_type(enum ib_qp_type type) +{ + switch (type) { + case IB_QPT_GSI: + return CMDQ_CREATE_QP1_TYPE_GSI; + case IB_QPT_RC: + return CMDQ_CREATE_QP_TYPE_RC; + case IB_QPT_UD: + return CMDQ_CREATE_QP_TYPE_UD; + case IB_QPT_RAW_ETHERTYPE: + return CMDQ_CREATE_QP_TYPE_RAW_ETHERTYPE; + default: + return IB_QPT_MAX; + } +} + +static u16 _get_swqe_sz(int nsge) +{ + return sizeof(struct sq_send_hdr) + nsge * sizeof(struct sq_sge); +} + +static int bnxt_re_get_swqe_size(int ilsize, int nsge, int align) +{ + u16 wqe_size, calc_ils; + + wqe_size = _get_swqe_sz(nsge); + if (ilsize) { + calc_ils = (sizeof(struct sq_send_hdr) + ilsize); + wqe_size = max_t(int, calc_ils, wqe_size); + wqe_size = ALIGN(wqe_size, align); + } + return wqe_size; +} + +static int bnxt_re_setup_swqe_size(struct bnxt_re_qp *qp, + struct ib_qp_init_attr *init_attr) +{ + struct bnxt_qplib_dev_attr *dev_attr; + struct bnxt_qplib_qp *qplqp; + struct bnxt_re_dev *rdev; + struct bnxt_qplib_q *sq; + int align, ilsize; + + rdev = qp->rdev; + qplqp = &qp->qplib_qp; + sq = &qplqp->sq; + dev_attr = rdev->dev_attr; + + align = sizeof(struct sq_send_hdr); + if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE) + align = sizeof(struct sq_sge); + ilsize = ALIGN(init_attr->cap.max_inline_data, align); + + sq->wqe_size = bnxt_re_get_swqe_size(ilsize, sq->max_sge, align); + if (sq->wqe_size > _get_swqe_sz(dev_attr->max_qp_sges)) + return -EINVAL; + /* For Cu/Wh and gen p5 backward compatibility mode + * wqe size is fixed to 128 bytes + */ + if (sq->wqe_size < _get_swqe_sz(dev_attr->max_qp_sges) && + qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) + sq->wqe_size = _get_swqe_sz(dev_attr->max_qp_sges); + + if (init_attr->cap.max_inline_data) { + qplqp->max_inline_data = sq->wqe_size - + sizeof(struct sq_send_hdr); + init_attr->cap.max_inline_data = qplqp->max_inline_data; + if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) + sq->max_sge = qplqp->max_inline_data / + sizeof(struct sq_sge); + } + + return 0; +} + +static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, + struct bnxt_re_pd *pd, struct bnxt_re_qp *qp, + struct ib_udata *udata) +{ + struct bnxt_qplib_sg_info *sginfo; + struct bnxt_qplib_qp *qplib_qp; + struct bnxt_re_ucontext *cntx; + struct ib_ucontext *context; + struct bnxt_re_qp_req ureq; + struct ib_umem *umem; + int rc, bytes = 0; + int psn_nume; + int psn_sz; + + qplib_qp = &qp->qplib_qp; + context = pd->ib_pd.uobject->context; + cntx = to_bnxt_re(context, struct bnxt_re_ucontext, ib_uctx); + sginfo = &qplib_qp->sq.sginfo; + + if (udata->inlen < sizeof(ureq)) + dev_warn_once(rdev_to_dev(rdev), + "Update the library ulen %d klen %d", + (unsigned int)udata->inlen, + (unsigned int)sizeof(ureq)); + + rc = ib_copy_from_udata(&ureq, udata, + min(udata->inlen, sizeof(ureq))); + if (rc) + return rc; + + bytes = (qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size); + /* Consider mapping PSN search memory only for RC QPs. */ + if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC) { + psn_sz = _is_chip_gen_p5_p7(rdev->chip_ctx) ? 
+ sizeof(struct sq_psn_search_ext) : + sizeof(struct sq_psn_search); + if (rdev->dev_attr && BNXT_RE_HW_RETX(rdev->dev_attr->dev_cap_flags)) + psn_sz = sizeof(struct sq_msn_search); + psn_nume = (qplib_qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ? + qplib_qp->sq.max_wqe : + ((qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size) / + sizeof(struct bnxt_qplib_sge)); + if (BNXT_RE_HW_RETX(rdev->dev_attr->dev_cap_flags)) + psn_nume = roundup_pow_of_two(psn_nume); + + bytes += (psn_nume * psn_sz); + } + bytes = PAGE_ALIGN(bytes); + umem = ib_umem_get_compat(rdev, context, udata, ureq.qpsva, bytes, + IB_ACCESS_LOCAL_WRITE, 1); + if (IS_ERR(umem)) { + dev_err(rdev_to_dev(rdev), "%s: ib_umem_get failed with %ld\n", + __func__, PTR_ERR(umem)); + return PTR_ERR(umem); + } + + qp->sumem = umem; + sginfo->npages = ib_umem_num_pages_compat(umem); + /* pgsize and pgshft were initialize already. */ +#ifndef HAVE_RDMA_UMEM_FOR_EACH_DMA_BLOCK + sginfo->sghead = get_ib_umem_sgl(umem, &sginfo->nmap); +#else + sginfo->umem = umem; +#endif + qplib_qp->qp_handle = ureq.qp_handle; + + if (!qp->qplib_qp.srq) { + sginfo = &qplib_qp->rq.sginfo; + bytes = (qplib_qp->rq.max_wqe * qplib_qp->rq.wqe_size); + bytes = PAGE_ALIGN(bytes); + umem = ib_umem_get_compat(rdev, + context, udata, ureq.qprva, bytes, + IB_ACCESS_LOCAL_WRITE, 1); + if (IS_ERR(umem)) { + dev_err(rdev_to_dev(rdev), + "%s: ib_umem_get failed ret =%ld\n", + __func__, PTR_ERR(umem)); + goto rqfail; + } + qp->rumem = umem; + sginfo->npages = ib_umem_num_pages_compat(umem); + /* pgsize and pgshft were initialize already. */ +#ifndef HAVE_RDMA_UMEM_FOR_EACH_DMA_BLOCK + sginfo->sghead = get_ib_umem_sgl(umem, &sginfo->nmap); +#else + sginfo->umem = umem; +#endif + } /* TODO: Add srq. */ + + qplib_qp->dpi = &cntx->dpi; + qplib_qp->is_user = true; + + return 0; +rqfail: + ib_umem_release(qp->sumem); + qp->sumem = NULL; +#ifndef HAVE_RDMA_UMEM_FOR_EACH_DMA_BLOCK + qplib_qp->sq.sginfo.sghead = NULL; + qplib_qp->sq.sginfo.nmap = 0; +#else + qplib_qp->sq.sginfo.umem = NULL; +#endif + + return PTR_ERR(umem); +} + +static struct bnxt_re_ah *bnxt_re_create_shadow_qp_ah(struct bnxt_re_pd *pd, + struct bnxt_qplib_res *qp1_res, + struct bnxt_qplib_qp *qp1_qp) +{ + struct bnxt_re_dev *rdev = pd->rdev; + struct bnxt_re_ah *ah; + union ib_gid sgid; + int rc; + + ah = kzalloc(sizeof(*ah), GFP_KERNEL); + if (!ah) + return NULL; + memset(ah, 0, sizeof(*ah)); + ah->rdev = rdev; + ah->qplib_ah.pd = &pd->qplib_pd; + + rc = bnxt_re_query_gid(&rdev->ibdev, 1, 0, &sgid); + if (rc) + goto fail; + + /* supply the dgid data same as sgid */ + memcpy(ah->qplib_ah.dgid.data, &sgid.raw, + sizeof(union ib_gid)); + ah->qplib_ah.sgid_index = 0; + + ah->qplib_ah.traffic_class = 0; + ah->qplib_ah.flow_label = 0; + ah->qplib_ah.hop_limit = 1; + ah->qplib_ah.sl = 0; + /* Have DMAC same as SMAC */ + ether_addr_copy(ah->qplib_ah.dmac, rdev->netdev->dev_addr); + dev_dbg(rdev_to_dev(rdev), "ah->qplib_ah.dmac = %x:%x:%x:%x:%x:%x\n", + ah->qplib_ah.dmac[0], ah->qplib_ah.dmac[1], ah->qplib_ah.dmac[2], + ah->qplib_ah.dmac[3], ah->qplib_ah.dmac[4], ah->qplib_ah.dmac[5]); + + rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah, true); + if (rc) { + dev_err(rdev_to_dev(rdev), + "Allocate HW AH for Shadow QP failed!"); + goto fail; + } + dev_dbg(rdev_to_dev(rdev), "AH ID = %d\n", ah->qplib_ah.id); + atomic_inc(&rdev->stats.rsors.ah_count); + + return ah; +fail: + kfree(ah); + return NULL; +} + +void bnxt_re_update_shadow_ah(struct bnxt_re_dev *rdev) +{ + struct bnxt_re_qp *gsi_qp; + struct bnxt_re_ah *sah; + 
struct bnxt_re_pd *pd; + struct ib_pd *ib_pd; + int rc; + + if (!rdev) + return; + + sah = rdev->gsi_ctx.gsi_sah; + + dev_dbg(rdev_to_dev(rdev), "Updating the AH\n"); + if (sah) { + /* Check if the AH created with current mac address */ + if (!compare_ether_header(sah->qplib_ah.dmac, rdev->netdev->dev_addr)) { + dev_dbg(rdev_to_dev(rdev), + "Not modifying shadow AH during AH update\n"); + return; + } + + gsi_qp = rdev->gsi_ctx.gsi_qp; + ib_pd = gsi_qp->ib_qp.pd; + pd = to_bnxt_re(ib_pd, struct bnxt_re_pd, ib_pd); + rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, + &sah->qplib_ah, false); + if (rc) { + dev_err(rdev_to_dev(rdev), + "Failed to destroy shadow AH during AH update"); + return; + } + atomic_dec(&rdev->stats.rsors.ah_count); + kfree(sah); + rdev->gsi_ctx.gsi_sah = NULL; + + sah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res, + &gsi_qp->qplib_qp); + if (!sah) { + dev_err(rdev_to_dev(rdev), + "Failed to update AH for ShadowQP"); + return; + } + rdev->gsi_ctx.gsi_sah = sah; + atomic_inc(&rdev->stats.rsors.ah_count); + } +} + +#ifdef POST_QP1_DUMMY_WQE +static int post_qp1_dummy_wqe(struct bnxt_re_qp *qp) +{ + struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp; + struct bnxt_qplib_swqe wqe = {0}; + unsigned long flags; + int rc = 0; + + spin_lock_irqsave(&qp->sq_lock, flags); + + wqe.num_sge = 0; + wqe.wr_id = BNXT_QPLIB_QP1_DUMMY_WRID; + wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND; + wqe.flags = 0; + + rc = bnxt_qplib_post_send(lib_qp, &wqe); + if (!rc) + bnxt_qplib_post_send_db(lib_qp); + + spin_unlock_irqrestore(&qp->sq_lock, flags); + return rc; +} +#endif /* POST_QP1_DUMMY_WQE */ + +static struct bnxt_re_qp *bnxt_re_create_shadow_qp(struct bnxt_re_pd *pd, + struct bnxt_qplib_res *qp1_res, + struct bnxt_qplib_qp *qp1_qp) +{ + struct bnxt_re_dev *rdev = pd->rdev; + struct bnxt_re_qp *qp; + int rc; + + qp = kzalloc(sizeof(*qp), GFP_KERNEL); + if (!qp) + return NULL; + memset(qp, 0, sizeof(*qp)); + qp->rdev = rdev; + + /* Initialize the shadow QP structure from the QP1 values */ + ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr); + qp->qplib_qp.pd = &pd->qplib_pd; + qp->qplib_qp.qp_handle = (u64)&qp->qplib_qp; + qp->qplib_qp.type = IB_QPT_UD; + + qp->qplib_qp.max_inline_data = 0; + qp->qplib_qp.sig_type = true; + + /* Shadow QP SQ depth should be same as QP1 RQ depth */ + qp->qplib_qp.sq.wqe_size = bnxt_re_get_swqe_size(0, 6, 32); + qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe; + qp->qplib_qp.sq.max_sge = 2; + qp->qplib_qp.sq.max_sw_wqe = qp->qplib_qp.sq.max_wqe; + /* Q full delta can be 1 since it is internal QP */ + qp->qplib_qp.sq.q_full_delta = 1; + qp->qplib_qp.sq.sginfo.pgsize = PAGE_SIZE; + qp->qplib_qp.sq.sginfo.pgshft = PAGE_SHIFT; + + qp->qplib_qp.scq = qp1_qp->scq; + qp->qplib_qp.rcq = qp1_qp->rcq; + + qp->qplib_qp.rq.wqe_size = _max_rwqe_sz(6); /* 128 Byte wqe size */ + qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe; + qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge; + qp->qplib_qp.rq.max_sw_wqe = qp->qplib_qp.sq.max_wqe; + qp->qplib_qp.rq.sginfo.pgsize = PAGE_SIZE; + qp->qplib_qp.rq.sginfo.pgshft = PAGE_SHIFT; + /* Q full delta can be 1 since it is internal QP */ + qp->qplib_qp.rq.q_full_delta = 1; + qp->qplib_qp.mtu = qp1_qp->mtu; + qp->qplib_qp.dpi = &rdev->dpi_privileged; + + rc = bnxt_qplib_alloc_hdr_buf(qp1_res, &qp->qplib_qp, 0, + BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6); + if (rc) + goto fail; + + rc = bnxt_qplib_create_qp(qp1_res, &qp->qplib_qp); + if (rc) { + dev_err(rdev_to_dev(rdev), "create HW QP failed!"); + goto qp_fail; + } + + dev_dbg(rdev_to_dev(rdev), "Created shadow 
QP with ID = %d\n", + qp->qplib_qp.id); + spin_lock_init(&qp->sq_lock); + INIT_LIST_HEAD(&qp->list); + mutex_lock(&rdev->qp_lock); + list_add_tail(&qp->list, &rdev->qp_list); + atomic_inc(&rdev->stats.rsors.qp_count); + mutex_unlock(&rdev->qp_lock); + return qp; +qp_fail: + bnxt_qplib_free_hdr_buf(qp1_res, &qp->qplib_qp); +fail: + kfree(qp); + return NULL; +} + +static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp, + struct ib_qp_init_attr *init_attr, + struct bnxt_re_ucontext *cntx) +{ + struct bnxt_qplib_dev_attr *dev_attr; + struct bnxt_qplib_qp *qplqp; + struct bnxt_re_dev *rdev; + struct bnxt_qplib_q *rq; + int entries; + + rdev = qp->rdev; + qplqp = &qp->qplib_qp; + rq = &qplqp->rq; + dev_attr = rdev->dev_attr; + + if (init_attr->srq) { + struct bnxt_re_srq *srq; + + srq = to_bnxt_re(init_attr->srq, struct bnxt_re_srq, ib_srq); + if (!srq) { + dev_err(rdev_to_dev(rdev), "SRQ not found"); + return -EINVAL; + } + qplqp->srq = &srq->qplib_srq; + rq->max_wqe = 0; + } else { + if (cntx) + qplqp->small_recv_wqe_sup = cntx->small_recv_wqe_sup; + rq->max_sge = init_attr->cap.max_recv_sge; + if (rq->max_sge > dev_attr->max_qp_sges) + rq->max_sge = dev_attr->max_qp_sges; + init_attr->cap.max_recv_sge = rq->max_sge; + rq->wqe_size = bnxt_re_get_rwqe_size(rdev, qplqp, rq->max_sge, + dev_attr->max_qp_sges); + + /* Allocate 1 more than what's provided so posting max doesn't + mean empty */ + entries = init_attr->cap.max_recv_wr + 1; + entries = bnxt_re_init_depth(entries, cntx); + rq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + 1); + rq->max_sw_wqe = rq->max_wqe; + rq->q_full_delta = 0; + rq->sginfo.pgsize = PAGE_SIZE; + rq->sginfo.pgshft = PAGE_SHIFT; + } + + return 0; +} + +static void bnxt_re_adjust_gsi_rq_attr(struct bnxt_re_qp *qp) +{ + struct bnxt_qplib_dev_attr *dev_attr; + struct bnxt_qplib_qp *qplqp; + struct bnxt_re_dev *rdev; + + rdev = qp->rdev; + qplqp = &qp->qplib_qp; + dev_attr = rdev->dev_attr; + + if (rdev->gsi_ctx.gsi_qp_mode != BNXT_RE_GSI_MODE_UD) + qplqp->rq.max_sge = dev_attr->max_qp_sges; +} + +static int bnxt_re_init_sq_attr(struct bnxt_re_qp *qp, + struct ib_qp_init_attr *init_attr, + struct bnxt_re_ucontext *cntx) +{ + struct bnxt_qplib_dev_attr *dev_attr; + struct bnxt_qplib_qp *qplqp; + struct bnxt_re_dev *rdev; + struct bnxt_qplib_q *sq; + int diff = 0; + int entries; + int rc; + + rdev = qp->rdev; + qplqp = &qp->qplib_qp; + sq = &qplqp->sq; + dev_attr = rdev->dev_attr; + + sq->max_sge = init_attr->cap.max_send_sge; + + if (sq->max_sge > dev_attr->max_qp_sges) { + sq->max_sge = dev_attr->max_qp_sges; + init_attr->cap.max_send_sge = sq->max_sge; + } + rc = bnxt_re_setup_swqe_size(qp, init_attr); + if (rc) + return rc; + /* + * Change the SQ depth if user has requested minimum using + * configfs. Only supported for kernel consumers. Setting + * min_tx_depth to 4096 to handle iser SQ full condition + * in most of the newer OS distros + */ + entries = init_attr->cap.max_send_wr; + if (!cntx && rdev->min_tx_depth && init_attr->qp_type != IB_QPT_GSI) { + /* + * If users specify any value greater than 1 use min_tx_depth + * provided by user for comparison. Else, compare it with the + * BNXT_RE_MIN_KERNEL_QP_TX_DEPTH and adjust it accordingly. 
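+	 * Illustrative example: with min_tx_depth set to 4096 via
+	 * configfs and max_send_wr = 16, the SQ depth is raised to
+	 * 4096; if min_tx_depth is left at 1, a small request is
+	 * raised to BNXT_RE_MIN_KERNEL_QP_TX_DEPTH instead. Requests
+	 * already above the applicable minimum are left unchanged.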
+ */ + if (rdev->min_tx_depth > 1 && entries < rdev->min_tx_depth) + entries = rdev->min_tx_depth; + else if (entries < BNXT_RE_MIN_KERNEL_QP_TX_DEPTH) + entries = BNXT_RE_MIN_KERNEL_QP_TX_DEPTH; + } + diff = bnxt_re_get_diff(cntx, rdev->chip_ctx); + /* FIXME: the min equation at the boundary condition */ + entries = bnxt_re_init_depth(entries + diff + 1, cntx); + sq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + diff + 1); + sq->q_full_delta = diff + 1; + /* + * Reserving one slot for Phantom WQE. Application can + * post one extra entry in this case. But allowing this to avoid + * unexpected Queue full condition + */ + sq->q_full_delta -= 1; /* becomes 0 for gen-p5 */ + sq->sginfo.pgsize = PAGE_SIZE; + sq->sginfo.pgshft = PAGE_SHIFT; + return 0; +} + +static void bnxt_re_adjust_gsi_sq_attr(struct bnxt_re_qp *qp, + struct ib_qp_init_attr *init_attr, + void *cntx) +{ + struct bnxt_qplib_dev_attr *dev_attr; + struct bnxt_qplib_qp *qplqp; + struct bnxt_re_dev *rdev; + int entries; + + rdev = qp->rdev; + qplqp = &qp->qplib_qp; + dev_attr = rdev->dev_attr; + + if (rdev->gsi_ctx.gsi_qp_mode != BNXT_RE_GSI_MODE_UD) { + entries = init_attr->cap.max_send_wr + 1; + entries = bnxt_re_init_depth(entries, cntx); + qplqp->sq.max_wqe = min_t(u32, entries, + dev_attr->max_qp_wqes + 1); + qplqp->sq.q_full_delta = qplqp->sq.max_wqe - + init_attr->cap.max_send_wr; + qplqp->sq.max_sge++; /* Need one extra sge to put UD header */ + if (qplqp->sq.max_sge > dev_attr->max_qp_sges) + qplqp->sq.max_sge = dev_attr->max_qp_sges; + } +} + +static int bnxt_re_init_qp_type(struct bnxt_re_dev *rdev, + struct ib_qp_init_attr *init_attr) +{ + struct bnxt_qplib_chip_ctx *chip_ctx; + struct bnxt_re_gsi_context *gsi_ctx; + int qptype; + + chip_ctx = rdev->chip_ctx; + gsi_ctx = &rdev->gsi_ctx; + + qptype = __from_ib_qp_type(init_attr->qp_type); + if (qptype == IB_QPT_MAX) { + dev_err(rdev_to_dev(rdev), "QP type 0x%x not supported", + qptype); + qptype = -EINVAL; + goto out; + } + + if (_is_chip_gen_p5_p7(chip_ctx) && init_attr->qp_type == IB_QPT_GSI) { + /* For Thor always force UD mode. */ + qptype = CMDQ_CREATE_QP_TYPE_GSI; + gsi_ctx->gsi_qp_mode = BNXT_RE_GSI_MODE_UD; + } +out: + return qptype; +} + +static int bnxt_re_init_qp_wqe_mode(struct bnxt_re_dev *rdev) +{ + return rdev->chip_ctx->modes.wqe_mode; +} + +static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd, + struct ib_qp_init_attr *init_attr, + struct ib_udata *udata) +{ + struct bnxt_qplib_dev_attr *dev_attr; + struct bnxt_re_ucontext *cntx = NULL; + struct ib_ucontext *context; + struct bnxt_qplib_qp *qplqp; + struct bnxt_re_dev *rdev; + struct bnxt_re_cq *cq; + int rc = 0, qptype; + + rdev = qp->rdev; + qplqp = &qp->qplib_qp; + dev_attr = rdev->dev_attr; + + if (udata) { + context = pd->ib_pd.uobject->context; + cntx = to_bnxt_re(context, struct bnxt_re_ucontext, ib_uctx); + } + + /* Setup misc params */ + qplqp->is_user = udata ? true : false; + qplqp->pd = &pd->qplib_pd; + qplqp->qp_handle = (u64)qplqp; + qplqp->sig_type = ((init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 
+ true : false); + qptype = bnxt_re_init_qp_type(rdev, init_attr); + if (qptype < 0) { + rc = qptype; + goto out; + } + qplqp->type = (u8)qptype; + qplqp->wqe_mode = bnxt_re_init_qp_wqe_mode(rdev); + ether_addr_copy(qplqp->smac, rdev->netdev->dev_addr); + + if (init_attr->qp_type == IB_QPT_RC) { + qplqp->max_rd_atomic = dev_attr->max_qp_rd_atom; + qplqp->max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom; + } + qplqp->mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu)); + qplqp->dpi = &rdev->dpi_privileged; /* Doorbell page */ + if (init_attr->create_flags) { + dev_dbg(rdev_to_dev(rdev), + "QP create flags 0x%x not supported", + init_attr->create_flags); + return -EOPNOTSUPP; + } + + /* Setup CQs */ + if (init_attr->send_cq) { + cq = to_bnxt_re(init_attr->send_cq, struct bnxt_re_cq, ib_cq); + if (!cq) { + dev_err(rdev_to_dev(rdev), "Send CQ not found"); + rc = -EINVAL; + goto out; + } + qplqp->scq = &cq->qplib_cq; + qp->scq = cq; + } + + if (init_attr->recv_cq) { + cq = to_bnxt_re(init_attr->recv_cq, struct bnxt_re_cq, ib_cq); + if (!cq) { + dev_err(rdev_to_dev(rdev), "Receive CQ not found"); + rc = -EINVAL; + goto out; + } + qplqp->rcq = &cq->qplib_cq; + qp->rcq = cq; + } + + /* Setup RQ/SRQ */ + rc = bnxt_re_init_rq_attr(qp, init_attr, cntx); + if (rc) + goto out; + if (init_attr->qp_type == IB_QPT_GSI) + bnxt_re_adjust_gsi_rq_attr(qp); + + /* Setup SQ */ + rc = bnxt_re_init_sq_attr(qp, init_attr, cntx); + if (rc) + goto out; + if (init_attr->qp_type == IB_QPT_GSI) + bnxt_re_adjust_gsi_sq_attr(qp, init_attr, cntx); + + if (udata) /* This will update DPI and qp_handle */ + rc = bnxt_re_init_user_qp(rdev, pd, qp, udata); +out: + return rc; +} + +static int bnxt_re_create_shadow_gsi(struct bnxt_re_qp *qp, + struct bnxt_re_pd *pd) +{ + struct bnxt_re_sqp_entries *sqp_tbl = NULL; + struct bnxt_re_dev *rdev; + struct bnxt_re_qp *sqp; + struct bnxt_re_ah *sah; + + rdev = qp->rdev; + /* Create a shadow QP to handle the QP1 traffic */ + sqp_tbl = kzalloc(sizeof(*sqp_tbl) * BNXT_RE_MAX_GSI_SQP_ENTRIES, + GFP_KERNEL); + if (!sqp_tbl) + return -ENOMEM; + rdev->gsi_ctx.sqp_tbl = sqp_tbl; + + sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res, &qp->qplib_qp); + if (!sqp) { + dev_err(rdev_to_dev(rdev), + "Failed to create Shadow QP for QP1"); + goto out; + } + + sqp->rcq = qp->rcq; + sqp->scq = qp->scq; + sah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res, + &qp->qplib_qp); + if (!sah) { + bnxt_qplib_destroy_qp(&rdev->qplib_res, + &sqp->qplib_qp); + dev_err(rdev_to_dev(rdev), + "Failed to create AH entry for ShadowQP"); + goto out; + } + rdev->gsi_ctx.gsi_sqp = sqp; + rdev->gsi_ctx.gsi_sah = sah; + + return 0; +out: + kfree(sqp_tbl); + return -ENODEV; +} + +static int __get_rq_hdr_buf_size(u8 gsi_mode) +{ + return (gsi_mode == BNXT_RE_GSI_MODE_ALL) ? + BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2 : + BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE; +} + +static int __get_sq_hdr_buf_size(u8 gsi_mode) +{ + return (gsi_mode != BNXT_RE_GSI_MODE_ROCE_V1) ? 
+ BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2 : + BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE; +} + +static int bnxt_re_create_gsi_qp(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd) +{ + struct bnxt_qplib_qp *qplqp; + struct bnxt_qplib_res *res; + struct bnxt_re_dev *rdev; + u32 sstep, rstep; + u8 gsi_mode; + int rc = 0; + + rdev = qp->rdev; + qplqp = &qp->qplib_qp; + res = &rdev->qplib_res; + gsi_mode = rdev->gsi_ctx.gsi_qp_mode; + + rstep = __get_rq_hdr_buf_size(gsi_mode); + sstep = __get_sq_hdr_buf_size(gsi_mode); + rc = bnxt_qplib_alloc_hdr_buf(res, qplqp, sstep, rstep); + if (rc) + return rc; + + rc = bnxt_qplib_create_qp1(res, qplqp); + if (rc) { + dev_err(rdev_to_dev(rdev), "create HW QP1 failed!"); + goto err_free_hdr_buf; + } + + if (gsi_mode == BNXT_RE_GSI_MODE_ALL) { + rc = bnxt_re_create_shadow_gsi(qp, pd); + if (rc) + goto err_destroy_qp1; + } + + return 0; +err_destroy_qp1: + bnxt_qplib_destroy_qp(res, qplqp); +err_free_hdr_buf: + bnxt_qplib_free_hdr_buf(res, qplqp); + return rc; +} + +static bool bnxt_re_test_qp_limits(struct bnxt_re_dev *rdev, + struct ib_qp_init_attr *init_attr, + struct bnxt_qplib_dev_attr *dev_attr) +{ + bool rc = true; + int ilsize; + + ilsize = ALIGN(init_attr->cap.max_inline_data, sizeof(struct sq_sge)); + if ((init_attr->cap.max_send_wr > dev_attr->max_qp_wqes) || + (init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes) || + (init_attr->cap.max_send_sge > dev_attr->max_qp_sges) || + (init_attr->cap.max_recv_sge > dev_attr->max_qp_sges) || + (ilsize > dev_attr->max_inline_data)) { + dev_err(rdev_to_dev(rdev), "Create QP failed - max exceeded! " + "0x%x/0x%x 0x%x/0x%x 0x%x/0x%x " + "0x%x/0x%x 0x%x/0x%x", + init_attr->cap.max_send_wr, dev_attr->max_qp_wqes, + init_attr->cap.max_recv_wr, dev_attr->max_qp_wqes, + init_attr->cap.max_send_sge, dev_attr->max_qp_sges, + init_attr->cap.max_recv_sge, dev_attr->max_qp_sges, + init_attr->cap.max_inline_data, + dev_attr->max_inline_data); + rc = false; + } + return rc; +} + +ALLOC_QP_RET bnxt_re_create_qp(ALLOC_QP_IN *qp_in, + struct ib_qp_init_attr *qp_init_attr, + struct ib_udata *udata) +{ + struct bnxt_re_pd *pd; +#ifdef HAVE_QP_ALLOC_IN_IB_CORE + struct ib_pd *ib_pd = qp_in->pd; +#else + struct ib_pd *ib_pd = qp_in; +#endif + struct bnxt_qplib_dev_attr *dev_attr; + struct bnxt_re_dev *rdev; + u32 active_qps, tmp_qps; + struct bnxt_re_qp *qp; + int rc; + + pd = to_bnxt_re(ib_pd, struct bnxt_re_pd, ib_pd); + rdev = pd->rdev; + dev_attr = rdev->dev_attr; + + if (atomic_read(&rdev->stats.rsors.qp_count) >= dev_attr->max_qp) { + dev_err(rdev_to_dev(rdev), "Create QP failed - max exceeded(QPs Alloc'd %u of max %u)", + atomic_read(&rdev->stats.rsors.qp_count), dev_attr->max_qp); + rc = -EINVAL; + goto exit; + } + + rc = bnxt_re_test_qp_limits(rdev, qp_init_attr, dev_attr); + if (!rc) { + rc = -EINVAL; + goto exit; + } + qp = __get_qp_from_qp_in(qp_in, rdev); + if (!qp) { + rc = -ENOMEM; + goto exit; + } + qp->rdev = rdev; + + rc = bnxt_re_init_qp_attr(qp, pd, qp_init_attr, udata); + if (rc) + goto fail; + + if (qp_init_attr->qp_type == IB_QPT_GSI && + !_is_chip_gen_p5_p7(rdev->chip_ctx)) { + rc = bnxt_re_create_gsi_qp(qp, pd); + if (rc) + goto fail; + } else { + rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp); + if (rc) { + dev_err(rdev_to_dev(rdev), "create HW QP failed!"); + goto free_umem; + } + + if (udata) { + struct bnxt_re_qp_resp resp = {}; + + if (rdev->hdbr_enabled) { + rc = bnxt_re_hdbr_db_reg_qp(rdev, qp, pd, &resp); + if (rc) + goto reg_db_fail; + } + resp.qpid = qp->qplib_qp.id; + rc = bnxt_re_copy_to_udata(rdev, 
&resp, + min(udata->outlen, sizeof(resp)), + udata); + if (rc) + goto qp_destroy; + } else { + if (rdev->hdbr_enabled) { + rc = bnxt_re_hdbr_db_reg_qp(rdev, qp, NULL, NULL); + if (rc) + goto reg_db_fail; + } + } + } + + qp->ib_qp.qp_num = qp->qplib_qp.id; + if (qp_init_attr->qp_type == IB_QPT_GSI) + rdev->gsi_ctx.gsi_qp = qp; + spin_lock_init(&qp->sq_lock); + spin_lock_init(&qp->rq_lock); + INIT_LIST_HEAD(&qp->list); + mutex_lock(&rdev->qp_lock); + list_add_tail(&qp->list, &rdev->qp_list); + mutex_unlock(&rdev->qp_lock); + atomic_inc(&rdev->stats.rsors.qp_count); + active_qps = atomic_read(&rdev->stats.rsors.qp_count); + if (active_qps > atomic_read(&rdev->stats.rsors.max_qp_count)) + atomic_set(&rdev->stats.rsors.max_qp_count, active_qps); + bnxt_re_qp_info_add_qpinfo(rdev, qp); + BNXT_RE_DBR_LIST_ADD(rdev, qp, BNXT_RE_RES_TYPE_QP); + + bnxt_re_dump_debug_stats(rdev, active_qps); + + /* Get the counters for RC QPs and UD QPs */ + if (qp_init_attr->qp_type == IB_QPT_RC) { + tmp_qps = atomic_inc_return(&rdev->stats.rsors.rc_qp_count); + if (tmp_qps > atomic_read(&rdev->stats.rsors.max_rc_qp_count)) + atomic_set(&rdev->stats.rsors.max_rc_qp_count, tmp_qps); + } else if (qp_init_attr->qp_type == IB_QPT_UD) { + tmp_qps = atomic_inc_return(&rdev->stats.rsors.ud_qp_count); + if (tmp_qps > atomic_read(&rdev->stats.rsors.max_ud_qp_count)) + atomic_set(&rdev->stats.rsors.max_ud_qp_count, tmp_qps); + } + +#ifdef HAVE_QP_ALLOC_IN_IB_CORE + return 0; +#else + return &qp->ib_qp; +#endif + +qp_destroy: + if (rdev->hdbr_enabled) + bnxt_re_hdbr_db_unreg_qp(rdev, qp); +reg_db_fail: + bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp); +free_umem: + if (udata) { + if (qp->rumem && !IS_ERR(qp->rumem)) + ib_umem_release(qp->rumem); + if (qp->sumem && !IS_ERR(qp->sumem)) + ib_umem_release(qp->sumem); + } +fail: +#ifndef HAVE_QP_ALLOC_IN_IB_CORE + kfree(qp); +#endif +exit: +#ifdef HAVE_QP_ALLOC_IN_IB_CORE + return rc; +#else + return ERR_PTR(rc); +#endif +} + +static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev, + struct bnxt_re_qp *qp1_qp, + int qp_attr_mask) +{ + struct bnxt_re_qp *qp = rdev->gsi_ctx.gsi_sqp; + int rc = 0; + + if (qp_attr_mask & IB_QP_STATE) { + qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE; + qp->qplib_qp.state = qp1_qp->qplib_qp.state; + } + if (qp_attr_mask & IB_QP_PKEY_INDEX) { + qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY; + qp->qplib_qp.pkey_index = qp1_qp->qplib_qp.pkey_index; + } + + if (qp_attr_mask & IB_QP_QKEY) { + qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY; + /* Using a Random QKEY */ + qp->qplib_qp.qkey = BNXT_RE_QP_RANDOM_QKEY; + } + if (qp_attr_mask & IB_QP_SQ_PSN) { + qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN; + qp->qplib_qp.sq.psn = qp1_qp->qplib_qp.sq.psn; + } + + rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp); + if (rc) + dev_err(rdev_to_dev(rdev), "Modify Shadow QP for QP1 failed"); + return rc; +} + +#ifdef RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP +static u32 ipv4_from_gid(u8 *gid) +{ + return (gid[15] << 24 | gid[14] << 16 | gid[13] << 8 | gid[12]); +} + +static u16 get_source_port(struct bnxt_re_dev *rdev, + struct bnxt_re_qp *qp) +{ + u8 ip_off, data[48], smac[ETH_ALEN]; + u16 crc = 0, buf_len = 0, i; + u8 addr_len; + u32 qpn; + + if (qp->qplib_qp.nw_type == CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6) { + addr_len = 16; + ip_off = 0; + } else { + addr_len = 4; + ip_off = 12; + } + + if (rdev->binfo) { + memcpy(&smac[2], &qp->qplib_qp.lag_src_mac, 4); + smac[0] = qp->qplib_qp.smac[0]; 
+ smac[1] = qp->qplib_qp.smac[1]; + } else { + memcpy(smac, qp->qplib_qp.smac, ETH_ALEN); + } + + memset(data, 0, 48); + memcpy(data, qp->qplib_qp.ah.dmac, ETH_ALEN); + buf_len += ETH_ALEN; + + memcpy(data + buf_len, smac, ETH_ALEN); + buf_len += ETH_ALEN; + + memcpy(data + buf_len, qp->qplib_qp.ah.dgid.data + ip_off, addr_len); + buf_len += addr_len; + + memcpy(data + buf_len, qp->qp_info_entry.sgid.raw + ip_off, addr_len); + buf_len += addr_len; + + qpn = htonl(qp->qplib_qp.dest_qpn); + memcpy(data + buf_len, (u8 *)&qpn + 1, 3); + buf_len += 3; + + for (i = 0; i < buf_len; i++) + crc = crc16(crc, (data + i), 1); + + return crc; +} + +void bnxt_re_update_qp_info(struct bnxt_re_dev *rdev, struct bnxt_re_qp *qp) +{ + u16 type; + + type = __from_hw_to_ib_qp_type(qp->qplib_qp.type); + + /* User-space can extract ip address with sgid_index. */ + if (ipv6_addr_v4mapped((struct in6_addr *)&qp->qplib_qp.ah.dgid)) { + qp->qp_info_entry.s_ip.ipv4_addr = ipv4_from_gid(qp->qp_info_entry.sgid.raw); + qp->qp_info_entry.d_ip.ipv4_addr = ipv4_from_gid(qp->qplib_qp.ah.dgid.data); + } else { + memcpy(&qp->qp_info_entry.s_ip.ipv6_addr, qp->qp_info_entry.sgid.raw, + sizeof(qp->qp_info_entry.s_ip.ipv6_addr)); + memcpy(&qp->qp_info_entry.d_ip.ipv6_addr, qp->qplib_qp.ah.dgid.data, + sizeof(qp->qp_info_entry.d_ip.ipv6_addr)); + } + + if (type == IB_QPT_RC && + (qp->qplib_qp.nw_type == CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4 || + qp->qplib_qp.nw_type == CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6)) { + qp->qp_info_entry.s_port = get_source_port(rdev, qp); + } + qp->qp_info_entry.d_port = BNXT_RE_QP_DEST_PORT; +} +#endif + +void bnxt_qplib_manage_flush_qp(struct bnxt_re_qp *qp) +{ + struct bnxt_qplib_q *rq, *sq; + struct bnxt_re_dev *rdev; + unsigned long flags; + + if (qp->sumem) + return; + + rdev = qp->rdev; + + if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) { + rq = &qp->qplib_qp.rq; + sq = &qp->qplib_qp.sq; + + dev_dbg(rdev_to_dev(rdev), + "Move QP = %p to flush list\n", qp); + flags = bnxt_re_lock_cqs(qp); + bnxt_qplib_add_flush_qp(&qp->qplib_qp); + bnxt_re_unlock_cqs(qp, flags); + + if (sq->hwq.prod != sq->hwq.cons) + bnxt_re_handle_cqn(&qp->scq->qplib_cq); + + if (qp->rcq && (qp->rcq != qp->scq) && + (rq->hwq.prod != rq->hwq.cons)) + bnxt_re_handle_cqn(&qp->rcq->qplib_cq); + } + + if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) { + dev_dbg(rdev_to_dev(rdev), + "Move QP = %p out of flush list\n", qp); + flags = bnxt_re_lock_cqs(qp); + bnxt_qplib_clean_qp(&qp->qplib_qp); + bnxt_re_unlock_cqs(qp, flags); + } +} + +int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr, + int qp_attr_mask, struct ib_udata *udata) +{ + enum ib_qp_state curr_qp_state, new_qp_state; + struct bnxt_re_modify_qp_ex_resp resp = {}; + struct bnxt_re_modify_qp_ex_req ureq = {}; + struct bnxt_qplib_dev_attr *dev_attr; + struct bnxt_qplib_ppp *ppp = NULL; + struct bnxt_re_dev *rdev; + struct bnxt_re_qp *qp; +#ifdef RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP + IB_GID_ATTR *sgid_attr; +#ifndef HAVE_GID_ATTR_IN_IB_AH + struct ib_gid_attr gid_attr; + int status; +#endif /* HAVE_GID_ATTR_IN_IB_AH */ + union ib_gid sgid, *gid_ptr = NULL; + u8 nw_type; +#endif /* RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP */ + int rc, entries; + bool is_copy_to_udata = false; + bool is_qpmtu_high = false; + +#ifdef HAVE_IB_QP_ATTR_STANDARD_BITS + if (qp_attr_mask & ~IB_QP_ATTR_STANDARD_BITS) + return -EOPNOTSUPP; +#endif + + qp = to_bnxt_re(ib_qp, struct bnxt_re_qp, ib_qp); + rdev = qp->rdev; + dev_attr = rdev->dev_attr; + + 
qp->qplib_qp.modify_flags = 0; + ppp = &qp->qplib_qp.ppp; + if (qp_attr_mask & IB_QP_STATE) { + curr_qp_state = __to_ib_qp_state(qp->qplib_qp.cur_qp_state); + new_qp_state = qp_attr->qp_state; + if (!ib_modify_qp_is_ok_compat(curr_qp_state, new_qp_state, + ib_qp->qp_type, qp_attr_mask)) { + dev_err(rdev_to_dev(rdev),"invalid attribute mask=0x%x" + " specified for qpn=0x%x of type=0x%x" + " current_qp_state=0x%x, new_qp_state=0x%x\n", + qp_attr_mask, ib_qp->qp_num, ib_qp->qp_type, + curr_qp_state, new_qp_state); + return -EINVAL; + } + qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE; + qp->qplib_qp.state = __from_ib_qp_state(qp_attr->qp_state); + + if (udata && curr_qp_state == IB_QPS_RESET && + new_qp_state == IB_QPS_INIT) { + if (!ib_copy_from_udata(&ureq, udata, sizeof(ureq))) { + if (ureq.comp_mask & + BNXT_RE_COMP_MASK_MQP_EX_PPP_REQ_EN_MASK) { + ppp->req = BNXT_QPLIB_PPP_REQ; + ppp->dpi = ureq.dpi; + } + } + } + } + if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) { + qp->qplib_qp.modify_flags |= + CMDQ_MODIFY_QP_MODIFY_MASK_EN_SQD_ASYNC_NOTIFY; + qp->qplib_qp.en_sqd_async_notify = true; + } + if (qp_attr_mask & IB_QP_ACCESS_FLAGS) { + qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS; + qp->qplib_qp.access = + __from_ib_access_flags(qp_attr->qp_access_flags); + /* LOCAL_WRITE access must be set to allow RC receive */ + qp->qplib_qp.access |= BNXT_QPLIB_ACCESS_LOCAL_WRITE; + /*FIXME: Temporarily setup all the permisions */ + qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_REMOTE_WRITE; + qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_REMOTE_READ; + } + if (qp_attr_mask & IB_QP_PKEY_INDEX) { + qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY; + qp->qplib_qp.pkey_index = qp_attr->pkey_index; + } + if (qp_attr_mask & IB_QP_QKEY) { + qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY; + qp->qplib_qp.qkey = qp_attr->qkey; + } + if (qp_attr_mask & IB_QP_AV) { + qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_DGID | + CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL | + CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX | + CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT | + CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS | + CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC | + CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID; + memcpy(qp->qplib_qp.ah.dgid.data, qp_attr->ah_attr.grh.dgid.raw, + sizeof(qp->qplib_qp.ah.dgid.data)); + qp->qplib_qp.ah.flow_label = qp_attr->ah_attr.grh.flow_label; + qp->qplib_qp.ah.sgid_index = _get_sgid_index(rdev, + qp_attr->ah_attr.grh.sgid_index); + qp->qplib_qp.ah.host_sgid_index = qp_attr->ah_attr.grh.sgid_index; + qp->qplib_qp.ah.hop_limit = qp_attr->ah_attr.grh.hop_limit; + qp->qplib_qp.ah.traffic_class = + qp_attr->ah_attr.grh.traffic_class; + qp->qplib_qp.ah.sl = qp_attr->ah_attr.sl; +#ifdef HAVE_IB_AH_DMAC + ether_addr_copy(qp->qplib_qp.ah.dmac, ROCE_DMAC(&qp_attr->ah_attr)); +#endif +#ifdef RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP +#ifndef HAVE_GID_ATTR_IN_IB_AH + sgid_attr = &gid_attr; + status = bnxt_re_get_cached_gid(&rdev->ibdev, 1, + qp_attr->ah_attr.grh.sgid_index, + &sgid, &sgid_attr, + &qp_attr->ah_attr.grh, NULL); + if (!status) + dev_put(sgid_attr->ndev); + gid_ptr = &sgid; +#else + sgid_attr = qp_attr->ah_attr.grh.sgid_attr; + gid_ptr = (union ib_gid *)&sgid_attr->gid; +#endif + if (sgid_attr->ndev) { + memcpy(qp->qplib_qp.smac, sgid_attr->ndev->dev_addr, + ETH_ALEN); + nw_type = bnxt_re_gid_to_network_type(sgid_attr, &sgid); + dev_dbg(rdev_to_dev(rdev), + "Connection using the nw_type %d\n", nw_type); + switch (nw_type) { + case RDMA_NETWORK_IPV4: + 
qp->qplib_qp.nw_type = + CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4; + break; + case RDMA_NETWORK_IPV6: + qp->qplib_qp.nw_type = + CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6; + break; + default: + qp->qplib_qp.nw_type = + CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV1; + break; + } + } + memcpy(&qp->qp_info_entry.sgid, gid_ptr, sizeof(qp->qp_info_entry.sgid)); +#else + qp->qplib_qp.nw_type = + CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV1; +#endif + } + + /* MTU settings allowed only during INIT -> RTR */ + if (qp_attr->qp_state == IB_QPS_RTR) { + bnxt_re_init_qpmtu(qp, rdev->netdev->mtu, qp_attr_mask, qp_attr, + &is_qpmtu_high); + if (udata && !ib_copy_from_udata(&ureq, udata, sizeof(ureq))) { + if (ureq.comp_mask & BNXT_RE_COMP_MASK_MQP_EX_PATH_MTU_MASK) { + resp.comp_mask |= BNXT_RE_COMP_MASK_MQP_EX_PATH_MTU_MASK; + resp.path_mtu = qp->qplib_qp.mtu; + is_copy_to_udata = true; + } else if (is_qpmtu_high) { + dev_err(rdev_to_dev(rdev), "qp %#x invalid mtu", + qp->qplib_qp.id); + return -EINVAL; + } + } + } + + if (qp_attr_mask & IB_QP_TIMEOUT) { + qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT; + qp->qplib_qp.timeout = qp_attr->timeout; + } + if (qp_attr_mask & IB_QP_RETRY_CNT) { + qp->qplib_qp.modify_flags |= + CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT; + qp->qplib_qp.retry_cnt = qp_attr->retry_cnt; + } + if (qp_attr_mask & IB_QP_RNR_RETRY) { + qp->qplib_qp.modify_flags |= + CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY; + qp->qplib_qp.rnr_retry = qp_attr->rnr_retry; + } + if (qp_attr_mask & IB_QP_MIN_RNR_TIMER) { + qp->qplib_qp.modify_flags |= + CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER; + qp->qplib_qp.min_rnr_timer = qp_attr->min_rnr_timer; + } + if (qp_attr_mask & IB_QP_RQ_PSN) { + qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN; + qp->qplib_qp.rq.psn = qp_attr->rq_psn; + } + if (qp_attr_mask & IB_QP_MAX_QP_RD_ATOMIC) { + qp->qplib_qp.modify_flags |= + CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC; + /* Cap the max_rd_atomic to device max */ + if (qp_attr->max_rd_atomic > dev_attr->max_qp_rd_atom) + dev_dbg(rdev_to_dev(rdev), + "max_rd_atomic requested %d is > device max %d\n", + qp_attr->max_rd_atomic, + dev_attr->max_qp_rd_atom); + qp->qplib_qp.max_rd_atomic = min_t(u32, qp_attr->max_rd_atomic, + dev_attr->max_qp_rd_atom); + } + if (qp_attr_mask & IB_QP_SQ_PSN) { + qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN; + qp->qplib_qp.sq.psn = qp_attr->sq_psn; + } + if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) { + if (qp_attr->max_dest_rd_atomic > + dev_attr->max_qp_init_rd_atom) { + dev_err(rdev_to_dev(rdev), + "max_dest_rd_atomic requested %d is > device max %d\n", + qp_attr->max_dest_rd_atomic, + dev_attr->max_qp_init_rd_atom); + return -EINVAL; + } + qp->qplib_qp.modify_flags |= + CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC; + qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic; + } + if (qp_attr_mask & IB_QP_CAP) { + qp->qplib_qp.modify_flags |= + CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SIZE | + CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SIZE | + CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SGE | + CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SGE | + CMDQ_MODIFY_QP_MODIFY_MASK_MAX_INLINE_DATA; + if ((qp_attr->cap.max_send_wr >= dev_attr->max_qp_wqes) || + (qp_attr->cap.max_recv_wr >= dev_attr->max_qp_wqes) || + (qp_attr->cap.max_send_sge >= dev_attr->max_qp_sges) || + (qp_attr->cap.max_recv_sge >= dev_attr->max_qp_sges) || + (qp_attr->cap.max_inline_data >= + dev_attr->max_inline_data)) { + dev_err(rdev_to_dev(rdev), + "Create QP failed - max exceeded"); + return -EINVAL; + } + entries = 
roundup_pow_of_two(qp_attr->cap.max_send_wr); + if (entries > dev_attr->max_qp_wqes) + entries = dev_attr->max_qp_wqes; + entries = min_t(u32, entries, dev_attr->max_qp_wqes); + qp->qplib_qp.sq.max_wqe = entries; + qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe - + qp_attr->cap.max_send_wr; + /* + * Reserving one slot for Phantom WQE. Some application can + * post one extra entry in this case. Allowing this to avoid + * unexpected Queue full condition + */ + qp->qplib_qp.sq.q_full_delta -= 1; + qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge; + if (qp->qplib_qp.rq.max_wqe) { + entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr); + if (entries > dev_attr->max_qp_wqes) + entries = dev_attr->max_qp_wqes; + qp->qplib_qp.rq.max_wqe = entries; + qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe - + qp_attr->cap.max_recv_wr; + qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge; + } else { + /* SRQ was used prior, just ignore the RQ caps */ + } + } + if (qp_attr_mask & IB_QP_DEST_QPN) { + qp->qplib_qp.modify_flags |= + CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID; + qp->qplib_qp.dest_qpn = qp_attr->dest_qp_num; + } + + rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp); + if (rc) { + dev_err(rdev_to_dev(rdev), "Modify HW QP failed!"); + return rc; + } + if (qp_attr_mask & IB_QP_STATE) + bnxt_qplib_manage_flush_qp(qp); + if (ureq.comp_mask & BNXT_RE_COMP_MASK_MQP_EX_PPP_REQ_EN_MASK && + ppp->st_idx_en & CREQ_MODIFY_QP_RESP_PINGPONG_PUSH_ENABLED) { + resp.comp_mask |= BNXT_RE_COMP_MASK_MQP_EX_PPP_REQ_EN; + resp.ppp_st_idx = ppp->st_idx_en >> + BNXT_QPLIB_PPP_ST_IDX_SHIFT; + is_copy_to_udata = true; + rdev->ppp_stats.ppp_enabled_qps++; + } + + if (is_copy_to_udata) { + rc = bnxt_re_copy_to_udata(rdev, &resp, + min(udata->outlen, sizeof(resp)), + udata); + if (rc) + return rc; + } + + if (ib_qp->qp_type == IB_QPT_GSI && + rdev->gsi_ctx.gsi_qp_mode == BNXT_RE_GSI_MODE_ALL && + rdev->gsi_ctx.gsi_sqp) + rc = bnxt_re_modify_shadow_qp(rdev, qp, qp_attr_mask); +#ifdef RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP + /* + * Update info when qp_info_info + */ + bnxt_re_update_qp_info(rdev, qp); +#endif +#ifdef POST_QP1_DUMMY_WQE + if (!(_is_chip_gen_p5_p7(rdev->chip_ctx)) && + ib_qp->qp_type == IB_QPT_GSI && + qp_attr->qp_state == IB_QPS_RTS) { + /* To suppress the WQE completion, + * temporarily change the sig_type of QP to 0. + * WQE completion is issued based on this flag + * inside qplib_post_send. Restore sig_type + * once posting is done. + */ + u8 tmp_sig_type = qp->qplib_qp.sig_type; + qp->qplib_qp.sig_type = 0; + dev_dbg(rdev_to_dev(rdev), "posting dummy wqe"); + post_qp1_dummy_wqe(qp); + qp->qplib_qp.sig_type = tmp_sig_type; + } +#endif /* POST_QP1_DUMMY_WQE */ + return rc; +} + +int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr, + int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr) +{ + struct bnxt_re_qp *qp = to_bnxt_re(ib_qp, struct bnxt_re_qp, ib_qp); + struct bnxt_re_dev *rdev = qp->rdev; + struct bnxt_qplib_qp *qplib_qp; + int rc; + + qplib_qp = kcalloc(1, sizeof(*qplib_qp), GFP_KERNEL); + if (!qplib_qp) + return -ENOMEM; + + qplib_qp->id = qp->qplib_qp.id; + qplib_qp->ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index; + + rc = bnxt_qplib_query_qp(&rdev->qplib_res, qplib_qp); + if (rc) { + dev_err(rdev_to_dev(rdev), "Query HW QP (0x%x) failed! 
rc = %d", + qplib_qp->id, rc); + goto free_mem; + } + qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state); + qp_attr->cur_qp_state = __to_ib_qp_state(qplib_qp->cur_qp_state); + qp_attr->en_sqd_async_notify = qplib_qp->en_sqd_async_notify ? 1 : 0; + qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp->access); + qp_attr->pkey_index = qplib_qp->pkey_index; + qp_attr->qkey = qplib_qp->qkey; +#ifdef HAVE_ROCE_AH_ATTR + qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE; +#endif + memcpy(qp_attr->ah_attr.grh.dgid.raw, qplib_qp->ah.dgid.data, + sizeof(qplib_qp->ah.dgid.data)); + qp_attr->ah_attr.grh.flow_label = qplib_qp->ah.flow_label; + qp_attr->ah_attr.grh.sgid_index = qplib_qp->ah.host_sgid_index; + qp_attr->ah_attr.grh.hop_limit = qplib_qp->ah.hop_limit; + qp_attr->ah_attr.grh.traffic_class = qplib_qp->ah.traffic_class; + qp_attr->ah_attr.sl = qplib_qp->ah.sl; +#ifdef HAVE_IB_AH_DMAC + ether_addr_copy(ROCE_DMAC(&qp_attr->ah_attr), qplib_qp->ah.dmac); +#endif + qp_attr->path_mtu = __to_ib_mtu(qplib_qp->path_mtu); + qp_attr->timeout = qplib_qp->timeout; + qp_attr->retry_cnt = qplib_qp->retry_cnt; + qp_attr->rnr_retry = qplib_qp->rnr_retry; + qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer; + qp_attr->rq_psn = qplib_qp->rq.psn; + qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic; + qp_attr->sq_psn = qplib_qp->sq.psn; + qp_attr->max_dest_rd_atomic = qplib_qp->max_dest_rd_atomic; + qp_init_attr->sq_sig_type = qplib_qp->sig_type ? IB_SIGNAL_ALL_WR : + IB_SIGNAL_REQ_WR; + qp_attr->dest_qp_num = qplib_qp->dest_qpn; + + qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe; + qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge; + qp_attr->cap.max_recv_wr = qp->qplib_qp.rq.max_wqe; + qp_attr->cap.max_recv_sge = qp->qplib_qp.rq.max_sge; + qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data; + qp_init_attr->cap = qp_attr->cap; + +free_mem: + kfree(qplib_qp); + return rc; +} + +/* Builders */ + +/* For Raw, the application is responsible to build the entire packet */ +static void bnxt_re_build_raw_send(CONST_STRUCT ib_send_wr *wr, + struct bnxt_qplib_swqe *wqe) +{ + switch (wr->send_flags) { + case IB_SEND_IP_CSUM: + wqe->rawqp1.lflags |= SQ_SEND_RAWETH_QP1_LFLAGS_IP_CHKSUM; + break; + default: + /* Pad HW RoCE iCRC */ + wqe->rawqp1.lflags |= SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC; + break; + } +} + +/* For QP1, the driver must build the entire RoCE (v1/v2) packet hdr + as according to the sgid and AV + */ +static int bnxt_re_build_qp1_send(struct bnxt_re_qp *qp, CONST_STRUCT ib_send_wr *wr, + struct bnxt_qplib_swqe *wqe, int payload_size) +{ +#ifdef HAVE_IB_RDMA_WR + struct bnxt_re_ah *ah = to_bnxt_re(ud_wr(wr)->ah, struct bnxt_re_ah, + ib_ah); +#else + struct bnxt_re_ah *ah = to_bnxt_re(wr->wr.ud.ah, struct bnxt_re_ah, + ib_ah); +#endif + struct bnxt_qplib_ah *qplib_ah = &ah->qplib_ah; + struct bnxt_qplib_sge sge; + int i, rc = 0, size; + union ib_gid sgid; + u16 vlan_id; + u8 *ptmac; + void *buf; + + memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr)); + + /* Get sgid */ + rc = bnxt_re_query_gid(&qp->rdev->ibdev, 1, qplib_ah->sgid_index, &sgid); + if (rc) + return rc; + + /* ETH */ + qp->qp1_hdr.eth_present = 1; + ptmac = ah->qplib_ah.dmac; + memcpy(qp->qp1_hdr.eth.dmac_h, ptmac, 4); + ptmac += 4; + memcpy(qp->qp1_hdr.eth.dmac_l, ptmac, 2); + + ptmac = qp->qplib_qp.smac; + memcpy(qp->qp1_hdr.eth.smac_h, ptmac, 2); + ptmac += 2; + memcpy(qp->qp1_hdr.eth.smac_l, ptmac, 4); + + qp->qp1_hdr.eth.type = cpu_to_be16(BNXT_QPLIB_ETHTYPE_ROCEV1); + + /* For vlan, check the sgid for vlan existence */ + vlan_id = 
rdma_get_vlan_id(&sgid); + if (vlan_id && vlan_id < 0x1000) { + qp->qp1_hdr.vlan_present = 1; + qp->qp1_hdr.eth.type = cpu_to_be16(ETH_P_8021Q); + } + /* GRH */ + qp->qp1_hdr.grh_present = 1; + qp->qp1_hdr.grh.ip_version = 6; + qp->qp1_hdr.grh.payload_length = + cpu_to_be16((IB_BTH_BYTES + IB_DETH_BYTES + payload_size + 7) + & ~3); + qp->qp1_hdr.grh.next_header = 0x1b; + memcpy(qp->qp1_hdr.grh.source_gid.raw, sgid.raw, sizeof(sgid)); + memcpy(qp->qp1_hdr.grh.destination_gid.raw, qplib_ah->dgid.data, + sizeof(sgid)); + + /* BTH */ + if (wr->opcode == IB_WR_SEND_WITH_IMM) { + qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE; + qp->qp1_hdr.immediate_present = 1; + } else { + qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY; + } + if (wr->send_flags & IB_SEND_SOLICITED) + qp->qp1_hdr.bth.solicited_event = 1; + qp->qp1_hdr.bth.pad_count = (4 - payload_size) & 3; + /* P_key for QP1 is for all members */ + qp->qp1_hdr.bth.pkey = cpu_to_be16(0xFFFF); + qp->qp1_hdr.bth.destination_qpn = IB_QP1; + qp->qp1_hdr.bth.ack_req = 0; + qp->send_psn++; + qp->send_psn &= BTH_PSN_MASK; + qp->qp1_hdr.bth.psn = cpu_to_be32(qp->send_psn); + /* DETH */ + /* Use the privileged Q_Key for QP1 */ + qp->qp1_hdr.deth.qkey = cpu_to_be32(IB_QP1_QKEY); + qp->qp1_hdr.deth.source_qpn = IB_QP1; + + /* Pack the QP1 to the transmit buffer */ + buf = bnxt_qplib_get_qp1_sq_buf(&qp->qplib_qp, &sge); + if (!buf) { + dev_err(rdev_to_dev(qp->rdev), "QP1 buffer is empty!"); + return -ENOMEM; + } + size = ib_ud_header_pack(&qp->qp1_hdr, buf); + for (i = wqe->num_sge; i; i--) { + wqe->sg_list[i].addr = wqe->sg_list[i - 1].addr; + wqe->sg_list[i].lkey = wqe->sg_list[i - 1].lkey; + wqe->sg_list[i].size = wqe->sg_list[i - 1].size; + } + wqe->sg_list[0].addr = sge.addr; + wqe->sg_list[0].lkey = sge.lkey; + wqe->sg_list[0].size = sge.size; + wqe->num_sge++; + + return rc; +} + +#ifdef ENABLE_ROCEV2_QP1 +/* Routine for sending QP1 packets for RoCE V1 and V2 + */ +static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp, + CONST_STRUCT ib_send_wr *wr, + struct bnxt_qplib_swqe *wqe, + int payload_size) +{ + struct bnxt_re_dev *rdev = qp->rdev; +#ifdef HAVE_IB_RDMA_WR + struct bnxt_re_ah *ah = to_bnxt_re(ud_wr(wr)->ah, struct bnxt_re_ah, + ib_ah); +#else + struct bnxt_re_ah *ah = to_bnxt_re(wr->wr.ud.ah, struct bnxt_re_ah, + ib_ah); +#endif + struct bnxt_qplib_ah *qplib_ah = &ah->qplib_ah; + struct bnxt_qplib_sge sge; + u8 nw_type; + u16 ether_type; +#ifdef RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP + IB_GID_ATTR *sgid_attr; + union ib_gid *psgid; +#ifndef HAVE_GID_ATTR_IN_IB_AH + struct ib_device *ibdev = &qp->rdev->ibdev; + IB_GID_ATTR gid_attr; + union ib_gid sgid; + +#endif /* HAVE_GID_ATTR_IN_IB_AH */ +#endif /* RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP */ + union ib_gid dgid; + bool is_eth = false; + bool is_vlan = false; + bool is_grh = false; + bool is_udp = false; + u8 ip_version = 0, gsi_mode; + u16 vlan_id = 0xFFFF; + void *buf; + int i, rc = 0, size; + unsigned int dscp; + uint8_t *ip_hdr; + + memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr)); + +#ifndef HAVE_GID_ATTR_IN_IB_AH + sgid_attr = &gid_attr; + psgid = &sgid; + rc = bnxt_re_get_cached_gid(ibdev, 1, qplib_ah->host_sgid_index, &sgid, + &sgid_attr, NULL, &ah->ib_ah); + if (rc) { + dev_err(rdev_to_dev(qp->rdev), + "Failed to query gid at index %d", + qplib_ah->host_sgid_index); + return rc; + } + + if (sgid_attr->ndev) { + if (is_vlan_dev(sgid_attr->ndev)) + vlan_id = vlan_dev_vlan_id(sgid_attr->ndev); + dev_put(sgid_attr->ndev); + } +#else + sgid_attr = ah->ib_ah.sgid_attr; + psgid = 
(union ib_gid *)&sgid_attr->gid; + if ((sgid_attr->ndev) && is_vlan_dev(sgid_attr->ndev)) + vlan_id = vlan_dev_vlan_id(sgid_attr->ndev); +#endif + + /* Get network header type for this GID */ + nw_type = bnxt_re_gid_to_network_type(sgid_attr, psgid); + gsi_mode = rdev->gsi_ctx.gsi_qp_mode; + switch (nw_type) { + case RDMA_NETWORK_IPV4: + if (gsi_mode != BNXT_RE_GSI_MODE_ALL && + gsi_mode != BNXT_RE_GSI_MODE_ROCE_V2_IPV4) { + rc = -EINVAL; + goto done; + } + nw_type = BNXT_RE_ROCEV2_IPV4_PACKET; + break; + case RDMA_NETWORK_IPV6: + if (gsi_mode != BNXT_RE_GSI_MODE_ALL && + gsi_mode != BNXT_RE_GSI_MODE_ROCE_V2_IPV6) { + rc = -EINVAL; + goto done; + } + nw_type = BNXT_RE_ROCEV2_IPV6_PACKET; + break; + default: + if (gsi_mode != BNXT_RE_GSI_MODE_ALL) { + rc = -EINVAL; + goto done; + } + nw_type = BNXT_RE_ROCE_V1_PACKET; + break; + } + memcpy(&dgid.raw, &qplib_ah->dgid, 16); + is_udp = sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP; + if (is_udp) { + if (ipv6_addr_v4mapped((struct in6_addr *)psgid)) { + ip_version = 4; + ether_type = ETH_P_IP; + } else { + ip_version = 6; + ether_type = ETH_P_IPV6; + } + is_grh = false; + } else { + ether_type = BNXT_QPLIB_ETHTYPE_ROCEV1; + is_grh = true; + } + + is_eth = true; + is_vlan = (vlan_id && (vlan_id < 0x1000)) ? true : false; + + dev_dbg(rdev_to_dev(qp->rdev), + "eth = %d grh = %d udp = %d vlan = %d ip_ver = %d\n", + is_eth, is_grh, is_udp, is_vlan, ip_version); + + ib_ud_header_init(payload_size, !is_eth, is_eth, is_vlan, is_grh, + ip_version, is_udp, 0, &qp->qp1_hdr); + + /* ETH */ + ether_addr_copy(qp->qp1_hdr.eth.dmac_h, ah->qplib_ah.dmac); + ether_addr_copy(qp->qp1_hdr.eth.smac_h, qp->qplib_qp.smac); + + /* For vlan, check the sgid for vlan existence */ + if (!is_vlan) { + qp->qp1_hdr.eth.type = cpu_to_be16(ether_type); + } else { + qp->qp1_hdr.vlan.type = cpu_to_be16(ether_type); + qp->qp1_hdr.vlan.tag = cpu_to_be16(vlan_id); + } + + if (is_grh || (ip_version == 6)) { + memcpy(qp->qp1_hdr.grh.source_gid.raw, psgid->raw, sizeof(*psgid)); + memcpy(qp->qp1_hdr.grh.destination_gid.raw, qplib_ah->dgid.data, + sizeof(*psgid)); + qp->qp1_hdr.grh.hop_limit = qplib_ah->hop_limit; + } + + if (ip_version == 4) { + /* TODO */ + qp->qp1_hdr.ip4.tos = 0; + qp->qp1_hdr.ip4.id = 0; + qp->qp1_hdr.ip4.frag_off = htons(IP_DF); + qp->qp1_hdr.ip4.ttl = qplib_ah->hop_limit; + + memcpy(&qp->qp1_hdr.ip4.saddr, psgid->raw + 12, 4); + memcpy(&qp->qp1_hdr.ip4.daddr, qplib_ah->dgid.data + 12, 4); + qp->qp1_hdr.ip4.check = ib_ud_ip4_csum(&qp->qp1_hdr); + } + + if (is_udp) { + qp->qp1_hdr.udp.dport = htons(ROCE_V2_UDP_DPORT); + qp->qp1_hdr.udp.sport = htons(BNXT_RE_ROCE_V2_UDP_SPORT); + qp->qp1_hdr.udp.csum = 0; + } + + /* BTH */ + if (wr->opcode == IB_WR_SEND_WITH_IMM) { + qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE; + qp->qp1_hdr.immediate_present = 1; + } else { + qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY; + } + if (wr->send_flags & IB_SEND_SOLICITED) + qp->qp1_hdr.bth.solicited_event = 1; + /* pad_count */ + qp->qp1_hdr.bth.pad_count = (4 - payload_size) & 3; + + /* P_key for QP1 is for all members */ + qp->qp1_hdr.bth.pkey = cpu_to_be16(0xFFFF); + qp->qp1_hdr.bth.destination_qpn = IB_QP1; + qp->qp1_hdr.bth.ack_req = 0; + qp->send_psn++; + qp->send_psn &= BTH_PSN_MASK; + qp->qp1_hdr.bth.psn = cpu_to_be32(qp->send_psn); + /* DETH */ + /* Use the privileged Q_Key for QP1 */ + qp->qp1_hdr.deth.qkey = cpu_to_be32(IB_QP1_QKEY); + qp->qp1_hdr.deth.source_qpn = IB_QP1; + + /* Pack the QP1 to the transmit buffer */ + buf = 
bnxt_qplib_get_qp1_sq_buf(&qp->qplib_qp, &sge); + if (!buf) { + dev_err(rdev_to_dev(qp->rdev), "QP1 buffer is empty!"); + rc = -ENOMEM; + goto done; + } + size = ib_ud_header_pack(&qp->qp1_hdr, buf); + for (i = wqe->num_sge; i; i--) { + wqe->sg_list[i].addr = wqe->sg_list[i - 1].addr; + wqe->sg_list[i].lkey = wqe->sg_list[i - 1].lkey; + wqe->sg_list[i].size = wqe->sg_list[i - 1].size; + } + dscp = (qp->rdev->cc_param.tos_dscp << 2) | + qp->rdev->cc_param.tos_ecn; + /* Fill dscp values on this raw ethernet packet */ + if (dscp) { + u8 len = is_vlan ? VLAN_ETH_HLEN : ETH_HLEN; + ip_hdr = (u8 *) buf + len; + if (ip_version == 4) + ipv4_copy_dscp(dscp, (struct iphdr *)ip_hdr); + else + ipv6_copy_dscp(dscp, (struct ipv6hdr *)ip_hdr); + } + /* + * Max Header buf size for IPV6 RoCE V2 is 86, + * which is same as the QP1 SQ header buffer. + * Header buf size for IPV4 RoCE V2 can be 66. + * ETH(14) + VLAN(4)+ IP(20) + UDP (8) + BTH(20). + * Subtract 20 bytes from QP1 SQ header buf size + */ + if (is_udp && ip_version == 4) + sge.size -= 20; + /* + * Max Header buf size for RoCE V1 is 78. + * ETH(14) + VLAN(4) + GRH(40) + BTH(20). + * Subtract 8 bytes from QP1 SQ header buf size + */ + if (!is_udp) + sge.size -= 8; + /* Subtract 4 bytes for non vlan packets */ + if (!is_vlan) + sge.size -= 4; + wqe->sg_list[0].addr = sge.addr; + wqe->sg_list[0].lkey = sge.lkey; + wqe->sg_list[0].size = sge.size; + wqe->num_sge++; +done: + return rc; +} +#endif + +static int bnxt_re_build_gsi_send(struct bnxt_re_qp *qp, + CONST_STRUCT ib_send_wr *wr, + struct bnxt_qplib_swqe *wqe) +{ + struct bnxt_re_dev *rdev; + int rc, indx, len = 0; + + rdev = qp->rdev; + + /* Mode UD is applicable to Gen P5 only */ + if (rdev->gsi_ctx.gsi_qp_mode == BNXT_RE_GSI_MODE_UD) + return 0; + + for (indx = 0; indx < wr->num_sge; indx++) { + wqe->sg_list[indx].addr = wr->sg_list[indx].addr; + wqe->sg_list[indx].lkey = wr->sg_list[indx].lkey; + wqe->sg_list[indx].size = wr->sg_list[indx].length; + len += wr->sg_list[indx].length; + } +#ifdef ENABLE_ROCEV2_QP1 + if (rdev->gsi_ctx.gsi_qp_mode != BNXT_RE_GSI_MODE_ROCE_V1) + rc = bnxt_re_build_qp1_send_v2(qp, wr, wqe, len); + else + rc = bnxt_re_build_qp1_send(qp, wr, wqe, len); +#else + rc = bnxt_re_build_qp1_send(qp, wr, wqe, len); +#endif + wqe->rawqp1.lflags |= SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC; + + return rc; +} + +/* For the MAD layer, it only provides the recv SGE the size of + ib_grh + MAD datagram. No Ethernet headers, Ethertype, BTH, DETH, + nor RoCE iCRC. The Cu+ solution must provide buffer for the entire + receive packet (334 bytes) with no VLAN and then copy the GRH + and the MAD datagram out to the provided SGE. 
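+ (The 334 bytes break down as ETH 14 + GRH 40 + BTH 12 + DETH 8 +
+ MAD 256 + iCRC 4.)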
+*/ + +static int bnxt_re_build_qp1_recv(struct bnxt_re_qp *qp, + CONST_STRUCT ib_recv_wr *wr, + struct bnxt_qplib_swqe *wqe) +{ + struct bnxt_re_dev *rdev = qp->rdev; + struct bnxt_qplib_sge ref, sge; + u8 udp_hdr_size = 0; + u8 ip_hdr_size = 0; + int size; + + if (bnxt_qplib_get_qp1_rq_buf(&qp->qplib_qp, &sge)) { + /* Create 5 SGEs as according to the following: + * Ethernet header (14) + * ib_grh (40) - as provided from the wr + * ib_bth + ib_deth + UDP(RoCE v2 only) (28) + * MAD (256) - as provided from the wr + * iCRC (4) + */ + + /* Set RoCE v2 header size and offsets */ + if (rdev->gsi_ctx.gsi_qp_mode == BNXT_RE_GSI_MODE_ROCE_V2_IPV4) + ip_hdr_size = 20; + if (rdev->gsi_ctx.gsi_qp_mode != BNXT_RE_GSI_MODE_ROCE_V1) + udp_hdr_size = 8; + + /* Save the reference from ULP */ + ref.addr = wr->sg_list[0].addr; + ref.lkey = wr->sg_list[0].lkey; + ref.size = wr->sg_list[0].length; + + /* SGE 1 */ + size = sge.size; + wqe->sg_list[0].addr = sge.addr; + wqe->sg_list[0].lkey = sge.lkey; + wqe->sg_list[0].size = BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE; + size -= wqe->sg_list[0].size; + if (size <= 0) { + dev_err(rdev_to_dev(qp->rdev),"QP1 rq buffer is empty!"); + return -ENOMEM; + } + sge.size = (u32)size; + sge.addr += wqe->sg_list[0].size; + + /* SGE 2 */ + /* In case of RoCE v2 ipv4 lower 20 bytes should have IP hdr */ + wqe->sg_list[1].addr = ref.addr + ip_hdr_size; + wqe->sg_list[1].lkey = ref.lkey; + wqe->sg_list[1].size = sizeof(struct ib_grh) - ip_hdr_size; + ref.size -= wqe->sg_list[1].size; + if (ref.size <= 0) { + dev_err(rdev_to_dev(qp->rdev), + "QP1 ref buffer is empty!"); + return -ENOMEM; + } + ref.addr += wqe->sg_list[1].size + ip_hdr_size; + + /* SGE 3 */ + wqe->sg_list[2].addr = sge.addr; + wqe->sg_list[2].lkey = sge.lkey; + wqe->sg_list[2].size = BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE + + udp_hdr_size; + size -= wqe->sg_list[2].size; + if (size <= 0) { + dev_err(rdev_to_dev(qp->rdev), + "QP1 rq buffer is empty!"); + return -ENOMEM; + } + sge.size = (u32)size; + sge.addr += wqe->sg_list[2].size; + + /* SGE 4 */ + wqe->sg_list[3].addr = ref.addr; + wqe->sg_list[3].lkey = ref.lkey; + wqe->sg_list[3].size = ref.size; + ref.size -= wqe->sg_list[3].size; + if (ref.size) { + dev_err(rdev_to_dev(qp->rdev), + "QP1 ref buffer is incorrect!"); + return -ENOMEM; + } + /* SGE 5 */ + wqe->sg_list[4].addr = sge.addr; + wqe->sg_list[4].lkey = sge.lkey; + wqe->sg_list[4].size = sge.size; + size -= wqe->sg_list[4].size; + if (size) { + dev_err(rdev_to_dev(qp->rdev), + "QP1 rq buffer is incorrect!"); + return -ENOMEM; + } + sge.size = (u32)size; + wqe->num_sge = 5; + } else { + dev_err(rdev_to_dev(qp->rdev), "QP1 buffer is empty!"); + return -ENOMEM; + } + + return 0; +} + +static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp, + CONST_STRUCT ib_recv_wr *wr, + struct bnxt_qplib_swqe *wqe) +{ + struct bnxt_re_sqp_entries *sqp_entry; + struct bnxt_qplib_sge sge; + struct bnxt_re_dev *rdev; + u32 rq_prod_index; + + rdev = qp->rdev; + + rq_prod_index = bnxt_qplib_get_rq_prod_index(&qp->qplib_qp); + + if (bnxt_qplib_get_qp1_rq_buf(&qp->qplib_qp, &sge)) { + /* Create 1 SGE to receive the entire + * ethernet packet + */ + /* SGE 1 */ + wqe->sg_list[0].addr = sge.addr; + /* TODO check the lkey to be used */ + wqe->sg_list[0].lkey = sge.lkey; + wqe->sg_list[0].size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2; + if (sge.size < wqe->sg_list[0].size) { + dev_err(rdev_to_dev(qp->rdev), + "QP1 rq buffer is empty!"); + return -ENOMEM; + } + + sqp_entry = &rdev->gsi_ctx.sqp_tbl[rq_prod_index]; + 
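+	/* The SGE provided by the consumer is parked in the shadow-QP
+	 * table entry (indexed by the RQ producer index); the GSI receive
+	 * path later posts it to the shadow QP so the payload lands in the
+	 * consumer's buffer, and the completion path reports the original
+	 * wr_id from the same entry.
+	 */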
sqp_entry->sge.addr = wr->sg_list[0].addr; + sqp_entry->sge.lkey = wr->sg_list[0].lkey; + sqp_entry->sge.size = wr->sg_list[0].length; + /* Store the wrid for reporting completion */ + sqp_entry->wrid = wqe->wr_id; + /* change the wqe->wrid to table index */ + wqe->wr_id = rq_prod_index; + } + + return 0; +} + +static bool is_ud_qp(struct bnxt_re_qp *qp) +{ + return (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD || + qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_GSI); +} + +static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp, + CONST_STRUCT ib_send_wr *wr, + struct bnxt_qplib_swqe *wqe) +{ + struct bnxt_re_ah *ah = NULL; + + if(is_ud_qp(qp)) { +#ifdef HAVE_IB_RDMA_WR + ah = to_bnxt_re(ud_wr(wr)->ah, struct bnxt_re_ah, ib_ah); + wqe->send.q_key = ud_wr(wr)->remote_qkey; + wqe->send.dst_qp = ud_wr(wr)->remote_qpn; +#else + ah = to_bnxt_re(wr->wr.ud.ah, struct bnxt_re_ah, + ib_ah); + wqe->send.q_key = wr->wr.ud.remote_qkey; + wqe->send.dst_qp = wr->wr.ud.remote_qpn; +#endif + wqe->send.avid = ah->qplib_ah.id; + } + switch (wr->opcode) { + case IB_WR_SEND: + wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND; + break; + case IB_WR_SEND_WITH_IMM: + wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM; + wqe->send.imm_data = wr->ex.imm_data; + break; + case IB_WR_SEND_WITH_INV: + wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV; + wqe->send.inv_key = wr->ex.invalidate_rkey; + break; + default: + dev_err(rdev_to_dev(qp->rdev), "%s Invalid opcode %d!", + __func__, wr->opcode); + return -EINVAL; + } + if (wr->send_flags & IB_SEND_SIGNALED) + wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; + if (wr->send_flags & IB_SEND_FENCE) + wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; + if (wr->send_flags & IB_SEND_SOLICITED) + wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT; + if (wr->send_flags & IB_SEND_INLINE) + wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE; + + return 0; +} + +static int bnxt_re_build_rdma_wqe(CONST_STRUCT ib_send_wr *wr, + struct bnxt_qplib_swqe *wqe) +{ + switch (wr->opcode) { + case IB_WR_RDMA_WRITE: + wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE; + break; + case IB_WR_RDMA_WRITE_WITH_IMM: + wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM; + wqe->rdma.imm_data = wr->ex.imm_data; + break; + case IB_WR_RDMA_READ: + wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_READ; + wqe->rdma.inv_key = wr->ex.invalidate_rkey; + break; + default: + return -EINVAL; + } +#ifdef HAVE_IB_RDMA_WR + wqe->rdma.remote_va = rdma_wr(wr)->remote_addr; + wqe->rdma.r_key = rdma_wr(wr)->rkey; +#else + wqe->rdma.remote_va = wr->wr.rdma.remote_addr; + wqe->rdma.r_key = wr->wr.rdma.rkey; +#endif + if (wr->send_flags & IB_SEND_SIGNALED) + wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; + if (wr->send_flags & IB_SEND_FENCE) + wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; + if (wr->send_flags & IB_SEND_SOLICITED) + wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT; + if (wr->send_flags & IB_SEND_INLINE) + wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE; + + return 0; +} + +static int bnxt_re_build_atomic_wqe(CONST_STRUCT ib_send_wr *wr, + struct bnxt_qplib_swqe *wqe) +{ + switch (wr->opcode) { + case IB_WR_ATOMIC_CMP_AND_SWP: + if (wr->num_sge > 1) + return -EINVAL; + wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP; +#ifdef HAVE_IB_RDMA_WR + wqe->atomic.cmp_data = atomic_wr(wr)->compare_add; + wqe->atomic.swap_data = atomic_wr(wr)->swap; +#else + wqe->atomic.cmp_data = wr->wr.atomic.compare_add; + wqe->atomic.swap_data = wr->wr.atomic.swap; +#endif + break; + case IB_WR_ATOMIC_FETCH_AND_ADD: + if (wr->num_sge > 1) + return -EINVAL; + wqe->type = 
BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD; +#ifdef HAVE_IB_RDMA_WR + wqe->atomic.cmp_data = atomic_wr(wr)->compare_add; +#else + wqe->atomic.cmp_data = wr->wr.atomic.compare_add; +#endif + break; + default: + return -EINVAL; + } +#ifdef HAVE_IB_RDMA_WR + wqe->atomic.remote_va = atomic_wr(wr)->remote_addr; + wqe->atomic.r_key = atomic_wr(wr)->rkey; +#else + wqe->atomic.remote_va = wr->wr.atomic.remote_addr; + wqe->atomic.r_key = wr->wr.atomic.rkey; +#endif + if (wr->send_flags & IB_SEND_SIGNALED) + wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; + if (wr->send_flags & IB_SEND_FENCE) + wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; + if (wr->send_flags & IB_SEND_SOLICITED) + wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT; + return 0; +} + +static int bnxt_re_build_inv_wqe(CONST_STRUCT ib_send_wr *wr, + struct bnxt_qplib_swqe *wqe) +{ + wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV; + wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey; + if (wr->send_flags & IB_SEND_SIGNALED) + wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; + if (wr->send_flags & IB_SEND_FENCE) + wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; + if (wr->send_flags & IB_SEND_SOLICITED) + wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT; + + return 0; +} + +#ifdef HAVE_IB_FAST_REG_MR +static int bnxt_re_build_frmr_wqe(struct ib_send_wr *wr, + struct bnxt_qplib_swqe *wqe) +{ + struct bnxt_re_frpl *frpl = to_bnxt_re(wr->wr.fast_reg.page_list, + struct bnxt_re_frpl, ib_frpl); + struct fast_reg = wr->wr.fast_reg; + int access = fast_reg.access_flags; + + if (!fast_reg.page_list_len || + fast_reg.page_list_len > frpl->qplib_frpl.max_pg_ptrs) { + dev_err_ratelimited(rdev_to_dev(frpl->rdev), + "%s: failed npages %d > %d", + __func__, fast_reg.page_list_len, + frpl->qplib_frpl.max_pg_ptrs); + return -EINVAL; + } + + wqe->frmr.pbl_ptr = (u64 *)frpl->qplib_frpl.hwq.pbl_ptr[0]; + wqe->frmr.pbl_dma_ptr = frpl->qplib_frpl.hwq.pbl_dma_ptr[0]; + wqe->frmr.levels = frpl->qplib_frpl.hwq.level; + wqe->frmr.page_list = fast_reg.page_list->page_list; + wqe->frmr.page_list_len = fast_reg.page_list_len; + wqe->type = BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR; + + if (wr->send_flags & IB_SEND_SIGNALED) + wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; + if (wr->send_flags & IB_SEND_FENCE) + wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; + if (access & IB_ACCESS_LOCAL_WRITE) + wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE; + if (access & IB_ACCESS_REMOTE_READ) + wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_READ; + if (access & IB_ACCESS_REMOTE_WRITE) + wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_WRITE; + if (access & IB_ACCESS_REMOTE_ATOMIC) + wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_ATOMIC; + if (access & IB_ACCESS_MW_BIND) + wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_WINDOW_BIND; + + /* TODO: OFED provides the rkey of the MR instead of the lkey */ + wqe->frmr.l_key = fast_reg.rkey; + wqe->frmr.length = fast_reg.length; + wqe->frmr.pbl_pg_sz_log = ilog2(PAGE_SIZE >> PAGE_SHIFT_4K); + wqe->frmr.pg_sz_log = ilog2((1ULL << fast_reg.page_shift) >> + PAGE_SHIFT_4K); + wqe->frmr.va = fast_reg.iova_start; + wqe->frmr.zero_based = false; + return 0; +} +#endif + +#ifdef HAVE_IB_REG_MR_WR +static int bnxt_re_build_reg_wqe(CONST_STRUCT ib_reg_wr *wr, + struct bnxt_qplib_swqe *wqe) +{ + struct bnxt_re_mr *mr = to_bnxt_re(wr->mr, struct bnxt_re_mr, ib_mr); + struct bnxt_qplib_frpl *qplib_frpl = &mr->qplib_frpl; + int reg_len, i, access = wr->access; + + if (mr->npages > qplib_frpl->max_pg_ptrs) { + 
dev_err_ratelimited(rdev_to_dev(mr->rdev), + " %s: failed npages %d > %d", __func__, + mr->npages, qplib_frpl->max_pg_ptrs); + return -EINVAL; + } + + wqe->frmr.pbl_ptr = (__le64 *)qplib_frpl->hwq.pbl_ptr[0]; + wqe->frmr.pbl_dma_ptr = qplib_frpl->hwq.pbl_dma_ptr[0]; + wqe->frmr.levels = qplib_frpl->hwq.level; + wqe->frmr.page_list = mr->pages; + wqe->frmr.page_list_len = mr->npages; + wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR; + + if (wr->wr.send_flags & IB_SEND_SIGNALED) + wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; + if (access & IB_ACCESS_LOCAL_WRITE) + wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE; + if (access & IB_ACCESS_REMOTE_READ) + wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_READ; + if (access & IB_ACCESS_REMOTE_WRITE) + wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_WRITE; + if (access & IB_ACCESS_REMOTE_ATOMIC) + wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_ATOMIC; + if (access & IB_ACCESS_MW_BIND) + wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_WINDOW_BIND; + + /* TODO: OFED provides the rkey of the MR instead of the lkey */ + wqe->frmr.l_key = wr->key; + wqe->frmr.length = wr->mr->length; + wqe->frmr.pbl_pg_sz_log = ilog2(PAGE_SIZE >> PAGE_SHIFT_4K); + wqe->frmr.pg_sz_log = ilog2(wr->mr->page_size >> PAGE_SHIFT_4K); + wqe->frmr.va = wr->mr->iova; + reg_len = wqe->frmr.page_list_len * wr->mr->page_size; + + if (wqe->frmr.length > reg_len) { + dev_err_ratelimited(rdev_to_dev(mr->rdev), + "%s: bnxt_re_mr 0x%px len (%d > %d)", + __func__, (void *)mr, wqe->frmr.length, + reg_len); + + for (i = 0; i < mr->npages; i++) + dev_dbg(rdev_to_dev(mr->rdev), + "%s: build_reg_wqe page[%d] = 0x%llx", + __func__, i, mr->pages[i]); + + return -EINVAL; + } + + return 0; +} +#endif + +#ifdef HAVE_IB_MW_BIND_INFO +static int bnxt_re_build_bind_wqe(struct ib_send_wr *wr, + struct bnxt_qplib_swqe *wqe) +{ + struct ib_mw_bind_info *bind_info = get_bind_info(wr); + struct ib_mw *mw = get_ib_mw(wr); + + wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW; + wqe->wr_id = wr->wr_id; + if (wr->send_flags & IB_SEND_SIGNALED) + wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; + if (wr->send_flags & IB_SEND_FENCE) + wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; + wqe->bind.zero_based = false; + wqe->bind.parent_l_key = bind_info->mr->lkey; + wqe->bind.r_key = ib_inc_rkey(mw->rkey); + wqe->bind.va = bind_info->addr; + wqe->bind.length = bind_info->length; + wqe->bind.access_cntl = __from_ib_access_flags( + bind_info->mw_access_flags); + wqe->bind.mw_type = mw->type == IB_MW_TYPE_1 ? 
+ SQ_BIND_MW_TYPE_TYPE1 : SQ_BIND_MW_TYPE_TYPE2; + return 0; +} +#endif + +static void bnxt_re_set_sg_list(CONST_STRUCT ib_send_wr *wr, + struct bnxt_qplib_swqe *wqe) +{ + wqe->sg_list = (struct bnxt_qplib_sge *)wr->sg_list; + wqe->num_sge = wr->num_sge; +} + +static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp) +{ + if ((qp->ib_qp.qp_type == IB_QPT_UD || qp->ib_qp.qp_type == IB_QPT_GSI || + qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) && + qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) { + int qp_attr_mask; + struct ib_qp_attr qp_attr; + + qp_attr_mask = IB_QP_STATE; + qp_attr.qp_state = IB_QPS_RTS; + bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL); + qp->qplib_qp.wqe_cnt = 0; + } +} + +static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev, + struct bnxt_re_qp *qp, + CONST_STRUCT ib_send_wr *wr) +{ + struct bnxt_qplib_swqe wqe; + unsigned long flags; + int rc = 0; + + spin_lock_irqsave(&qp->sq_lock, flags); + while (wr) { + /* House keeping */ + memset(&wqe, 0, sizeof(wqe)); + /* Common */ + if (wr->num_sge > qp->qplib_qp.sq.max_sge) { + dev_err(rdev_to_dev(rdev), "Limit exceeded for Send SGEs"); + rc = -EINVAL; + break; + } + + bnxt_re_set_sg_list(wr, &wqe); + wqe.wr_id = wr->wr_id; + wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND; + rc = bnxt_re_build_send_wqe(qp, wr, &wqe); + if (rc) + break; + + rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe); + if (rc) { + dev_err(rdev_to_dev(rdev), + "bad_wr seen with opcode = 0x%x rc = %d", + wr->opcode, rc); + break; + } + wr = wr->next; + } + bnxt_qplib_post_send_db(&qp->qplib_qp); + bnxt_ud_qp_hw_stall_workaround(qp); + spin_unlock_irqrestore(&qp->sq_lock, flags); + return rc; +} + +static void bnxt_re_legacy_set_uc_fence(struct bnxt_qplib_swqe *wqe) +{ + /* Need unconditional fence for non-wire memory opcode + * to work as expected. 
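+	 * Applied from bnxt_re_post_send() only when the chip is not
+	 * Gen P5/P7; see the _is_chip_gen_p5_p7() check there.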
+ */ + if (wqe->type == BNXT_QPLIB_SWQE_TYPE_LOCAL_INV || + wqe->type == BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR || + wqe->type == BNXT_QPLIB_SWQE_TYPE_REG_MR || + wqe->type == BNXT_QPLIB_SWQE_TYPE_BIND_MW) + wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; +} + +int bnxt_re_post_send(struct ib_qp *ib_qp, CONST_STRUCT ib_send_wr *wr, + CONST_STRUCT ib_send_wr **bad_wr) +{ + struct bnxt_re_qp *qp = to_bnxt_re(ib_qp, struct bnxt_re_qp, ib_qp); + struct bnxt_qplib_sge sge[6]; + struct bnxt_qplib_swqe wqe; + struct bnxt_re_dev *rdev; + unsigned long flags; + int rc = 0; + + rdev = qp->rdev; + spin_lock_irqsave(&qp->sq_lock, flags); + while (wr) { + /* House keeping */ + memset(&wqe, 0, sizeof(wqe)); + /* Common */ + if (wr->num_sge > qp->qplib_qp.sq.max_sge) { + dev_err(rdev_to_dev(rdev), "Limit exceeded for Send SGEs"); + rc = -EINVAL; + goto bad; + } + + bnxt_re_set_sg_list(wr, &wqe); + wqe.wr_id = wr->wr_id; + + switch (wr->opcode) { + case IB_WR_SEND: + case IB_WR_SEND_WITH_IMM: + if (ib_qp->qp_type == IB_QPT_GSI && + rdev->gsi_ctx.gsi_qp_mode != BNXT_RE_GSI_MODE_UD) { + memset(sge, 0, sizeof(sge)); + wqe.sg_list = sge; + rc = bnxt_re_build_gsi_send(qp, wr, &wqe); + if (rc) + goto bad; + } else if (ib_qp->qp_type == IB_QPT_RAW_ETHERTYPE) { + bnxt_re_build_raw_send(wr, &wqe); + } + switch (wr->send_flags) { + case IB_SEND_IP_CSUM: + wqe.rawqp1.lflags |= + SQ_SEND_RAWETH_QP1_LFLAGS_IP_CHKSUM; + break; + default: + break; + } + fallthrough; + case IB_WR_SEND_WITH_INV: + rc = bnxt_re_build_send_wqe(qp, wr, &wqe); + break; + case IB_WR_RDMA_WRITE: + case IB_WR_RDMA_WRITE_WITH_IMM: + case IB_WR_RDMA_READ: + rc = bnxt_re_build_rdma_wqe(wr, &wqe); + break; + case IB_WR_ATOMIC_CMP_AND_SWP: + case IB_WR_ATOMIC_FETCH_AND_ADD: + rc = bnxt_re_build_atomic_wqe(wr, &wqe); + break; + case IB_WR_RDMA_READ_WITH_INV: + dev_err(rdev_to_dev(rdev), + "RDMA Read with Invalidate is not supported"); + rc = -EINVAL; + goto bad; + case IB_WR_LOCAL_INV: + rc = bnxt_re_build_inv_wqe(wr, &wqe); + break; +#ifdef HAVE_IB_FAST_REG_MR + case IB_WR_FAST_REG_MR: + rc = bnxt_re_build_frmr_wqe(wr, &wqe); + break; +#endif +#ifdef HAVE_IB_REG_MR_WR + case IB_WR_REG_MR: + rc = bnxt_re_build_reg_wqe(reg_wr(wr), &wqe); + break; +#endif +#ifdef HAVE_IB_MW_BIND_INFO + case IB_WR_BIND_MW: + /* For type 1, 2A, and 2B binding */ + rc = bnxt_re_build_bind_wqe(wr, &wqe); + break; +#endif + default: + /* Unsupported WRs */ + dev_err(rdev_to_dev(rdev), + "WR (0x%x) is not supported", wr->opcode); + rc = -EINVAL; + goto bad; + } + + if (likely(!rc)) { + if (!_is_chip_gen_p5_p7(rdev->chip_ctx)) + bnxt_re_legacy_set_uc_fence(&wqe); + rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe); + } +bad: + if (unlikely(rc)) { + dev_err(rdev_to_dev(rdev), + "bad_wr seen with opcode = 0x%x", wr->opcode); + *bad_wr = wr; + break; + } + wr = wr->next; + } + bnxt_qplib_post_send_db(&qp->qplib_qp); + if (!_is_chip_gen_p5_p7(rdev->chip_ctx)) + bnxt_ud_qp_hw_stall_workaround(qp); + spin_unlock_irqrestore(&qp->sq_lock, flags); + + return rc; +} + +static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev, + struct bnxt_re_qp *qp, + struct ib_recv_wr *wr) +{ + struct bnxt_qplib_swqe wqe; + int rc = 0; + + /* rq lock can be pardoned here. 
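+	 * Posting to this shadow-QP RQ happens from the GSI receive path
+	 * (bnxt_re_process_raw_qp_packet_receive()), which runs out of
+	 * bnxt_re_poll_cq() under the CQ lock.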
*/ + while (wr) { + /* House keeping */ + memset(&wqe, 0, sizeof(wqe)); + /* Common */ + if (wr->num_sge > qp->qplib_qp.rq.max_sge) { + dev_err(rdev_to_dev(rdev), + "Limit exceeded for Receive SGEs"); + rc = -EINVAL; + goto bad; + } + + wqe.sg_list = (struct bnxt_qplib_sge *)wr->sg_list; + wqe.num_sge = wr->num_sge; + wqe.wr_id = wr->wr_id; + wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV; + rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe); +bad: + if (rc) { + dev_err(rdev_to_dev(rdev), + "bad_wr seen with RQ post"); + break; + } + wr = wr->next; + } + bnxt_qplib_post_recv_db(&qp->qplib_qp); + return rc; +} + +static int bnxt_re_build_gsi_recv(struct bnxt_re_qp *qp, + CONST_STRUCT ib_recv_wr *wr, + struct bnxt_qplib_swqe *wqe) +{ + struct bnxt_re_dev *rdev = qp->rdev; + int rc = 0; + + if (rdev->gsi_ctx.gsi_qp_mode == BNXT_RE_GSI_MODE_ALL) + rc = bnxt_re_build_qp1_shadow_qp_recv(qp, wr, wqe); + else + rc = bnxt_re_build_qp1_recv(qp, wr, wqe); + + return rc; +} + +int bnxt_re_post_recv(struct ib_qp *ib_qp, CONST_STRUCT ib_recv_wr *wr, + CONST_STRUCT ib_recv_wr **bad_wr) +{ + struct bnxt_re_qp *qp = to_bnxt_re(ib_qp, struct bnxt_re_qp, ib_qp); + struct bnxt_re_dev *rdev = qp->rdev; + struct bnxt_qplib_sge sge[6]; + struct bnxt_qplib_swqe wqe; + unsigned long flags; + u32 count = 0; + int rc = 0; + + spin_lock_irqsave(&qp->rq_lock, flags); + while (wr) { + memset(&wqe, 0, sizeof(wqe)); + if (wr->num_sge > qp->qplib_qp.rq.max_sge) { + dev_err(rdev_to_dev(rdev), "Limit exceeded for Receive SGEs"); + rc = -EINVAL; + goto bad; + } + wqe.num_sge = wr->num_sge; + wqe.sg_list = (struct bnxt_qplib_sge *)wr->sg_list; + wqe.wr_id = wr->wr_id; + wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV; + + if (ib_qp->qp_type == IB_QPT_GSI && + rdev->gsi_ctx.gsi_qp_mode != BNXT_RE_GSI_MODE_UD) { + memset(sge, 0, sizeof(sge)); + wqe.sg_list = sge; + rc = bnxt_re_build_gsi_recv(qp, wr, &wqe); + if (rc) + goto bad; + } + rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe); +bad: + if (rc) { + dev_err(rdev_to_dev(rdev), "bad_wr seen with RQ post"); + *bad_wr = wr; + break; + } + /* Ring DB if the RQEs posted reaches a threshold value */ + if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) { + bnxt_qplib_post_recv_db(&qp->qplib_qp); + count = 0; + } + wr = wr->next; + } + + if (count) + bnxt_qplib_post_recv_db(&qp->qplib_qp); + spin_unlock_irqrestore(&qp->rq_lock, flags); + + return rc; +} + +/* Completion Queues */ +DESTROY_CQ_RET bnxt_re_destroy_cq(struct ib_cq *ib_cq +#ifdef HAVE_DESTROY_CQ_UDATA + , struct ib_udata *udata +#endif + ) +{ + struct bnxt_re_cq *cq = to_bnxt_re(ib_cq, struct bnxt_re_cq, ib_cq); + struct bnxt_re_dev *rdev = cq->rdev; + int rc = 0; + + if (cq->uctx_cq_page) { + BNXT_RE_CQ_PAGE_LIST_DEL(cq->uctx, cq); + free_page((u64)cq->uctx_cq_page); + cq->uctx_cq_page = NULL; + } + + if (cq->is_dbr_recov_cq && cq->uctx) { + struct bnxt_re_dbr_res_list *res_list; + void *dbr_page; + + dbr_page = cq->uctx->dbr_recov_cq_page; + + res_list = &rdev->res_list[BNXT_RE_RES_TYPE_UCTX]; + spin_lock(&res_list->lock); + cq->uctx->dbr_recov_cq_page = NULL; + cq->uctx->dbr_recov_cq = NULL; + spin_unlock(&res_list->lock); + free_page((u64)dbr_page); + +#ifndef HAVE_CQ_ALLOC_IN_IB_CORE + kfree(cq); +#endif + +#ifndef HAVE_DESTROY_CQ_RET_VOID + return 0; +#else + return; +#endif + } + + BNXT_RE_DBR_LIST_DEL(rdev, cq, BNXT_RE_RES_TYPE_CQ); + + if (rdev->hdbr_enabled) + bnxt_re_hdbr_db_unreg_cq(rdev, cq); + + rc = bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq); + if (rc) + dev_err_ratelimited(rdev_to_dev(rdev), + "%s id = %d failed rc = %d", + 
__func__, cq->qplib_cq.id, rc); + + bnxt_re_put_nq(rdev, cq->qplib_cq.nq); + if (cq->umem && !IS_ERR(cq->umem)) + ib_umem_release(cq->umem); + + kfree(cq->cql); + atomic_dec(&rdev->stats.rsors.cq_count); + +#ifndef HAVE_CQ_ALLOC_IN_IB_CORE + kfree(cq); +#endif + +#ifndef HAVE_DESTROY_CQ_RET_VOID + /* return success for destroy resources */ + return 0; +#endif +} + +#ifdef HAVE_IB_CQ_INIT_ATTR +ALLOC_CQ_RET bnxt_re_create_cq(ALLOC_CQ_IN *cq_in, + const struct ib_cq_init_attr *attr, +#ifdef HAVE_CREATE_CQ_UCONTEXT + struct ib_ucontext *context, +#endif + struct ib_udata *udata) +#else +ALLOC_CQ_RET bnxt_re_create_cq(ALLOC_CQ_IN *cq_in, int cqe, + int comp_vector, +#ifdef HAVE_CREATE_CQ_UCONTEXT + struct ib_ucontext *context, +#endif + struct ib_udata *udata) +#endif +{ + struct bnxt_qplib_dev_attr *dev_attr; + struct bnxt_re_ucontext *uctx = NULL; +#ifndef HAVE_CREATE_CQ_UCONTEXT + struct ib_ucontext *context = NULL; +#endif + struct bnxt_qplib_cq *qplcq; + struct bnxt_re_cq_req ureq; + struct bnxt_re_dev *rdev; + int rc, entries; + struct bnxt_re_cq *cq; + u32 max_active_cqs; +#ifdef HAVE_IB_CQ_INIT_ATTR + int cqe = attr->cqe; + +#ifdef HAVE_CQ_ALLOC_IN_IB_CORE + if (attr->flags) + return -EOPNOTSUPP; +#endif +#endif + + rdev = rdev_from_cq_in(cq_in); + if (udata) { +#ifdef HAVE_RDMA_UDATA_TO_DRV_CONTEXT + uctx = rdma_udata_to_drv_context(udata, + struct bnxt_re_ucontext, + ib_uctx); +#else +#ifdef HAVE_CREATE_CQ_UCONTEXT + uctx = to_bnxt_re(context, struct bnxt_re_ucontext, ib_uctx); +#endif /* HAVE_CREATE_CQ_UCONTEXT */ +#endif /* HAVE_RDMA_UDATA_TO_DRV_CONTEXT */ + } + dev_attr = rdev->dev_attr; + + if (atomic_read(&rdev->stats.rsors.cq_count) >= dev_attr->max_cq) { + dev_err(rdev_to_dev(rdev), "Create CQ failed - max exceeded(CQs)"); + rc = -EINVAL; + goto exit; + } + /* Validate CQ fields */ + if (cqe < 1 || cqe > dev_attr->max_cq_wqes) { + dev_err(rdev_to_dev(rdev), "Create CQ failed - max exceeded(CQ_WQs)"); + rc = -EINVAL; + goto exit; + } + + cq = __get_cq_from_cq_in(cq_in, rdev); + if (!cq) { + rc = -ENOMEM; + goto exit; + } + cq->rdev = rdev; + cq->uctx = uctx; + qplcq = &cq->qplib_cq; + qplcq->cq_handle = (u64)qplcq; + /* + * Since CQ is for QP1 is shared with Shadow CQ, the size + * should be double the size. There is no way to identify + * whether this CQ is for GSI QP. So assuming that the first + * CQ created is for QP1 + */ + if (!udata && !rdev->gsi_ctx.first_cq_created && + rdev->gsi_ctx.gsi_qp_mode == BNXT_RE_GSI_MODE_ALL) { + rdev->gsi_ctx.first_cq_created = true; + /* + * Total CQE required for the CQ = CQE for QP1 RQ + + * CQE for Shadow QP SQEs + CQE for Shadow QP RQEs. 
+ * Max entries of shadow QP SQ and RQ = QP1 RQEs = cqe + */ + cqe *= 3; + } + + entries = bnxt_re_init_depth(cqe + 1, uctx); + if (entries > dev_attr->max_cq_wqes + 1) + entries = dev_attr->max_cq_wqes + 1; + + qplcq->sginfo.pgshft = PAGE_SHIFT; + qplcq->sginfo.pgsize = PAGE_SIZE; + if (udata) { + if (udata->inlen < sizeof(ureq)) + dev_warn_once(rdev_to_dev(rdev), + "Update the library ulen %d klen %d", + (unsigned int)udata->inlen, + (unsigned int)sizeof(ureq)); + + rc = ib_copy_from_udata(&ureq, udata, + min(udata->inlen, sizeof(ureq))); + if (rc) + goto fail; + + if (BNXT_RE_IS_DBR_RECOV_CQ(ureq)) { + struct bnxt_re_dbr_res_list *res_list; + void *dbr_page; + u32 *epoch; + + dbr_page = (void *)__get_free_page(GFP_KERNEL); + if (!dbr_page) { + dev_err(rdev_to_dev(rdev), + "DBR recov CQ page allocation failed!"); + rc = -ENOMEM; + goto fail; + } + + /* memset the epoch and epoch_ack to 0 */ + epoch = dbr_page; + epoch[0] = 0x0; + epoch[1] = 0x0; + + res_list = &rdev->res_list[BNXT_RE_RES_TYPE_UCTX]; + spin_lock(&res_list->lock); + uctx->dbr_recov_cq = cq; + uctx->dbr_recov_cq_page = dbr_page; + spin_unlock(&res_list->lock); + + cq->is_dbr_recov_cq = true; + goto success; + } + + cq->umem = ib_umem_get_compat + (rdev, context, udata, ureq.cq_va, + entries * sizeof(struct cq_base), + IB_ACCESS_LOCAL_WRITE, 1); + if (IS_ERR(cq->umem)) { + rc = PTR_ERR(cq->umem); + dev_err(rdev_to_dev(rdev), + "%s: ib_umem_get failed! rc = %d\n", + __func__, rc); + goto fail; + } + qplcq->sginfo.npages = ib_umem_num_pages_compat(cq->umem); +#ifndef HAVE_RDMA_UMEM_FOR_EACH_DMA_BLOCK + qplcq->sginfo.sghead = get_ib_umem_sgl(cq->umem, + &qplcq->sginfo.nmap); +#else + qplcq->sginfo.umem = cq->umem; +#endif + if (!uctx->dpi.dbr) { + rc = bnxt_re_get_user_dpi(rdev, uctx); + if (rc) + goto c2fail; + } + qplcq->dpi = &uctx->dpi; + } else { + cq->max_cql = entries > MAX_CQL_PER_POLL ? MAX_CQL_PER_POLL : entries; + cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe), + GFP_KERNEL); + if (!cq->cql) { + dev_err(rdev_to_dev(rdev), + "Allocate CQL for %d failed!", cq->max_cql); + rc = -ENOMEM; + goto fail; + } + /* TODO: DPI is for privilege app for now */ + qplcq->dpi = &rdev->dpi_privileged; + } + /* + * Allocating the NQ in a round robin fashion. nq_alloc_cnt is a + * used for getting the NQ index. 
+ */ + qplcq->max_wqe = entries; + qplcq->nq = bnxt_re_get_nq(rdev); + qplcq->cnq_hw_ring_id = qplcq->nq->ring_id; + + rc = bnxt_qplib_create_cq(&rdev->qplib_res, qplcq); + if (rc) { + dev_err(rdev_to_dev(rdev), "Create HW CQ failed!"); + goto fail; + } + + INIT_LIST_HEAD(&cq->cq_list); + cq->ib_cq.cqe = entries; + cq->cq_period = qplcq->period; + + atomic_inc(&rdev->stats.rsors.cq_count); + max_active_cqs = atomic_read(&rdev->stats.rsors.cq_count); + if (max_active_cqs > atomic_read(&rdev->stats.rsors.max_cq_count)) + atomic_set(&rdev->stats.rsors.max_cq_count, max_active_cqs); + spin_lock_init(&cq->cq_lock); + + if (udata) { + struct bnxt_re_cq_resp resp = {}; + + if (rdev->hdbr_enabled) { + rc = bnxt_re_hdbr_db_reg_cq(rdev, cq, uctx, &resp, &ureq); + if (rc) + goto destroy_cq; + } + +#ifdef HAVE_CREATE_CQ_UCONTEXT + cq->context = context; +#endif + resp.cqid = qplcq->id; + resp.tail = qplcq->hwq.cons; + resp.phase = qplcq->period; + resp.comp_mask = 0; + resp.dbr = (u64)uctx->dpi.umdbr; + resp.dpi = uctx->dpi.dpi; + resp.comp_mask |= BNXT_RE_COMP_MASK_CQ_HAS_DB_INFO; + /* Copy only on a valid wcpdi */ + if (uctx->wcdpi.dpi) { + resp.wcdpi = uctx->wcdpi.dpi; + resp.comp_mask |= BNXT_RE_COMP_MASK_CQ_HAS_WC_DPI; + } + + if (_is_chip_p7(rdev->chip_ctx)) { + cq->uctx_cq_page = (void *)__get_free_page(GFP_KERNEL); + + if (!cq->uctx_cq_page) { + dev_err(rdev_to_dev(rdev), + "CQ page allocation failed!"); + (void)bnxt_qplib_destroy_cq(&rdev->qplib_res, qplcq); + rc = -ENOMEM; + goto c2fail; + } + + resp.uctx_cq_page = (u64)cq->uctx_cq_page; + resp.comp_mask |= BNXT_RE_COMP_MASK_CQ_HAS_CQ_PAGE; + } + + rc = bnxt_re_copy_to_udata(rdev, &resp, + min(udata->outlen, sizeof(resp)), + udata); + if (rc) + goto unreg_db_cq; + if (cq->uctx_cq_page) + BNXT_RE_CQ_PAGE_LIST_ADD(uctx, cq); + } else { + if (rdev->hdbr_enabled) { + rc = bnxt_re_hdbr_db_reg_cq(rdev, cq, NULL, NULL, NULL); + if (rc) + goto destroy_cq; + } + } + BNXT_RE_DBR_LIST_ADD(rdev, cq, BNXT_RE_RES_TYPE_CQ); + +success: +#ifdef HAVE_CQ_ALLOC_IN_IB_CORE + return 0; +#else + return &cq->ib_cq; +#endif + +unreg_db_cq: + if (cq->uctx_cq_page) { + free_page((u64)cq->uctx_cq_page); + cq->uctx_cq_page = NULL; + } + if (rdev->hdbr_enabled) + bnxt_re_hdbr_db_unreg_cq(rdev, cq); +destroy_cq: + (void)bnxt_qplib_destroy_cq(&rdev->qplib_res, qplcq); +c2fail: + if (udata && cq->umem && !IS_ERR(cq->umem)) + ib_umem_release(cq->umem); +fail: + if (cq) { + if (cq->cql) + kfree(cq->cql); +#ifndef HAVE_CQ_ALLOC_IN_IB_CORE + kfree(cq); +#endif + } +exit: +#ifdef HAVE_CQ_ALLOC_IN_IB_CORE + return rc; +#else + return ERR_PTR(rc); +#endif +} + +int bnxt_re_modify_cq(struct ib_cq *ib_cq, u16 cq_count, u16 cq_period) +{ + struct bnxt_re_cq *cq = to_bnxt_re(ib_cq, struct bnxt_re_cq, ib_cq); + struct bnxt_re_dev *rdev = cq->rdev; + int rc; + + if ((cq->cq_count != cq_count) || (cq->cq_period != cq_period)) { + cq->qplib_cq.count = cq_count; + cq->qplib_cq.period = cq_period; + rc = bnxt_qplib_modify_cq(&rdev->qplib_res, &cq->qplib_cq); + if (rc) { + dev_err(rdev_to_dev(rdev), "Modify HW CQ %#x failed!", + cq->qplib_cq.id); + return rc; + } + /* On success, update the shadow */ + cq->cq_count = cq_count; + cq->cq_period = cq_period; + } + return 0; +} + +static void bnxt_re_resize_cq_complete(struct bnxt_re_cq *cq) +{ + struct bnxt_re_dev *rdev = cq->rdev; + + bnxt_qplib_resize_cq_complete(&rdev->qplib_res, &cq->qplib_cq); + + cq->qplib_cq.max_wqe = cq->resize_cqe; + if (cq->resize_umem) { + ib_umem_release(cq->umem); + cq->umem = cq->resize_umem; + cq->resize_umem = 
NULL; + cq->resize_cqe = 0; + } +} + +int bnxt_re_resize_cq(struct ib_cq *ib_cq, int cqe, struct ib_udata *udata) +{ + struct bnxt_qplib_sg_info sginfo = {}; + struct bnxt_qplib_dpi *orig_dpi = NULL; + struct bnxt_qplib_dev_attr *dev_attr; + struct bnxt_re_ucontext *uctx = NULL; + struct bnxt_re_resize_cq_req ureq; + struct ib_ucontext *context = NULL; + struct bnxt_re_dev *rdev; + struct bnxt_re_cq *cq; + int rc, entries; + + /* Don't allow more than one resize request at the same time. + * TODO: need a mutex here when we support kernel consumers of resize. + */ + cq = to_bnxt_re(ib_cq, struct bnxt_re_cq, ib_cq); + rdev = cq->rdev; + dev_attr = rdev->dev_attr; + if (ib_cq->uobject) { +#ifdef HAVE_RDMA_UDATA_TO_DRV_CONTEXT + uctx = rdma_udata_to_drv_context(udata, + struct bnxt_re_ucontext, + ib_uctx); + context = &uctx->ib_uctx; +#else + context = cq->context; + uctx = to_bnxt_re(context, struct bnxt_re_ucontext, ib_uctx); +#endif + } + + if (cq->resize_umem) { + dev_err(rdev_to_dev(rdev), "Resize CQ %#x failed - Busy", + cq->qplib_cq.id); + return -EBUSY; + } + + /* Check the requested cq depth out of supported depth */ + if (cqe < 1 || cqe > dev_attr->max_cq_wqes) { + dev_err(rdev_to_dev(rdev), "Resize CQ %#x failed - max exceeded", + cq->qplib_cq.id); + return -EINVAL; + } + + entries = bnxt_re_init_depth(cqe + 1, uctx); + entries = min_t(u32, (u32)entries, dev_attr->max_cq_wqes + 1); + + /* Check to see if the new requested size can be handled by already + * existing CQ + */ + if (entries == cq->ib_cq.cqe) { + dev_info(rdev_to_dev(rdev), "CQ is already at size %d", cqe); + return 0; + } + + if (ib_cq->uobject && udata) { + if (udata->inlen < sizeof(ureq)) + dev_warn_once(rdev_to_dev(rdev), + "Update the library ulen %d klen %d", + (unsigned int)udata->inlen, + (unsigned int)sizeof(ureq)); + + rc = ib_copy_from_udata(&ureq, udata, + min(udata->inlen, sizeof(ureq))); + if (rc) + goto fail; + + dev_dbg(rdev_to_dev(rdev), "%s: va %p", __func__, + (void *)ureq.cq_va); + cq->resize_umem = ib_umem_get_compat + (rdev, + context, udata, ureq.cq_va, + entries * sizeof(struct cq_base), + IB_ACCESS_LOCAL_WRITE, 1); + if (IS_ERR(cq->resize_umem)) { + rc = PTR_ERR(cq->resize_umem); + cq->resize_umem = NULL; + dev_err(rdev_to_dev(rdev), "%s: ib_umem_get failed! rc = %d\n", + __func__, rc); + goto fail; + } + cq->resize_cqe = entries; + dev_dbg(rdev_to_dev(rdev), "%s: ib_umem_get() success\n", + __func__); + memcpy(&sginfo, &cq->qplib_cq.sginfo, sizeof(sginfo)); + orig_dpi = cq->qplib_cq.dpi; + + cq->qplib_cq.sginfo.npages = ib_umem_num_pages_compat(cq->resize_umem); +#ifndef HAVE_RDMA_UMEM_FOR_EACH_DMA_BLOCK + cq->qplib_cq.sginfo.sghead = get_ib_umem_sgl(cq->resize_umem, + &cq->qplib_cq.sginfo.nmap); +#else + cq->qplib_cq.sginfo.umem = cq->resize_umem; +#endif + cq->qplib_cq.sginfo.pgsize = PAGE_SIZE; + cq->qplib_cq.sginfo.pgshft = PAGE_SHIFT; + cq->qplib_cq.dpi = &uctx->dpi; + } else { + /* TODO: kernel consumer */ + } + + rc = bnxt_qplib_resize_cq(&rdev->qplib_res, &cq->qplib_cq, entries); + if (rc) { + dev_err(rdev_to_dev(rdev), "Resize HW CQ %#x failed!", + cq->qplib_cq.id); + goto fail; + } + + cq->ib_cq.cqe = cq->resize_cqe; + /* For kernel consumers complete resize here. For uverbs consumers, + * we complete it in the context of ibv_poll_cq(). 
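+	 * (See the cq->resize_umem check in bnxt_re_poll_cq(), which calls
+	 * bnxt_re_resize_cq_complete() for user CQs.)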
+ */ + if (!cq->resize_umem) + bnxt_qplib_resize_cq_complete(&rdev->qplib_res, &cq->qplib_cq); + + atomic_inc(&rdev->stats.rsors.resize_count); + return 0; + +fail: + if (cq->resize_umem) { + ib_umem_release(cq->resize_umem); + cq->resize_umem = NULL; + cq->resize_cqe = 0; + memcpy(&cq->qplib_cq.sginfo, &sginfo, sizeof(sginfo)); + cq->qplib_cq.dpi = orig_dpi; + } + return rc; +} + +static enum ib_wc_status __req_to_ib_wc_status(u8 qstatus) +{ + switch(qstatus) { + case CQ_REQ_STATUS_OK: + return IB_WC_SUCCESS; + case CQ_REQ_STATUS_BAD_RESPONSE_ERR: + return IB_WC_BAD_RESP_ERR; + case CQ_REQ_STATUS_LOCAL_LENGTH_ERR: + return IB_WC_LOC_LEN_ERR; + case CQ_REQ_STATUS_LOCAL_QP_OPERATION_ERR: + return IB_WC_LOC_QP_OP_ERR; + case CQ_REQ_STATUS_LOCAL_PROTECTION_ERR: + return IB_WC_LOC_PROT_ERR; + case CQ_REQ_STATUS_MEMORY_MGT_OPERATION_ERR: + return IB_WC_GENERAL_ERR; + case CQ_REQ_STATUS_REMOTE_INVALID_REQUEST_ERR: + return IB_WC_REM_INV_REQ_ERR; + case CQ_REQ_STATUS_REMOTE_ACCESS_ERR: + return IB_WC_REM_ACCESS_ERR; + case CQ_REQ_STATUS_REMOTE_OPERATION_ERR: + return IB_WC_REM_OP_ERR; + case CQ_REQ_STATUS_RNR_NAK_RETRY_CNT_ERR: + return IB_WC_RNR_RETRY_EXC_ERR; + case CQ_REQ_STATUS_TRANSPORT_RETRY_CNT_ERR: + return IB_WC_RETRY_EXC_ERR; + case CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR: + return IB_WC_WR_FLUSH_ERR; + default: + return IB_WC_GENERAL_ERR; + } + return 0; +} + +static enum ib_wc_status __rawqp1_to_ib_wc_status(u8 qstatus) +{ + switch(qstatus) { + case CQ_RES_RAWETH_QP1_STATUS_OK: + return IB_WC_SUCCESS; + case CQ_RES_RAWETH_QP1_STATUS_LOCAL_ACCESS_ERROR: + return IB_WC_LOC_ACCESS_ERR; + case CQ_RES_RAWETH_QP1_STATUS_HW_LOCAL_LENGTH_ERR: + return IB_WC_LOC_LEN_ERR; + case CQ_RES_RAWETH_QP1_STATUS_LOCAL_PROTECTION_ERR: + return IB_WC_LOC_PROT_ERR; + case CQ_RES_RAWETH_QP1_STATUS_LOCAL_QP_OPERATION_ERR: + return IB_WC_LOC_QP_OP_ERR; + case CQ_RES_RAWETH_QP1_STATUS_MEMORY_MGT_OPERATION_ERR: + return IB_WC_GENERAL_ERR; + case CQ_RES_RAWETH_QP1_STATUS_WORK_REQUEST_FLUSHED_ERR: + return IB_WC_WR_FLUSH_ERR; + case CQ_RES_RAWETH_QP1_STATUS_HW_FLUSH_ERR: + return IB_WC_WR_FLUSH_ERR; + default: + return IB_WC_GENERAL_ERR; + } +} + +static enum ib_wc_status __rc_to_ib_wc_status(u8 qstatus) +{ + switch(qstatus) { + case CQ_RES_RC_STATUS_OK: + return IB_WC_SUCCESS; + case CQ_RES_RC_STATUS_LOCAL_ACCESS_ERROR: + return IB_WC_LOC_ACCESS_ERR; + case CQ_RES_RC_STATUS_LOCAL_LENGTH_ERR: + return IB_WC_LOC_LEN_ERR; + case CQ_RES_RC_STATUS_LOCAL_PROTECTION_ERR: + return IB_WC_LOC_PROT_ERR; + case CQ_RES_RC_STATUS_LOCAL_QP_OPERATION_ERR: + return IB_WC_LOC_QP_OP_ERR; + case CQ_RES_RC_STATUS_MEMORY_MGT_OPERATION_ERR: + return IB_WC_GENERAL_ERR; + case CQ_RES_RC_STATUS_REMOTE_INVALID_REQUEST_ERR: + return IB_WC_REM_INV_REQ_ERR; + case CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR: + return IB_WC_WR_FLUSH_ERR; + case CQ_RES_RC_STATUS_HW_FLUSH_ERR: + return IB_WC_WR_FLUSH_ERR; + default: + return IB_WC_GENERAL_ERR; + } +} + +static void bnxt_re_process_req_wc(struct ib_wc *wc, struct bnxt_qplib_cqe *cqe) +{ + switch (cqe->type) { + case BNXT_QPLIB_SWQE_TYPE_SEND: + wc->opcode = IB_WC_SEND; + break; + case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM: + wc->opcode = IB_WC_SEND; + wc->wc_flags |= IB_WC_WITH_IMM; + break; + case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV: + wc->opcode = IB_WC_SEND; + wc->wc_flags |= IB_WC_WITH_INVALIDATE; + break; + case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE: + wc->opcode = IB_WC_RDMA_WRITE; + break; + case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM: + wc->opcode = IB_WC_RDMA_WRITE; + wc->wc_flags |= 
IB_WC_WITH_IMM; + break; + case BNXT_QPLIB_SWQE_TYPE_RDMA_READ: + wc->opcode = IB_WC_RDMA_READ; + break; + case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP: + wc->opcode = IB_WC_COMP_SWAP; + break; + case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD: + wc->opcode = IB_WC_FETCH_ADD; + break; + case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV: + wc->opcode = IB_WC_LOCAL_INV; + break; +#ifdef HAVE_IB_FAST_REG_MR + case BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR: + wc->opcode = IB_WC_FAST_REG_MR; + break; +#endif +#ifdef HAVE_IB_REG_MR_WR + case BNXT_QPLIB_SWQE_TYPE_REG_MR: + wc->opcode = IB_WC_REG_MR; + break; +#endif + default: + wc->opcode = IB_WC_SEND; + break; + } + + wc->status = __req_to_ib_wc_status(cqe->status); +} + +static int bnxt_re_check_packet_type(u16 raweth_qp1_flags, u16 raweth_qp1_flags2) +{ + bool is_udp = false, is_ipv6 = false, is_ipv4 = false; + + /* raweth_qp1_flags Bit 9-6 indicates itype */ + + if ((raweth_qp1_flags & CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE) + != CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE) + return -1; + + if (raweth_qp1_flags2 & + CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_CS_CALC && + raweth_qp1_flags2 & + CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_L4_CS_CALC) { + is_udp = true; + /* raweth_qp1_flags2 Bit 8 indicates ip_type. 0-v4 1 - v6 */ + (raweth_qp1_flags2 & + CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_TYPE) ? + (is_ipv6 = true) : (is_ipv4 = true); + return ((is_ipv6) ? + BNXT_RE_ROCEV2_IPV6_PACKET : + BNXT_RE_ROCEV2_IPV4_PACKET); + } else { + return BNXT_RE_ROCE_V1_PACKET; + } +} + +#ifdef ENABLE_ROCEV2_QP1 +static int bnxt_re_to_ib_nw_type(int nw_type) +{ + u8 nw_hdr_type = 0xFF; + + switch (nw_type) { + case BNXT_RE_ROCE_V1_PACKET: + nw_hdr_type = RDMA_NETWORK_ROCE_V1; + break; + case BNXT_RE_ROCEV2_IPV4_PACKET: + nw_hdr_type = RDMA_NETWORK_IPV4; + break; + case BNXT_RE_ROCEV2_IPV6_PACKET: + nw_hdr_type = RDMA_NETWORK_IPV6; + break; + } + return nw_hdr_type; +} +#endif + +static bool bnxt_re_is_loopback_packet(struct bnxt_re_dev *rdev, + void *rq_hdr_buf) +{ + u8 *tmp_buf = NULL; + struct ethhdr *eth_hdr; + u16 eth_type; + bool rc = false; + + tmp_buf = (u8 *)rq_hdr_buf; + /* + * If dest mac is not same as I/F mac, this could be a + * loopback address or multicast address, check whether + * it is a loopback packet + */ + if (!ether_addr_equal(tmp_buf, rdev->netdev->dev_addr)) { + tmp_buf += 4; + /* Check the ether type */ + eth_hdr = (struct ethhdr *)tmp_buf; + eth_type = ntohs(eth_hdr->h_proto); + switch (eth_type) { + case BNXT_QPLIB_ETHTYPE_ROCEV1: + rc = true; + break; +#ifdef ENABLE_ROCEV2_QP1 + case ETH_P_IP: + case ETH_P_IPV6: { + u32 len; + struct udphdr *udp_hdr; + + len = (eth_type == ETH_P_IP ? 
sizeof(struct iphdr) : + sizeof(struct ipv6hdr)); + tmp_buf += sizeof(struct ethhdr) + len; + udp_hdr = (struct udphdr *)tmp_buf; + rc = ntohs(udp_hdr->dest) == ROCE_V2_UDP_DPORT; + } + break; +#endif + default: + break; + } + } + + return rc; +} + +static bool bnxt_re_is_vlan_in_packet(struct bnxt_re_dev *rdev, + void *rq_hdr_buf, + struct bnxt_qplib_cqe *cqe) +{ + struct vlan_hdr *vlan_hdr; + struct ethhdr *eth_hdr; + u8 *tmp_buf = NULL; + u16 eth_type; + + tmp_buf = (u8 *)rq_hdr_buf; + /* Check the ether type */ + eth_hdr = (struct ethhdr *)tmp_buf; + eth_type = ntohs(eth_hdr->h_proto); + if (eth_type == ETH_P_8021Q) { + tmp_buf += sizeof(struct ethhdr); + vlan_hdr = (struct vlan_hdr *)tmp_buf; + cqe->raweth_qp1_metadata = + ntohs(vlan_hdr->h_vlan_TCI) | + (eth_type << + CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_SFT); + cqe->raweth_qp1_flags2 |= + CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_VLAN; + return true; + } + + return false; +} + +static int bnxt_re_process_raw_qp_packet_receive(struct bnxt_re_qp *gsi_qp, + struct bnxt_qplib_cqe *cqe) +{ + struct bnxt_re_sqp_entries *sqp_entry = NULL; + struct bnxt_qplib_hdrbuf *hdr_buf; + dma_addr_t shrq_hdr_buf_map; + struct ib_sge s_sge[2] = {}; + struct ib_sge r_sge[2] = {}; + struct ib_recv_wr rwr = {}; + struct bnxt_re_ah *gsi_sah; + struct bnxt_re_qp *gsi_sqp; + dma_addr_t rq_hdr_buf_map; + struct bnxt_re_dev *rdev; + struct ib_send_wr *swr; + u32 skip_bytes = 0; + void *rq_hdr_buf; + int pkt_type = 0; + u32 offset = 0; + u32 tbl_idx; + int rc; +#ifdef HAVE_IB_UD_WR + struct ib_ud_wr udwr = {}; +#else + struct ib_send_wr udwr = {}; +#endif + +#ifdef HAVE_IB_UD_WR + swr = &udwr.wr; +#else + swr = &udwr; +#endif + rdev = gsi_qp->rdev; + gsi_sqp = rdev->gsi_ctx.gsi_sqp; + tbl_idx = cqe->wr_id; + + hdr_buf = gsi_qp->qplib_qp.rq_hdr_buf; + rq_hdr_buf = hdr_buf->va + tbl_idx * hdr_buf->step; + rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&gsi_qp->qplib_qp, + tbl_idx); + /* Shadow QP header buffer */ + shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&gsi_sqp->qplib_qp, + tbl_idx); + sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx]; + + /* Find packet type from the cqe */ + pkt_type = bnxt_re_check_packet_type(cqe->raweth_qp1_flags, + cqe->raweth_qp1_flags2); + if (pkt_type < 0) { + dev_err(rdev_to_dev(rdev), "Not handling this packet\n"); + return -EINVAL; + } + + /* Adjust the offset for the user buffer and post in the rq */ + + if (pkt_type == BNXT_RE_ROCEV2_IPV4_PACKET) + offset = 20; + + /* + * QP1 loopback packet has 4 bytes of internal header before + * ether header. Skip these four bytes. + */ + if (bnxt_re_is_loopback_packet(rdev, rq_hdr_buf)) + skip_bytes = 4; + + if (bnxt_re_is_vlan_in_packet(rdev, rq_hdr_buf, cqe)) + skip_bytes += VLAN_HLEN; + + /* Store this cqe */ + memcpy(&sqp_entry->cqe, cqe, sizeof(struct bnxt_qplib_cqe)); + sqp_entry->qp1_qp = gsi_qp; + + /* First send SGE . Skip the ether header*/ + s_sge[0].addr = rq_hdr_buf_map + BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE + + skip_bytes; + s_sge[0].lkey = 0xFFFFFFFF; + s_sge[0].length = offset ? 
BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4 : + BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6; + + /* Second Send SGE */ + s_sge[1].addr = s_sge[0].addr + s_sge[0].length + + BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE; + if (pkt_type != BNXT_RE_ROCE_V1_PACKET) + s_sge[1].addr += 8; + s_sge[1].lkey = 0xFFFFFFFF; + s_sge[1].length = 256; + + /* First recv SGE */ + r_sge[0].addr = shrq_hdr_buf_map; + r_sge[0].lkey = 0xFFFFFFFF; + r_sge[0].length = 40; + + r_sge[1].addr = sqp_entry->sge.addr + offset; + r_sge[1].lkey = sqp_entry->sge.lkey; + r_sge[1].length = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6 + 256 - offset; + + /* Create receive work request */ + rwr.num_sge = 2; + rwr.sg_list = r_sge; + rwr.wr_id = tbl_idx; + rwr.next = NULL; + + rc = bnxt_re_post_recv_shadow_qp(rdev, gsi_sqp, &rwr); + if (rc) { + dev_err(rdev_to_dev(rdev), + "Failed to post Rx buffers to shadow QP"); + return -ENOMEM; + } + + swr->num_sge = 2; + swr->sg_list = s_sge; + swr->wr_id = tbl_idx; + swr->opcode = IB_WR_SEND; + swr->next = NULL; + + gsi_sah = rdev->gsi_ctx.gsi_sah; +#ifdef HAVE_IB_UD_WR + udwr.ah = &gsi_sah->ib_ah; + udwr.remote_qpn = gsi_sqp->qplib_qp.id; + udwr.remote_qkey = gsi_sqp->qplib_qp.qkey; +#else + udwr.wr.ud.ah = &gsi_sah->ib_ah; + udwr.wr.ud.remote_qpn = gsi_sqp->qplib_qp.id; + udwr.wr.ud.remote_qkey = gsi_sqp->qplib_qp.qkey; +#endif + /* post data received in the send queue */ + rc = bnxt_re_post_send_shadow_qp(rdev, gsi_sqp, swr); + + return rc; +} + +static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc, + struct bnxt_qplib_cqe *cqe) +{ + wc->opcode = IB_WC_RECV; + wc->status = __rawqp1_to_ib_wc_status(cqe->status); + wc->wc_flags |= IB_WC_GRH; +} + +static void bnxt_re_process_res_rc_wc(struct ib_wc *wc, + struct bnxt_qplib_cqe *cqe) +{ + wc->opcode = IB_WC_RECV; + wc->status = __rc_to_ib_wc_status(cqe->status); + + if (cqe->flags & CQ_RES_RC_FLAGS_IMM) + wc->wc_flags |= IB_WC_WITH_IMM; + if (cqe->flags & CQ_RES_RC_FLAGS_INV) + wc->wc_flags |= IB_WC_WITH_INVALIDATE; + if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) == + (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) + wc->opcode = IB_WC_RECV_RDMA_WITH_IMM; +} + +/* Returns TRUE if pkt has valid VLAN and if VLAN id is non-zero */ +static bool bnxt_re_is_nonzero_vlanid_pkt(struct bnxt_qplib_cqe *orig_cqe, + u16 *vid, u8 *sl) +{ + u32 metadata; + u16 tpid; + bool ret = false; + metadata = orig_cqe->raweth_qp1_metadata; + if (orig_cqe->raweth_qp1_flags2 & + CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_VLAN) { + tpid = ((metadata & + CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_MASK) >> + CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_SFT); + if (tpid == ETH_P_8021Q) { + *vid = metadata & + CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_VID_MASK; + *sl = (metadata & + CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_MASK) >> + CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_SFT; + ret = !!(*vid); + } + } + + return ret; +} + +static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *gsi_sqp, + struct ib_wc *wc, + struct bnxt_qplib_cqe *cqe) +{ + u32 tbl_idx; + struct bnxt_re_dev *rdev = gsi_sqp->rdev; + struct bnxt_re_qp *gsi_qp = NULL; + struct bnxt_qplib_cqe *orig_cqe = NULL; + struct bnxt_re_sqp_entries *sqp_entry = NULL; + int nw_type; + u16 vlan_id; + u8 sl; + + tbl_idx = cqe->wr_id; + + sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx]; + gsi_qp = sqp_entry->qp1_qp; + orig_cqe = &sqp_entry->cqe; + + wc->wr_id = sqp_entry->wrid; + /* TODO Check whether this needs to be altered.*/ + wc->byte_len = orig_cqe->length; + wc->qp = &gsi_qp->ib_qp; + + wc->ex.imm_data = orig_cqe->immdata; + 
wc->src_qp = orig_cqe->src_qp; +#ifdef HAVE_IB_WC_SMAC + memcpy(wc->smac, orig_cqe->smac, ETH_ALEN); +#endif + if (bnxt_re_is_nonzero_vlanid_pkt(orig_cqe, &vlan_id, &sl)) { + if (bnxt_re_check_if_vlan_valid(rdev, vlan_id)) { + wc->sl = sl; +#ifdef HAVE_IB_WC_VLAN_ID + wc->vlan_id = vlan_id; + wc->wc_flags |= IB_WC_WITH_VLAN; +#endif + } + } + wc->port_num = 1; + wc->vendor_err = orig_cqe->status; + + wc->opcode = IB_WC_RECV; + wc->status = __rawqp1_to_ib_wc_status(orig_cqe->status); + wc->wc_flags |= IB_WC_GRH; + + nw_type = bnxt_re_check_packet_type(orig_cqe->raweth_qp1_flags, + orig_cqe->raweth_qp1_flags2); + dev_dbg(rdev_to_dev(rdev), "%s nw_type = %d\n", __func__, nw_type); +#ifdef ENABLE_ROCEV2_QP1 + if (nw_type >= 0) { + wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type); + wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE; + } +#endif +} + +static void bnxt_re_process_res_ud_wc(struct bnxt_re_dev *rdev, + struct bnxt_re_qp *qp, struct ib_wc *wc, + struct bnxt_qplib_cqe *cqe) +{ +#ifdef ENABLE_ROCEV2_QP1 + u8 nw_type; +#endif + u16 vlan_id = 0; + + wc->opcode = IB_WC_RECV; + wc->status = __rc_to_ib_wc_status(cqe->status); + if (cqe->flags & CQ_RES_UD_FLAGS_IMM) + wc->wc_flags |= IB_WC_WITH_IMM; + if (cqe->flags & CQ_RES_RC_FLAGS_INV) + wc->wc_flags |= IB_WC_WITH_INVALIDATE; + /* report only on GSI QP for Thor */ + if (rdev->gsi_ctx.gsi_qp->qplib_qp.id == qp->qplib_qp.id && + rdev->gsi_ctx.gsi_qp_mode == BNXT_RE_GSI_MODE_UD) { + wc->wc_flags |= IB_WC_GRH; +#ifdef HAVE_IB_WC_SMAC + memcpy(wc->smac, cqe->smac, ETH_ALEN); + wc->wc_flags |= IB_WC_WITH_SMAC; +#endif + if (_is_cqe_v2_supported(rdev->dev_attr->dev_cap_flags)) { + if (cqe->flags & CQ_RES_UD_V2_FLAGS_META_FORMAT_MASK) { + if (cqe->cfa_meta & + BNXT_QPLIB_CQE_CFA_META1_VALID) + vlan_id = (cqe->cfa_meta & 0xFFF); + } + } else if (cqe->flags & CQ_RES_UD_FLAGS_META_FORMAT_VLAN) { + vlan_id = (cqe->cfa_meta & 0xFFF); + } +#ifdef HAVE_IB_WC_VLAN_ID + /* Mark only if vlan_id is non zero */ + if (vlan_id && bnxt_re_check_if_vlan_valid(rdev, vlan_id)) { + wc->vlan_id = vlan_id; + wc->wc_flags |= IB_WC_WITH_VLAN; + } +#endif +#ifdef ENABLE_ROCEV2_QP1 + nw_type = (cqe->flags >> 4) & 0x3; + wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type); + wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE; +#endif + } +} + +static int bnxt_re_legacy_send_phantom_wqe(struct bnxt_re_qp *qp) +{ + struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp; + unsigned long flags; + int rc = 0; + + spin_lock_irqsave(&qp->sq_lock, flags); + + rc = bnxt_re_legacy_bind_fence_mw(lib_qp); + if (!rc) { + lib_qp->sq.phantom_wqe_cnt++; + dev_dbg(&lib_qp->sq.hwq.pdev->dev, + "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n", + lib_qp->id, lib_qp->sq.hwq.prod, + HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq), + lib_qp->sq.phantom_wqe_cnt); + } + + spin_unlock_irqrestore(&qp->sq_lock, flags); + return rc; +} + +int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc) +{ + struct bnxt_re_cq *cq = to_bnxt_re(ib_cq, struct bnxt_re_cq, ib_cq); + struct bnxt_re_dev *rdev = cq->rdev; + struct bnxt_re_qp *qp; + struct bnxt_qplib_cqe *cqe; + int i, ncqe, budget, init_budget; + struct bnxt_qplib_q *sq; + struct bnxt_qplib_qp *lib_qp; + u32 tbl_idx; + struct bnxt_re_sqp_entries *sqp_entry = NULL; + unsigned long flags; + u8 gsi_mode; + + /* + * DB recovery CQ; only process the door bell pacing alert from + * the user lib + */ + if (cq->is_dbr_recov_cq) { + bnxt_re_pacing_alert(rdev); + return 0; + } + + /* User CQ; the only processing we do is to + * complete any pending CQ 
resize operation. + */ + if (cq->umem) { + if (cq->resize_umem) + bnxt_re_resize_cq_complete(cq); + return 0; + } + + spin_lock_irqsave(&cq->cq_lock, flags); + + budget = min_t(u32, num_entries, cq->max_cql); + init_budget = budget; + if (!cq->cql) { + dev_err(rdev_to_dev(rdev), "POLL CQ no CQL to use"); + goto exit; + } + cqe = &cq->cql[0]; + gsi_mode = rdev->gsi_ctx.gsi_qp_mode; + while (budget) { + lib_qp = NULL; + ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp); + if (lib_qp) { + sq = &lib_qp->sq; + if (sq->legacy_send_phantom == true) { + qp = container_of(lib_qp, struct bnxt_re_qp, qplib_qp); + if (bnxt_re_legacy_send_phantom_wqe(qp) == -ENOMEM) + dev_err(rdev_to_dev(rdev), + "Phantom failed! Scheduled to send again\n"); + else + sq->legacy_send_phantom = false; + } + } + if (ncqe < budget) + ncqe += bnxt_qplib_process_flush_list(&cq->qplib_cq, + cqe + ncqe, + budget - ncqe); + + if (!ncqe) + break; + + for (i = 0; i < ncqe; i++, cqe++) { + /* Transcribe each qplib_wqe back to ib_wc */ + memset(wc, 0, sizeof(*wc)); + + wc->wr_id = cqe->wr_id; + wc->byte_len = cqe->length; + qp = to_bnxt_re((struct bnxt_qplib_qp *)cqe->qp_handle, + struct bnxt_re_qp, qplib_qp); + if (!qp) { + dev_err(rdev_to_dev(rdev), + "POLL CQ bad QP handle"); + continue; + } + wc->qp = &qp->ib_qp; + wc->ex.imm_data = cqe->immdata; + wc->src_qp = cqe->src_qp; +#ifdef HAVE_IB_WC_SMAC + memcpy(wc->smac, cqe->smac, ETH_ALEN); +#endif + wc->port_num = 1; + wc->vendor_err = cqe->status; + + switch(cqe->opcode) { + case CQ_BASE_CQE_TYPE_REQ: + if (gsi_mode == BNXT_RE_GSI_MODE_ALL && + qp->qplib_qp.id == + rdev->gsi_ctx.gsi_sqp->qplib_qp.id) { + /* Handle this completion with + * the stored completion */ + dev_dbg(rdev_to_dev(rdev), + "Skipping this UD Send CQ\n"); + memset(wc, 0, sizeof(*wc)); + continue; + } + bnxt_re_process_req_wc(wc, cqe); + break; + case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1: + if (gsi_mode == BNXT_RE_GSI_MODE_ALL) { + if (!cqe->status) { + int rc = 0; + rc = bnxt_re_process_raw_qp_packet_receive(qp, cqe); + if (!rc) { + memset(wc, 0, + sizeof(*wc)); + continue; + } + /* TODO Respond with error to the stack */ + cqe->status = -1; + } + /* Errors need not be looped back. 
+ * But change the wr_id to the one + * stored in the table + */ + tbl_idx = cqe->wr_id; + sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx]; + wc->wr_id = sqp_entry->wrid; + } + + bnxt_re_process_res_rawqp1_wc(wc, cqe); + break; + case CQ_BASE_CQE_TYPE_RES_RC: + bnxt_re_process_res_rc_wc(wc, cqe); + break; + case CQ_BASE_CQE_TYPE_RES_UD: + if (gsi_mode == BNXT_RE_GSI_MODE_ALL && + qp->qplib_qp.id == + rdev->gsi_ctx.gsi_sqp->qplib_qp.id) { + /* Handle this completion with + * the stored completion + */ + dev_dbg(rdev_to_dev(rdev), + "Handling the UD receive CQ\n"); + if (cqe->status) { + /* TODO handle this completion as a failure in + * loopback procedure + */ + continue; + } else { + bnxt_re_process_res_shadow_qp_wc(qp, wc, cqe); + break; + } + } + bnxt_re_process_res_ud_wc(rdev, qp, wc, cqe); + break; + default: + dev_err(rdev_to_dev(cq->rdev), + "POLL CQ type 0x%x not handled, skip!", + cqe->opcode); + continue; + } + wc++; + budget--; + } + } +exit: + spin_unlock_irqrestore(&cq->cq_lock, flags); + return init_budget - budget; +} + +int bnxt_re_req_notify_cq(struct ib_cq *ib_cq, + enum ib_cq_notify_flags ib_cqn_flags) +{ + struct bnxt_re_cq *cq = to_bnxt_re(ib_cq, struct bnxt_re_cq, ib_cq); + int type = 0, rc = 0; + unsigned long flags; + + spin_lock_irqsave(&cq->cq_lock, flags); + /* Trigger on the very next completion */ + if (ib_cqn_flags & IB_CQ_NEXT_COMP) + type = DBC_DBC_TYPE_CQ_ARMALL; + /* Trigger on the next solicited completion */ + else if (ib_cqn_flags & IB_CQ_SOLICITED) + type = DBC_DBC_TYPE_CQ_ARMSE; + + /* Poll to see if there are missed events */ + if ((ib_cqn_flags & IB_CQ_REPORT_MISSED_EVENTS) && + !(bnxt_qplib_is_cq_empty(&cq->qplib_cq))) + rc = 1; + else + bnxt_qplib_req_notify_cq(&cq->qplib_cq, type); + + spin_unlock_irqrestore(&cq->cq_lock, flags); + + return rc; +} + +/* Memory Regions */ +struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags) +{ + struct bnxt_qplib_mrinfo mrinfo; + struct bnxt_re_dev *rdev; + struct bnxt_re_mr *mr; + struct bnxt_re_pd *pd; + u32 max_mr_count; + u64 pbl = 0; + int rc; + + memset(&mrinfo, 0, sizeof(mrinfo)); + pd = to_bnxt_re(ib_pd, struct bnxt_re_pd, ib_pd); + rdev = pd->rdev; + dev_dbg(rdev_to_dev(rdev), "Get DMA MR"); + + if (bnxt_re_get_total_mr_mw_count(rdev) >= rdev->dev_attr->max_mr) + return ERR_PTR(-ENOMEM); + + mr = kzalloc(sizeof(*mr), GFP_KERNEL); + if (!mr) + return ERR_PTR(-ENOMEM); + mr->rdev = rdev; + mr->qplib_mr.pd = &pd->qplib_pd; + mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags); + mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR; + + /* Allocate and register 0 as the address */ + rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr); + if (rc) { + dev_err(rdev_to_dev(rdev), "Allocate DMA MR failed!"); + goto fail; + } + mr->qplib_mr.total_size = -1; /* Infinite length */ + mrinfo.ptes = &pbl; + mrinfo.sg.npages = 0; + mrinfo.sg.pgsize = PAGE_SIZE; + mrinfo.sg.pgshft = PAGE_SHIFT; + mrinfo.mrw = &mr->qplib_mr; + mrinfo.is_dma = true; + rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mrinfo, false); + if (rc) { + dev_err(rdev_to_dev(rdev), "Register DMA MR failed!"); + goto fail_mr; + } + mr->ib_mr.lkey = mr->qplib_mr.lkey; + if (mr_access_flags & (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ | + IB_ACCESS_REMOTE_ATOMIC)) + mr->ib_mr.rkey = mr->ib_mr.lkey; + atomic_inc(&rdev->stats.rsors.mr_count); + max_mr_count = atomic_read(&rdev->stats.rsors.mr_count); + if (max_mr_count > atomic_read(&rdev->stats.rsors.max_mr_count)) +
atomic_set(&rdev->stats.rsors.max_mr_count, max_mr_count); + + return &mr->ib_mr; + +fail_mr: + bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr); +fail: + kfree(mr); + return ERR_PTR(rc); +} + +#ifdef HAVE_IB_REG_PHYS_MR +static u32 __get_phys_page_count(struct ib_phys_buf *phys_buf_array, + int num_phys_buf) +{ + int i, pages; + + /* Calculate the size of the PTL needed */ + for (i = 0, pages = 0; i < num_phys_buf; i++) + pages += DIV_ROUND_UP(phys_buf_array[i].size, PAGE_SIZE); + + return pages; +} + +struct ib_mr *bnxt_re_reg_phys_mr(struct ib_pd *ib_pd, + struct ib_phys_buf *phys_buf_array, + int num_phys_buf, int mr_access_flags, + u64 *iova_start) +{ + struct bnxt_qplib_mrinfo mrinfo; + int i, j, num_pgs, pages, rc; + u64 *pbl_tbl, *pbl_tbl_orig; + struct bnxt_re_dev *rdev; + struct bnxt_re_mr *mr; + struct bnxt_re_pd *pd; + u32 max_mr_count; + + pd = to_bnxt_re(ib_pd, struct bnxt_re_pd, ib_pd); + rdev = pd->rdev; + memset(&mrinfo, 0, sizeof(mrinfo)); + dev_dbg(rdev_to_dev(rdev), "Reg phys MR"); + + if (bnxt_re_get_total_mr_mw_count(rdev) >= rdev->dev_attr->max_mr) + return ERR_PTR(-ENOMEM); + + mr = kzalloc(sizeof(*mr), GFP_KERNEL); + if (!mr) + return ERR_PTR(-ENOMEM); + mr->rdev = rdev; + mr->qplib_mr.pd = &pd->qplib_pd; + mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR; + + rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr); + if (rc) { + dev_err(rdev_to_dev(rdev), "HW alloc MR failed!"); + goto fail; + } + mr->ib_mr.lkey = mr->qplib_mr.lkey; + if (mr_access_flags & (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ | + IB_ACCESS_REMOTE_ATOMIC)) + mr->ib_mr.rkey = mr->ib_mr.lkey; + + /* Must unravel the ib_phys_buf->addr/size to align with + what the hw expects */ + mr->qplib_mr.va = *iova_start; + num_pgs = __get_phys_page_count(phys_buf_array, num_phys_buf); + if (!num_pgs) { + dev_err(rdev_to_dev(rdev), "Phys buf array is invalid!"); + rc = -EINVAL; + goto fail_mr; + } + + pbl_tbl = kcalloc(num_pgs, sizeof(u64 *), GFP_KERNEL); + if (!pbl_tbl) { + dev_err(rdev_to_dev(rdev), "Allocate pbl_tbl failed!"); + rc = -EINVAL; + goto fail_mr; + } + pbl_tbl_orig = pbl_tbl; + for (i = 0; i < num_phys_buf; i++) { + pages = DIV_ROUND_UP(phys_buf_array[i].size, PAGE_SIZE); + mr->qplib_mr.total_size += phys_buf_array[i].size; + for (j = 0; j < pages; j++, pbl_tbl++) + *pbl_tbl = phys_buf_array[i].addr + j * PAGE_SIZE; + } + mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags); + mrinfo.ptes = pbl_tbl_orig; + mrinfo.sg.npages = num_pgs; + mrinfo.sg.pgshft = PAGE_SHIFT; + mrinfo.mrw = &mr->qplib_mr; + rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mrinfo, false); + kfree(pbl_tbl_orig); + + if (rc) { + dev_err(rdev_to_dev(rdev), "Reg phys MR failed!"); + goto fail_mr; + } + atomic_inc(&rdev->stats.rsors.mr_count); + max_mr_count = atomic_read(&rdev->stats.rsors.mr_count); + if (max_mr_count > atomic_read(&rdev->stats.rsors.max_mr_count)) + atomic_set(&rdev->stats.rsors.max_mr_count, max_mr_count); + return &mr->ib_mr; + +fail_mr: + bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr); +fail: + kfree(mr); + return ERR_PTR(rc); +} + +int bnxt_re_rereg_phys_mr(struct ib_mr *ib_mr, int mr_rereg_mask, + struct ib_pd *ib_pd, + struct ib_phys_buf *phys_buf_array, + int num_phys_buf, int mr_access_flags, + u64 *iova_start) +{ + u64 *pbl_tbl, *pbl_tbl_orig = NULL; + int i, j, num_pgs = 0, pages, rc; + struct bnxt_qplib_mrinfo mrinfo; + struct bnxt_re_dev *rdev; + struct bnxt_re_pd *pd; + struct bnxt_re_mr *mr; + + pd = to_bnxt_re(ib_pd, struct bnxt_re_pd, ib_pd); + mr = to_bnxt_re(ib_mr,
struct bnxt_re_mr, ib_mr); + rdev = mr->rdev; + memset(&mrinfo, 0, sizeof(mrinfo)); + dev_dbg(rdev_to_dev(rdev), "Reg phys MR"); + + /* TODO: Must decipher what to modify based on the mr_rereg_mask */ + if (mr_rereg_mask & IB_MR_REREG_TRANS) { + mr->qplib_mr.va = *iova_start; + num_pgs = __get_phys_page_count(phys_buf_array, num_phys_buf); + if (!num_pgs) { + dev_err(rdev_to_dev(rdev), + "Phys buf array is invalid!"); + rc = -EINVAL; + goto fail; + } + + pbl_tbl = kcalloc(num_pgs, sizeof(u64 *), GFP_KERNEL); + if (!pbl_tbl) { + dev_err(rdev_to_dev(rdev), "Allocate pbl_tbl failed!"); + rc = -EINVAL; + goto fail; + } + mr->qplib_mr.total_size = 0; + pbl_tbl_orig = pbl_tbl; + for (i = 0; i < num_phys_buf; i++) { + pages = DIV_ROUND_UP(phys_buf_array[i].size, PAGE_SIZE); + mr->qplib_mr.total_size += phys_buf_array[i].size; + for (j = 0; j < pages; j++, pbl_tbl++) + *pbl_tbl = phys_buf_array[i].addr + + j * PAGE_SIZE; + } + } + if (mr_rereg_mask & IB_MR_REREG_PD) + mr->qplib_mr.pd = &pd->qplib_pd; + + if (mr_rereg_mask & IB_MR_REREG_ACCESS) + mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags); + + mrinfo.ptes = pbl_tbl_orig; + mrinfo.sg.npages = num_pgs; + mrinfo.sg.pgshft = PAGE_SHIFT; + mrinfo.mrw = &mr->qplib_mr; + rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mrinfo, false); + if (rc) { + dev_err(rdev_to_dev(rdev), "Rereg phys MR failed!"); + goto free_pbl; + } + kfree(pbl_tbl_orig); + mr->ib_mr.rkey = mr->qplib_mr.rkey; + dev_dbg(rdev_to_dev(rdev), "Alloc Phy MR lkey=0x%x rkey=0x%x", + mr->ib_mr.lkey, mr->ib_mr.rkey); + return 0; + +free_pbl: + kfree(pbl_tbl_orig); +fail: + return rc; +} +#endif + +#ifdef HAVE_IB_QUERY_MR +int bnxt_re_query_mr(struct ib_mr *ib_mr, struct ib_mr_attr *mr_attr) +{ + struct bnxt_re_mr *mr = to_bnxt_re(ib_mr, struct bnxt_re_mr, ib_mr); + + /* TODO: Transcribe the qplib_mr's attributes back to ib_mr_attr */ + mr_attr->pd = ib_mr->pd; + mr_attr->device_virt_addr = mr->qplib_mr.va; + mr_attr->size = mr->qplib_mr.total_size; + mr_attr->mr_access_flags = __to_ib_access_flags(mr->qplib_mr.flags); + mr_attr->lkey = mr->qplib_mr.lkey; + mr_attr->rkey = mr->qplib_mr.rkey; + return 0; +} +#endif + +int bnxt_re_dereg_mr(struct ib_mr *ib_mr + +#ifdef HAVE_DEREG_MR_UDATA + , struct ib_udata *udata +#endif + ) +{ + struct bnxt_re_mr *mr = to_bnxt_re(ib_mr, struct bnxt_re_mr, ib_mr); + struct bnxt_re_dev *rdev = mr->rdev; + int rc = 0; + +#ifdef CONFIG_INFINIBAND_PEER_MEM + if (atomic_inc_return(&mr->invalidated) > 1) { + /* The peer is in the process of invalidating the MR. + * Wait for it to finish. 
+ */ + wait_for_completion(&mr->invalidation_comp); + } else { +#endif + rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr); + if (rc) + dev_err(rdev_to_dev(rdev), "Dereg MR failed (%d): rc - %#x\n", + mr->qplib_mr.lkey, rc); +#ifdef CONFIG_INFINIBAND_PEER_MEM + } +#endif + +#ifdef HAVE_IB_ALLOC_MR + if (mr->pages) { + bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res, + &mr->qplib_frpl); + kfree(mr->pages); + mr->npages = 0; + mr->pages = NULL; + } +#endif + if (!IS_ERR(mr->ib_umem) && mr->ib_umem) { +#ifdef HAVE_IB_UMEM_STOP_INVALIDATION + if (mr->is_invalcb_active) + ib_umem_stop_invalidation_notifier(mr->ib_umem); +#endif + mr->is_invalcb_active = false; + bnxt_re_peer_mem_release(mr->ib_umem); + } + kfree(mr); + atomic_dec(&rdev->stats.rsors.mr_count); + return 0; +} + +#ifdef HAVE_IB_CREATE_MR +/* Create/destroy a MR that may be used for signature handover operations */ +int bnxt_re_destroy_mr(struct ib_mr *ib_mr) +{ + struct bnxt_re_mr *mr = to_bnxt_re(ib_mr, struct bnxt_re_mr, ib_mr); + struct bnxt_re_dev *rdev = mr->rdev; + + dev_err(rdev_to_dev(rdev), "Destroy MR called!"); + return 0; +} + +struct ib_mr *bnxt_re_create_mr(struct ib_pd *ib_pd, + struct ib_mr_init_attr *mr_init_attr) +{ + struct bnxt_re_pd *pd = to_bnxt_re(ib_pd, struct bnxt_re_pd, ib_pd); + struct bnxt_re_dev *rdev = pd->rdev; + + dev_err(rdev_to_dev(rdev), "Create MR called!"); + return NULL; +} +#endif + +#ifdef HAVE_IB_FAST_REG_MR +/* Fast Register Memory Regions */ +struct ib_mr *bnxt_re_alloc_fast_reg_mr(struct ib_pd *ib_pd, + int max_page_list_len) +{ + struct bnxt_re_pd *pd = to_bnxt_re(ib_pd, struct bnxt_re_pd, ib_pd); + struct bnxt_re_dev *rdev = pd->rdev; + u32 max_mr_count; + struct bnxt_re_mr *mr; + int rc; + + /* Qualify */ + if (max_page_list_len > MAX_PBL_LVL_1_PGS) { + dev_err(rdev_to_dev(rdev), + "Allocate Fast reg MR exceeded MAX!"); + return ERR_PTR(-ENOMEM); + } + + if (bnxt_re_get_total_mr_mw_count(rdev) >= rdev->dev_attr->max_mr) + return ERR_PTR(-ENOMEM); + + mr = kzalloc(sizeof(*mr), GFP_KERNEL); + if (!mr) + return ERR_PTR(-ENOMEM); + mr->rdev = rdev; + mr->qplib_mr.pd = &pd->qplib_pd; + mr->qplib_mr.flags = BNXT_QPLIB_FR_PMR; + mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR; + + rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr); + if (rc) { + dev_err(rdev_to_dev(rdev), "Fast reg phys MR failed!"); + goto fail; + } + mr->ib_mr.lkey = mr->qplib_mr.lkey; + mr->ib_mr.rkey = mr->ib_mr.lkey; + + atomic_inc(&rdev->stats.rsors.mr_count); + max_mr_count = atomic_read(&rdev->stats.rsors.mr_count); + if (max_mr_count > atomic_read(&rdev->stats.rsors.max_mr_count)) + atomic_set(&rdev->stats.rsors.max_mr_count, max_mr_count); + return &mr->ib_mr; + +fail: + kfree(mr); + return ERR_PTR(rc); +} + +struct ib_fast_reg_page_list *bnxt_re_alloc_fast_reg_page_list( + struct ib_device *ibdev, + int page_list_len) +{ + struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); + struct bnxt_re_frpl *frpl; + int rc; + + frpl = kzalloc(sizeof(*frpl), GFP_KERNEL); + if (!frpl) + return ERR_PTR(-ENOMEM); + frpl->rdev = rdev; + frpl->page_list = kzalloc(sizeof(u64) * page_list_len, GFP_KERNEL); + if (!frpl->page_list) { + rc = -ENOMEM; + goto fail; + } + rc = bnxt_qplib_alloc_fast_reg_page_list(&rdev->qplib_res, + &frpl->qplib_frpl, + page_list_len); + if (rc) { + dev_err(rdev_to_dev(rdev), + "Allocate HW Fast reg page list failed!"); + goto fail_pl; + } + frpl->ib_frpl.page_list = frpl->page_list; + + return &frpl->ib_frpl; + +fail_pl: + kfree(frpl->page_list); +fail: + kfree(frpl); + return 
ERR_PTR(rc); +} + +void bnxt_re_free_fast_reg_page_list(struct ib_fast_reg_page_list *ib_frpl) +{ + struct bnxt_re_frpl *frpl = to_bnxt_re(ib_frpl, struct bnxt_re_frpl, + ib_frpl); + struct bnxt_re_dev *rdev = frpl->rdev; + + bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res, + &frpl->qplib_frpl); + kfree(frpl->page_list); + kfree(frpl); +} +#endif + +#ifdef HAVE_IB_MAP_MR_SG +static int bnxt_re_set_page(struct ib_mr *ib_mr, u64 addr) +{ + struct bnxt_re_mr *mr = to_bnxt_re(ib_mr, struct bnxt_re_mr, ib_mr); + + if (unlikely(mr->npages == mr->qplib_frpl.max_pg_ptrs)) + return -ENOMEM; + + mr->pages[mr->npages++] = addr; + dev_dbg(NULL, "%s: ibdev %p Set MR pages[%d] = 0x%llx", + ROCE_DRV_MODULE_NAME, ib_mr->device, mr->npages - 1, + mr->pages[mr->npages - 1]); + return 0; +} + +int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents +#ifdef HAVE_IB_MAP_MR_SG_PAGE_SIZE + , unsigned int *sg_offset +#else +#ifdef HAVE_IB_MAP_MR_SG_OFFSET + , unsigned int sg_offset +#endif +#endif + ) +{ + struct bnxt_re_mr *mr = to_bnxt_re(ib_mr, struct bnxt_re_mr, ib_mr); + + dev_dbg(NULL, "%s: ibdev %p Map MR sg nents = %d", ROCE_DRV_MODULE_NAME, + ib_mr->device, sg_nents); + mr->npages = 0; + return ib_sg_to_pages(ib_mr, sg, sg_nents, +#ifdef HAVE_IB_MAP_MR_SG_OFFSET + sg_offset, +#endif + bnxt_re_set_page); +} +#endif + +#ifdef HAVE_IB_ALLOC_MR +struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type, + u32 max_num_sg +#ifdef HAVE_ALLOC_MR_UDATA + , struct ib_udata *udata +#endif + ) +{ + struct bnxt_re_pd *pd = to_bnxt_re(ib_pd, struct bnxt_re_pd, ib_pd); + struct bnxt_re_dev *rdev = pd->rdev; + struct bnxt_re_mr *mr; + u32 max_mr_count; + int rc; + + dev_dbg(rdev_to_dev(rdev), "Alloc MR"); + if (type != IB_MR_TYPE_MEM_REG) { + dev_dbg(rdev_to_dev(rdev), "MR type 0x%x not supported", type); + return ERR_PTR(-EINVAL); + } + if (max_num_sg > MAX_PBL_LVL_1_PGS) { + dev_dbg(rdev_to_dev(rdev), "Max SG exceeded"); + return ERR_PTR(-EINVAL); + } + + if (bnxt_re_get_total_mr_mw_count(rdev) >= rdev->dev_attr->max_mr) + return ERR_PTR(-ENOMEM); + + mr = kzalloc(sizeof(*mr), GFP_KERNEL); + if (!mr) + return ERR_PTR(-ENOMEM); + mr->rdev = rdev; + mr->qplib_mr.pd = &pd->qplib_pd; + mr->qplib_mr.flags = BNXT_QPLIB_FR_PMR; + mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR; + + rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr); + if (rc) { + dev_err(rdev_to_dev(rdev), "Allocate MR failed!"); + goto fail; + } + mr->ib_mr.lkey = mr->qplib_mr.lkey; + mr->ib_mr.rkey = mr->ib_mr.lkey; + mr->pages = kzalloc(sizeof(u64) * max_num_sg, GFP_KERNEL); + if (!mr->pages) { + rc = -ENOMEM; + goto fail_mr; + } + rc = bnxt_qplib_alloc_fast_reg_page_list(&rdev->qplib_res, + &mr->qplib_frpl, max_num_sg); + if (rc) { + dev_err(rdev_to_dev(rdev), + "Allocate HW Fast reg page list failed!"); + goto free_page; + } + dev_dbg(rdev_to_dev(rdev), "Alloc MR pages = 0x%p", mr->pages); + + atomic_inc(&rdev->stats.rsors.mr_count); + max_mr_count = atomic_read(&rdev->stats.rsors.mr_count); + if (max_mr_count > atomic_read(&rdev->stats.rsors.max_mr_count)) + atomic_set(&rdev->stats.rsors.max_mr_count, max_mr_count); + return &mr->ib_mr; + +free_page: + kfree(mr->pages); +fail_mr: + bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr); +fail: + kfree(mr); + return ERR_PTR(rc); +} +#endif + +/* Memory Windows */ +#ifdef HAVE_IB_MW_TYPE +ALLOC_MW_RET bnxt_re_alloc_mw +#ifndef HAVE_ALLOC_MW_IN_IB_CORE + (struct ib_pd *ib_pd, enum ib_mw_type type +#else + (struct ib_mw *ib_mw +#endif /* 
HAVE_ALLOC_MW_RET_IB_MW*/ +#ifdef HAVE_ALLOW_MW_WITH_UDATA + , struct ib_udata *udata +#endif + ) +#else +ALLOC_MW_RET bnxt_re_alloc_mw(struct ib_pd *ib_pd) +#endif +{ +#ifndef HAVE_ALLOC_MW_IN_IB_CORE + struct bnxt_re_pd *pd = to_bnxt_re(ib_pd, struct bnxt_re_pd, ib_pd); +#else + struct bnxt_re_pd *pd = to_bnxt_re(ib_mw->pd, struct bnxt_re_pd, ib_pd); + enum ib_mw_type type = ib_mw->type; +#endif + struct bnxt_re_dev *rdev = pd->rdev; + struct bnxt_re_mw *mw = NULL; + u32 max_mw_count; + int rc; + + if (bnxt_re_get_total_mr_mw_count(rdev) >= rdev->dev_attr->max_mr) { + rc = -ENOMEM; + goto fail; + } + +#ifndef HAVE_ALLOC_MW_IN_IB_CORE + mw = kzalloc(sizeof(*mw), GFP_KERNEL); + if (!mw) { + rc = -ENOMEM; + goto exit; + } +#else + mw = to_bnxt_re(ib_mw, struct bnxt_re_mw, ib_mw); +#endif + mw->rdev = rdev; + mw->qplib_mw.pd = &pd->qplib_pd; + +#ifdef HAVE_IB_MW_TYPE + mw->qplib_mw.type = (type == IB_MW_TYPE_1 ? + CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 : + CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B); +#else + mw->qplib_mw.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1; +#endif + rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw); + if (rc) { + dev_err(rdev_to_dev(rdev), "Allocate MW failed!"); + goto fail; + } + mw->ib_mw.rkey = mw->qplib_mw.rkey; + atomic_inc(&rdev->stats.rsors.mw_count); + max_mw_count = atomic_read(&rdev->stats.rsors.mw_count); + if (max_mw_count > atomic_read(&rdev->stats.rsors.max_mw_count)) + atomic_set(&rdev->stats.rsors.max_mw_count, max_mw_count); + +#ifndef HAVE_ALLOC_MW_IN_IB_CORE + return &mw->ib_mw; +#else + return rc; +#endif + +fail: +#ifndef HAVE_ALLOC_MW_IN_IB_CORE + kfree(mw); +exit: + return ERR_PTR(rc); +#else + return rc; +#endif +} + +#ifdef HAVE_IB_BIND_MW +/* bind_mw is only for Type 1 MW binding */ +int bnxt_re_bind_mw(struct ib_qp *ib_qp, struct ib_mw *ib_mw, + struct ib_mw_bind *mw_bind) +{ + struct bnxt_re_qp *qp = to_bnxt_re(ib_qp, struct bnxt_re_qp, ib_qp); + struct bnxt_qplib_swqe wqe; + int rc = 0; + + memset(&wqe, 0, sizeof(wqe)); + wqe.type = BNXT_QPLIB_SWQE_TYPE_BIND_MW; + wqe.wr_id = mw_bind->wr_id; + if (mw_bind->send_flags & IB_SEND_SIGNALED) + wqe.flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; + wqe.bind.zero_based = false; + wqe.bind.parent_l_key = mw_bind->bind_info.mr->lkey; + wqe.bind.r_key = ib_inc_rkey(ib_mw->rkey); + wqe.bind.va = mw_bind->bind_info.addr; + wqe.bind.length = mw_bind->bind_info.length; + wqe.bind.access_cntl = __from_ib_access_flags( + mw_bind->bind_info.mw_access_flags); + wqe.bind.mw_type = ib_mw->type == IB_MW_TYPE_1 ? 
SQ_BIND_MW_TYPE_TYPE1 : + SQ_BIND_MW_TYPE_TYPE2; + if (!_is_chip_gen_p5_p7(qp->rdev->chip_ctx)) + bnxt_re_legacy_set_uc_fence(&wqe); + + rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe); + if (rc) { + dev_err(rdev_to_dev(qp->rdev), "Bind MW failed"); + goto exit; + } + ib_mw->rkey = wqe.bind.r_key; + bnxt_qplib_post_send_db(&qp->qplib_qp); +exit: + return rc; +} +#endif + +int bnxt_re_dealloc_mw(struct ib_mw *ib_mw) +{ + struct bnxt_re_mw *mw = to_bnxt_re(ib_mw, struct bnxt_re_mw, ib_mw); + struct bnxt_re_dev *rdev = mw->rdev; + int rc; + + rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw); + if (rc) { + dev_err(rdev_to_dev(rdev), "Free MW failed: %#x\n", rc); + return rc; + } + +#ifndef HAVE_ALLOC_MW_IN_IB_CORE + kfree(mw); +#endif + atomic_dec(&rdev->stats.rsors.mw_count); + return rc; +} + +#ifdef USE_IB_FMR +/* Fast Memory Regions */ +struct ib_fmr *bnxt_re_alloc_fmr(struct ib_pd *ib_pd, int mr_access_flags, + struct ib_fmr_attr *fmr_attr) +{ + struct bnxt_re_pd *pd = to_bnxt_re(ib_pd, struct bnxt_re_pd, ib_pd); + struct bnxt_re_dev *rdev = pd->rdev; + struct bnxt_re_fmr *fmr; + u32 max_mr_count; + int rc; + + if (fmr_attr->max_pages > MAX_PBL_LVL_2_PGS || + fmr_attr->max_maps > rdev->dev_attr->max_map_per_fmr) { + dev_err(rdev_to_dev(rdev), "Allocate FMR exceeded MAX!"); + return ERR_PTR(-ENOMEM); + } + fmr = kzalloc(sizeof(*fmr), GFP_KERNEL); + if (!fmr) + return ERR_PTR(-ENOMEM); + /* TODO: Ignore fmr_attr->page_shift */ + fmr->rdev = rdev; + fmr->qplib_fmr.pd = &pd->qplib_pd; + fmr->qplib_fmr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR; + + rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &fmr->qplib_fmr); + if (rc) { + dev_err(rdev_to_dev(rdev), "Allocate FMR failed!"); + goto fail; + } + fmr->qplib_fmr.flags = __from_ib_access_flags(mr_access_flags); + fmr->ib_fmr.lkey = fmr->qplib_fmr.lkey; + fmr->ib_fmr.rkey = fmr->ib_fmr.lkey; + + atomic_inc(&rdev->stats.rsors.mr_count); + max_mr_count = atomic_read(&rdev->stats.rsors.mr_count); + if (max_mr_count > atomic_read(&rdev->stats.rsors.max_mr_count)) + atomic_set(&rdev->stats.rsors.max_mr_count, max_mr_count); + return &fmr->ib_fmr; + +fail: + kfree(fmr); + return ERR_PTR(rc); +} + +int bnxt_re_map_phys_fmr(struct ib_fmr *ib_fmr, u64 *page_list, int list_len, + u64 iova) +{ + struct bnxt_re_fmr *fmr = to_bnxt_re(ib_fmr, struct bnxt_re_fmr, ib_fmr); + struct bnxt_re_dev *rdev = fmr->rdev; + struct bnxt_qplib_mrinfo mrinfo; + int rc; + + memset(&mrinfo, 0, sizeof(mrinfo)); + fmr->qplib_fmr.va = iova; + fmr->qplib_fmr.total_size = list_len * PAGE_SIZE; + mrinfo.ptes = page_list; + mrinfo.sg.npages = list_len; + mrinfo.mrw = &fmr->qplib_fmr; + mrinfo.sg.pgshft = PAGE_SHIFT; + + rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mrinfo, true); + if (rc) + dev_err(rdev_to_dev(rdev), "Map FMR failed for lkey = 0x%x!", + fmr->ib_fmr.lkey); + return rc; +} + +int bnxt_re_unmap_fmr(struct list_head *fmr_list) +{ + struct bnxt_re_dev *rdev; + struct bnxt_re_fmr *fmr; + struct ib_fmr *ib_fmr; + int rc; + + /* Validate each FMR inside the fmr_list */ + list_for_each_entry(ib_fmr, fmr_list, list) { + fmr = to_bnxt_re(ib_fmr, struct bnxt_re_fmr, ib_fmr); + rdev = fmr->rdev; + + if (rdev) { + rc = bnxt_qplib_dereg_mrw(&rdev->qplib_res, + &fmr->qplib_fmr, true); + if (rc) { + dev_dbg(rdev_to_dev(rdev), "Unmap MR failed!"); + goto fail; + } + } + } + return 0; +fail: + return rc; +} + +int bnxt_re_dealloc_fmr(struct ib_fmr *ib_fmr) +{ + struct bnxt_re_fmr *fmr = to_bnxt_re(ib_fmr, struct bnxt_re_fmr, + ib_fmr); + struct bnxt_re_dev *rdev = fmr->rdev; + int rc;
+ + rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &fmr->qplib_fmr); + if (rc) + dev_err(rdev_to_dev(rdev), "Free FMR failed!"); + + kfree(fmr); + atomic_dec(&rdev->stats.rsors.mr_count); + return rc; +} +#endif + +#ifdef CONFIG_INFINIBAND_PEER_MEM +#ifdef HAVE_IB_UMEM_GET_FLAGS +static void bnxt_re_invalidate_umem(void *invalidation_cookie, + struct ib_umem *umem, + unsigned long addr, size_t size) +#else +static void bnxt_re_invalidate_umem(struct ib_umem *umem, + void *invalidation_cookie) +#endif +{ + struct bnxt_re_mr *mr = (struct bnxt_re_mr *)invalidation_cookie; + + /* + * This function is called under client peer lock so + * its resources are race protected. + */ + if (atomic_inc_return(&mr->invalidated) > 1) { + bnxt_re_set_inflight_invalidation_ctx(umem); + return; + } + + bnxt_re_set_inval_ctx_peer_callback(umem); + (void) bnxt_qplib_free_mrw(&mr->rdev->qplib_res, &mr->qplib_mr); + + bnxt_re_peer_mem_release(mr->ib_umem); + mr->ib_umem = NULL; + complete(&mr->invalidation_comp); +} +#endif + +static int bnxt_re_page_size_ok(int page_shift) +{ + switch (page_shift) { + case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4K: + case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_8K: + case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_64K: + case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_2M: + case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_256K: + case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1M: + case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4M: + case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_256MB: + case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1G: + return 1; + default: + return 0; + } +} + +#if defined(HAVE_DMA_BLOCK_ITERATOR) && defined(HAVE_IB_UMEM_FIND_BEST_PGSZ) +static u32 bnxt_re_best_page_shift(struct ib_umem *umem, u64 va, u64 cmask) +{ + return __ffs(ib_umem_find_best_pgsz(umem, cmask, va)); +} +#endif + +static int bnxt_re_get_page_shift(struct ib_umem *umem, + u64 va, u64 st, u64 cmask) +{ + int pgshft; + +#ifdef HAVE_IB_UMEM_GET_FLAGS +#if !defined(HAVE_IB_UMEM_PAGE_SIZE) && !defined(HAVE_IB_UMEM_PAGE_SHIFT) + pgshft = ib_umem_get_peer_page_shift(umem); + if (pgshft > 0) + return pgshft; + /* Else host memory. 
Use OS native functions to get page shift */ +#endif +#endif + +#if defined(HAVE_DMA_BLOCK_ITERATOR) && defined(HAVE_IB_UMEM_FIND_BEST_PGSZ) + pgshft = bnxt_re_best_page_shift(umem, va, cmask); +#else +#ifdef HAVE_IB_UMEM_PAGE_SHIFT + pgshft = umem->page_shift; +#else + pgshft = ilog2(umem->page_size); +#endif +#endif /* HAVE_DMA_BLOCK_ITERATOR*/ + return pgshft; +} + +int bnxt_re_get_num_pages(struct ib_umem *umem, u64 start, u64 length, int page_shift) +{ + if (page_shift == PAGE_SHIFT) + return ib_umem_num_pages_compat(umem); + else + return (ALIGN(umem->address + umem->length, BIT(page_shift)) - + ALIGN_DOWN(umem->address, BIT(page_shift))) >> page_shift; +} + +/* uverbs */ +#ifdef HAVE_IB_UMEM_DMABUF +struct ib_mr *bnxt_re_reg_user_mr_dmabuf(struct ib_pd *ib_pd, u64 start, + u64 length, u64 virt_addr, int fd, + int mr_access_flags, struct ib_udata *udata) +{ + struct bnxt_re_pd *pd = to_bnxt_re(ib_pd, struct bnxt_re_pd, ib_pd); + struct bnxt_re_dev *rdev = pd->rdev; + struct ib_umem_dmabuf *umem_dmabuf; + struct bnxt_qplib_mrinfo mrinfo; + int umem_pgs, page_shift, rc; + struct bnxt_re_mr *mr; + struct ib_umem *umem; + u32 max_mr_count; + int npages; + + dev_dbg(rdev_to_dev(rdev), "Register user DMA-BUF MR"); + + if (bnxt_re_get_total_mr_mw_count(rdev) >= rdev->dev_attr->max_mr) + return ERR_PTR(-ENOMEM); + + memset(&mrinfo, 0, sizeof(mrinfo)); + if (length > BNXT_RE_MAX_MR_SIZE) { + dev_err(rdev_to_dev(rdev), "Requested MR Size: %llu > Max supported: %ld\n", + length, BNXT_RE_MAX_MR_SIZE); + return ERR_PTR(-EINVAL); + } + mr = kzalloc(sizeof(*mr), GFP_KERNEL); + if (!mr) + return ERR_PTR(-ENOMEM); + mr->rdev = rdev; + mr->qplib_mr.pd = &pd->qplib_pd; + mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags); + mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR; + + if (!_is_alloc_mr_unified(rdev->qplib_res.dattr)) { + rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr); + if (rc) { + dev_err(rdev_to_dev(rdev), "Alloc MR failed!"); + goto fail; + } + /* The fixed portion of the rkey is the same as the lkey */ + mr->ib_mr.rkey = mr->qplib_mr.rkey; + } + + umem_dmabuf = ib_umem_dmabuf_get_pinned(&rdev->ibdev, start, length, + fd, mr_access_flags); + if (IS_ERR(umem_dmabuf)) { + rc = PTR_ERR(umem_dmabuf); + dev_dbg(rdev_to_dev(rdev), "%s: failed to get umem dmabuf : %d\n", + __func__, rc); + goto free_mr; + } + umem = &umem_dmabuf->umem; + mr->ib_umem = umem; + mr->qplib_mr.va = virt_addr; + umem_pgs = ib_umem_num_pages_compat(umem); + if (!umem_pgs) { + dev_err(rdev_to_dev(rdev), "umem is invalid!"); + rc = -EINVAL; + goto free_umem; + } + mr->qplib_mr.total_size = length; + page_shift = bnxt_re_get_page_shift(umem, virt_addr, start, + rdev->dev_attr->page_size_cap); + /* + * FIXME: We have known issue if best page_size is less than PAGE_SIZE + */ + if (page_shift < PAGE_SHIFT) + page_shift = PAGE_SHIFT; + if (!bnxt_re_page_size_ok(page_shift)) { + dev_err(rdev_to_dev(rdev), "umem page size unsupported!"); + rc = -EFAULT; + goto free_umem; + } + npages = bnxt_re_get_num_pages(umem, start, length, page_shift); + + mrinfo.sg.npages = npages; + /* Map umem buf ptrs to the PBL */ +#ifndef HAVE_RDMA_UMEM_FOR_EACH_DMA_BLOCK + mrinfo.sg.sghead = get_ib_umem_sgl(umem, &mrinfo.sg.nmap); +#else + mrinfo.sg.umem = umem; +#endif + mrinfo.sg.pgshft = page_shift; + mrinfo.sg.pgsize = BIT(page_shift); + + mrinfo.mrw = &mr->qplib_mr; + + rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mrinfo, false); + if (rc) { + dev_err(rdev_to_dev(rdev), "Reg user MR failed!"); + goto free_umem; + } + + 
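+ /* HW registration of the dma-buf MR succeeded: publish lkey/rkey from the qplib MR and account the new MR in the driver statistics */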
mr->ib_mr.lkey = mr->qplib_mr.lkey; + mr->ib_mr.rkey = mr->qplib_mr.lkey; + atomic_inc(&rdev->stats.rsors.mr_count); + max_mr_count = atomic_read(&rdev->stats.rsors.mr_count); + if (max_mr_count > atomic_read(&rdev->stats.rsors.max_mr_count)) + atomic_set(&rdev->stats.rsors.max_mr_count, max_mr_count); + + return &mr->ib_mr; + +free_umem: + bnxt_re_peer_mem_release(mr->ib_umem); +free_mr: + if (!_is_alloc_mr_unified(rdev->qplib_res.dattr)) + bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr); +fail: + kfree(mr); + return ERR_PTR(rc); +} +#endif + +struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length, + u64 virt_addr, int mr_access_flags, + struct ib_udata *udata) +{ + struct bnxt_re_pd *pd = to_bnxt_re(ib_pd, struct bnxt_re_pd, ib_pd); + struct bnxt_re_dev *rdev = pd->rdev; + struct bnxt_qplib_mrinfo mrinfo; + int umem_pgs, page_shift, rc; + struct bnxt_re_mr *mr; + struct ib_umem *umem; + u32 max_mr_count; + int npages; + + dev_dbg(rdev_to_dev(rdev), "Reg user MR"); + + if (bnxt_re_get_total_mr_mw_count(rdev) >= rdev->dev_attr->max_mr) + return ERR_PTR(-ENOMEM); + + memset(&mrinfo, 0, sizeof(mrinfo)); + if (length > BNXT_RE_MAX_MR_SIZE) { + dev_err(rdev_to_dev(rdev), "Requested MR Size: %llu " + "> Max supported: %ld\n", length, BNXT_RE_MAX_MR_SIZE); + return ERR_PTR(-ENOMEM); + } + mr = kzalloc(sizeof(*mr), GFP_KERNEL); + if (!mr) + return ERR_PTR (-ENOMEM); + mr->rdev = rdev; + mr->qplib_mr.pd = &pd->qplib_pd; + mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags); + mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR; + + if (!_is_alloc_mr_unified(rdev->qplib_res.dattr)) { + rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr); + if (rc) { + dev_err(rdev_to_dev(rdev), "Alloc MR failed!"); + goto fail; + } + /* The fixed portion of the rkey is the same as the lkey */ + mr->ib_mr.rkey = mr->qplib_mr.rkey; + } + + umem = ib_umem_get_flags_compat(rdev, ib_pd->uobject->context, + udata, start, length, + mr_access_flags, 0); + if (IS_ERR(umem)) { + rc = PTR_ERR(umem); + dev_err(rdev_to_dev(rdev), "%s: ib_umem_get failed! 
rc = %d\n", + __func__, rc); + goto free_mr; + } + mr->ib_umem = umem; + + mr->qplib_mr.va = virt_addr; + umem_pgs = ib_umem_num_pages_compat(umem); + if (!umem_pgs) { + dev_err(rdev_to_dev(rdev), "umem is invalid!"); + rc = -EINVAL; + goto free_umem; + } + mr->qplib_mr.total_size = length; + page_shift = bnxt_re_get_page_shift(umem, virt_addr, start, + rdev->dev_attr->page_size_cap); + /* + * FIXME: We have known issue if best page_size is less than PAGE_SIZE + */ + if (page_shift < PAGE_SHIFT) + page_shift = PAGE_SHIFT; + if (!bnxt_re_page_size_ok(page_shift)) { + dev_err(rdev_to_dev(rdev), "umem page size unsupported!"); + rc = -EFAULT; + goto free_umem; + } + npages = bnxt_re_get_num_pages(umem, start, length, page_shift); + mrinfo.sg.npages = npages; + /* Map umem buf ptrs to the PBL */ +#ifndef HAVE_RDMA_UMEM_FOR_EACH_DMA_BLOCK + mrinfo.sg.sghead = get_ib_umem_sgl(umem, &mrinfo.sg.nmap); +#else + mrinfo.sg.umem = umem; +#endif + mrinfo.sg.pgshft = page_shift; + mrinfo.sg.pgsize = BIT(page_shift); + + mrinfo.mrw = &mr->qplib_mr; + + rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mrinfo, false); + if (rc) { + dev_err(rdev_to_dev(rdev), "Reg user MR failed!"); + goto free_umem; + } + + mr->ib_mr.lkey = mr->ib_mr.rkey = mr->qplib_mr.lkey; + atomic_inc(&rdev->stats.rsors.mr_count); + max_mr_count = atomic_read(&rdev->stats.rsors.mr_count); + if (max_mr_count > atomic_read(&rdev->stats.rsors.max_mr_count)) + atomic_set(&rdev->stats.rsors.max_mr_count, max_mr_count); + +#ifdef CONFIG_INFINIBAND_PEER_MEM + if (bnxt_re_get_peer_mem(umem)) { + atomic_set(&mr->invalidated, 0); + init_completion(&mr->invalidation_comp); +#ifdef HAVE_IB_UMEM_GET_FLAGS + rc = +#endif + ib_umem_activate_invalidation_notifier + (umem, bnxt_re_invalidate_umem, mr); +#ifdef HAVE_IB_UMEM_GET_FLAGS + if (rc) + goto free_umem; +#endif + mr->is_invalcb_active = true; + } +#endif + return &mr->ib_mr; + +free_umem: + bnxt_re_peer_mem_release(mr->ib_umem); +free_mr: + if (!_is_alloc_mr_unified(rdev->qplib_res.dattr)) + bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr); +fail: + kfree(mr); + return ERR_PTR(rc); +} + +REREG_USER_MR_RET +bnxt_re_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, u64 length, + u64 virt_addr, int mr_access_flags, + struct ib_pd *ib_pd, struct ib_udata *udata) +{ + struct bnxt_re_mr *mr = to_bnxt_re(ib_mr, struct bnxt_re_mr, ib_mr); + struct bnxt_re_pd *pd = to_bnxt_re(ib_pd, struct bnxt_re_pd, ib_pd); + int umem_pgs = 0, page_shift = PAGE_SHIFT, rc; + struct bnxt_re_dev *rdev = mr->rdev; + struct bnxt_qplib_mrinfo mrinfo; + struct ib_umem *umem; + u32 npages; + + /* TODO: Must decipher what to modify based on the flags */ + memset(&mrinfo, 0, sizeof(mrinfo)); + if (flags & IB_MR_REREG_TRANS) { + umem = ib_umem_get_flags_compat(rdev, ib_pd->uobject->context, + udata, start, length, + mr_access_flags, 0); + if (IS_ERR(umem)) { + rc = PTR_ERR(umem); + dev_err(rdev_to_dev(rdev), + "%s: ib_umem_get failed! 
ret = %d\n", + __func__, rc); + goto fail; + } + mr->ib_umem = umem; + + mr->qplib_mr.va = virt_addr; + umem_pgs = ib_umem_num_pages_compat(umem); + if (!umem_pgs) { + dev_err(rdev_to_dev(rdev), "umem is invalid!"); + rc = -EINVAL; + goto fail_free_umem; + } + mr->qplib_mr.total_size = length; + page_shift = bnxt_re_get_page_shift(umem, virt_addr, start, + rdev->dev_attr->page_size_cap); + /* + * FIXME: We have known issue if best page_size is less than PAGE_SIZE + */ + if (page_shift < PAGE_SHIFT) + page_shift = PAGE_SHIFT; + if (!bnxt_re_page_size_ok(page_shift)) { + dev_err(rdev_to_dev(rdev), + "umem page size unsupported!"); + rc = -EFAULT; + goto fail_free_umem; + } + npages = bnxt_re_get_num_pages(umem, start, length, page_shift); + mrinfo.sg.npages = npages; + /* Map umem buf ptrs to the PBL */ +#ifndef HAVE_RDMA_UMEM_FOR_EACH_DMA_BLOCK + mrinfo.sg.sghead = get_ib_umem_sgl(umem, &mrinfo.sg.nmap); +#else + mrinfo.sg.umem = umem; +#endif + mrinfo.sg.pgshft = page_shift; + mrinfo.sg.pgsize = BIT(page_shift); + } + + mrinfo.mrw = &mr->qplib_mr; + if (flags & IB_MR_REREG_PD) + mr->qplib_mr.pd = &pd->qplib_pd; + + if (flags & IB_MR_REREG_ACCESS) + mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags); + + rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mrinfo, false); + if (rc) { + dev_err(rdev_to_dev(rdev), "Rereg user MR failed!"); + goto fail_free_umem; + } + mr->ib_mr.rkey = mr->qplib_mr.rkey; +#ifndef HAVE_REREG_USER_MR_RET_PTR + return 0; +#else + return NULL; +#endif + +fail_free_umem: + bnxt_re_peer_mem_release(mr->ib_umem); +fail: +#ifndef HAVE_REREG_USER_MR_RET_PTR + return rc; +#else + return ERR_PTR(rc); +#endif +} + +int bnxt_re_check_abi_version(struct bnxt_re_dev *rdev) +{ + struct ib_device *ibdev = &rdev->ibdev; + u32 uverbs_abi_ver; + + uverbs_abi_ver = GET_UVERBS_ABI_VERSION(ibdev); + dev_dbg(rdev_to_dev(rdev), "ABI version requested %d", + uverbs_abi_ver); + if (uverbs_abi_ver != BNXT_RE_ABI_VERSION) { + dev_dbg(rdev_to_dev(rdev), " is different from the device %d ", + BNXT_RE_ABI_VERSION); + return -EPERM; + } + return 0; +} + +static inline void bnxt_re_init_small_recv_wqe_sup(struct bnxt_re_ucontext *uctx, + struct bnxt_re_uctx_req *req, + struct bnxt_re_uctx_resp *resp) +{ + if (req->comp_mask & BNXT_RE_COMP_MASK_REQ_UCNTX_SMALL_RECV_WQE_LIB_SUP && + _is_small_recv_wqe_supported(uctx->rdev->qplib_res.dattr->dev_cap_ext_flags)) { + uctx->small_recv_wqe_sup = true; + resp->comp_mask |= BNXT_RE_COMP_MASK_UCNTX_SMALL_RECV_WQE_DRV_SUP; + } +} + +ALLOC_UCONTEXT_RET bnxt_re_alloc_ucontext(ALLOC_UCONTEXT_IN *uctx_in, + struct ib_udata *udata) +{ +#ifndef HAVE_UCONTEXT_ALLOC_IN_IB_CORE + struct bnxt_re_ucontext *uctx = NULL; + struct ib_device *ibdev = uctx_in; +#else + struct ib_ucontext *ctx = uctx_in; + struct ib_device *ibdev = ctx->device; + struct bnxt_re_ucontext *uctx = + container_of(ctx, struct bnxt_re_ucontext, ib_uctx); +#endif + + struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); + struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr; + struct bnxt_re_uctx_resp resp = {}; + struct bnxt_re_uctx_req ureq = {}; + struct bnxt_qplib_chip_ctx *cctx; + u32 chip_met_rev_num; + bool genp5 = false; + int rc; + + cctx = rdev->chip_ctx; + rc = bnxt_re_check_abi_version(rdev); + if (rc) + goto err_out; + + if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags)) { + dev_dbg(rdev_to_dev(rdev), + "%s %d controller is not active flags 0x%lx\n", + __func__, __LINE__, rdev->flags); + rc = -EIO; + goto err_out; + } + +#ifndef HAVE_UCONTEXT_ALLOC_IN_IB_CORE + uctx = 
kzalloc(sizeof(*uctx), GFP_KERNEL); + if (!uctx) + return ERR_PTR(-ENOMEM); +#endif + uctx->rdev = rdev; + uctx->shpg = (void *)__get_free_page(GFP_KERNEL); + if (!uctx->shpg) { + dev_err(rdev_to_dev(rdev), "shared memory allocation failed!"); + rc = -ENOMEM; + goto err_free_uctx; + } + if (rdev->hdbr_enabled) { + uctx->hdbr_app = bnxt_re_hdbr_alloc_app(rdev, true); + if (!uctx->hdbr_app) { + dev_err(rdev_to_dev(rdev), "HDBR app allocation failed!"); + rc = -ENOMEM; + goto hdbr_fail; + } + } + spin_lock_init(&uctx->sh_lock); +#ifdef HAVE_DISASSOCIATE_UCNTX +#ifndef HAVE_RDMA_USER_MMAP_IO + INIT_LIST_HEAD(&uctx->vma_list_head); + mutex_init(&uctx->list_mutex); +#endif +#endif + if (BNXT_RE_ABI_VERSION >= 4) { + chip_met_rev_num = cctx->chip_num; + chip_met_rev_num |= ((u32)cctx->chip_rev & 0xFF) << + BNXT_RE_CHIP_ID0_CHIP_REV_SFT; + chip_met_rev_num |= ((u32)cctx->chip_metal & 0xFF) << + BNXT_RE_CHIP_ID0_CHIP_MET_SFT; + resp.chip_id0 = chip_met_rev_num; + resp.chip_id1 = 0; /* future extension of chip info */ + } + + if (BNXT_RE_ABI_VERSION != 4) { + /*Temp, Use idr_alloc instead*/ + resp.dev_id = rdev->en_dev->pdev->devfn; + resp.max_qp = rdev->qplib_res.hctx->qp_ctx.max; + } + + genp5 = _is_chip_gen_p5_p7(cctx); + if (BNXT_RE_ABI_VERSION > 5) { + resp.modes = genp5 ? cctx->modes.wqe_mode : 0; + if (rdev->dev_attr && BNXT_RE_HW_RETX(rdev->dev_attr->dev_cap_flags)) + resp.comp_mask = BNXT_RE_COMP_MASK_UCNTX_HW_RETX_ENABLED; + } + + resp.pg_size = PAGE_SIZE; + resp.cqe_sz = sizeof(struct cq_base); + resp.max_cqd = dev_attr->max_cq_wqes; + resp.db_push_mode = cctx->modes.db_push_mode; + if (BNXT_RE_PUSH_ENABLED(cctx->modes.db_push_mode)) + resp.comp_mask |= BNXT_RE_COMP_MASK_UCNTX_WC_DPI_ENABLED; + +#ifdef HAVE_IB_USER_VERBS_EX_CMD_MODIFY_QP + resp.comp_mask |= BNXT_RE_COMP_MASK_UCNTX_MQP_EX_SUPPORTED; +#endif + + if (rdev->dbr_pacing) + resp.comp_mask |= BNXT_RE_COMP_MASK_UCNTX_DBR_PACING_ENABLED; + + if (rdev->dbr_drop_recov && rdev->user_dbr_drop_recov) + resp.comp_mask |= BNXT_RE_COMP_MASK_UCNTX_DBR_RECOVERY_ENABLED; + + if (udata->inlen >= sizeof(ureq)) { + rc = ib_copy_from_udata(&ureq, udata, + min(udata->inlen, sizeof(ureq))); + if (rc) + goto err_free_page; + if (bnxt_re_init_pow2_flag(&ureq, &resp)) + dev_warn(rdev_to_dev(rdev), + "Enabled roundup logic. Library bug?"); + if (bnxt_re_init_rsvd_wqe_flag(&ureq, &resp, genp5)) + dev_warn(rdev_to_dev(rdev), + "Rsvd wqe in use! Try the updated library."); + bnxt_re_init_small_recv_wqe_sup(uctx, &ureq, &resp); + } else { + dev_warn(rdev_to_dev(rdev), + "Enabled roundup logic. Update the library!"); + resp.comp_mask &= ~BNXT_RE_COMP_MASK_UCNTX_POW2_DISABLED; + + dev_warn(rdev_to_dev(rdev), + "Rsvd wqe in use. 
Update the library!"); + resp.comp_mask &= ~BNXT_RE_COMP_MASK_UCNTX_RSVD_WQE_DISABLED; + } + + uctx->cmask = (uint64_t)resp.comp_mask; + rc = bnxt_re_copy_to_udata(rdev, &resp, + min(udata->outlen, sizeof(resp)), + udata); + if (rc) + goto err_free_page; + + BNXT_RE_DBR_LIST_ADD(rdev, uctx, BNXT_RE_RES_TYPE_UCTX); + + INIT_LIST_HEAD(&uctx->cq_list); + mutex_init(&uctx->cq_lock); + +#ifndef HAVE_UCONTEXT_ALLOC_IN_IB_CORE + return &uctx->ib_uctx; +#else + return 0; +#endif + +err_free_page: + if (rdev->hdbr_enabled) { + struct bnxt_re_hdbr_app *app = uctx->hdbr_app; + + mutex_lock(&rdev->hdbr_lock); + list_del(&app->lst); + mutex_unlock(&rdev->hdbr_lock); + bnxt_re_hdbr_dealloc_app(rdev, app); + uctx->hdbr_app = NULL; + } +hdbr_fail: + free_page((u64)uctx->shpg); + uctx->shpg = NULL; +err_free_uctx: +#ifndef HAVE_UCONTEXT_ALLOC_IN_IB_CORE + kfree(uctx); +#endif +err_out: +#ifndef HAVE_UCONTEXT_ALLOC_IN_IB_CORE + return ERR_PTR(rc); +#else + return rc; +#endif +} + +DEALLOC_UCONTEXT_RET bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx) +{ + struct bnxt_re_ucontext *uctx = to_bnxt_re(ib_uctx, + struct bnxt_re_ucontext, + ib_uctx); + struct bnxt_re_dev *rdev = uctx->rdev; + int rc = 0; + + BNXT_RE_DBR_LIST_DEL(rdev, uctx, BNXT_RE_RES_TYPE_UCTX); + + if (rdev->hdbr_enabled && uctx->hdbr_app) { + struct bnxt_re_hdbr_app *app = uctx->hdbr_app; + + mutex_lock(&rdev->hdbr_lock); + list_del(&app->lst); + mutex_unlock(&rdev->hdbr_lock); + bnxt_re_hdbr_dealloc_app(rdev, app); + uctx->hdbr_app = NULL; + } + + if (uctx->shpg) + free_page((u64)uctx->shpg); + + if (uctx->dpi.dbr) { + /* Free DPI only if this is the first PD allocated by the + * application and mark the context dpi as NULL + */ + if (_is_chip_gen_p5_p7(rdev->chip_ctx) && uctx->wcdpi.dbr) { + rc = bnxt_qplib_dealloc_dpi(&rdev->qplib_res, + &uctx->wcdpi); + if (rc) + dev_err(rdev_to_dev(rdev), + "dealloc push dpi failed"); + uctx->wcdpi.dbr = NULL; + if (BNXT_RE_PPP_ENABLED(rdev->chip_ctx)) + rdev->ppp_stats.ppp_enabled_ctxs--; + } + + rc = bnxt_qplib_dealloc_dpi(&rdev->qplib_res, + &uctx->dpi); + if (rc) + dev_err(rdev_to_dev(rdev), "Deallocate HW DPI failed!"); + /* Don't fail, continue */ + uctx->dpi.dbr = NULL; + } + +#ifndef HAVE_UCONTEXT_ALLOC_IN_IB_CORE + kfree(uctx); + return 0; +#endif +} + +#ifdef HAVE_DISASSOCIATE_UCNTX +#ifndef HAVE_RDMA_USER_MMAP_IO +static void bnxt_re_vma_open(struct vm_area_struct *vma) +{ + /* vma_open is called when a new VMA is created on top of our VMA. This + * is done through either mremap flow or split_vma (usually due to + * mlock, madvise, munmap, etc.) We do not support a clone of the VMA + * as this VMA is strongly hardware related. Therefore we set the + * vm_ops of the newly created/cloned VMA to NULL, to prevent it from + * calling us again and trying to do incorrect actions. We assume that + * the original VMA size is exactly a single page, and therefore all + * "splitting" operations will not happen to it.
+ */ + vma->vm_ops = NULL; +} + +static void bnxt_re_vma_close(struct vm_area_struct *vma) +{ + struct bnxt_re_vma_data *vma_data; + + vma_data = (struct bnxt_re_vma_data *)vma->vm_private_data; + + vma_data->vma = NULL; + mutex_lock(vma_data->list_mutex); + list_del(&vma_data->vma_list); + mutex_unlock(vma_data->list_mutex); + kfree(vma_data); +} + +static const struct vm_operations_struct bnxt_re_vma_op = { + .open = bnxt_re_vma_open, + .close = bnxt_re_vma_close +}; + +int bnxt_re_set_vma_data(struct bnxt_re_ucontext *uctx, + struct vm_area_struct *vma) +{ + struct bnxt_re_vma_data *vma_data; + + vma_data = kzalloc(sizeof(*vma_data), GFP_KERNEL); + if (!vma_data) + return -ENOMEM; + + vma_data->vma = vma; + vma_data->list_mutex = &uctx->list_mutex; + vma->vm_private_data = vma_data; + vma->vm_ops = &bnxt_re_vma_op; + + mutex_lock(&uctx->list_mutex); + list_add_tail(&vma_data->vma_list, &uctx->vma_list_head); + mutex_unlock(&uctx->list_mutex); + + return 0; +} +#endif +#endif + +struct bnxt_re_cq *is_bnxt_re_cq_page(struct bnxt_re_ucontext *uctx, + u64 pg_off) +{ + struct bnxt_re_cq *cq = NULL, *tmp_cq; + + if (!_is_chip_p7(uctx->rdev->chip_ctx)) + return NULL; + + mutex_lock(&uctx->cq_lock); + list_for_each_entry(tmp_cq, &uctx->cq_list, cq_list) { + if (((u64)tmp_cq->uctx_cq_page >> PAGE_SHIFT) == pg_off) { + cq = tmp_cq; + break; + } + } + mutex_unlock(&uctx->cq_lock); + return cq; +} + +/* Helper function to mmap the virtual memory from user app */ +int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma) +{ + struct bnxt_re_ucontext *uctx = to_bnxt_re(ib_uctx, + struct bnxt_re_ucontext, + ib_uctx); + struct bnxt_re_dev *rdev = uctx->rdev; + struct bnxt_re_cq *cq = NULL; + int rc = 0; + u64 pfn; + +#ifndef HAVE_RDMA_USER_MMAP_IO + if (vma->vm_end - vma->vm_start != PAGE_SIZE) + return -EINVAL; +#endif + + if (vma->vm_flags & VM_LOCKED) { + /* This is for mapping DB copy memory page */ + pfn = virt_to_phys((void *)(vma->vm_pgoff << PAGE_SHIFT)) >> PAGE_SHIFT; + rc = remap_pfn_compat(ib_uctx, vma, pfn); + if (rc) { + dev_err(rdev_to_dev(rdev), + "DB copy memory mapping failed!"); + rc = -EAGAIN; + } + /* Return directly from here */ + return rc; + } + + switch (vma->vm_pgoff) { + case BNXT_RE_MAP_SH_PAGE: + pfn = virt_to_phys(uctx->shpg) >> PAGE_SHIFT; + rc = remap_pfn_compat(ib_uctx, vma, pfn); + if (rc) { + dev_err(rdev_to_dev(rdev), + "Shared page mapping failed!"); + rc = -EAGAIN; + } + /* Return directly from here */ + return rc; + case BNXT_RE_MAP_WC: + vma->vm_page_prot = + pgprot_writecombine(vma->vm_page_prot); + pfn = (uctx->wcdpi.umdbr >> PAGE_SHIFT); + if (!pfn) + return -EFAULT; + break; + case BNXT_RE_DBR_PAGE: + /* Driver doesn't expect write access request */ + if (vma->vm_flags & VM_WRITE) + return -EFAULT; + pfn = virt_to_phys(rdev->dbr_page) >> PAGE_SHIFT; + if (!pfn) + return -EFAULT; + break; + case BNXT_RE_MAP_DB_RECOVERY_PAGE: + pfn = virt_to_phys(uctx->dbr_recov_cq_page) >> PAGE_SHIFT; + if (!pfn) + return -EFAULT; + break; + default: + cq = is_bnxt_re_cq_page(uctx, vma->vm_pgoff); + if (cq) { + pfn = virt_to_phys((void *)cq->uctx_cq_page) >> PAGE_SHIFT; + rc = remap_pfn_compat(ib_uctx, vma, pfn); + if (rc) { + dev_err(rdev_to_dev(rdev), + "CQ page mapping failed!"); + rc = -EAGAIN; + } + goto out; + } else { + vma->vm_page_prot = + pgprot_noncached(vma->vm_page_prot); + pfn = vma->vm_pgoff; + } + break; + } + + rc = remap_pfn_compat(ib_uctx, vma, pfn); + if (rc) { + dev_err(rdev_to_dev(rdev), "DPI mapping failed!"); + return -EAGAIN; + } + rc = 
__bnxt_re_set_vma_data(uctx, vma); +out: + return rc; +} + +#ifdef HAVE_PROCESS_MAD_U32_PORT +int bnxt_re_process_mad(struct ib_device *ibdev, int mad_flags, + u32 port_num, const struct ib_wc *in_wc, + const struct ib_grh *in_grh, + const struct ib_mad *in_mad, struct ib_mad *out_mad, + size_t *out_mad_size, u16 *out_mad_pkey_index) +#else +#ifndef HAVE_PROCESS_MAD_IB_MAD_HDR +int bnxt_re_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, + const struct ib_wc *in_wc, const struct ib_grh *in_grh, + const struct ib_mad *in_mad, struct ib_mad *out_mad, + size_t *out_mad_size, u16 *out_mad_pkey_index) +#else +int bnxt_re_process_mad(struct ib_device *ibdev, int process_mad_flags, + u8 port_num, const struct ib_wc *in_wc, + const struct ib_grh *in_grh, + const struct ib_mad_hdr *in_mad, size_t in_mad_size, + struct ib_mad_hdr *out_mad, size_t *out_mad_size, + u16 *out_mad_pkey_index) +#endif +#endif +{ + struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); + int ret = IB_MAD_RESULT_SUCCESS; + + /* TODO Currently consuming only management class 0xd. Need + * to review before pushing upstream + */ + rdev->dbg_stats->mad.mad_processed++; +#ifndef HAVE_PROCESS_MAD_IB_MAD_HDR + if (in_mad->mad_hdr.mgmt_class == 0xd) { +#else + if (in_mad->mgmt_class == 0xd) { +#endif + rdev->dbg_stats->mad.mad_consumed++; + ret |= IB_MAD_RESULT_CONSUMED; + } + return ret; +} + +#ifdef HAVE_DISASSOCIATE_UCNTX +#ifndef HAVE_RDMA_USER_MMAP_IO +#ifndef HAVE_NO_MM_MMAP_SEM +static struct mm_struct *bnxt_re_is_task_pending(struct ib_ucontext *ib_uctx, + struct task_struct **task) +{ + struct mm_struct *mm; + + *task = get_pid_task(ib_uctx->tgid, PIDTYPE_PID); + if (!*task) + return NULL; + + mm = get_task_mm(*task); + if (!mm) { + pr_info("no mm, disassociate ucontext is pending task termination\n"); + while (1) { + put_task_struct(*task); + usleep_range(1000, 2000); + *task = get_pid_task(ib_uctx->tgid, PIDTYPE_PID); + if (!*task || (*task)->state == TASK_DEAD) { + pr_info("disassociate ucontext done, task was terminated\n"); + /* in case task was dead need to release the + * task struct. + */ + if (*task) + put_task_struct(*task); + return NULL; + } + } + } + + return mm; +} +#endif /* HAVE_NO_MM_MMAP_SEM */ + +static void bnxt_re_traverse_vma_list(struct ib_ucontext *ib_uctx) +{ + struct bnxt_re_vma_data *vma_data, *n; + struct bnxt_re_ucontext *uctx; + struct vm_area_struct *vma; + + uctx = to_bnxt_re(ib_uctx, struct bnxt_re_ucontext, ib_uctx); + + mutex_lock(&uctx->list_mutex); + list_for_each_entry_safe(vma_data, n, &uctx->vma_list_head, vma_list) { + vma = vma_data->vma; + zap_vma_ptes(vma, vma->vm_start, PAGE_SIZE); + /* context going to be destroyed, should + * not access ops any more + */ + vma->vm_flags &= ~(VM_SHARED | VM_MAYSHARE); + vma->vm_ops = NULL; + list_del(&vma_data->vma_list); + kfree(vma_data); + } + mutex_unlock(&uctx->list_mutex); +} +#endif /* HAVE_RDMA_USER_MMAP_IO */ + +void bnxt_re_disassociate_ucntx(struct ib_ucontext *ib_uctx) +{ +#ifndef HAVE_RDMA_USER_MMAP_IO +/* + * For kernels that have rdma_user_mmap_io() the .disassociate_ucontext + * implementation in driver is a stub + */ +#ifndef HAVE_NO_MM_MMAP_SEM + struct task_struct *task = NULL; + struct mm_struct *mm = NULL; + + mm = bnxt_re_is_task_pending(ib_uctx, &task); + if (!mm) + return; + /* need to protect from a race on closing the vma as part of + * vma_close. 
+ */ + down_write(&mm->mmap_sem); +#endif /* HAVE_NO_MM_MMAP_SEM */ + + bnxt_re_traverse_vma_list(ib_uctx); + +#ifndef HAVE_NO_MM_MMAP_SEM + up_write(&mm->mmap_sem); + mmput(mm); + put_task_struct(task); +#endif /* HAVE_NO_MM_MMAP_SEM */ +#endif /* HAVE_RDMA_USER_MMAP_IO */ +} +#endif diff --git a/bnxt_re-1.10.3-229.0.139.0/ib_verbs.h b/bnxt_re-1.10.3-229.0.139.0/ib_verbs.h new file mode 100644 index 0000000..d92d520 --- /dev/null +++ b/bnxt_re-1.10.3-229.0.139.0/ib_verbs.h @@ -0,0 +1,697 @@ +/* + * Copyright (c) 2015-2023, Broadcom. All rights reserved. The term + * Broadcom refers to Broadcom Inc. and/or its subsidiaries. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * Author: Eddie Wai + * + * Description: IB Verbs interpreter (header) + */ + +#ifndef __BNXT_RE_IB_VERBS_H__ +#define __BNXT_RE_IB_VERBS_H__ + +#include +#include + +#include "bnxt_re-abi.h" +#include "compat.h" +#include "bnxt_re.h" + +#define BNXT_RE_ROCE_V2_UDP_SPORT 0x8CD1 +#define BNXT_RE_QP_RANDOM_QKEY 0x81818181 + +#ifdef HAVE_IB_ARG_CONST_CHANGE +#define CONST_STRUCT const struct +#else +#define CONST_STRUCT struct +#endif + +#ifdef HAVE_RDMA_AH_ATTR +typedef struct rdma_ah_attr RDMA_AH_ATTR; +#else +typedef struct ib_ah_attr RDMA_AH_ATTR; +#endif +#ifdef HAVE_RDMA_AH_INIT_ATTR +typedef struct rdma_ah_init_attr RDMA_AH_ATTR_IN; +#else +typedef RDMA_AH_ATTR RDMA_AH_ATTR_IN; +#endif + +struct bnxt_re_gid_ctx { + u32 idx; + u32 refcnt; +}; + +struct bnxt_re_legacy_fence_data { + u32 size; + void *va; + dma_addr_t dma_addr; + struct bnxt_re_mr *mr; + struct ib_mw *mw; + struct bnxt_qplib_swqe bind_wqe; + u32 bind_rkey; +}; + +struct bnxt_re_pd { + struct ib_pd ib_pd; + struct bnxt_re_dev *rdev; + struct bnxt_qplib_pd qplib_pd; + struct bnxt_re_legacy_fence_data fence; +}; + +struct bnxt_re_ah { + struct ib_ah ib_ah; + struct bnxt_re_dev *rdev; + struct bnxt_qplib_ah qplib_ah; +}; + +struct bnxt_re_srq { + struct ib_srq ib_srq; + struct list_head dbr_list; + struct bnxt_re_dev *rdev; + u32 srq_limit; + struct bnxt_qplib_srq qplib_srq; + struct ib_umem *umem; + spinlock_t lock; +}; + +union ip_addr { + u32 ipv4_addr; + u8 ipv6_addr[16]; +}; + +struct bnxt_re_qp_info_entry { + union ib_gid sgid; + union ib_gid dgid; + union ip_addr s_ip; + union ip_addr d_ip; + u16 s_port; +#define BNXT_RE_QP_DEST_PORT 4791 + u16 d_port; +}; + +struct bnxt_re_qp { + struct ib_qp ib_qp; + struct list_head list; + struct list_head dbr_list; + struct bnxt_re_dev *rdev; + spinlock_t sq_lock; + spinlock_t rq_lock; + struct bnxt_qplib_qp qplib_qp; + struct ib_umem *sumem; + struct ib_umem *rumem; + /* QP1 */ + u32 send_psn; + struct ib_ud_header qp1_hdr; + struct bnxt_re_cq *scq; + struct bnxt_re_cq *rcq; + struct dentry *qp_info_pdev_dentry; + struct bnxt_re_qp_info_entry qp_info_entry; + void *qp_data; +}; + +struct bnxt_re_cq { + struct ib_cq ib_cq; + struct list_head dbr_list; + struct list_head cq_list; + struct bnxt_re_dev *rdev; + struct bnxt_re_ucontext *uctx; + spinlock_t cq_lock; + u16 cq_count; + u16 cq_period; + struct bnxt_qplib_cq qplib_cq; + struct bnxt_qplib_cqe *cql; +#define MAX_CQL_PER_POLL 1024 + u32 max_cql; + struct ib_umem *umem; + struct ib_umem *resize_umem; + struct ib_ucontext *context; + int resize_cqe; + /* list of cq per uctx. 
Used only for Thor-2 */ + void *uctx_cq_page; + void *dbr_recov_cq_page; + bool is_dbr_recov_cq; +}; + +struct bnxt_re_mr { + struct bnxt_re_dev *rdev; + struct ib_mr ib_mr; + struct ib_umem *ib_umem; + struct bnxt_qplib_mrw qplib_mr; +#ifdef HAVE_IB_ALLOC_MR + u32 npages; + u64 *pages; + struct bnxt_qplib_frpl qplib_frpl; +#endif +#ifdef CONFIG_INFINIBAND_PEER_MEM + atomic_t invalidated; + struct completion invalidation_comp; +#endif + bool is_invalcb_active; +}; + +struct bnxt_re_frpl { + struct bnxt_re_dev *rdev; +#ifdef HAVE_IB_FAST_REG_MR + struct ib_fast_reg_page_list ib_frpl; +#endif + struct bnxt_qplib_frpl qplib_frpl; + u64 *page_list; +}; + +#ifdef HAVE_IB_FMR +struct bnxt_re_fmr { + struct bnxt_re_dev *rdev; + struct ib_fmr ib_fmr; + struct bnxt_qplib_mrw qplib_fmr; +}; +#endif + +struct bnxt_re_mw { + struct bnxt_re_dev *rdev; + struct ib_mw ib_mw; + struct bnxt_qplib_mrw qplib_mw; +}; + +#ifdef HAVE_DISASSOCIATE_UCNTX +#ifndef HAVE_RDMA_USER_MMAP_IO +struct bnxt_re_vma_data { + struct list_head vma_list; + struct mutex *list_mutex; + struct vm_area_struct *vma; +}; +#endif +#endif + +struct bnxt_re_ucontext { + struct ib_ucontext ib_uctx; + struct bnxt_re_dev *rdev; + struct list_head dbr_list; + struct list_head cq_list; + struct bnxt_qplib_dpi dpi; + struct bnxt_qplib_dpi wcdpi; + void *shpg; + spinlock_t sh_lock; +#ifdef HAVE_DISASSOCIATE_UCNTX +#ifndef HAVE_RDMA_USER_MMAP_IO + struct list_head vma_list_head; /*All vma's on this context */ + struct mutex list_mutex; +#endif +#endif + uint64_t cmask; + struct mutex cq_lock; /* Protect cq list */ + void *dbr_recov_cq_page; + struct bnxt_re_cq *dbr_recov_cq; + void *hdbr_app; + bool small_recv_wqe_sup; +}; + +struct bnxt_re_ah_info { + union ib_gid sgid; +#ifdef RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP + struct ib_gid_attr sgid_attr; +#endif + u16 vlan_tag; + u8 nw_type; +}; + +struct net_device *bnxt_re_get_netdev(struct ib_device *ibdev, + PORT_NUM port_num); + +#ifdef HAVE_IB_QUERY_DEVICE_UDATA +int bnxt_re_query_device(struct ib_device *ibdev, + struct ib_device_attr *ib_attr, + struct ib_udata *udata); +#else +int bnxt_re_query_device(struct ib_device *ibdev, + struct ib_device_attr *device_attr); +#endif +int bnxt_re_modify_device(struct ib_device *ibdev, + int device_modify_mask, + struct ib_device_modify *device_modify); +int bnxt_re_query_port(struct ib_device *ibdev, PORT_NUM port_num, + struct ib_port_attr *port_attr); +int bnxt_re_modify_port(struct ib_device *ibdev, PORT_NUM port_num, + int port_modify_mask, + struct ib_port_modify *port_modify); +#ifdef HAVE_IB_GET_PORT_IMMUTABLE +int bnxt_re_get_port_immutable(struct ib_device *ibdev, PORT_NUM port_num, + struct ib_port_immutable *immutable); +#endif +#ifdef HAVE_IB_GET_DEV_FW_STR +void bnxt_re_compat_qfwstr(void); +#endif +int bnxt_re_query_pkey(struct ib_device *ibdev, PORT_NUM port_num, + u16 index, u16 *pkey); +#ifdef HAVE_IB_ADD_DEL_GID +#ifdef HAVE_SIMPLIFIED_ADD_DEL_GID +int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context); +#ifdef HAVE_SIMPLER_ADD_GID +int bnxt_re_add_gid(const struct ib_gid_attr *attr, void **context); +#else +int bnxt_re_add_gid(const union ib_gid *gid, + const struct ib_gid_attr *attr, void **context); +#endif +#else +int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num, + unsigned int index, void **context); +int bnxt_re_add_gid(struct ib_device *ibdev, u8 port_num, + unsigned int index, const union ib_gid *gid, + const struct ib_gid_attr *attr, void **context); +#endif +#endif +#ifdef HAVE_IB_MODIFY_GID +int 
bnxt_re_modify_gid(struct ib_device *ibdev, u8 port_num, + unsigned int index, const union ib_gid *gid, + const struct ib_gid_attr *attr, void **context); +#endif +int bnxt_re_query_gid(struct ib_device *ibdev, PORT_NUM port_num, + int index, union ib_gid *gid); +enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev, + PORT_NUM port_num); + +ALLOC_PD_RET bnxt_re_alloc_pd(ALLOC_PD_IN *pd_in, +#ifdef HAVE_UCONTEXT_IN_ALLOC_PD + struct ib_ucontext *ucontext, +#endif + struct ib_udata *udata); + +#ifdef HAVE_DEALLOC_PD_UDATA +DEALLOC_PD_RET bnxt_re_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata); +#else +DEALLOC_PD_RET bnxt_re_dealloc_pd(struct ib_pd *pd); +#endif + +#ifdef HAVE_IB_CREATE_AH_UDATA +CREATE_AH_RET bnxt_re_create_ah(CREATE_AH_IN *ah_in, + RDMA_AH_ATTR_IN *attr, +#ifndef HAVE_RDMA_AH_INIT_ATTR +#ifdef HAVE_SLEEPABLE_AH + u32 flags, +#endif +#endif + struct ib_udata *udata); +#else +struct ib_ah *bnxt_re_create_ah(struct ib_pd *pd, + RDMA_AH_ATTR_IN *attr); +#endif + +int bnxt_re_query_ah(struct ib_ah *ah, RDMA_AH_ATTR *ah_attr); + +#ifdef HAVE_SLEEPABLE_AH +DESTROY_AH_RET bnxt_re_destroy_ah(struct ib_ah *ib_ah, u32 flags); +#else +int bnxt_re_destroy_ah(struct ib_ah *ah); +#endif + +CREATE_SRQ_RET bnxt_re_create_srq(CREATE_SRQ_IN *srq_in, + struct ib_srq_init_attr *srq_init_attr, + struct ib_udata *udata); +int bnxt_re_modify_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr, + enum ib_srq_attr_mask srq_attr_mask, + struct ib_udata *udata); +int bnxt_re_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr); +DESTROY_SRQ_RET bnxt_re_destroy_srq(struct ib_srq *srq +#ifdef HAVE_DESTROY_SRQ_UDATA + , struct ib_udata *udata +#endif + ); +int bnxt_re_post_srq_recv(struct ib_srq *ib_srq, CONST_STRUCT ib_recv_wr *wr, + CONST_STRUCT ib_recv_wr **bad_wr); +ALLOC_QP_RET bnxt_re_create_qp(ALLOC_QP_IN *qp_in, + struct ib_qp_init_attr *qp_init_attr, + struct ib_udata *udata); +int bnxt_re_modify_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr, + int qp_attr_mask, struct ib_udata *udata); +int bnxt_re_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr, + int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr); +int bnxt_re_destroy_qp(struct ib_qp *qp +#ifdef HAVE_DESTROY_QP_UDATA + , struct ib_udata *udata +#endif + ); +int bnxt_re_post_send(struct ib_qp *ib_qp, CONST_STRUCT ib_send_wr *wr, + CONST_STRUCT ib_send_wr **bad_wr); +int bnxt_re_post_recv(struct ib_qp *ib_qp, CONST_STRUCT ib_recv_wr *wr, + CONST_STRUCT ib_recv_wr **bad_wr); +#ifdef HAVE_IB_CQ_INIT_ATTR +ALLOC_CQ_RET bnxt_re_create_cq(ALLOC_CQ_IN *cq_in, + const struct ib_cq_init_attr *attr, +#ifdef HAVE_CREATE_CQ_UCONTEXT + struct ib_ucontext *context, +#endif + struct ib_udata *udata); +#else +ALLOC_CQ_RET bnxt_re_create_cq(ALLOC_CQ_IN *cq_in, int cqe, + int comp_vector, +#ifdef HAVE_CREATE_CQ_UCONTEXT + struct ib_ucontext *context, +#endif + struct ib_udata *udata); +#endif +int bnxt_re_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period); +DESTROY_CQ_RET bnxt_re_destroy_cq(struct ib_cq *cq +#ifdef HAVE_DESTROY_CQ_UDATA + , struct ib_udata *udata +#endif + ); +int bnxt_re_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata); +int bnxt_re_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc); +int bnxt_re_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags); +struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *pd, int mr_access_flags); +#ifdef HAVE_IB_MAP_MR_SG +int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents +#ifdef HAVE_IB_MAP_MR_SG_PAGE_SIZE + , 
unsigned int *sg_offset +#else +#ifdef HAVE_IB_MAP_MR_SG_OFFSET + , unsigned int sg_offset +#endif +#endif + ); +#endif +#ifdef HAVE_IB_ALLOC_MR +struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type mr_type, + u32 max_num_sg +#ifdef HAVE_ALLOC_MR_UDATA + , struct ib_udata *udata +#endif + ); +#endif +#ifdef HAVE_IB_REG_PHYS_MR +struct ib_mr *bnxt_re_reg_phys_mr(struct ib_pd *pd, + struct ib_phys_buf *phys_buf_array, + int num_phys_buf, int mr_access_flags, + u64 *iova_start); +int bnxt_re_rereg_phys_mr(struct ib_mr *ib_mr, int mr_rereg_mask, + struct ib_pd *ib_pd, + struct ib_phys_buf *phys_buf_array, + int num_phys_buf, int mr_access_flags, + u64 *iova_start); +#endif +#ifdef HAVE_IB_QUERY_MR +int bnxt_re_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr); +#endif +int bnxt_re_dereg_mr(struct ib_mr *mr +#ifdef HAVE_DEREG_MR_UDATA + , struct ib_udata *udata +#endif + ); +#ifdef HAVE_IB_SIGNATURE_HANDOVER +int bnxt_re_destroy_mr(struct ib_mr *mr); +struct ib_mr *bnxt_re_create_mr(struct ib_pd *pd, + struct ib_mr_init_attr *mr_init_attr); +#endif +#ifdef HAVE_IB_FAST_REG_MR +struct ib_mr *bnxt_re_alloc_fast_reg_mr(struct ib_pd *pd, + int max_page_list_len); +struct ib_fast_reg_page_list *bnxt_re_alloc_fast_reg_page_list( + struct ib_device *ibdev, + int page_list_len); +void bnxt_re_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list); +#endif +#ifdef HAVE_IB_MW_TYPE +ALLOC_MW_RET bnxt_re_alloc_mw +#ifndef HAVE_ALLOC_MW_IN_IB_CORE + (struct ib_pd *ib_pd, enum ib_mw_type type +#else + (struct ib_mw *mw +#endif /* HAVE_ALLOC_MW_RET_IB_MW*/ +#ifdef HAVE_ALLOW_MW_WITH_UDATA + , struct ib_udata *udata +#endif + ); +#else +ALLOC_MW_RET bnxt_re_alloc_mw(struct ib_pd *ib_pd); +#endif +#ifdef HAVE_IB_BIND_MW +int bnxt_re_bind_mw(struct ib_qp *qp, struct ib_mw *mw, + struct ib_mw_bind *mw_bind); +#endif +int bnxt_re_dealloc_mw(struct ib_mw *mw); +#ifdef HAVE_IB_FM +struct ib_fmr *bnxt_re_alloc_fmr(struct ib_pd *pd, int mr_access_flags, + struct ib_fmr_attr *fmr_attr); +int bnxt_re_map_phys_fmr(struct ib_fmr *fmr, u64 *page_list, int list_len, + u64 iova); +int bnxt_re_unmap_fmr(struct list_head *fmr_list); +int bnxt_re_dealloc_fmr(struct ib_fmr *fmr); +#endif +struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, + u64 virt_addr, int mr_access_flags, + struct ib_udata *udata); +#ifdef HAVE_IB_UMEM_DMABUF +struct ib_mr *bnxt_re_reg_user_mr_dmabuf(struct ib_pd *ib_pd, u64 start, + u64 length, u64 virt_addr, + int fd, int mr_access_flags, + struct ib_udata *udata); +#endif +REREG_USER_MR_RET +bnxt_re_rereg_user_mr(struct ib_mr *mr, int flags, u64 start, u64 length, + u64 virt_addr, int mr_access_flags, struct ib_pd *pd, + struct ib_udata *udata); + +ALLOC_UCONTEXT_RET bnxt_re_alloc_ucontext(ALLOC_UCONTEXT_IN *uctx_in, + struct ib_udata *udata); +DEALLOC_UCONTEXT_RET bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx); +int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma); + +#ifdef HAVE_PROCESS_MAD_U32_PORT +int bnxt_re_process_mad(struct ib_device *device, int process_mad_flags, + u32 port_num, const struct ib_wc *in_wc, + const struct ib_grh *in_grh, + const struct ib_mad *in_mad, struct ib_mad *out_mad, + size_t *out_mad_size, u16 *out_mad_pkey_index); +#else +#ifndef HAVE_PROCESS_MAD_IB_MAD_HDR +int bnxt_re_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, + const struct ib_wc *wc, const struct ib_grh *grh, + const struct ib_mad *in_mad, struct ib_mad *out_mad, + size_t *out_mad_size, u16 *out_mad_pkey_index); 
+#else +int bnxt_re_process_mad(struct ib_device *device, int process_mad_flags, + u8 port_num, const struct ib_wc *in_wc, + const struct ib_grh *in_grh, + const struct ib_mad_hdr *in_mad, size_t in_mad_size, + struct ib_mad_hdr *out_mad, size_t *out_mad_size, + u16 *out_mad_pkey_index); +#endif +#endif + +unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp); +void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp, unsigned long flags); + +#ifdef HAVE_DISASSOCIATE_UCNTX +void bnxt_re_disassociate_ucntx(struct ib_ucontext *ibcontext); +#ifndef HAVE_RDMA_USER_MMAP_IO +int bnxt_re_set_vma_data(struct bnxt_re_ucontext *uctx, + struct vm_area_struct *vma); +#endif +#endif + +static inline enum ib_qp_type __from_hw_to_ib_qp_type(u8 type) +{ + switch (type) { + case CMDQ_CREATE_QP1_TYPE_GSI: + case CMDQ_CREATE_QP_TYPE_GSI: + return IB_QPT_GSI; + case CMDQ_CREATE_QP_TYPE_RC: + return IB_QPT_RC; + case CMDQ_CREATE_QP_TYPE_UD: + return IB_QPT_UD; + case CMDQ_CREATE_QP_TYPE_RAW_ETHERTYPE: + return IB_QPT_RAW_ETHERTYPE; + default: + return IB_QPT_MAX; + } +} + +static inline u8 __from_ib_qp_state(enum ib_qp_state state) +{ + switch (state) { + case IB_QPS_RESET: + return CMDQ_MODIFY_QP_NEW_STATE_RESET; + case IB_QPS_INIT: + return CMDQ_MODIFY_QP_NEW_STATE_INIT; + case IB_QPS_RTR: + return CMDQ_MODIFY_QP_NEW_STATE_RTR; + case IB_QPS_RTS: + return CMDQ_MODIFY_QP_NEW_STATE_RTS; + case IB_QPS_SQD: + return CMDQ_MODIFY_QP_NEW_STATE_SQD; + case IB_QPS_SQE: + return CMDQ_MODIFY_QP_NEW_STATE_SQE; + case IB_QPS_ERR: + default: + return CMDQ_MODIFY_QP_NEW_STATE_ERR; + } +} + +static inline u32 __from_ib_mtu(enum ib_mtu mtu) +{ + switch (mtu) { + case IB_MTU_256: + return CMDQ_MODIFY_QP_PATH_MTU_MTU_256; + case IB_MTU_512: + return CMDQ_MODIFY_QP_PATH_MTU_MTU_512; + case IB_MTU_1024: + return CMDQ_MODIFY_QP_PATH_MTU_MTU_1024; + case IB_MTU_2048: + return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048; + case IB_MTU_4096: + return CMDQ_MODIFY_QP_PATH_MTU_MTU_4096; +/* case IB_MTU_8192: + * return CMDQ_MODIFY_QP_PATH_MTU_MTU_8192; + */ + default: + return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048; + } +} + +static inline enum ib_mtu __to_ib_mtu(u32 mtu) +{ + switch (mtu & CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) { + case CMDQ_MODIFY_QP_PATH_MTU_MTU_256: + return IB_MTU_256; + case CMDQ_MODIFY_QP_PATH_MTU_MTU_512: + return IB_MTU_512; + case CMDQ_MODIFY_QP_PATH_MTU_MTU_1024: + return IB_MTU_1024; + case CMDQ_MODIFY_QP_PATH_MTU_MTU_2048: + return IB_MTU_2048; + case CMDQ_MODIFY_QP_PATH_MTU_MTU_4096: + return IB_MTU_4096; + case CMDQ_MODIFY_QP_PATH_MTU_MTU_8192: + return IB_MTU_8192; + default: + return IB_MTU_2048; + } +} + +static inline enum ib_qp_state __to_ib_qp_state(u8 state) +{ + switch (state) { + case CMDQ_MODIFY_QP_NEW_STATE_RESET: + return IB_QPS_RESET; + case CMDQ_MODIFY_QP_NEW_STATE_INIT: + return IB_QPS_INIT; + case CMDQ_MODIFY_QP_NEW_STATE_RTR: + return IB_QPS_RTR; + case CMDQ_MODIFY_QP_NEW_STATE_RTS: + return IB_QPS_RTS; + case CMDQ_MODIFY_QP_NEW_STATE_SQD: + return IB_QPS_SQD; + case CMDQ_MODIFY_QP_NEW_STATE_SQE: + return IB_QPS_SQE; + case CMDQ_MODIFY_QP_NEW_STATE_ERR: + default: + return IB_QPS_ERR; + } +} + +static inline int bnxt_re_init_pow2_flag(struct bnxt_re_uctx_req *req, + struct bnxt_re_uctx_resp *resp) +{ + resp->comp_mask |= BNXT_RE_COMP_MASK_UCNTX_POW2_DISABLED; + if (!(req->comp_mask & BNXT_RE_COMP_MASK_REQ_UCNTX_POW2_SUPPORT)) { + resp->comp_mask &= ~BNXT_RE_COMP_MASK_UCNTX_POW2_DISABLED; + return -EINVAL; + } + return 0; +} + +static inline u32 bnxt_re_init_depth(u32 ent, struct bnxt_re_ucontext *uctx) +{ + return 
uctx ? (uctx->cmask & BNXT_RE_COMP_MASK_UCNTX_POW2_DISABLED) ? + ent : roundup_pow_of_two(ent) : ent; +} + +static inline int bnxt_re_init_rsvd_wqe_flag(struct bnxt_re_uctx_req *req, + struct bnxt_re_uctx_resp *resp, + bool genp5) +{ + resp->comp_mask |= BNXT_RE_COMP_MASK_UCNTX_RSVD_WQE_DISABLED; + if (!(req->comp_mask & BNXT_RE_COMP_MASK_REQ_UCNTX_RSVD_WQE)) { + resp->comp_mask &= ~BNXT_RE_COMP_MASK_UCNTX_RSVD_WQE_DISABLED; + return -EINVAL; + } else if (!genp5) { + resp->comp_mask &= ~BNXT_RE_COMP_MASK_UCNTX_RSVD_WQE_DISABLED; + } + return 0; +} + +static inline u32 bnxt_re_get_diff(struct bnxt_re_ucontext *uctx, + struct bnxt_qplib_chip_ctx *cctx) +{ + if (!uctx) { + /* return res-wqe only for gen p4 for user resource */ + return _is_chip_gen_p5_p7(cctx) ? 0 : BNXT_QPLIB_RESERVED_QP_WRS; + } else if (uctx->cmask & BNXT_RE_COMP_MASK_UCNTX_RSVD_WQE_DISABLED) { + return 0; + } + /* old lib */ + return BNXT_QPLIB_RESERVED_QP_WRS; +} + +static inline void bnxt_re_init_qpmtu(struct bnxt_re_qp *qp, int mtu, + int mask, struct ib_qp_attr *qp_attr, + bool *is_qpmtu_high) +{ + int qpmtu, qpmtu_int; + int ifmtu, ifmtu_int; + + ifmtu = iboe_get_mtu(mtu); + ifmtu_int = ib_mtu_enum_to_int(ifmtu); + qpmtu = ifmtu; + qpmtu_int = ifmtu_int; + if (mask & IB_QP_PATH_MTU) { + qpmtu = qp_attr->path_mtu; + qpmtu_int = ib_mtu_enum_to_int(qpmtu); + if (qpmtu_int > ifmtu_int) { + /* Trim the QP path mtu to interface mtu and update + * the new mtu to user qp for retransmission psn + * calculations. + */ + qpmtu = ifmtu; + qpmtu_int = ifmtu_int; + *is_qpmtu_high = true; + } + } + qp->qplib_qp.path_mtu = __from_ib_mtu(qpmtu); + qp->qplib_qp.mtu = qpmtu_int; + qp->qplib_qp.modify_flags |= + CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU; +} + +void bnxt_re_update_shadow_ah(struct bnxt_re_dev *rdev); +void bnxt_re_handle_cqn(struct bnxt_qplib_cq *cq); +#endif diff --git a/bnxt_re-1.10.3-229.0.139.0/main.c b/bnxt_re-1.10.3-229.0.139.0/main.c new file mode 100644 index 0000000..cc6e35c --- /dev/null +++ b/bnxt_re-1.10.3-229.0.139.0/main.c @@ -0,0 +1,5911 @@ +/* + * Copyright (c) 2015-2023, Broadcom. All rights reserved. The term + * Broadcom refers to Broadcom Inc. and/or its subsidiaries. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Author: Eddie Wai + * + * Description: Main component of the bnxt_re driver + */ + +#include +#include + +#include "bnxt_re.h" +#include "configfs.h" +#ifdef ENABLE_DEBUGFS +#include "debugfs.h" +#endif + +#include "ib_verbs.h" +#include "bnxt_re-abi.h" +#include "dcb.h" +/* bnxt_en.h includes */ +#include "bnxt.h" +#include "hdbr.h" +#include "hw_counters.h" + +static char version[] = + "Broadcom NetXtreme-C/E RoCE Driver " ROCE_DRV_MODULE_NAME \ + " v" ROCE_DRV_MODULE_VERSION " (" ROCE_DRV_MODULE_RELDATE ")\n"; + +#define BNXT_RE_DESC "Broadcom NetXtreme RoCE" + +MODULE_AUTHOR("Eddie Wai "); +MODULE_DESCRIPTION(BNXT_RE_DESC " Driver"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_VERSION(ROCE_DRV_MODULE_VERSION); + +#if defined(HAVE_IB_UMEM_DMABUF) && !defined(HAVE_IB_UMEM_DMABUF_PINNED) +MODULE_IMPORT_NS(DMA_BUF); +#endif + +DEFINE_MUTEX(bnxt_re_mutex); /* mutex lock for driver */ + +unsigned int restrict_stats = 0; +module_param(restrict_stats, uint, 0); +MODULE_PARM_DESC(restrict_stats, "Restrict stats query frequency to ethtool coalesce value. Disabled by default"); + +unsigned int min_tx_depth = 1; +module_param(min_tx_depth, uint, 0); +MODULE_PARM_DESC(min_tx_depth, "Minimum TX depth - Default is 1"); + +unsigned int cmdq_shadow_qd = RCFW_CMD_NON_BLOCKING_SHADOW_QD; +module_param_named(cmdq_shadow_qd, cmdq_shadow_qd, uint, 0644); +MODULE_PARM_DESC(cmdq_shadow_qd, "Perf Stat Debug: Shadow QD Range (1-64) - Default is 64"); + +/* globals */ +struct list_head bnxt_re_dev_list = LIST_HEAD_INIT(bnxt_re_dev_list); + +/* Global variable to distinguish driver unload and PCI removal */ +static u32 gmod_exit; + +/* Global variable to handle bond creation after L2 bond is created */ +int resched_bond_create_cnt; + +static void bnxt_re_task(struct work_struct *work_task); + +static struct workqueue_struct *bnxt_re_wq; + +static int bnxt_re_update_fw_lag_info(struct bnxt_re_bond_info *binfo, + struct bnxt_re_dev *rdev, + bool bond_mode); +static int bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev); + +static void bnxt_re_clear_dcbx_cc_param(struct bnxt_re_dev *rdev); +static int bnxt_re_hwrm_dbr_pacing_qcfg(struct bnxt_re_dev *rdev); +static int bnxt_re_ib_init(struct bnxt_re_dev *rdev); +static void bnxt_re_ib_init_2(struct bnxt_re_dev *rdev); +static void bnxt_re_dispatch_event(struct ib_device *ibdev, struct ib_qp *qp, + u8 port_num, enum ib_event_type event); + +static void bnxt_re_update_fifo_occup_slabs(struct bnxt_re_dev *rdev, + u32 fifo_occup) +{ + if (fifo_occup > rdev->dbg_stats->dbq.fifo_occup_water_mark) + rdev->dbg_stats->dbq.fifo_occup_water_mark = fifo_occup; + + if (fifo_occup > 8 * rdev->pacing_algo_th) + rdev->dbg_stats->dbq.fifo_occup_slab_4++; + else if (fifo_occup > 4 * rdev->pacing_algo_th) + rdev->dbg_stats->dbq.fifo_occup_slab_3++; + else if (fifo_occup > 2 * rdev->pacing_algo_th) + rdev->dbg_stats->dbq.fifo_occup_slab_2++; + else if (fifo_occup > rdev->pacing_algo_th) + rdev->dbg_stats->dbq.fifo_occup_slab_1++; +} + +static void 
bnxt_re_update_do_pacing_slabs(struct bnxt_re_dev *rdev) +{ + struct bnxt_qplib_db_pacing_data *pacing_data = rdev->qplib_res.pacing_data; + + if (pacing_data->do_pacing > rdev->dbg_stats->dbq.do_pacing_water_mark) + rdev->dbg_stats->dbq.do_pacing_water_mark = pacing_data->do_pacing; + + if (pacing_data->do_pacing > 16 * rdev->dbr_def_do_pacing) + rdev->dbg_stats->dbq.do_pacing_slab_5++; + else if (pacing_data->do_pacing > 8 * rdev->dbr_def_do_pacing) + rdev->dbg_stats->dbq.do_pacing_slab_4++; + else if (pacing_data->do_pacing > 4 * rdev->dbr_def_do_pacing) + rdev->dbg_stats->dbq.do_pacing_slab_3++; + else if (pacing_data->do_pacing > 2 * rdev->dbr_def_do_pacing) + rdev->dbg_stats->dbq.do_pacing_slab_2++; + else if (pacing_data->do_pacing > rdev->dbr_def_do_pacing) + rdev->dbg_stats->dbq.do_pacing_slab_1++; +} + +static bool bnxt_re_is_qp1_qp(struct bnxt_re_qp *qp) +{ + return qp->ib_qp.qp_type == IB_QPT_GSI; +} + +static struct bnxt_re_qp *bnxt_re_get_qp1_qp(struct bnxt_re_dev *rdev) +{ + struct bnxt_re_qp *qp; + + mutex_lock(&rdev->qp_lock); + list_for_each_entry(qp, &rdev->qp_list, list) { + if (bnxt_re_is_qp1_qp(qp)) { + mutex_unlock(&rdev->qp_lock); + return qp; + } + } + mutex_unlock(&rdev->qp_lock); + return NULL; +} + +/* SR-IOV helper functions */ +static void bnxt_re_get_sriov_func_type(struct bnxt_re_dev *rdev) +{ + struct bnxt_en_dev *en_dev = rdev->en_dev; + + if (rdev->binfo) + return; + + rdev->is_virtfn = !!BNXT_EN_VF(en_dev); +} + +/* Set the maximum number of each resource that the driver actually wants + * to allocate. This may be up to the maximum number the firmware has + * reserved for the function. The driver may choose to allocate fewer + * resources than the firmware maximum. + */ +static void bnxt_re_limit_pf_res(struct bnxt_re_dev *rdev) +{ + struct bnxt_qplib_max_res dev_res = {}; + struct bnxt_qplib_chip_ctx *cctx; + struct bnxt_qplib_dev_attr *attr; + struct bnxt_qplib_ctx *hctx; + int i; + + attr = rdev->dev_attr; + hctx = rdev->qplib_res.hctx; + cctx = rdev->chip_ctx; + + bnxt_qplib_max_res_supported(cctx, &rdev->qplib_res, &dev_res, false); + if (!_is_chip_gen_p5_p7(cctx)) { + hctx->qp_ctx.max = min_t(u32, dev_res.max_qp, attr->max_qp); + hctx->mrw_ctx.max = min_t(u32, dev_res.max_mr, attr->max_mr); + /* To accommodate 16k MRs and 16k AHs, + * driver has to allocate 32k backing store memory + */ + hctx->mrw_ctx.max *= 2; + hctx->srq_ctx.max = min_t(u32, dev_res.max_srq, attr->max_srq); + hctx->cq_ctx.max = min_t(u32, dev_res.max_cq, attr->max_cq); + for (i = 0; i < MAX_TQM_ALLOC_REQ; i++) + hctx->tqm_ctx.qcount[i] = attr->tqm_alloc_reqs[i]; + } else { + hctx->qp_ctx.max = attr->max_qp ? attr->max_qp : dev_res.max_qp; + hctx->mrw_ctx.max = attr->max_mr ? attr->max_mr : dev_res.max_mr; + hctx->srq_ctx.max = attr->max_srq ? attr->max_srq : dev_res.max_srq; + hctx->cq_ctx.max = attr->max_cq ? 
attr->max_cq : dev_res.max_cq; + } +} + +static void bnxt_re_limit_vf_res(struct bnxt_re_dev *rdev, + struct bnxt_qplib_vf_res *vf_res, + u32 num_vf) +{ + struct bnxt_qplib_chip_ctx *cctx = rdev->chip_ctx; + struct bnxt_qplib_max_res dev_res = {}; + + memset(vf_res, 0, sizeof(*vf_res)); + + bnxt_qplib_max_res_supported(cctx, &rdev->qplib_res, &dev_res, true); + vf_res->max_qp = dev_res.max_qp / num_vf; + vf_res->max_srq = dev_res.max_srq / num_vf; + vf_res->max_cq = dev_res.max_cq / num_vf; + /* + * MR and AH shares the same backing store, the value specified + * for max_mrw is split into half by the FW for MR and AH + */ + vf_res->max_mrw = dev_res.max_mr * 2 / num_vf; + vf_res->max_gid = BNXT_RE_MAX_GID_PER_VF; +} + +static void bnxt_re_dettach_irq(struct bnxt_re_dev *rdev) +{ + struct bnxt_qplib_rcfw *rcfw = NULL; + struct bnxt_qplib_nq *nq; + int indx; + + rcfw = &rdev->rcfw; + for (indx = 0; indx < rdev->nqr->max_init; indx++) { + nq = &rdev->nqr->nq[indx]; + mutex_lock(&nq->lock); + bnxt_qplib_nq_stop_irq(nq, false); + mutex_unlock(&nq->lock); + } + + if (test_bit(BNXT_RE_FLAG_ALLOC_RCFW, &rdev->flags)) + bnxt_qplib_rcfw_stop_irq(rcfw, false); +} + + +#define MAX_DSCP_PRI_TUPLE 64 + +int bnxt_re_get_pri_dscp_settings(struct bnxt_re_dev *rdev, + u16 target_id, + struct bnxt_re_tc_rec *tc_rec) +{ + struct bnxt_re_dscp2pri d2p[MAX_DSCP_PRI_TUPLE] = {}; + u16 count = MAX_DSCP_PRI_TUPLE; + int rc = 0; + int i; + + rc = bnxt_re_hwrm_pri2cos_qcfg(rdev, tc_rec, target_id); + if (rc) + return rc; + + tc_rec->dscp_valid = 0; + tc_rec->cnp_dscp_bv = 0; + tc_rec->roce_dscp_bv = 0; + rc = bnxt_re_query_hwrm_dscp2pri(rdev, d2p, &count, target_id); + if (rc) + return rc; + + for (i = 0; i < count; i++) { + if (d2p[i].pri == tc_rec->roce_prio) { + tc_rec->roce_dscp = d2p[i].dscp; + tc_rec->roce_dscp_bv |= (1ull << d2p[i].dscp); + tc_rec->dscp_valid |= (1 << ROCE_DSCP_VALID); + } else if (d2p[i].pri == tc_rec->cnp_prio) { + tc_rec->cnp_dscp = d2p[i].dscp; + tc_rec->cnp_dscp_bv |= (1ull << d2p[i].dscp); + tc_rec->dscp_valid |= (1 << CNP_DSCP_VALID); + } + } + + return 0; +} + +struct bnxt_re_dcb_work { + struct work_struct work; + struct bnxt_re_dev *rdev; + struct hwrm_async_event_cmpl cmpl; +}; + +static void bnxt_re_init_dcb_wq(struct bnxt_re_dev *rdev) +{ + rdev->dcb_wq = create_singlethread_workqueue("bnxt_re_dcb_wq"); +} + +static void bnxt_re_uninit_dcb_wq(struct bnxt_re_dev *rdev) +{ + if (!rdev->dcb_wq) + return; + flush_workqueue(rdev->dcb_wq); + destroy_workqueue(rdev->dcb_wq); + rdev->dcb_wq = NULL; +} + +static void bnxt_re_init_aer_wq(struct bnxt_re_dev *rdev) +{ + rdev->aer_wq = create_singlethread_workqueue("bnxt_re_aer_wq"); +} + +static void bnxt_re_uninit_aer_wq(struct bnxt_re_dev *rdev) +{ + if (!rdev->aer_wq) + return; + flush_workqueue(rdev->aer_wq); + destroy_workqueue(rdev->aer_wq); + rdev->aer_wq = NULL; +} + +static int bnxt_re_update_qp1_tos_dscp(struct bnxt_re_dev *rdev) +{ + struct bnxt_re_qp *qp; + + if (!_is_chip_gen_p5_p7(rdev->chip_ctx)) + return 0; + + qp = bnxt_re_get_qp1_qp(rdev); + if (!qp) + return 0; + + qp->qplib_qp.modify_flags = CMDQ_MODIFY_QP_MODIFY_MASK_TOS_DSCP; + qp->qplib_qp.tos_dscp = rdev->cc_param.qp1_tos_dscp; + + return bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp); +} + +static void bnxt_re_reconfigure_dscp(struct bnxt_re_dev *rdev) +{ + struct bnxt_qplib_cc_param *cc_param; + struct bnxt_re_tc_rec *tc_rec; + bool update_cc = false; + u8 dscp_user; + int rc; + + cc_param = &rdev->cc_param; + tc_rec = &rdev->tc_rec[0]; + + if 
(!(cc_param->roce_dscp_user || cc_param->cnp_dscp_user)) + return; + + if (cc_param->cnp_dscp_user) { + dscp_user = (cc_param->cnp_dscp_user & 0x3f); + if ((tc_rec->cnp_dscp_bv & (1ul << dscp_user)) && + (cc_param->alt_tos_dscp != dscp_user)) { + cc_param->alt_tos_dscp = dscp_user; + cc_param->mask |= CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ALT_TOS_DSCP; + update_cc = true; + } + } + + if (cc_param->roce_dscp_user) { + dscp_user = (cc_param->roce_dscp_user & 0x3f); + if ((tc_rec->roce_dscp_bv & (1ul << dscp_user)) && + (cc_param->tos_dscp != dscp_user)) { + cc_param->tos_dscp = dscp_user; + cc_param->mask |= CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TOS_DSCP; + update_cc = true; + } + } + + if (update_cc) { + rc = bnxt_qplib_modify_cc(&rdev->qplib_res, cc_param); + if (rc) + dev_err(rdev_to_dev(rdev), "Failed to apply cc settings\n"); + } +} + +void bnxt_re_dcb_wq_task(struct work_struct *work) +{ + struct bnxt_qplib_cc_param *cc_param; + struct bnxt_re_tc_rec *tc_rec; + struct bnxt_re_dev *rdev; + struct bnxt_re_dcb_work *dcb_work = + container_of(work, struct bnxt_re_dcb_work, work); + int rc; + + rdev = dcb_work->rdev; + if (!rdev) + goto exit; + + mutex_lock(&rdev->cc_lock); + + cc_param = &rdev->cc_param; + rc = bnxt_qplib_query_cc_param(&rdev->qplib_res, cc_param); + if (rc) { + dev_err(rdev_to_dev(rdev), "Failed to query ccparam rc:%d", rc); + goto fail; + } + tc_rec = &rdev->tc_rec[0]; + rc = bnxt_re_get_pri_dscp_settings(rdev, -1, tc_rec); + if (rc) { + dev_err(rdev_to_dev(rdev), "Failed to get pri2cos rc:%d", rc); + goto fail; + } + + /* + * Upon the receival of DCB Async event: + * If roce_dscp or cnp_dscp or both (which user configured using configfs) + * is in the list, re-program the value using modify_roce_cc command + */ + bnxt_re_reconfigure_dscp(rdev); + + cc_param->roce_pri = tc_rec->roce_prio; + if (cc_param->qp1_tos_dscp != cc_param->tos_dscp) { + cc_param->qp1_tos_dscp = cc_param->tos_dscp; + rc = bnxt_re_update_qp1_tos_dscp(rdev); + if (rc) { + dev_err(rdev_to_dev(rdev), "%s:Failed to modify QP1 rc:%d", + __func__, rc); + goto fail; + } + } + +fail: + mutex_unlock(&rdev->cc_lock); +exit: + kfree(dcb_work); +} + +static int bnxt_re_hwrm_dbr_pacing_broadcast_event(struct bnxt_re_dev *rdev) +{ + struct hwrm_func_dbr_pacing_broadcast_event_output resp = {0}; + struct hwrm_func_dbr_pacing_broadcast_event_input req = {0}; + struct bnxt_en_dev *en_dev = rdev->en_dev; + struct bnxt_fw_msg fw_msg = {}; + int rc; + + bnxt_re_init_hwrm_hdr((void *)&req, HWRM_FUNC_DBR_PACING_BROADCAST_EVENT, -1); + bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp, + sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev)); + rc = bnxt_send_msg(en_dev, &fw_msg); + if (rc) { + dev_dbg(rdev_to_dev(rdev), + "Failed to send dbr pacing broadcast event rc:%d", rc); + return rc; + } + return 0; +} + +static int bnxt_re_hwrm_dbr_pacing_nqlist_query(struct bnxt_re_dev *rdev) +{ + struct hwrm_func_dbr_pacing_nqlist_query_output resp = {0}; + struct hwrm_func_dbr_pacing_nqlist_query_input req = {0}; + struct bnxt_dbq_nq_list *nq_list = &rdev->nq_list; + struct bnxt_en_dev *en_dev = rdev->en_dev; + struct bnxt_fw_msg fw_msg = {}; + bool primary_found = false; + struct bnxt_qplib_nq *nq; + int rc, i, j = 1; + u16 *nql_ptr; + + nq = &rdev->nqr->nq[0]; + + bnxt_re_init_hwrm_hdr((void *)&req, HWRM_FUNC_DBR_PACING_NQLIST_QUERY, -1); + bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp, + sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev)); + rc = bnxt_send_msg(en_dev, &fw_msg); + if (rc) { + 
dev_dbg(rdev_to_dev(rdev), + "Failed to send dbr pacing nq list query rc:%d", rc); + return rc; + } + nq_list->num_nql_entries = le32_to_cpu(resp.num_nqs); + nql_ptr = &resp.nq_ring_id0; + /* populate the nq_list of the primary function with list received + * from FW. Fill the NQ IDs of secondary functions from index 1 to + * num_nql_entries - 1. Fill the nq_list->nq_id[0] with the + * nq_id of the primary pf + */ + for (i = 0; i < nq_list->num_nql_entries; i++) { + u16 nq_id = *nql_ptr; + + dev_dbg(rdev_to_dev(rdev), + "nq_list->nq_id[%d] = %d\n", i, nq_id); + if (nq_id != nq->ring_id) { + nq_list->nq_id[j] = nq_id; + j++; + } else { + primary_found = true; + nq_list->nq_id[0] = nq->ring_id; + } + nql_ptr++; + } + if (primary_found) + bnxt_qplib_dbr_pacing_set_primary_pf(rdev->chip_ctx, 1); + else + dev_err(rdev_to_dev(rdev), + "%s primary NQ id missing", __func__); + + return 0; +} + +static void __wait_for_fifo_occupancy_below_th(struct bnxt_re_dev *rdev) +{ + struct bnxt_qplib_db_pacing_data *pacing_data = rdev->qplib_res.pacing_data; + u32 read_val, fifo_occup; + bool first_read = true; +#ifdef BNXT_FPGA + u32 retry_fifo_check = 1000; +#endif + + /* loop shouldn't run infintely as the occupancy usually goes + * below pacing algo threshold as soon as pacing kicks in. + */ + while (1) { + read_val = readl(rdev->en_dev->bar0 + rdev->dbr_db_fifo_reg_off); + fifo_occup = pacing_data->fifo_max_depth - + ((read_val & pacing_data->fifo_room_mask) >> + pacing_data->fifo_room_shift); + /* Fifo occupancy cannot be greater the MAX FIFO depth */ + if (fifo_occup > pacing_data->fifo_max_depth) + break; + + if (first_read) { + bnxt_re_update_fifo_occup_slabs(rdev, fifo_occup); + first_read = false; + } + if (fifo_occup < pacing_data->pacing_th) + break; +#ifdef BNXT_FPGA + if (!retry_fifo_check--) { + dev_info_once(rdev_to_dev(rdev), + "%s: read_val = 0x%x fifo_occup = 0x%xfifo_max_depth = 0x%x pacing_th = 0x%x\n", + __func__, read_val, fifo_occup, pacing_data->fifo_max_depth, + pacing_data->pacing_th); + break; + } +#endif + } +} + +static void bnxt_re_set_default_pacing_data(struct bnxt_re_dev *rdev) +{ + struct bnxt_qplib_db_pacing_data *pacing_data = rdev->qplib_res.pacing_data; + + pacing_data->do_pacing = rdev->dbr_def_do_pacing; + pacing_data->pacing_th = rdev->pacing_algo_th; + pacing_data->alarm_th = + pacing_data->pacing_th * BNXT_RE_PACING_ALARM_TH_MULTIPLE(rdev->chip_ctx); +} + +#define CAG_RING_MASK 0x7FF +#define CAG_RING_SHIFT 17 +#define WATERMARK_MASK 0xFFF +#define WATERMARK_SHIFT 0 + +static bool bnxt_re_check_if_dbq_intr_triggered(struct bnxt_re_dev *rdev) +{ + u32 read_val; + int j; + + for (j = 0; j < 10; j++) { + read_val = readl(rdev->en_dev->bar0 + rdev->dbr_aeq_arm_reg_off); + dev_dbg(rdev_to_dev(rdev), "AEQ ARM status = 0x%x\n", + read_val); + if (!read_val) + return true; + } + return false; +} + +int bnxt_re_set_dbq_throttling_reg(struct bnxt_re_dev *rdev, u16 nq_id, u32 throttle) +{ + u32 cag_ring_water_mark = 0, read_val; + u32 throttle_val; + + /* Convert throttle percentage to value */ + throttle_val = (rdev->qplib_res.pacing_data->fifo_max_depth * throttle) / 100; + + if (bnxt_qplib_dbr_pacing_ext_en(rdev->chip_ctx)) { + cag_ring_water_mark = (nq_id & CAG_RING_MASK) << CAG_RING_SHIFT | + (throttle_val & WATERMARK_MASK); + writel(cag_ring_water_mark, rdev->en_dev->bar0 + rdev->dbr_throttling_reg_off); + read_val = readl(rdev->en_dev->bar0 + rdev->dbr_throttling_reg_off); + dev_dbg(rdev_to_dev(rdev), + "%s: dbr_throttling_reg_off read_val = 0x%x\n", + __func__, 
read_val); + if (read_val != cag_ring_water_mark) { + dev_dbg(rdev_to_dev(rdev), + "nq_id = %d write_val=0x%x read_val=0x%x\n", + nq_id, cag_ring_water_mark, read_val); + return 1; + } + } + writel(1, rdev->en_dev->bar0 + rdev->dbr_aeq_arm_reg_off); + return 0; +} + +static void bnxt_re_set_dbq_throttling_for_non_primary(struct bnxt_re_dev *rdev) +{ + struct bnxt_dbq_nq_list *nq_list; + struct bnxt_qplib_nq *nq; + int i; + + nq_list = &rdev->nq_list; + /* Run a loop for other Active functions if this is primary function */ + if (bnxt_qplib_dbr_pacing_is_primary_pf(rdev->chip_ctx)) { + dev_dbg(rdev_to_dev(rdev), "%s: nq_list->num_nql_entries= %d\n", + __func__, nq_list->num_nql_entries); + nq = &rdev->nqr->nq[0]; + for (i = nq_list->num_nql_entries - 1; i > 0; i--) { + u16 nq_id = nq_list->nq_id[i]; + + dev_dbg(rdev_to_dev(rdev), + "%s: nq_id = %d cur_fn_ring_id = %d\n", + __func__, nq_id, nq->ring_id); + if (bnxt_re_set_dbq_throttling_reg + (rdev, nq_id, 0)) + break; + bnxt_re_check_if_dbq_intr_triggered(rdev); + } + } +} + +static void bnxt_re_handle_dbr_nq_pacing_notification(struct bnxt_re_dev *rdev) +{ + struct bnxt_qplib_nq *nq; + int rc = 0; + + nq = &rdev->nqr->nq[0]; + + dev_dbg(rdev_to_dev(rdev), "%s: Query NQ list for DBR pacing", + __func__); + + /* Query the NQ list*/ + rc = bnxt_re_hwrm_dbr_pacing_nqlist_query(rdev); + if (rc) { + dev_err(rdev_to_dev(rdev), + "Failed to Query NQ list rc= %d", rc); + return; + } + /*Configure GRC access for Throttling and aeq_arm register */ + writel(rdev->chip_ctx->dbr_aeq_arm_reg & + BNXT_GRC_BASE_MASK, + rdev->en_dev->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 28); + + rdev->dbr_throttling_reg_off = + (rdev->chip_ctx->dbr_throttling_reg & + BNXT_GRC_OFFSET_MASK) + 0x8000; + rdev->dbr_aeq_arm_reg_off = + (rdev->chip_ctx->dbr_aeq_arm_reg & + BNXT_GRC_OFFSET_MASK) + 0x8000; + + bnxt_re_set_dbq_throttling_reg(rdev, nq->ring_id, rdev->dbq_watermark); +} + +static void bnxt_re_dbr_drop_recov_task(struct work_struct *work) +{ + struct bnxt_re_dbr_drop_recov_work *dbr_recov_work = + container_of(work, struct bnxt_re_dbr_drop_recov_work, work); + struct bnxt_re_dbr_res_list *res_list; + u64 start_time, diff_time_msec; + struct bnxt_re_ucontext *uctx; + bool user_dbr_drop_recov; + struct bnxt_re_dev *rdev; + struct bnxt_re_srq *srq; + u32 user_recov_pend = 0; + struct bnxt_re_qp *qp; + struct bnxt_re_cq *cq; + int i; + + rdev = dbr_recov_work->rdev; + if (!rdev) + goto exit; + + if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags)) + goto exit; + + if (dbr_recov_work->curr_epoch != rdev->dbr_evt_curr_epoch) { + rdev->dbr_sw_stats->dbr_drop_recov_event_skips++; + dev_dbg(rdev_to_dev(rdev), "%s: Ignore DBR recov evt epoch %d (latest ep %d)\n", + __func__, dbr_recov_work->curr_epoch, rdev->dbr_evt_curr_epoch); + goto exit; + } + + user_dbr_drop_recov = rdev->user_dbr_drop_recov; + rdev->dbr_recovery_on = true; + + /* CREQ */ + bnxt_qplib_replay_db(&rdev->rcfw.creq.creq_db.dbinfo, false); + + /* NQ */ + for (i = 0; i < rdev->nqr->num_msix - 1; i++) + bnxt_qplib_replay_db(&rdev->nqr->nq[i].nq_db.dbinfo, false); + + /* ARM_ENA for all userland CQs */ + res_list = &rdev->res_list[BNXT_RE_RES_TYPE_CQ]; + spin_lock(&res_list->lock); + list_for_each_entry(cq, &res_list->head, dbr_list) { + if (cq->umem) + bnxt_qplib_replay_db(&cq->qplib_cq.dbinfo, true); + } + spin_unlock(&res_list->lock); + + /* ARM_ENA for all userland SRQs */ + res_list = &rdev->res_list[BNXT_RE_RES_TYPE_SRQ]; + spin_lock(&res_list->lock); + list_for_each_entry(srq, &res_list->head, 
dbr_list) { + if (srq->qplib_srq.is_user) + bnxt_qplib_replay_db(&srq->qplib_srq.dbinfo, true); + } + spin_unlock(&res_list->lock); + + if (!user_dbr_drop_recov) + goto skip_user_recovery; + + /* Notify all uusrlands */ + res_list = &rdev->res_list[BNXT_RE_RES_TYPE_UCTX]; + spin_lock(&res_list->lock); + list_for_each_entry(uctx, &res_list->head, dbr_list) { + uint32_t *user_epoch = uctx->dbr_recov_cq_page; + + if (!user_epoch || !uctx->dbr_recov_cq) { + dev_dbg(rdev_to_dev(rdev), "%s: %d Found %s = NULL during DBR recovery\n", + __func__, __LINE__, (user_epoch) ? "dbr_recov_cq" : "user_epoch"); + continue; + } + + *user_epoch = dbr_recov_work->curr_epoch; + if (uctx->dbr_recov_cq->ib_cq.comp_handler) + (*uctx->dbr_recov_cq->ib_cq.comp_handler) + (&uctx->dbr_recov_cq->ib_cq, + uctx->dbr_recov_cq->ib_cq.cq_context); + } + spin_unlock(&res_list->lock); + +skip_user_recovery: + /* ARM_ENA and Cons update DBs for Kernel CQs */ + res_list = &rdev->res_list[BNXT_RE_RES_TYPE_CQ]; + spin_lock(&res_list->lock); + list_for_each_entry(cq, &res_list->head, dbr_list) { + if (!cq->umem) { + bnxt_qplib_replay_db(&cq->qplib_cq.dbinfo, true); + bnxt_qplib_replay_db(&cq->qplib_cq.dbinfo, false); + } + } + spin_unlock(&res_list->lock); + + /* ARM_ENA and Cons update DBs for Kernel SRQs */ + res_list = &rdev->res_list[BNXT_RE_RES_TYPE_SRQ]; + spin_lock(&res_list->lock); + list_for_each_entry(srq, &res_list->head, dbr_list) { + if (!srq->qplib_srq.is_user) { + bnxt_qplib_replay_db(&srq->qplib_srq.dbinfo, true); + bnxt_qplib_replay_db(&srq->qplib_srq.dbinfo, false); + } + } + spin_unlock(&res_list->lock); + + /* QP */ + res_list = &rdev->res_list[BNXT_RE_RES_TYPE_QP]; + spin_lock(&res_list->lock); + list_for_each_entry(qp, &res_list->head, dbr_list) { + struct bnxt_qplib_q *q; + /* Do nothing for user QPs */ + if (qp->qplib_qp.is_user) + continue; + + /* Replay SQ */ + q = &qp->qplib_qp.sq; + bnxt_qplib_replay_db(&q->dbinfo, false); + + /* Check if RQ exists */ + if (!qp->qplib_qp.rq.max_wqe) + continue; + + /* Replay RQ */ + q = &qp->qplib_qp.rq; + bnxt_qplib_replay_db(&q->dbinfo, false); + } + spin_unlock(&res_list->lock); + + if (!user_dbr_drop_recov) + goto dbr_compl; + + /* Check whether all user-lands completed the recovery */ + start_time = get_jiffies_64(); + for (i = 0; i < rdev->user_dbr_drop_recov_timeout; i++) { + user_recov_pend = 0; + res_list = &rdev->res_list[BNXT_RE_RES_TYPE_UCTX]; + spin_lock(&res_list->lock); + list_for_each_entry(uctx, &res_list->head, dbr_list) { + uint32_t *epoch = uctx->dbr_recov_cq_page; + + if (!epoch || !uctx->dbr_recov_cq) + continue; + + /* + * epoch[0] = user_epoch + * epoch[1] = user_epoch_ack + */ + if (epoch[0] != epoch[1]) + user_recov_pend++; + } + spin_unlock(&res_list->lock); + + if (!user_recov_pend) + break; + + diff_time_msec = jiffies_to_msecs(get_jiffies_64() - start_time); + if (diff_time_msec >= rdev->user_dbr_drop_recov_timeout) + break; + + usleep_range(1000, 1500); + } + + if (user_recov_pend) { + rdev->dbr_sw_stats->dbr_drop_recov_timeouts++; + rdev->dbr_sw_stats->dbr_drop_recov_timeout_users += user_recov_pend; + dev_dbg(rdev_to_dev(rdev), "DBR recovery timeout for %d users\n", user_recov_pend); + goto pacing_exit; + } + +dbr_compl: + bnxt_dbr_complete(rdev->en_dev, dbr_recov_work->curr_epoch); +pacing_exit: + rdev->dbr_recovery_on = false; +exit: + kfree(dbr_recov_work); +} + +static void bnxt_re_dbq_wq_task(struct work_struct *work) +{ + struct bnxt_re_dbq_work *dbq_work = + container_of(work, struct bnxt_re_dbq_work, work); + struct bnxt_re_dev 
*rdev; + + rdev = dbq_work->rdev; + + if (!rdev) + goto exit; + switch (dbq_work->event) { + case BNXT_RE_DBQ_EVENT_SCHED: + dev_dbg(rdev_to_dev(rdev), "%s: Handle DBQ Pacing event\n", + __func__); + if (!bnxt_qplib_dbr_pacing_ext_en(rdev->chip_ctx)) + bnxt_re_hwrm_dbr_pacing_broadcast_event(rdev); + else + bnxt_re_pacing_alert(rdev); + break; + case BNXT_RE_DBR_PACING_EVENT: + dev_dbg(rdev_to_dev(rdev), "%s: Sched interrupt/pacing worker", + __func__); + if (_is_chip_p7(rdev->chip_ctx)) + bnxt_re_pacing_alert(rdev); + else if (!rdev->chip_ctx->modes.dbr_pacing_v0) + bnxt_re_hwrm_dbr_pacing_qcfg(rdev); + break; + case BNXT_RE_DBR_NQ_PACING_NOTIFICATION: + if (!rdev->is_virtfn) { + bnxt_re_handle_dbr_nq_pacing_notification(rdev); + /* Issue a broadcast event to notify other functions + * that primary changed + */ + bnxt_re_hwrm_dbr_pacing_broadcast_event(rdev); + } + break; + } +exit: + kfree(dbq_work); +} + +static bool bnxt_re_is_qp1_or_shadow_qp(struct bnxt_re_dev *rdev, + struct bnxt_re_qp *qp) +{ + if (rdev->gsi_ctx.gsi_qp_mode == BNXT_RE_GSI_MODE_ALL) + return (qp->ib_qp.qp_type == IB_QPT_GSI) || + (qp == rdev->gsi_ctx.gsi_sqp); + else + return (qp->ib_qp.qp_type == IB_QPT_GSI); +} + +/* bnxt_re_stop_user_qps_nonfatal - Move all user QPs to the error state + * @rdev - rdma device instance + * + * This function moves all user QPs (except QP1 and its shadow QP) to the + * error state so that their pending data path commands are completed + * with FLUSH_ERR. + * + */ +static void bnxt_re_stop_user_qps_nonfatal(struct bnxt_re_dev *rdev) +{ + struct bnxt_qplib_qp *qpl_qp; + struct ib_qp_attr qp_attr; + int num_qps_stopped = 0; + int mask = IB_QP_STATE; + struct bnxt_re_qp *qp; + + if (!rdev) + return; + + if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags)) + return; + +restart: + dev_dbg(rdev_to_dev(rdev), "from %s %d num_qps_stopped %d\n", + __func__, __LINE__, num_qps_stopped); + + mutex_lock(&rdev->qp_lock); + list_for_each_entry(qp, &rdev->qp_list, list) { + qpl_qp = &qp->qplib_qp; + if (!qpl_qp->is_user || bnxt_re_is_qp1_or_shadow_qp(rdev, qp)) + continue; + /* This is required to move further in the list, otherwise + * we will not be able to complete the list due to the budget. + * The restart label will start iteration from the head once again. + */ + if (qpl_qp->state == CMDQ_MODIFY_QP_NEW_STATE_RESET || + qpl_qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) + continue; + + qp_attr.qp_state = IB_QPS_ERR; + bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, mask, NULL); + + bnxt_re_dispatch_event(&rdev->ibdev, &qp->ib_qp, 1, IB_EVENT_QP_FATAL); + /* + * 1. Release qp_lock after a budget to unblock other verb + * requests (like qp_destroy) from stack. + * 2. Traverse through the qp_list freshly as addition / deletion + * might have happened since qp_lock is getting released here. + */ + if (++num_qps_stopped % BNXT_RE_STOP_QPS_BUDGET == 0) { + mutex_unlock(&rdev->qp_lock); + schedule(); + goto restart; + } + } + mutex_unlock(&rdev->qp_lock); + dev_dbg(rdev_to_dev(rdev), "from %s %d num_qps_stopped %d\n", + __func__, __LINE__, num_qps_stopped); +} + +/* bnxt_re_drain_kernel_qps_fatal - Move all kernel qps to flush list + * @rdev - rdma device instance + * + * This function will move all the kernel QPs to flush list. + * Calling this function at appropriate interface will flush all pending + * data path commands to be completed with FLUSH_ERR from + * bnxt_qplib_process_flush_list poll context.
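+ * Unlike bnxt_re_stop_user_qps_nonfatal() above, this is the fatal path: it + * runs only after BNXT_RE_FLAG_ERR_DEVICE_DETACHED is set, so QP states are + * forced to ERR locally and kernel QPs are added to the flush list without + * issuing modify_qp commands to the firmware.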
+ * + */ +static void bnxt_re_drain_kernel_qps_fatal(struct bnxt_re_dev *rdev) +{ + struct bnxt_qplib_qp *qpl_qp; + struct bnxt_re_qp *qp; + unsigned long flags; + + if (!test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags)) + return; + + mutex_lock(&rdev->qp_lock); + + list_for_each_entry(qp, &rdev->qp_list, list) { + qpl_qp = &qp->qplib_qp; + qpl_qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR; + qpl_qp->cur_qp_state = qpl_qp->state; + if (!qpl_qp->is_user) { + flags = bnxt_re_lock_cqs(qp); + bnxt_qplib_add_flush_qp(qpl_qp); + bnxt_re_unlock_cqs(qp, flags); + + /* If we don't have any new submission from ULP, + * driver will not have a chance to call completion handler. + * If ULP has not properly handled drain_qp it can happen. + * Open below for quick check. + * + * bnxt_re_handle_cqn(&qp->scq->qplib_cq); + * bnxt_re_handle_cqn(&qp->rcq->qplib_cq); + */ + } + } + + mutex_unlock(&rdev->qp_lock); +} + +static void bnxt_re_aer_wq_task(struct work_struct *work) +{ + struct bnxt_re_aer_work *aer_work = + container_of(work, struct bnxt_re_aer_work, work); + struct bnxt_re_dev *rdev = aer_work->rdev; + + if (rdev) { + /* If dbq interrupt is scheduled, wait for it to finish */ + while(atomic_read(&rdev->dbq_intr_running)) + usleep_range(1, 10); + + flush_workqueue(rdev->dbq_wq); + cancel_work_sync(&rdev->dbq_fifo_check_work); + cancel_delayed_work_sync(&rdev->dbq_pacing_work); + bnxt_re_drain_kernel_qps_fatal(aer_work->rdev); + } + + kfree(aer_work); +} + +static void bnxt_re_async_notifier(void *handle, struct hwrm_async_event_cmpl *cmpl) +{ + struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(handle); + struct bnxt_re_dbr_drop_recov_work *dbr_recov_work; + struct bnxt_re_dcb_work *dcb_work; + struct bnxt_re_dbq_work *dbq_work; + struct bnxt_re_aer_work *aer_work; + struct bnxt_re_dev *rdev; + u32 err_type; + u16 event_id; + u32 data1; + u32 data2; + + if (!cmpl) { + dev_err(NULL, "Async event, bad completion\n"); + return; + } + + if (!en_info || !en_info->en_dev) { + dev_err(NULL, "Async event, bad en_info or en_dev\n"); + return; + } + rdev = en_info->rdev; + + event_id = le16_to_cpu(cmpl->event_id); + data1 = le32_to_cpu(cmpl->event_data1); + data2 = le32_to_cpu(cmpl->event_data2); + + if (!rdev || !rdev_to_dev(rdev)) { + dev_dbg(NULL, "Async event, bad rdev or netdev\n"); + return; + } + + if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags) || + !test_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags)) { + dev_dbg(NULL, "Async event, device already detached\n"); + return; + } + dev_dbg(rdev_to_dev(rdev), "Async event_id = %d data1 = %d data2 = %d", + event_id, data1, data2); + + switch (event_id) { + case ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE: + /* Not handling the event in older FWs */ + if (!is_qport_service_type_supported(rdev)) + break; + if (!rdev->dcb_wq) + break; + dcb_work = kzalloc(sizeof(*dcb_work), GFP_ATOMIC); + if (!dcb_work) + break; + + dcb_work->rdev = rdev; + memcpy(&dcb_work->cmpl, cmpl, sizeof(*cmpl)); + INIT_WORK(&dcb_work->work, bnxt_re_dcb_wq_task); + queue_work(rdev->dcb_wq, &dcb_work->work); + break; + case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: + if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) { + /* Set rcfw flag to control commands send to Bono */ + set_bit(ERR_DEVICE_DETACHED, &rdev->rcfw.cmdq.flags); + /* Set bnxt_re flag to control commands send via L2 driver */ + set_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags); + wake_up_all(&rdev->rcfw.cmdq.waitq); + } + if (!rdev->aer_wq) + break; + aer_work = kzalloc(sizeof(*aer_work), 
GFP_ATOMIC); + if (!aer_work) + break; + + aer_work->rdev = rdev; + INIT_WORK(&aer_work->work, bnxt_re_aer_wq_task); + queue_work(rdev->aer_wq, &aer_work->work); + break; + case ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_THRESHOLD: + if (!rdev->dbr_pacing) + break; + dbq_work = kzalloc(sizeof(*dbq_work), GFP_ATOMIC); + if (!dbq_work) + goto unlock; + dbq_work->rdev = rdev; + dbq_work->event = BNXT_RE_DBR_PACING_EVENT; + INIT_WORK(&dbq_work->work, bnxt_re_dbq_wq_task); + queue_work(rdev->dbq_wq, &dbq_work->work); + rdev->dbr_sw_stats->dbq_int_recv++; + break; + case ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_NQ_UPDATE: + if (!rdev->dbr_pacing) + break; + + dbq_work = kzalloc(sizeof(*dbq_work), GFP_ATOMIC); + if (!dbq_work) + goto unlock; + dbq_work->rdev = rdev; + dbq_work->event = BNXT_RE_DBR_NQ_PACING_NOTIFICATION; + INIT_WORK(&dbq_work->work, bnxt_re_dbq_wq_task); + queue_work(rdev->dbq_wq, &dbq_work->work); + break; + + case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT: + err_type = BNXT_RE_EVENT_ERROR_REPORT_TYPE(data1); + + if (err_type == BNXT_RE_ASYNC_ERR_REP_BASE(TYPE_DOORBELL_DROP_THRESHOLD) && + rdev->dbr_drop_recov) { + rdev->dbr_sw_stats->dbr_drop_recov_events++; + rdev->dbr_evt_curr_epoch = BNXT_RE_EVENT_DBR_EPOCH(data1); + + dbr_recov_work = kzalloc(sizeof(*dbr_recov_work), GFP_ATOMIC); + if (!dbr_recov_work) + goto unlock; + dbr_recov_work->rdev = rdev; + dbr_recov_work->curr_epoch = rdev->dbr_evt_curr_epoch; + INIT_WORK(&dbr_recov_work->work, bnxt_re_dbr_drop_recov_task); + queue_work(rdev->dbr_drop_recov_wq, &dbr_recov_work->work); + } + break; + default: + break; + } +unlock: + return; +} + +static void bnxt_re_db_fifo_check(struct work_struct *work) +{ + struct bnxt_re_dev *rdev = container_of(work, struct bnxt_re_dev, + dbq_fifo_check_work); + struct bnxt_qplib_db_pacing_data *pacing_data; + u32 pacing_save; + + if (!mutex_trylock(&rdev->dbq_lock)) + return; + pacing_data = rdev->qplib_res.pacing_data; + pacing_save = rdev->do_pacing_save; + __wait_for_fifo_occupancy_below_th(rdev); + cancel_delayed_work_sync(&rdev->dbq_pacing_work); + if (rdev->dbr_recovery_on) + goto recovery_on; + if (pacing_save > rdev->dbr_def_do_pacing) { + /* Double the do_pacing value during the congestion */ + pacing_save = pacing_save << 1; + } else { + /* + * when a new congestion is detected increase the do_pacing + * by 8 times. And also increase the pacing_th by 4 times. The + * reason to increase pacing_th is to give more space for the + * queue to oscillate down without getting empty, but also more + * room for the queue to increase without causing another alarm. 
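+ * As a concrete example of the policy implemented below: on a newly + * detected congestion, do_pacing jumps to 8x the default value (subsequent + * doublings are capped at BNXT_RE_MAX_DBR_DO_PACING) while pacing_th is + * raised to 4x pacing_algo_th.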
+ */ + pacing_save = pacing_save << 3; + pacing_data->pacing_th = rdev->pacing_algo_th * 4; + } + + if (pacing_save > BNXT_RE_MAX_DBR_DO_PACING) + pacing_save = BNXT_RE_MAX_DBR_DO_PACING; + + pacing_data->do_pacing = pacing_save; + rdev->do_pacing_save = pacing_data->do_pacing; + pacing_data->alarm_th = + pacing_data->pacing_th * BNXT_RE_PACING_ALARM_TH_MULTIPLE(rdev->chip_ctx); +recovery_on: + schedule_delayed_work(&rdev->dbq_pacing_work, + msecs_to_jiffies(rdev->dbq_pacing_time)); + rdev->dbr_sw_stats->dbq_pacing_alerts++; + mutex_unlock(&rdev->dbq_lock); +} + +static void bnxt_re_pacing_timer_exp(struct work_struct *work) +{ + struct bnxt_re_dev *rdev = container_of(work, struct bnxt_re_dev, + dbq_pacing_work.work); + struct bnxt_qplib_db_pacing_data *pacing_data; + u32 read_val, fifo_occup; + struct bnxt_qplib_nq *nq; + + if (!mutex_trylock(&rdev->dbq_lock)) + return; + + pacing_data = rdev->qplib_res.pacing_data; + read_val = readl(rdev->en_dev->bar0 + rdev->dbr_db_fifo_reg_off); + fifo_occup = pacing_data->fifo_max_depth - + ((read_val & pacing_data->fifo_room_mask) >> + pacing_data->fifo_room_shift); + + if (fifo_occup > pacing_data->pacing_th) + goto restart_timer; + + /* + * Instead of immediately going back to the default do_pacing + * reduce it by 1/8 times and restart the timer. + */ + pacing_data->do_pacing = pacing_data->do_pacing - (pacing_data->do_pacing >> 3); + pacing_data->do_pacing = max_t(u32, rdev->dbr_def_do_pacing, pacing_data->do_pacing); + /* + * If the fifo_occup is less than the interrupt enable threshold + * enable the interrupt on the primary PF. + */ + if (rdev->dbq_int_disable && fifo_occup < rdev->pacing_en_int_th) { + if (bnxt_qplib_dbr_pacing_is_primary_pf(rdev->chip_ctx)) { + if (!rdev->chip_ctx->modes.dbr_pacing_v0) { + nq = &rdev->nqr->nq[0]; + bnxt_re_set_dbq_throttling_reg(rdev, nq->ring_id, + rdev->dbq_watermark); + rdev->dbr_sw_stats->dbq_int_en++; + rdev->dbq_int_disable = false; + } + } + } + if (pacing_data->do_pacing <= rdev->dbr_def_do_pacing) { + bnxt_re_set_default_pacing_data(rdev); + rdev->dbr_sw_stats->dbq_pacing_complete++; + goto dbq_unlock; + } +restart_timer: + schedule_delayed_work(&rdev->dbq_pacing_work, + msecs_to_jiffies(rdev->dbq_pacing_time)); + bnxt_re_update_do_pacing_slabs(rdev); + rdev->dbr_sw_stats->dbq_pacing_resched++; +dbq_unlock: + rdev->do_pacing_save = pacing_data->do_pacing; + mutex_unlock(&rdev->dbq_lock); +} + +void bnxt_re_pacing_alert(struct bnxt_re_dev *rdev) +{ + struct bnxt_qplib_db_pacing_data *pacing_data; + + if (!rdev->dbr_pacing) + return; + mutex_lock(&rdev->dbq_lock); + pacing_data = rdev->qplib_res.pacing_data; + + /* + * Increase the alarm_th to max so that other user lib instances do not + * keep alerting the driver. 
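+ * The dbq_fifo_check_work scheduled below restores alarm_th to + * pacing_th * BNXT_RE_PACING_ALARM_TH_MULTIPLE once the FIFO occupancy + * drops back under the pacing threshold.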
+ */ + pacing_data->alarm_th = pacing_data->fifo_max_depth; + pacing_data->do_pacing = BNXT_RE_MAX_DBR_DO_PACING; + cancel_work_sync(&rdev->dbq_fifo_check_work); + schedule_work(&rdev->dbq_fifo_check_work); + mutex_unlock(&rdev->dbq_lock); +} + +void bnxt_re_schedule_dbq_event(struct bnxt_qplib_res *res) +{ + struct bnxt_re_dbq_work *dbq_work; + struct bnxt_re_dev *rdev; + + rdev = container_of(res, struct bnxt_re_dev, qplib_res); + + atomic_set(&rdev->dbq_intr_running, 1); + + if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags)) + goto exit; + /* Run the loop to send dbq event to other functions + * for newer FW + */ + if (bnxt_qplib_dbr_pacing_ext_en(rdev->chip_ctx) && + !rdev->chip_ctx->modes.dbr_pacing_v0) + bnxt_re_set_dbq_throttling_for_non_primary(rdev); + + dbq_work = kzalloc(sizeof(*dbq_work), GFP_ATOMIC); + if (!dbq_work) + goto exit; + dbq_work->rdev = rdev; + dbq_work->event = BNXT_RE_DBQ_EVENT_SCHED; + INIT_WORK(&dbq_work->work, bnxt_re_dbq_wq_task); + queue_work(rdev->dbq_wq, &dbq_work->work); + rdev->dbr_sw_stats->dbq_int_recv++; + rdev->dbq_int_disable = true; +exit: + atomic_set(&rdev->dbq_intr_running, 0); +} + +int bnxt_re_handle_start(struct auxiliary_device *adev) +{ + struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(adev); + struct bnxt_re_bond_info *info = NULL; + struct bnxt_re_dev *rdev = NULL; + struct net_device *real_dev; + struct bnxt_en_dev *en_dev; + struct net_device *netdev; + int rc = 0; + + if (!en_info || !en_info->en_dev) { + dev_err(NULL, "Start, bad en_info or en_dev\n"); + return -EINVAL; + } + netdev = en_info->en_dev->net; + if (en_info->rdev) { + dev_info(rdev_to_dev(en_info->rdev), + "%s: Device is already added adev %p rdev: %p\n", + __func__, adev, en_info->rdev); + return 0; + } + + en_dev = en_info->en_dev; + real_dev = rdma_vlan_dev_real_dev(netdev); + if (!real_dev) + real_dev = netdev; + if (en_info->binfo_valid) { + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) { + bnxt_unregister_dev(en_dev); + clear_bit(BNXT_RE_FLAG_EN_DEV_NETDEV_REG, &en_info->flags); + return -ENOMEM; + } + memcpy(info, &en_info->binfo, sizeof(*info)); + real_dev = info->master; + } + rc = bnxt_re_add_device(&rdev, real_dev, info, + en_info->gsi_mode, + BNXT_RE_POST_RECOVERY_INIT, + en_info->wqe_mode, + adev); + + if (rc) { + /* Add device failed. Unregister the device. 
+ * This has to be done explicitly as + * bnxt_re_stop would not have unregistered + */ + bnxt_unregister_dev(en_dev); + clear_bit(BNXT_RE_FLAG_EN_DEV_NETDEV_REG, &en_info->flags); + if (en_info->binfo_valid) { + kfree(info); + en_info->binfo_valid = false; + } + return rc; + } + rtnl_lock(); + if (en_info->binfo_valid) { + info->rdev = rdev; + en_info->binfo_valid = false; + } + bnxt_re_get_link_speed(rdev); + rtnl_unlock(); + rc = bnxt_re_ib_init(rdev); + if (rc) { + dev_err(rdev_to_dev(rdev), "Failed ib_init\n"); + return rc; + } + bnxt_re_ib_init_2(rdev); + + /* Reset active_port_map so that worker can update f/w using SET_LINK_AGGR_MODE */ + if (rdev->binfo) + rdev->binfo->active_port_map = 0; + + return rc; +} + +static void bnxt_re_stop(struct auxiliary_device *adev) +{ + struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(adev); + struct bnxt_en_dev *en_dev; + struct bnxt_re_dev *rdev; + + rtnl_unlock(); + mutex_lock(&bnxt_re_mutex); + if (!en_info || !en_info->en_dev) { + dev_err(NULL, "Stop, bad en_info or en_dev\n"); + goto exit; + } + rdev = en_info->rdev; + if (!rdev) + goto exit; + + if (!bnxt_re_is_rdev_valid(rdev)) + goto exit; + + /* + * Check if fw has undergone reset or is in a fatal condition. + * If so, set flags so that no further commands are sent down to FW + */ + + en_dev = rdev->en_dev; + if (test_bit(BNXT_STATE_FW_FATAL_COND, &en_dev->en_state) || + test_bit(BNXT_STATE_FW_RESET_DET, &en_dev->en_state)) { + /* Set rcfw flag to control commands send to Bono */ + set_bit(ERR_DEVICE_DETACHED, &rdev->rcfw.cmdq.flags); + /* Set bnxt_re flag to control commands send via L2 driver */ + set_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags); + wake_up_all(&rdev->rcfw.cmdq.waitq); + bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, + IB_EVENT_DEVICE_FATAL); + } + + if (test_bit(BNXT_RE_FLAG_STOP_IN_PROGRESS, &rdev->flags)) + goto exit; + set_bit(BNXT_RE_FLAG_STOP_IN_PROGRESS, &rdev->flags); + + en_info->wqe_mode = rdev->chip_ctx->modes.wqe_mode; + en_info->gsi_mode = rdev->gsi_ctx.gsi_qp_mode; + + if (rdev->binfo) { + memcpy(&en_info->binfo, rdev->binfo, sizeof(*rdev->binfo)); + en_info->binfo_valid = true; + } + + if (rdev->dbr_pacing) + bnxt_re_set_pacing_dev_state(rdev); + + dev_info(rdev_to_dev(rdev), "%s: L2 driver notified to stop en_state 0x%lx", + __func__, en_dev->en_state); + bnxt_re_ib_uninit(rdev); + bnxt_re_remove_device(rdev, BNXT_RE_PRE_RECOVERY_REMOVE, rdev->adev); +exit: + mutex_unlock(&bnxt_re_mutex); + /* Take rtnl_lock before return, bnxt_re_stop is called with rtnl_lock */ + rtnl_lock(); + + /* TODO: Handle return values when bnxt_en supports */ + return; +} + +static void bnxt_re_start(struct auxiliary_device *adev) +{ + /* TODO: Handle return values + * when bnxt_en supports it + */ + rtnl_unlock(); + mutex_lock(&bnxt_re_mutex); + if (bnxt_re_handle_start(adev)) + dev_err(NULL, "Failed to start RoCE device"); + mutex_unlock(&bnxt_re_mutex); + /* Take rtnl_lock before return, bnxt_re_start is called with rtnl_lock */ + rtnl_lock(); + return; +} + +static void bnxt_re_vf_res_config(struct bnxt_re_dev *rdev) +{ + struct bnxt_qplib_ctx *hctx; + u32 num_vfs; + + /* For Thor2, VF creation is not dependent on LAG*/ + if (!BNXT_RE_CHIP_P7(rdev->chip_ctx->chip_num) && rdev->binfo) + return; + + /* + * Use the total VF count since the actual VF count may not be + * available at this point. 
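+ * bnxt_re_limit_vf_res() divides the device-wide maximums by this count, + * so using the total VF count yields a conservative per-VF share even + * before all VFs are instantiated.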
+ */ + num_vfs = pci_sriov_get_totalvfs(rdev->en_dev->pdev); + if (!num_vfs) + return; + + hctx = rdev->qplib_res.hctx; + bnxt_re_limit_vf_res(rdev, &hctx->vf_res, num_vfs); + bnxt_qplib_set_func_resources(&rdev->qplib_res); +} + +/* In kernels which has native Auxiliary bus support, auxiliary bus + * subsystem will invoke shutdown. Else, bnxt_en driver will invoke + * bnxt_ulp_shutdown directly. In that case, bnxt_re driver has to + * release RTNL before invoking ib_uninit and acquire RTNL after that. + */ +static void bnxt_re_shutdown(struct auxiliary_device *adev) +{ + struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(adev); + struct bnxt_re_dev *rdev; + + if (!en_info) { + dev_err(NULL, "Shutdown, bad en_info\n"); + return; + } +#ifndef HAVE_AUXILIARY_DRIVER + /* rtnl_lock held by L2 before coming here */ + rtnl_unlock(); +#endif + mutex_lock(&bnxt_re_mutex); + rdev = en_info->rdev; + if (!rdev || !bnxt_re_is_rdev_valid(rdev)) + goto exit; + + bnxt_re_ib_uninit(rdev); + bnxt_re_remove_device(rdev, BNXT_RE_COMPLETE_REMOVE, rdev->adev); +exit: + mutex_unlock(&bnxt_re_mutex); +#ifndef HAVE_AUXILIARY_DRIVER + /* rtnl_lock held by L2 before coming here */ + rtnl_lock(); +#endif + return; +} + +static void bnxt_re_stop_irq(void *handle) +{ + struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(handle); + + if (!en_info) { + dev_err(NULL, "Stop irq, bad en_info\n"); + return; + } + if (!en_info->rdev) + return; + bnxt_re_dettach_irq(en_info->rdev); +} + +static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent) +{ + struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(handle); + struct bnxt_msix_entry *msix_ent = NULL; + struct bnxt_qplib_rcfw *rcfw = NULL; + struct bnxt_re_dev *rdev; + struct bnxt_qplib_nq *nq; + int indx, rc, vec; + + if (!en_info) { + dev_err(NULL, "Start irq, bad en_info\n"); + return; + } + rdev = en_info->rdev; + if (!rdev) + return; + if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags)) + return; + msix_ent = rdev->nqr->msix_entries; + rcfw = &rdev->rcfw; + + if (!ent) { + /* Not setting the f/w timeout bit in rcfw. + * During the driver unload the first command + * to f/w will timeout and that will set the + * timeout bit. + */ + dev_err(rdev_to_dev(rdev), "Failed to re-start IRQs\n"); + return; + } + + /* Vectors may change after restart, so update with new vectors + * in device structure. 
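+ * The CREQ IRQ (BNXT_RE_AEQ_IDX) is re-requested first, followed by one + * IRQ per initialized NQ.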
+ */ + for (indx = 0; indx < rdev->nqr->num_msix; indx++) + rdev->nqr->msix_entries[indx].vector = ent[indx].vector; + + if (test_bit(BNXT_RE_FLAG_ALLOC_RCFW, &rdev->flags)) { + rc = bnxt_qplib_rcfw_start_irq(rcfw, msix_ent[BNXT_RE_AEQ_IDX].vector, + false); + if (rc) { + dev_warn(rdev_to_dev(rdev), + "Failed to reinit CREQ\n"); + return; + } + } + for (indx = 0 ; indx < rdev->nqr->max_init; indx++) { + nq = &rdev->nqr->nq[indx]; + vec = indx + 1; + rc = bnxt_qplib_nq_start_irq(nq, indx, msix_ent[vec].vector, + false); + if (rc) { + dev_warn(rdev_to_dev(rdev), + "Failed to reinit NQ index %d\n", indx); + return; + } + } +} + +/* + * Except for ulp_async_notifier and ulp_sriov_cfg, the remaining ulp_ops + * below are called with rtnl_lock held + */ +static struct bnxt_ulp_ops bnxt_re_ulp_ops = { + .ulp_async_notifier = bnxt_re_async_notifier, + .ulp_irq_stop = bnxt_re_stop_irq, + .ulp_irq_restart = bnxt_re_start_irq +}; + +static inline const char *bnxt_re_netevent(unsigned long event) +{ + BNXT_RE_NETDEV_EVENT(event, NETDEV_UP); + BNXT_RE_NETDEV_EVENT(event, NETDEV_DOWN); + BNXT_RE_NETDEV_EVENT(event, NETDEV_REBOOT); + BNXT_RE_NETDEV_EVENT(event, NETDEV_CHANGE); + BNXT_RE_NETDEV_EVENT(event, NETDEV_REGISTER); + BNXT_RE_NETDEV_EVENT(event, NETDEV_UNREGISTER); + BNXT_RE_NETDEV_EVENT(event, NETDEV_CHANGEMTU); + BNXT_RE_NETDEV_EVENT(event, NETDEV_CHANGEADDR); +#ifdef HAVE_NETDEV_PRE_CHANGEADDR + BNXT_RE_NETDEV_EVENT(event, NETDEV_PRE_CHANGEADDR); +#endif + BNXT_RE_NETDEV_EVENT(event, NETDEV_GOING_DOWN); + BNXT_RE_NETDEV_EVENT(event, NETDEV_CHANGENAME); + BNXT_RE_NETDEV_EVENT(event, NETDEV_FEAT_CHANGE); + BNXT_RE_NETDEV_EVENT(event, NETDEV_BONDING_FAILOVER); + BNXT_RE_NETDEV_EVENT(event, NETDEV_PRE_UP); + BNXT_RE_NETDEV_EVENT(event, NETDEV_PRE_TYPE_CHANGE); + BNXT_RE_NETDEV_EVENT(event, NETDEV_POST_TYPE_CHANGE); + BNXT_RE_NETDEV_EVENT(event, NETDEV_POST_INIT); + BNXT_RE_NETDEV_EVENT(event, NETDEV_RELEASE); + BNXT_RE_NETDEV_EVENT(event, NETDEV_NOTIFY_PEERS); + BNXT_RE_NETDEV_EVENT(event, NETDEV_JOIN); + BNXT_RE_NETDEV_EVENT(event, NETDEV_CHANGEUPPER); + BNXT_RE_NETDEV_EVENT(event, NETDEV_RESEND_IGMP); + BNXT_RE_NETDEV_EVENT(event, NETDEV_PRECHANGEMTU); + BNXT_RE_NETDEV_EVENT(event, NETDEV_CHANGEINFODATA); + BNXT_RE_NETDEV_EVENT(event, NETDEV_BONDING_INFO); +#ifdef HAVE_NETDEV_PRECHANGEUPPER + BNXT_RE_NETDEV_EVENT(event, NETDEV_PRECHANGEUPPER); +#endif +#ifdef HAVE_NETDEV_CHANGELOWERSTATE + BNXT_RE_NETDEV_EVENT(event, NETDEV_CHANGELOWERSTATE); +#endif +#ifdef HAVE_NETDEV_UDP_TUNNEL_DROP_INFO + BNXT_RE_NETDEV_EVENT(event, NETDEV_UDP_TUNNEL_PUSH_INFO); + BNXT_RE_NETDEV_EVENT(event, NETDEV_UDP_TUNNEL_DROP_INFO); +#endif +#ifdef HAVE_NETDEV_CHANGE_TX_QUEUE_LEN + BNXT_RE_NETDEV_EVENT(event, NETDEV_CHANGE_TX_QUEUE_LEN); +#endif +#ifdef HAVE_NETDEV_CVLAN_FILTER_PUSH_INFO + BNXT_RE_NETDEV_EVENT(event, NETDEV_CVLAN_FILTER_PUSH_INFO); + BNXT_RE_NETDEV_EVENT(event, NETDEV_CVLAN_FILTER_DROP_INFO); + BNXT_RE_NETDEV_EVENT(event, NETDEV_SVLAN_FILTER_PUSH_INFO); + BNXT_RE_NETDEV_EVENT(event, NETDEV_SVLAN_FILTER_DROP_INFO); +#endif + return "Unknown"; +} + +/* RoCE -> Net driver */ + +/* Driver registration routines used to let the networking driver (bnxt_en) + * to know that the RoCE driver is now installed */ + +static int bnxt_re_register_netdev(struct bnxt_re_dev *rdev) +{ + struct bnxt_en_dev *en_dev = rdev->en_dev; + int rc = 0; + + rc = bnxt_register_dev(en_dev, &bnxt_re_ulp_ops, rdev->adev); + if (rc) { + dev_err(rdev_to_dev(rdev), "netdev %p register failed! 
rc = 0x%x", + rdev->netdev, rc); + return rc; + } + + return rc; +} + +static int bnxt_re_alloc_nqr_mem(struct bnxt_re_dev *rdev) +{ + rdev->nqr = kzalloc(sizeof(*rdev->nqr), GFP_KERNEL); + if (!rdev->nqr) + return -ENOMEM; + + return 0; +} + +static void bnxt_re_free_nqr_mem(struct bnxt_re_dev *rdev) +{ + kfree(rdev->nqr); + rdev->nqr = NULL; +} + +static void bnxt_re_set_db_offset(struct bnxt_re_dev *rdev) +{ + struct bnxt_qplib_chip_ctx *cctx; + struct bnxt_en_dev *en_dev; + struct bnxt_qplib_res *res; + + res = &rdev->qplib_res; + en_dev = rdev->en_dev; + cctx = rdev->chip_ctx; + + if (_is_chip_gen_p5_p7(cctx)) { + res->dpi_tbl.ucreg.offset = en_dev->l2_db_offset; + res->dpi_tbl.wcreg.offset = en_dev->l2_db_size; + } else { + /* L2 doesn't support db_offset value for Wh+ */ + res->dpi_tbl.ucreg.offset = res->is_vf ? BNXT_QPLIB_DBR_VF_DB_OFFSET : + BNXT_QPLIB_DBR_PF_DB_OFFSET; + res->dpi_tbl.wcreg.offset = res->dpi_tbl.ucreg.offset + PAGE_SIZE; + } +} + +static void bnxt_re_set_drv_mode(struct bnxt_re_dev *rdev, u8 mode) +{ + struct bnxt_qplib_chip_ctx *cctx; + struct bnxt_en_dev *en_dev; + + en_dev = rdev->en_dev; + cctx = rdev->chip_ctx; + cctx->modes.wqe_mode = _is_chip_gen_p5_p7(rdev->chip_ctx) ? + mode : BNXT_QPLIB_WQE_MODE_STATIC; + + dev_dbg(rdev_to_dev(rdev), + "Configuring te_bypass mode - 0x%x", + ((struct bnxt_re_en_dev_info *) + auxiliary_get_drvdata(rdev->adev))->te_bypass); + + cctx->modes.te_bypass = ((struct bnxt_re_en_dev_info *) + auxiliary_get_drvdata(rdev->adev))->te_bypass; + + /* + * In Thor2, as per HW requirement if HW LAG is enabled, TE Bypass needs + * to be disabled regardless of the bond device created or not. + */ + if (_is_chip_p7(rdev->chip_ctx) && BNXT_EN_HW_LAG(rdev->en_dev)) + cctx->modes.te_bypass = 0; + + if (bnxt_re_hwrm_qcaps(rdev)) + dev_err(rdev_to_dev(rdev), + "Failed to query hwrm qcaps\n"); + + rdev->roce_mode = en_dev->flags & BNXT_EN_FLAG_ROCE_CAP; + dev_dbg(rdev_to_dev(rdev), + "RoCE is supported on the device - caps:0x%x", + rdev->roce_mode); + /* For Wh+, support only RoCE v2 now */ + if (!_is_chip_gen_p5_p7(rdev->chip_ctx)) + rdev->roce_mode = BNXT_RE_FLAG_ROCEV2_CAP; + cctx->hw_stats_size = en_dev->hw_ring_stats_size; +} + +static void bnxt_re_destroy_chip_ctx(struct bnxt_re_dev *rdev) +{ + struct bnxt_qplib_chip_ctx *chip_ctx; + struct bnxt_qplib_res *res; + + if (!rdev->chip_ctx) + return; + + res = &rdev->qplib_res; + bnxt_qplib_unmap_db_bar(res); + + kfree(res->hctx); + res->rcfw = NULL; + kfree(rdev->dev_attr); + rdev->dev_attr = NULL; + + chip_ctx = rdev->chip_ctx; + rdev->chip_ctx = NULL; + res->cctx = NULL; + res->hctx = NULL; + res->pdev = NULL; + res->netdev = NULL; + kfree(chip_ctx); +} + +static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev, u8 wqe_mode) +{ + struct bnxt_qplib_chip_ctx *chip_ctx; + struct bnxt_en_dev *en_dev; + int rc; + + en_dev = rdev->en_dev; + /* Supply pci device to qplib */ + rdev->qplib_res.pdev = en_dev->pdev; + rdev->qplib_res.netdev = rdev->netdev; + rdev->qplib_res.en_dev = en_dev; + + chip_ctx = kzalloc(sizeof(*chip_ctx), GFP_KERNEL); + if (!chip_ctx) + return -ENOMEM; + rdev->chip_ctx = chip_ctx; + rdev->qplib_res.cctx = chip_ctx; + rc = bnxt_re_query_hwrm_intf_version(rdev); + if (rc) + goto fail; + rdev->dev_attr = kzalloc(sizeof(*rdev->dev_attr), GFP_KERNEL); + if (!rdev->dev_attr) { + rc = -ENOMEM; + goto fail; + } + rdev->qplib_res.dattr = rdev->dev_attr; + rdev->qplib_res.rcfw = &rdev->rcfw; + rdev->qplib_res.is_vf = rdev->is_virtfn; + + rdev->qplib_res.hctx = 
kzalloc(sizeof(*rdev->qplib_res.hctx), + GFP_KERNEL); + if (!rdev->qplib_res.hctx) { + rc = -ENOMEM; + goto fail; + } + bnxt_re_set_drv_mode(rdev, wqe_mode); + + bnxt_re_set_db_offset(rdev); + rc = bnxt_qplib_map_db_bar(&rdev->qplib_res); + if (rc) + goto fail; + + rc = bnxt_qplib_enable_atomic_ops_to_root(en_dev->pdev, rdev->is_virtfn); + if (rc) + dev_dbg(rdev_to_dev(rdev), + "platform doesn't support global atomics"); + + return 0; +fail: + kfree(rdev->chip_ctx); + rdev->chip_ctx = NULL; + + kfree(rdev->dev_attr); + rdev->dev_attr = NULL; + + kfree(rdev->qplib_res.hctx); + rdev->qplib_res.hctx = NULL; + return rc; +} + +static u16 bnxt_re_get_rtype(struct bnxt_re_dev *rdev) { + return _is_chip_gen_p5_p7(rdev->chip_ctx) ? + RING_ALLOC_REQ_RING_TYPE_NQ : + RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL; +} + +static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev, u16 fw_ring_id) +{ + int rc = -EINVAL; + struct hwrm_ring_free_input req = {0}; + struct hwrm_ring_free_output resp; + struct bnxt_en_dev *en_dev = rdev->en_dev; + struct bnxt_fw_msg fw_msg = {}; + + if (!en_dev) + return rc; + + /* To avoid unnecessary error messages during recovery. + * HW is anyway in error state. So dont send down the command */ + if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags)) + return 0; + + /* allocation had failed, no need to issue hwrm */ + if (fw_ring_id == 0xffff) + return 0; + + bnxt_re_init_hwrm_hdr((void *)&req, HWRM_RING_FREE, -1); + req.ring_type = bnxt_re_get_rtype(rdev); + req.ring_id = cpu_to_le16(fw_ring_id); + bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp, + sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev)); + rc = bnxt_send_msg(en_dev, &fw_msg); + if (rc) { + dev_dbg(rdev_to_dev(rdev), + "Failed to free HW ring with rc = 0x%x", rc); + return rc; + } + dev_dbg(rdev_to_dev(rdev), "HW ring freed with id = 0x%x\n", + fw_ring_id); + + return rc; +} + +static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev, + struct bnxt_re_ring_attr *ring_attr, + u16 *fw_ring_id) +{ + int rc = -EINVAL; + struct hwrm_ring_alloc_input req = {0}; + struct hwrm_ring_alloc_output resp; + struct bnxt_en_dev *en_dev = rdev->en_dev; + struct bnxt_fw_msg fw_msg = {}; + + if (!en_dev) + return rc; + + bnxt_re_init_hwrm_hdr((void *)&req, HWRM_RING_ALLOC, -1); + req.flags = cpu_to_le16(ring_attr->flags); + req.enables = 0; + req.page_tbl_addr = cpu_to_le64(ring_attr->dma_arr[0]); + if (ring_attr->pages > 1) { + /* Page size is in log2 units */ + req.page_size = BNXT_PAGE_SHIFT; + req.page_tbl_depth = 1; + } else { + req.page_size = 4; /* FIXME: hardconding */ + req.page_tbl_depth = 0; + } + + req.fbo = 0; + /* Association of ring index with doorbell index and MSIX number */ + req.logical_id = cpu_to_le16(ring_attr->lrid); + req.length = cpu_to_le32(ring_attr->depth + 1); + req.ring_type = ring_attr->type; + req.int_mode = ring_attr->mode; + bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp, + sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev)); + rc = bnxt_send_msg(en_dev, &fw_msg); + if (rc) { + dev_dbg(rdev_to_dev(rdev), + "Failed to allocate HW ring with rc = 0x%x", rc); + return rc; + } + *fw_ring_id = le16_to_cpu(resp.ring_id); + dev_dbg(rdev_to_dev(rdev), + "HW ring allocated with id = 0x%x at slot 0x%x", + resp.ring_id, ring_attr->lrid); + + return rc; +} + +static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev, + u32 fw_stats_ctx_id, u16 tid) +{ + struct bnxt_en_dev *en_dev = rdev->en_dev; + struct hwrm_stat_ctx_free_input req = {0}; + struct 
hwrm_stat_ctx_free_output resp; + struct bnxt_fw_msg fw_msg = {}; + int rc = -EINVAL; + + if (!en_dev) + return rc; + + /* To avoid unnecessary error messages during recovery. + * HW is anyway in error state. So dont send down the command */ + if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags)) + return 0; + bnxt_re_init_hwrm_hdr((void *)&req, HWRM_STAT_CTX_FREE, tid); + req.stat_ctx_id = cpu_to_le32(fw_stats_ctx_id); + bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp, + sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev)); + rc = bnxt_send_msg(en_dev, &fw_msg); + if (rc) { + dev_dbg(rdev_to_dev(rdev), + "Failed to free HW stats ctx with rc = 0x%x", rc); + return rc; + } + dev_dbg(rdev_to_dev(rdev), + "HW stats ctx freed with id = 0x%x", fw_stats_ctx_id); + + return rc; +} + +static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev, u16 tid) +{ + struct hwrm_stat_ctx_alloc_output resp = {}; + struct hwrm_stat_ctx_alloc_input req = {}; + struct bnxt_en_dev *en_dev = rdev->en_dev; + struct bnxt_fw_msg fw_msg = {}; + struct bnxt_qplib_stats *stat; + struct bnxt_qplib_ctx *hctx; + int rc = 0; + + hctx = rdev->qplib_res.hctx; + stat = (tid == 0xffff) ? &hctx->stats : &hctx->stats2; + stat->fw_id = INVALID_STATS_CTX_ID; + + if (!en_dev) + return -EINVAL; + + bnxt_re_init_hwrm_hdr((void *)&req, HWRM_STAT_CTX_ALLOC, tid); + req.update_period_ms = cpu_to_le32(1000); + req.stats_dma_length = rdev->chip_ctx->hw_stats_size; + req.stats_dma_addr = cpu_to_le64(stat->dma_map); + req.stat_ctx_flags = STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE; + bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp, + sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev)); + rc = bnxt_send_msg(en_dev, &fw_msg); + if (rc) { + dev_dbg(rdev_to_dev(rdev), + "Failed to allocate HW stats ctx, rc = 0x%x", rc); + return rc; + } + stat->fw_id = le32_to_cpu(resp.stat_ctx_id); + dev_dbg(rdev_to_dev(rdev), "HW stats ctx allocated with id = 0x%x", + stat->fw_id); + + return rc; +} + +static void bnxt_re_net_unregister_async_event(struct bnxt_re_dev *rdev) +{ + struct bnxt_re_en_dev_info *en_info; + u32 *event_bitmap; + + if (rdev->is_virtfn || + test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags)) + return; + + en_info = auxiliary_get_drvdata(rdev->adev); + if (!en_info) + return; + + event_bitmap = en_info->event_bitmap; + memset(event_bitmap, 0, sizeof(en_info->event_bitmap)); + + if (bnxt_register_async_events + (rdev->en_dev, (unsigned long *)event_bitmap, + ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_NQ_UPDATE)) + dev_err(rdev_to_dev(rdev), + "Failed to unregister async event"); +} + +static void bnxt_re_net_register_async_event(struct bnxt_re_dev *rdev) +{ + struct bnxt_re_en_dev_info *en_info; + u32 *event_bitmap; + + en_info = auxiliary_get_drvdata(rdev->adev); + if (!en_info) + return; + + event_bitmap = en_info->event_bitmap; + event_bitmap[2] |= BIT(ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT - 64); + + if (rdev->is_virtfn) + goto register_async_events; + + event_bitmap[0] |= BIT(ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE) | + BIT(ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY); + event_bitmap[2] |= BIT(ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_THRESHOLD - 64) | + BIT(ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_NQ_UPDATE - 64); + +register_async_events: + if (bnxt_register_async_events + (rdev->en_dev, (unsigned long *)event_bitmap, + ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_NQ_UPDATE)) + dev_err(rdev_to_dev(rdev), + "Failed to reg Async event"); +} + +static int 
bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev) +{ + struct bnxt_en_dev *en_dev = rdev->en_dev; + struct hwrm_ver_get_output resp = {0}; + struct hwrm_ver_get_input req = {0}; + struct bnxt_qplib_chip_ctx *cctx; + struct bnxt_fw_msg fw_msg = {}; + int rc = 0; + + bnxt_re_init_hwrm_hdr((void *)&req, HWRM_VER_GET, -1); + req.hwrm_intf_maj = HWRM_VERSION_MAJOR; + req.hwrm_intf_min = HWRM_VERSION_MINOR; + req.hwrm_intf_upd = HWRM_VERSION_UPDATE; + bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp, + sizeof(resp), DFLT_HWRM_CMD_TIMEOUT); + rc = bnxt_send_msg(en_dev, &fw_msg); + if (rc) { + dev_dbg(rdev_to_dev(rdev), + "Failed to query HW version, rc = 0x%x", rc); + return rc; + } + cctx = rdev->chip_ctx; + cctx->hwrm_intf_ver = (u64) le16_to_cpu(resp.hwrm_intf_major) << 48 | + (u64) le16_to_cpu(resp.hwrm_intf_minor) << 32 | + (u64) le16_to_cpu(resp.hwrm_intf_build) << 16 | + le16_to_cpu(resp.hwrm_intf_patch); + + cctx->hwrm_cmd_max_timeout = le16_to_cpu(resp.max_req_timeout); + + if (!cctx->hwrm_cmd_max_timeout) + cctx->hwrm_cmd_max_timeout = RCFW_FW_STALL_MAX_TIMEOUT; + + cctx->chip_num = le16_to_cpu(resp.chip_num); + cctx->chip_rev = resp.chip_rev; + cctx->chip_metal = resp.chip_metal; + return 0; +} + +/* Query function capabilities using common hwrm */ +int bnxt_re_hwrm_qcaps(struct bnxt_re_dev *rdev) +{ + struct bnxt_en_dev *en_dev = rdev->en_dev; + struct hwrm_func_qcaps_output resp = {0}; + struct hwrm_func_qcaps_input req = {0}; + struct bnxt_qplib_chip_ctx *cctx; + struct bnxt_fw_msg fw_msg = {}; + int rc; + + cctx = rdev->chip_ctx; + bnxt_re_init_hwrm_hdr((void *)&req, HWRM_FUNC_QCAPS, -1); + req.fid = cpu_to_le16(0xffff); + bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp, + sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev)); + rc = bnxt_send_msg(en_dev, &fw_msg); + if (rc) { + dev_dbg(rdev_to_dev(rdev), + "Failed to query capabilities, rc = %#x", rc); + return rc; + } + + /* + * Check if WCB push enalbed for Thor. + * For Thor2 and future chips, push mode enablement check is through + * RoCE specific device attribute query. + */ + if (resp.flags & FUNC_QCAPS_RESP_FLAGS_WCB_PUSH_MODE) + cctx->modes.db_push_mode = BNXT_RE_PUSH_MODE_WCB; + cctx->modes.dbr_pacing = + resp.flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_DBR_PACING_SUPPORTED ? + true : false; + cctx->modes.dbr_pacing_ext = + resp.flags_ext2 & + FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_EXT_SUPPORTED ? + true : false; + cctx->modes.dbr_drop_recov = + (resp.flags_ext2 & + FUNC_QCAPS_RESP_FLAGS_EXT2_SW_DBR_DROP_RECOVERY_SUPPORTED) ? + true : false; + cctx->modes.dbr_pacing_v0 = + (resp.flags_ext2 & + FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_V0_SUPPORTED) ? + true : false; + cctx->modes.steering_tag_supported = + (resp.flags_ext2 & + FUNC_QCAPS_RESP_FLAGS_EXT2_STEERING_TAG_SUPPORTED) ? 
+ true : false; + dev_dbg(rdev_to_dev(rdev), + "%s: cctx->modes.dbr_pacing = %d cctx->modes.dbr_pacing_ext = %d\n", + __func__, cctx->modes.dbr_pacing, cctx->modes.dbr_pacing_ext); + dev_dbg(rdev_to_dev(rdev), + "%s: cctx->modes.steering_tag_supported = %d\n", + __func__, cctx->modes.steering_tag_supported); + return 0; +} + +static int bnxt_re_hwrm_dbr_pacing_qcfg(struct bnxt_re_dev *rdev) +{ + struct bnxt_qplib_db_pacing_data *pacing_data = rdev->qplib_res.pacing_data; + struct hwrm_func_dbr_pacing_qcfg_output resp = {0}; + struct hwrm_func_dbr_pacing_qcfg_input req = {0}; + struct bnxt_en_dev *en_dev = rdev->en_dev; + struct bnxt_qplib_chip_ctx *cctx; + struct bnxt_fw_msg fw_msg = {}; + u32 primary_nq_id; + int rc; + + cctx = rdev->chip_ctx; + bnxt_re_init_hwrm_hdr((void *)&req, HWRM_FUNC_DBR_PACING_QCFG, -1); + bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp, + sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev)); + rc = bnxt_send_msg(en_dev, &fw_msg); + if (rc) { + dev_dbg(rdev_to_dev(rdev), + "Failed to query dbr pacing config, rc = %#x", rc); + return rc; + } + + if (!rdev->is_virtfn) { + primary_nq_id = le32_to_cpu(resp.primary_nq_id); + if (primary_nq_id == 0xffffffff && + !bnxt_qplib_dbr_pacing_ext_en(rdev->chip_ctx)) + bnxt_qplib_dbr_pacing_set_primary_pf(rdev->chip_ctx, 1); + + if (bnxt_qplib_dbr_pacing_ext_en(rdev->chip_ctx)) { + struct bnxt_qplib_nq *nq; + + nq = &rdev->nqr->nq[0]; + /* Reset the primary capability */ + if (nq->ring_id != primary_nq_id) + bnxt_qplib_dbr_pacing_set_primary_pf(rdev->chip_ctx, 0); + } + + if ((resp.dbr_throttling_aeq_arm_reg & + FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_MASK) == + FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_GRC) { + cctx->dbr_aeq_arm_reg = resp.dbr_throttling_aeq_arm_reg & + ~FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_MASK; + cctx->dbr_throttling_reg = cctx->dbr_aeq_arm_reg - 4; + } + } + + if ((resp.dbr_stat_db_fifo_reg & + FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_MASK) == + FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_GRC) + cctx->dbr_stat_db_fifo = + resp.dbr_stat_db_fifo_reg & + ~FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_MASK; + + pacing_data->fifo_max_depth = le32_to_cpu(resp.dbr_stat_db_max_fifo_depth); + if (!pacing_data->fifo_max_depth) + pacing_data->fifo_max_depth = BNXT_RE_MAX_FIFO_DEPTH(cctx); + pacing_data->fifo_room_mask = le32_to_cpu(resp.dbr_stat_db_fifo_reg_fifo_room_mask); + pacing_data->fifo_room_shift = resp.dbr_stat_db_fifo_reg_fifo_room_shift; + dev_dbg(rdev_to_dev(rdev), + "%s: nq:0x%x primary_pf:%d db_fifo:0x%x aeq_arm:0x%x", + __func__, resp.primary_nq_id, cctx->modes.dbr_primary_pf, + cctx->dbr_stat_db_fifo, cctx->dbr_aeq_arm_reg); + return 0; +} + +int bnxt_re_hwrm_dbr_pacing_cfg(struct bnxt_re_dev *rdev, bool enable) +{ + struct hwrm_func_dbr_pacing_cfg_output resp = {0}; + struct hwrm_func_dbr_pacing_cfg_input req = {0}; + struct bnxt_en_dev *en_dev = rdev->en_dev; + struct bnxt_fw_msg fw_msg = {}; + int rc; + + if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags)) + return 0; + + bnxt_re_init_hwrm_hdr((void *)&req, HWRM_FUNC_DBR_PACING_CFG, -1); + if (enable) { + req.flags = FUNC_DBR_PACING_CFG_REQ_FLAGS_DBR_NQ_EVENT_ENABLE; + req.enables = + cpu_to_le32(FUNC_DBR_PACING_CFG_REQ_ENABLES_PRIMARY_NQ_ID_VALID | + FUNC_DBR_PACING_CFG_REQ_ENABLES_PACING_THRESHOLD_VALID); + } else { + req.flags = FUNC_DBR_PACING_CFG_REQ_FLAGS_DBR_NQ_EVENT_DISABLE; + } + req.primary_nq_id = 
cpu_to_le32(rdev->dbq_nq_id); + req.pacing_threshold = cpu_to_le32(rdev->dbq_watermark); + dev_dbg(rdev_to_dev(rdev), "%s: nq_id = 0x%x pacing_threshold = 0x%x", + __func__, req.primary_nq_id, req.pacing_threshold); + bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp, + sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev)); + rc = bnxt_send_msg(en_dev, &fw_msg); + if (rc) { + dev_err(rdev_to_dev(rdev), + "Failed to set dbr pacing config, rc = %#x", rc); + return rc; + } + return 0; +} + +/* Net -> RoCE driver */ + +/* Device */ +struct bnxt_re_dev *bnxt_re_from_netdev(struct net_device *netdev) +{ + struct bnxt_re_dev *rdev; + + rcu_read_lock(); + list_for_each_entry_rcu(rdev, &bnxt_re_dev_list, list) { + if (rdev->netdev == netdev) { + rcu_read_unlock(); + dev_dbg(rdev_to_dev(rdev), + "netdev (%p) found, ref_count = 0x%x", + netdev, atomic_read(&rdev->ref_count)); + return rdev; + } + if (rdev->binfo) { + if (rdev->binfo->slave1 == netdev || + rdev->binfo->slave2 == netdev) { + rcu_read_unlock(); + dev_dbg(rdev_to_dev(rdev), + "Bond netdev (%p) found, ref_count = 0x%x", + netdev, atomic_read(&rdev->ref_count)); + return rdev; + } + } + } + rcu_read_unlock(); + return NULL; +} + +#ifdef HAVE_IB_SET_DEV_OPS +static const struct ib_device_ops bnxt_re_dev_ops = { +#ifdef HAVE_IB_OWNER_IN_DEVICE_OPS + .owner = THIS_MODULE, + .driver_id = RDMA_DRIVER_BNXT_RE, + .uverbs_abi_ver = BNXT_RE_ABI_VERSION, +#endif + .query_device = bnxt_re_query_device, + .modify_device = bnxt_re_modify_device, + + .query_port = bnxt_re_query_port, + .modify_port = bnxt_re_modify_port, +#ifdef HAVE_IB_GET_PORT_IMMUTABLE + .get_port_immutable = bnxt_re_get_port_immutable, +#endif +#ifdef HAVE_IB_GET_DEV_FW_STR + .get_dev_fw_str = bnxt_re_query_fw_str, +#endif + .query_pkey = bnxt_re_query_pkey, + .query_gid = bnxt_re_query_gid, +#ifdef HAVE_IB_GET_NETDEV + .get_netdev = bnxt_re_get_netdev, +#endif +#ifdef HAVE_IB_ADD_DEL_GID + .add_gid = bnxt_re_add_gid, + .del_gid = bnxt_re_del_gid, +#endif +#ifdef HAVE_IB_MODIFY_GID + .modify_gid = bnxt_re_modify_gid, +#endif + .get_link_layer = bnxt_re_get_link_layer, + + .alloc_pd = bnxt_re_alloc_pd, + .dealloc_pd = bnxt_re_dealloc_pd, + + .create_ah = bnxt_re_create_ah, +#ifdef HAVE_IB_CREATE_USER_AH + .create_user_ah = bnxt_re_create_ah, +#endif + .query_ah = bnxt_re_query_ah, + .destroy_ah = bnxt_re_destroy_ah, + + .create_srq = bnxt_re_create_srq, + .modify_srq = bnxt_re_modify_srq, + .query_srq = bnxt_re_query_srq, + .destroy_srq = bnxt_re_destroy_srq, + .post_srq_recv = bnxt_re_post_srq_recv, + + .create_qp = bnxt_re_create_qp, + .modify_qp = bnxt_re_modify_qp, + .query_qp = bnxt_re_query_qp, + .destroy_qp = bnxt_re_destroy_qp, + + .post_send = bnxt_re_post_send, + .post_recv = bnxt_re_post_recv, + + .create_cq = bnxt_re_create_cq, + .modify_cq = bnxt_re_modify_cq, /* Need ? 
*/ + .destroy_cq = bnxt_re_destroy_cq, + .resize_cq = bnxt_re_resize_cq, + .poll_cq = bnxt_re_poll_cq, + .req_notify_cq = bnxt_re_req_notify_cq, + + .get_dma_mr = bnxt_re_get_dma_mr, +#ifdef HAVE_IB_REG_PHYS_MR + .reg_phys_mr = bnxt_re_reg_phys_mr, + .rereg_phys_mr = bnxt_re_rereg_phys_mr, +#endif +#ifdef HAVE_IB_QUERY_MR + .query_mr = bnxt_re_query_mr, +#endif + .dereg_mr = bnxt_re_dereg_mr, +#ifdef HAVE_IB_SIGNATURE_HANDOVER + .destroy_mr = bnxt_re_destroy_mr, + .create_mr = bnxt_re_create_mr, +#endif +#ifdef HAVE_IB_FAST_REG_MR + .alloc_fast_reg_mr = bnxt_re_alloc_fast_reg_mr, + .alloc_fast_reg_page_list = bnxt_re_alloc_fast_reg_page_list, + .free_fast_reg_page_list = bnxt_re_free_fast_reg_page_list, +#endif +#ifdef HAVE_IB_ALLOC_MR + .alloc_mr = bnxt_re_alloc_mr, +#endif +#ifdef HAVE_IB_MAP_MR_SG + .map_mr_sg = bnxt_re_map_mr_sg, +#endif + .alloc_mw = bnxt_re_alloc_mw, +#ifdef HAVE_IB_BIND_MW + .bind_mw = bnxt_re_bind_mw, +#endif + .dealloc_mw = bnxt_re_dealloc_mw, +#ifdef USE_IB_FMR + .alloc_fmr = bnxt_re_alloc_fmr, + .map_phys_fmr = bnxt_re_map_phys_fmr, + .unmap_fmr = bnxt_re_unmap_fmr, + .dealloc_fmr = bnxt_re_dealloc_fmr, +#endif + .reg_user_mr = bnxt_re_reg_user_mr, +#ifdef HAVE_IB_UMEM_DMABUF + .reg_user_mr_dmabuf = bnxt_re_reg_user_mr_dmabuf, +#endif +#ifdef HAVE_IB_REREG_USER_MR + /* + * TODO: Workaround to mask an issue rereg_user_mr handler + * .rereg_user_mr = bnxt_re_rereg_user_mr, + */ +#endif +#ifdef HAVE_DISASSOCIATE_UCNTX + /* + * Have to be populated for both old and new kernels + * to allow rmmoding driver with app running + */ + .disassociate_ucontext = bnxt_re_disassociate_ucntx, +#endif + .alloc_ucontext = bnxt_re_alloc_ucontext, + .dealloc_ucontext = bnxt_re_dealloc_ucontext, + .mmap = bnxt_re_mmap, + .process_mad = bnxt_re_process_mad, +#ifdef HAVE_AH_ALLOC_IN_IB_CORE + INIT_RDMA_OBJ_SIZE(ib_ah, bnxt_re_ah, ib_ah), +#endif +#ifdef HAVE_PD_ALLOC_IN_IB_CORE + INIT_RDMA_OBJ_SIZE(ib_pd, bnxt_re_pd, ib_pd), +#endif +#ifdef HAVE_SRQ_CREATE_IN_IB_CORE + INIT_RDMA_OBJ_SIZE(ib_srq, bnxt_re_srq, ib_srq), +#endif +#ifdef HAVE_CQ_ALLOC_IN_IB_CORE + INIT_RDMA_OBJ_SIZE(ib_cq, bnxt_re_cq, ib_cq), +#endif +#ifdef HAVE_QP_ALLOC_IN_IB_CORE + INIT_RDMA_OBJ_SIZE(ib_qp, bnxt_re_qp, ib_qp), +#endif +#ifdef HAVE_UCONTEXT_ALLOC_IN_IB_CORE + INIT_RDMA_OBJ_SIZE(ib_ucontext, bnxt_re_ucontext, ib_uctx), +#endif +#ifdef HAVE_ALLOC_HW_PORT_STATS + .alloc_hw_port_stats = bnxt_re_alloc_hw_port_stats, +#else + .alloc_hw_stats = bnxt_re_alloc_hw_stats, +#endif + .get_hw_stats = bnxt_re_get_hw_stats, +}; +#endif /* HAVE_IB_SET_DEV_OPS */ +#ifdef HAVE_RDMA_SET_DEVICE_SYSFS_GROUP +static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr, + char *buf) +{ + struct bnxt_re_dev *rdev = to_bnxt_re_dev(device, ibdev.dev); + +#ifdef HAS_SYSFS_EMIT + return sysfs_emit(buf, "0x%x\n", rdev->en_dev->pdev->vendor); +#else + return scnprintf(buf, PAGE_SIZE, "0x%x\n", rdev->en_dev->pdev->vendor); +#endif +} +static DEVICE_ATTR_RO(hw_rev); + +static ssize_t hca_type_show(struct device *device, + struct device_attribute *attr, char *buf) +{ + struct bnxt_re_dev *rdev = to_bnxt_re_dev(device, ibdev.dev); + +#ifdef HAS_SYSFS_EMIT + return sysfs_emit(buf, "%s\n", rdev->ibdev.node_desc); +#else + return scnprintf(buf, PAGE_SIZE, "%s\n", rdev->ibdev.node_desc); +#endif +} +static DEVICE_ATTR_RO(hca_type); + + +static struct attribute *bnxt_re_attributes[] = { + &dev_attr_hw_rev.attr, + &dev_attr_hca_type.attr, + NULL +}; +static const struct attribute_group bnxt_re_dev_attr_group = { + .attrs = 
bnxt_re_attributes, +}; +#else +static ssize_t show_rev(struct device *device, struct device_attribute *attr, + char *buf) +{ + struct bnxt_re_dev *rdev = to_bnxt_re_dev(device, ibdev.dev); + + return scnprintf(buf, PAGE_SIZE, "0x%x\n", rdev->en_dev->pdev->vendor); +} + + +static ssize_t show_hca(struct device *device, struct device_attribute *attr, + char *buf) +{ + struct bnxt_re_dev *rdev = to_bnxt_re_dev(device, ibdev.dev); + + return scnprintf(buf, PAGE_SIZE, "%s\n", rdev->ibdev.node_desc); +} + +#ifndef HAVE_IB_GET_DEV_FW_STR +static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr, + char *buf) +{ + struct bnxt_re_dev *rdev = to_bnxt_re_dev(device, ibdev.dev); + + return scnprintf(buf, PAGE_SIZE, "%d.%d.%d.%d\n", + rdev->dev_attr->fw_ver[0], rdev->dev_attr->fw_ver[1], + rdev->dev_attr->fw_ver[2], rdev->dev_attr->fw_ver[3]); +} + +static DEVICE_ATTR(fw_rev, 0444, show_fw_ver, NULL); +#endif +static DEVICE_ATTR(hw_rev, 0444, show_rev, NULL); +static DEVICE_ATTR(hca_type, 0444, show_hca, NULL); + +static struct device_attribute *bnxt_re_attributes[] = { + &dev_attr_hw_rev, +#ifndef HAVE_IB_GET_DEV_FW_STR + &dev_attr_fw_rev, +#endif + &dev_attr_hca_type +}; +#endif + +static int bnxt_re_register_ib(struct bnxt_re_dev *rdev) +{ + struct ib_device *ibdev = &rdev->ibdev; + int ret = 0; + + /* ib device init */ +#ifndef HAVE_IB_OWNER_IN_DEVICE_OPS + ibdev->owner = THIS_MODULE; + ibdev->uverbs_abi_ver = BNXT_RE_ABI_VERSION; +#ifdef HAVE_RDMA_DRIVER_ID + ibdev->driver_id = RDMA_DRIVER_BNXT_RE; +#endif +#endif + ibdev->node_type = RDMA_NODE_IB_CA; + strlcpy(ibdev->node_desc, BNXT_RE_DESC " HCA", + strlen(BNXT_RE_DESC) + 5); + ibdev->phys_port_cnt = 1; + + addrconf_addr_eui48((u8 *)&ibdev->node_guid, rdev->netdev->dev_addr); + + /* Data path irqs is one less than the max msix vectors */ + ibdev->num_comp_vectors = rdev->nqr->num_msix - 1; + bnxt_re_set_dma_device(ibdev, rdev); + ibdev->local_dma_lkey = BNXT_QPLIB_RSVD_LKEY; + +#ifdef HAVE_IB_UVERBS_CMD_MASK_IN_DRIVER + /* User space */ + ibdev->uverbs_cmd_mask = + (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) | + (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) | + (1ull << IB_USER_VERBS_CMD_QUERY_PORT) | + (1ull << IB_USER_VERBS_CMD_ALLOC_PD) | + (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) | + (1ull << IB_USER_VERBS_CMD_REG_MR) | + (1ull << IB_USER_VERBS_CMD_DEREG_MR) | + (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) | + (1ull << IB_USER_VERBS_CMD_CREATE_CQ) | + (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) | + (1ull << IB_USER_VERBS_CMD_CREATE_QP) | + (1ull << IB_USER_VERBS_CMD_MODIFY_QP) | + (1ull << IB_USER_VERBS_CMD_QUERY_QP) | + (1ull << IB_USER_VERBS_CMD_DESTROY_QP) | + /* + * (1ull << IB_USER_VERBS_CMD_REREG_MR) | + */ + (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) | + (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) | + (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) | + (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) | + (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) | + (1ull << IB_USER_VERBS_CMD_ALLOC_MW) | + (1ull << IB_USER_VERBS_CMD_DEALLOC_MW) | + (1ull << IB_USER_VERBS_CMD_CREATE_AH) | + (1ull << IB_USER_VERBS_CMD_MODIFY_AH) | + (1ull << IB_USER_VERBS_CMD_QUERY_AH) | + (1ull << IB_USER_VERBS_CMD_DESTROY_AH); + +#ifdef HAVE_IB_USER_VERBS_EX_CMD_MODIFY_QP + ibdev->uverbs_ex_cmd_mask = (1ull << IB_USER_VERBS_EX_CMD_MODIFY_QP); +#endif +#endif + ibdev->uverbs_cmd_mask |= (1ull << IB_USER_VERBS_CMD_POLL_CQ); + + /* REQ_NOTIFY_CQ is directly handled in libbnxt_re. 
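+	 * User space arms the CQ with a direct doorbell write, so only
+	 * POLL_CQ needs to be added to uverbs_cmd_mask here.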
+ * POLL_CQ is processed only as part of a RESIZE_CQ operation; + * the library uses this to let the kernel driver know that + * RESIZE_CQ is complete and memory from the previous CQ can be + * unmapped. + */ +#ifdef HAVE_RDMA_SET_DEVICE_SYSFS_GROUP + rdma_set_device_sysfs_group(ibdev, &bnxt_re_dev_attr_group); +#endif +#ifdef HAVE_IB_DEVICE_SET_NETDEV + ret = ib_device_set_netdev(&rdev->ibdev, rdev->netdev, 1); + if (ret) + return ret; +#endif + +#ifdef HAVE_IB_SET_DEV_OPS + ib_set_device_ops(ibdev, &bnxt_re_dev_ops); +#else + /* Kernel verbs */ + ibdev->query_device = bnxt_re_query_device; + ibdev->modify_device = bnxt_re_modify_device; + + ibdev->query_port = bnxt_re_query_port; + ibdev->modify_port = bnxt_re_modify_port; +#ifdef HAVE_IB_GET_PORT_IMMUTABLE + ibdev->get_port_immutable = bnxt_re_get_port_immutable; +#endif +#ifdef HAVE_IB_GET_DEV_FW_STR + ibdev->get_dev_fw_str = bnxt_re_query_fw_str; +#endif + ibdev->query_pkey = bnxt_re_query_pkey; + ibdev->query_gid = bnxt_re_query_gid; +#ifdef HAVE_IB_GET_NETDEV + ibdev->get_netdev = bnxt_re_get_netdev; +#endif +#ifdef HAVE_IB_ADD_DEL_GID + ibdev->add_gid = bnxt_re_add_gid; + ibdev->del_gid = bnxt_re_del_gid; +#endif +#ifdef HAVE_IB_MODIFY_GID + ibdev->modify_gid = bnxt_re_modify_gid; +#endif + ibdev->get_link_layer = bnxt_re_get_link_layer; + + ibdev->alloc_pd = bnxt_re_alloc_pd; + ibdev->dealloc_pd = bnxt_re_dealloc_pd; + + ibdev->create_ah = bnxt_re_create_ah; + ibdev->query_ah = bnxt_re_query_ah; + ibdev->destroy_ah = bnxt_re_destroy_ah; + + ibdev->create_srq = bnxt_re_create_srq; + ibdev->modify_srq = bnxt_re_modify_srq; + ibdev->query_srq = bnxt_re_query_srq; + ibdev->destroy_srq = bnxt_re_destroy_srq; + ibdev->post_srq_recv = bnxt_re_post_srq_recv; + + ibdev->create_qp = bnxt_re_create_qp; + ibdev->modify_qp = bnxt_re_modify_qp; + ibdev->query_qp = bnxt_re_query_qp; + ibdev->destroy_qp = bnxt_re_destroy_qp; + + ibdev->post_send = bnxt_re_post_send; + ibdev->post_recv = bnxt_re_post_recv; + + ibdev->create_cq = bnxt_re_create_cq; + ibdev->modify_cq = bnxt_re_modify_cq; /* Need ? 
*/ + ibdev->destroy_cq = bnxt_re_destroy_cq; + ibdev->resize_cq = bnxt_re_resize_cq; + ibdev->poll_cq = bnxt_re_poll_cq; + ibdev->req_notify_cq = bnxt_re_req_notify_cq; + + ibdev->get_dma_mr = bnxt_re_get_dma_mr; +#ifdef HAVE_IB_REG_PHYS_MR + ibdev->reg_phys_mr = bnxt_re_reg_phys_mr; + ibdev->rereg_phys_mr = bnxt_re_rereg_phys_mr; +#endif +#ifdef HAVE_IB_QUERY_MR + ibdev->query_mr = bnxt_re_query_mr; +#endif + ibdev->dereg_mr = bnxt_re_dereg_mr; +#ifdef HAVE_IB_SIGNATURE_HANDOVER + ibdev->destroy_mr = bnxt_re_destroy_mr; + ibdev->create_mr = bnxt_re_create_mr; +#endif +#ifdef HAVE_IB_FAST_REG_MR + ibdev->alloc_fast_reg_mr = bnxt_re_alloc_fast_reg_mr; + ibdev->alloc_fast_reg_page_list = bnxt_re_alloc_fast_reg_page_list; + ibdev->free_fast_reg_page_list = bnxt_re_free_fast_reg_page_list; +#endif +#ifdef HAVE_IB_ALLOC_MR + ibdev->alloc_mr = bnxt_re_alloc_mr; +#endif +#ifdef HAVE_IB_MAP_MR_SG + ibdev->map_mr_sg = bnxt_re_map_mr_sg; +#endif + ibdev->alloc_mw = bnxt_re_alloc_mw; +#ifdef HAVE_IB_BIND_MW + ibdev->bind_mw = bnxt_re_bind_mw; +#endif + ibdev->dealloc_mw = bnxt_re_dealloc_mw; +#ifdef USE_IB_FMR + ibdev->alloc_fmr = bnxt_re_alloc_fmr; + ibdev->map_phys_fmr = bnxt_re_map_phys_fmr; + ibdev->unmap_fmr = bnxt_re_unmap_fmr; + ibdev->dealloc_fmr = bnxt_re_dealloc_fmr; +#endif + + ibdev->reg_user_mr = bnxt_re_reg_user_mr; +#ifdef HAVE_IB_UMEM_DMABUF + ibdev->reg_user_mr_dmabuf = bnxt_re_reg_user_mr_dmabuf; +#endif +#ifdef HAVE_IB_REREG_USER_MR + /* + * TODO: Workaround to mask an issue in rereg_user_mr handler + * ibdev->rereg_user_mr = bnxt_re_rereg_user_mr; + */ +#endif +#ifdef HAVE_DISASSOCIATE_UCNTX + ibdev->disassociate_ucontext = bnxt_re_disassociate_ucntx; +#endif + ibdev->alloc_ucontext = bnxt_re_alloc_ucontext; + ibdev->dealloc_ucontext = bnxt_re_dealloc_ucontext; + ibdev->mmap = bnxt_re_mmap; + ibdev->process_mad = bnxt_re_process_mad; +#endif /* HAVE_IB_SET_DEV_OPS */ + +#ifndef HAVE_IB_ALLOC_MR + /* TODO: Workaround to uninitialized the kobj */ + ibdev->dev.kobj.state_initialized = 0; +#endif + + ret = ib_register_device_compat(rdev); + return ret; +} + +static void bnxt_re_dev_dealloc(struct bnxt_re_dev *rdev) +{ + int i = BNXT_RE_REF_WAIT_COUNT; + + dev_dbg(rdev_to_dev(rdev), "%s:Remove the device %p\n", __func__, rdev); + /* Wait for rdev refcount to come down */ + while ((atomic_read(&rdev->ref_count) > 1) && i--) + msleep(100); + + if (atomic_read(&rdev->ref_count) > 1) + dev_err(rdev_to_dev(rdev), + "Failed waiting for ref count to deplete %d", + atomic_read(&rdev->ref_count)); + + atomic_set(&rdev->ref_count, 0); + dev_put(rdev->netdev); + rdev->netdev = NULL; + if (rdev->binfo) { + kfree(rdev->binfo); + rdev->binfo = NULL; + } + synchronize_rcu(); + +#ifdef RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP + kfree(rdev->gid_map); +#endif + kfree(rdev->dbg_stats); + ib_dealloc_device(&rdev->ibdev); +} + +static void bnxt_re_dbr_drop_recov_init(struct bnxt_re_dev *rdev) +{ + struct bnxt_re_dbr_res_list *res; + int i; + + for (i = 0; i < BNXT_RE_RES_TYPE_MAX; i++) { + res = &rdev->res_list[i]; + + INIT_LIST_HEAD(&res->head); + spin_lock_init(&res->lock); + } +} + +static struct bnxt_re_dev *bnxt_re_dev_alloc(struct net_device *netdev, + struct bnxt_en_dev *en_dev) +{ + struct bnxt_re_dev *rdev; +#ifdef RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP + u32 count; +#endif + /* Allocate bnxt_re_dev instance here */ + rdev = (struct bnxt_re_dev *)compat_ib_alloc_device(sizeof(*rdev)); + if (!rdev) { + dev_err(NULL, "%s: bnxt_re_dev allocation failure!", + ROCE_DRV_MODULE_NAME); + return NULL; + } + /* Default 
values */ + atomic_set(&rdev->ref_count, 0); + rdev->netdev = netdev; + dev_hold(rdev->netdev); + rdev->en_dev = en_dev; + INIT_LIST_HEAD(&rdev->qp_list); + mutex_init(&rdev->qp_lock); + mutex_init(&rdev->cc_lock); + mutex_init(&rdev->dbq_lock); + bnxt_re_clear_rsors_stat(&rdev->stats.rsors); + rdev->cosq[0] = rdev->cosq[1] = 0xFFFF; + rdev->min_tx_depth = 1; + rdev->stats.stats_query_sec = 1; + /* Disable priority vlan as the default mode is DSCP based PFC */ + rdev->cc_param.disable_prio_vlan_tx = 1; + + /* Initialize worker for DBR Pacing */ + INIT_WORK(&rdev->dbq_fifo_check_work, bnxt_re_db_fifo_check); + INIT_DELAYED_WORK(&rdev->dbq_pacing_work, bnxt_re_pacing_timer_exp); +#ifdef RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP + rdev->gid_map = kzalloc(sizeof(*(rdev->gid_map)) * + BNXT_RE_MAX_SGID_ENTRIES, + GFP_KERNEL); + if (!rdev->gid_map) { + ib_dealloc_device(&rdev->ibdev); + return NULL; + } + for(count = 0; count < BNXT_RE_MAX_SGID_ENTRIES; count++) + rdev->gid_map[count] = -1; +#endif /* RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP */ + rdev->dbg_stats = kzalloc(sizeof(*rdev->dbg_stats), GFP_KERNEL); + if (!rdev->dbg_stats) { + ib_dealloc_device(&rdev->ibdev); + return NULL; + } + bnxt_re_dbr_drop_recov_init(rdev); + + return rdev; +} + +static int bnxt_re_handle_unaffi_async_event( + struct creq_func_event *unaffi_async) +{ + switch (unaffi_async->event) { + case CREQ_FUNC_EVENT_EVENT_TX_WQE_ERROR: + case CREQ_FUNC_EVENT_EVENT_TX_DATA_ERROR: + case CREQ_FUNC_EVENT_EVENT_RX_WQE_ERROR: + case CREQ_FUNC_EVENT_EVENT_RX_DATA_ERROR: + case CREQ_FUNC_EVENT_EVENT_CQ_ERROR: + case CREQ_FUNC_EVENT_EVENT_TQM_ERROR: + case CREQ_FUNC_EVENT_EVENT_CFCQ_ERROR: + case CREQ_FUNC_EVENT_EVENT_CFCS_ERROR: + case CREQ_FUNC_EVENT_EVENT_CFCC_ERROR: + case CREQ_FUNC_EVENT_EVENT_CFCM_ERROR: + case CREQ_FUNC_EVENT_EVENT_TIM_ERROR: + break; + default: + return -EINVAL; + } + return 0; +} + +static int bnxt_re_handle_qp_async_event(void *qp_event, struct bnxt_re_qp *qp) +{ + struct bnxt_re_srq *srq = to_bnxt_re(qp->qplib_qp.srq, struct bnxt_re_srq, + qplib_srq); + struct creq_qp_error_notification *err_event; + struct ib_event event; + unsigned int flags; + + if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR && + !qp->qplib_qp.is_user) { + flags = bnxt_re_lock_cqs(qp); + bnxt_qplib_add_flush_qp(&qp->qplib_qp); + bnxt_re_unlock_cqs(qp, flags); + } + memset(&event, 0, sizeof(event)); + event.device = &qp->rdev->ibdev; + event.element.qp = &qp->ib_qp; + event.event = IB_EVENT_QP_FATAL; + + err_event = qp_event; + + switch (err_event->req_err_state_reason) { + case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_OPCODE_ERROR: + case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_TIMEOUT_RETRY_LIMIT: + case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RNR_TIMEOUT_RETRY_LIMIT: + case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_NAK_ARRIVAL_2: + case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_NAK_ARRIVAL_3: + case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_INVALID_READ_RESP: + case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_ILLEGAL_BIND: + case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_ILLEGAL_FAST_REG: + case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_ILLEGAL_INVALIDATE: + case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RETRAN_LOCAL_ERROR: + case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_AV_DOMAIN_ERROR: + case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_PROD_WQE_MSMTCH_ERROR: + case 
CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_PSN_RANGE_CHECK_ERROR: + event.event = IB_EVENT_QP_ACCESS_ERR; + break; + case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_NAK_ARRIVAL_1: + case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_NAK_ARRIVAL_4: + case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_READ_RESP_LENGTH: + case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_WQE_FORMAT_ERROR: + case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_ORRQ_FORMAT_ERROR: + case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_INVALID_AVID_ERROR: + case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_SERV_TYPE_ERROR: + case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_INVALID_OP_ERROR: + event.event = IB_EVENT_QP_REQ_ERR; + break; + case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RX_MEMORY_ERROR: + case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_TX_MEMORY_ERROR: + case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_CMP_ERROR: + case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_CQ_LOAD_ERROR: + case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_TX_PCI_ERROR: + case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RX_PCI_ERROR: + case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RETX_SETUP_ERROR: + event.event = IB_EVENT_QP_FATAL; + break; + + default: + break; + } + + switch (err_event->res_err_state_reason) { + case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_EXCEED_MAX: + case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_PAYLOAD_LENGTH_MISMATCH: + case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_OPCODE_ERROR: + case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_PSN_SEQ_ERROR_RETRY_LIMIT: + case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_INVALID_R_KEY: + case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_DOMAIN_ERROR: + case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_NO_PERMISSION: + case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_RANGE_ERROR: + case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_INVALID_R_KEY: + case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_DOMAIN_ERROR: + case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_NO_PERMISSION: + case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_RANGE_ERROR: + case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_UNALIGN_ATOMIC: + case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_PSN_NOT_FOUND: + case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_EXCEEDS_WQE: + case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_WQE_FORMAT_ERROR: + case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_UNSUPPORTED_OPCODE: + case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_REM_INVALIDATE: + case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_INVALID_DUP_RKEY: + case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_IRRQ_FORMAT_ERROR: + event.event = IB_EVENT_QP_ACCESS_ERR; + break; + case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_IRRQ_OFLOW: + case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_CMP_ERROR: + case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_CQ_LOAD_ERROR: + case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_PCI_ERROR: + case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_PCI_ERROR: + case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_MEMORY_ERROR: + event.event = IB_EVENT_QP_FATAL; + break; + case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_SRQ_LOAD_ERROR: + 
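+	/* Both SRQ error reasons are reported as IB_EVENT_SRQ_ERR only when
+	 * an SRQ is attached; see the srq check below.
+	 */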
case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_SRQ_ERROR: + if (srq) + event.event = IB_EVENT_SRQ_ERR; + break; + default: + break; + } + + if (err_event->res_err_state_reason || err_event->req_err_state_reason) { + /* + * To avoid dmesg error prints flood. + * While posting upstream, use only one print. + */ + dev_err_once(rdev_to_dev(qp->rdev), + "%s %s qp_id: %d cons (%d %d) req (%d %d) res (%d %d)\n", + __func__, qp->qplib_qp.is_user ? "user" : "kernel", + qp->qplib_qp.id, + err_event->sq_cons_idx, + err_event->rq_cons_idx, + err_event->req_slow_path_state, + err_event->req_err_state_reason, + err_event->res_slow_path_state, + err_event->res_err_state_reason); + dev_dbg(rdev_to_dev(qp->rdev), + "%s %s qp_id: %d cons (%d %d) req (%d %d) res (%d %d)\n", + __func__, qp->qplib_qp.is_user ? "user" : "kernel", + qp->qplib_qp.id, + err_event->sq_cons_idx, + err_event->rq_cons_idx, + err_event->req_slow_path_state, + err_event->req_err_state_reason, + err_event->res_slow_path_state, + err_event->res_err_state_reason); + } else { + if (srq) + event.event = IB_EVENT_QP_LAST_WQE_REACHED; + } + + if (event.event == IB_EVENT_SRQ_ERR && srq->ib_srq.event_handler) { + (*srq->ib_srq.event_handler)(&event, + srq->ib_srq.srq_context); + } else if (event.device && qp->ib_qp.event_handler) { + qp->ib_qp.event_handler(&event, qp->ib_qp.qp_context); + } + + return 0; +} + +static int bnxt_re_handle_cq_async_error(void *event, struct bnxt_re_cq *cq) +{ + struct creq_cq_error_notification *cqerr; + bool send = false; + + cqerr = event; + switch (cqerr->cq_err_reason) { + case CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_REQ_CQ_INVALID_ERROR: + case CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_REQ_CQ_OVERFLOW_ERROR: + case CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_REQ_CQ_LOAD_ERROR: + case CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_RES_CQ_INVALID_ERROR: + case CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_RES_CQ_OVERFLOW_ERROR: + case CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_RES_CQ_LOAD_ERROR: + send = true; + default: + break; + } + + if (send && cq->ib_cq.event_handler) { + struct ib_event ibevent = {}; + + ibevent.event = IB_EVENT_CQ_ERR; + ibevent.element.cq = &cq->ib_cq; + ibevent.device = &cq->rdev->ibdev; + + dev_err_once(rdev_to_dev(cq->rdev), + "%s err reason %d\n", __func__, cqerr->cq_err_reason); + dev_dbg(rdev_to_dev(cq->rdev), + "%s err reason %d\n", __func__, cqerr->cq_err_reason); + cq->ib_cq.event_handler(&ibevent, cq->ib_cq.cq_context); + } + + cq->qplib_cq.is_cq_err_event = true; + + return 0; +} + +static int bnxt_re_handle_affi_async_event( + struct creq_qp_event *affi_async, void *obj) +{ + struct bnxt_qplib_qp *qplqp; + struct bnxt_qplib_cq *qplcq; + struct bnxt_re_qp *qp; + struct bnxt_re_cq *cq; + int rc = 0; + u8 event; + + if (!obj) + return rc; /* QP was already dead, still return success */ + + event = affi_async->event; + switch (event) { + case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION: + qplqp = obj; + qp = container_of(qplqp, struct bnxt_re_qp, qplib_qp); + /*FIXME: QP referencing */ + rc = bnxt_re_handle_qp_async_event(affi_async, qp); + break; + case CREQ_QP_EVENT_EVENT_CQ_ERROR_NOTIFICATION: + qplcq = obj; + cq = container_of(qplcq, struct bnxt_re_cq, qplib_cq); + rc = bnxt_re_handle_cq_async_error(affi_async, cq); + break; + default: + rc = -EINVAL; + } + + return rc; +} + +static int bnxt_re_aeq_handler(struct bnxt_qplib_rcfw *rcfw, + void *aeqe, void *obj) +{ + struct creq_func_event *unaffi_async; + struct creq_qp_event *affi_async; + u8 type; + int rc; + + type = ((struct 
creq_base *)aeqe)->type; + if (type == CREQ_BASE_TYPE_FUNC_EVENT) { + unaffi_async = aeqe; + rc = bnxt_re_handle_unaffi_async_event(unaffi_async); + } else { + affi_async = aeqe; + rc = bnxt_re_handle_affi_async_event(affi_async, obj); + } + + return rc; +} + +static int bnxt_re_srqn_handler(struct bnxt_qplib_nq *nq, + struct bnxt_qplib_srq *handle, u8 event) +{ + struct bnxt_re_srq *srq = to_bnxt_re(handle, struct bnxt_re_srq, + qplib_srq); + struct ib_event ib_event; + + if (srq == NULL) { + dev_err(NULL, "%s: SRQ is NULL, SRQN not handled", + ROCE_DRV_MODULE_NAME); + return -EINVAL; + } + ib_event.device = &srq->rdev->ibdev; + ib_event.element.srq = &srq->ib_srq; + + if (srq->ib_srq.event_handler) { + if (event == NQ_SRQ_EVENT_EVENT_SRQ_THRESHOLD_EVENT) + ib_event.event = IB_EVENT_SRQ_LIMIT_REACHED; + + /* Lock event_handler? */ + (*srq->ib_srq.event_handler)(&ib_event, + srq->ib_srq.srq_context); + } + + return 0; +} + +static int bnxt_re_cqn_handler(struct bnxt_qplib_nq *nq, + struct bnxt_qplib_cq *handle) +{ + struct bnxt_re_cq *cq = to_bnxt_re(handle, struct bnxt_re_cq, + qplib_cq); + u32 *cq_ptr; + + if (cq == NULL) { + dev_err(NULL, "%s: CQ is NULL, CQN not handled", + ROCE_DRV_MODULE_NAME); + return -EINVAL; + } + + if (cq->ib_cq.comp_handler) { + if (cq->uctx_cq_page) { + cq_ptr = (u32 *)cq->uctx_cq_page; + *cq_ptr = cq->qplib_cq.toggle; + } + /* Lock comp_handler? */ + (*cq->ib_cq.comp_handler)(&cq->ib_cq, cq->ib_cq.cq_context); + } + + return 0; +} + +struct bnxt_qplib_nq *bnxt_re_get_nq(struct bnxt_re_dev *rdev) +{ + int min, indx; + + mutex_lock(&rdev->nqr->load_lock); + for (indx = 0, min = 0; indx < (rdev->nqr->num_msix - 1); indx++) { + if (rdev->nqr->nq[min].load > rdev->nqr->nq[indx].load) + min = indx; + } + rdev->nqr->nq[min].load++; + mutex_unlock(&rdev->nqr->load_lock); + + return &rdev->nqr->nq[min]; +} + +void bnxt_re_put_nq(struct bnxt_re_dev *rdev, struct bnxt_qplib_nq *nq) +{ + mutex_lock(&rdev->nqr->load_lock); + nq->load--; + mutex_unlock(&rdev->nqr->load_lock); +} + +static bool bnxt_re_check_min_attr(struct bnxt_re_dev *rdev) +{ + struct bnxt_qplib_dev_attr *attr; + + attr = rdev->dev_attr; + + if (!attr->max_cq || !attr->max_qp || + !attr->max_sgid || !attr->max_mr) { + dev_err(rdev_to_dev(rdev),"Insufficient RoCE resources"); + dev_dbg(rdev_to_dev(rdev), + "max_cq = %d, max_qp = %d, max_dpi = %d, max_sgid = %d, max_mr = %d", + attr->max_cq, attr->max_qp, attr->max_dpi, + attr->max_sgid, attr->max_mr); + return false; + } + return true; +} + +static void bnxt_re_dispatch_event(struct ib_device *ibdev, struct ib_qp *qp, + u8 port_num, enum ib_event_type event) +{ + struct ib_event ib_event; + + ib_event.device = ibdev; + if (qp) { + ib_event.element.qp = qp; + ib_event.event = event; + if (qp->event_handler) + qp->event_handler(&ib_event, qp->qp_context); + } else { + ib_event.element.port_num = port_num; + ib_event.event = event; + ib_dispatch_event(&ib_event); + } + + dev_dbg(rdev_to_dev(to_bnxt_re_dev(ibdev, ibdev)), + "ibdev %p Event 0x%x port_num 0x%x", ibdev, event, port_num); +} + +static int bnxt_re_update_gid(struct bnxt_re_dev *rdev) +{ + struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl; + struct bnxt_qplib_gid gid; + u16 gid_idx, index; + int rc = 0; + + if (!test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags)) + return 0; + + for (index = 0; index < sgid_tbl->active; index++) { + gid_idx = sgid_tbl->hw_id[index]; + + if (!memcmp(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero, + sizeof(bnxt_qplib_gid_zero))) + continue; + /* Need 
to modify the VLAN enable setting of non VLAN GID only + * as setting is done for VLAN GID while adding GID + * + * If disable_prio_vlan_tx is enable, then we'll need to remove the + * vlan entry from the sgid_tbl. + */ + if (sgid_tbl->vlan[index] == true) + continue; + + memcpy(&gid, &sgid_tbl->tbl[index], sizeof(gid)); + + rc = bnxt_qplib_update_sgid(sgid_tbl, &gid, gid_idx, + rdev->qplib_res.netdev->dev_addr); + } + + return rc; +} + +static void bnxt_re_get_pri_and_update_gid(struct bnxt_re_dev *rdev) +{ + u8 prio_map = 0; + + /* Get priority for roce */ + prio_map = bnxt_re_get_priority_mask(rdev, + (IEEE_8021QAZ_APP_SEL_ETHERTYPE | + IEEE_8021QAZ_APP_SEL_DGRAM)); + if (prio_map == rdev->cur_prio_map) + return; + + rdev->cur_prio_map = prio_map; + if ((prio_map == 0 && rdev->qplib_res.prio == true) || + (prio_map != 0 && rdev->qplib_res.prio == false)) { + if (!rdev->cc_param.disable_prio_vlan_tx) { + rdev->qplib_res.prio = prio_map ? true : false; + bnxt_re_update_gid(rdev); + } + } +} + +static void bnxt_re_clear_cc(struct bnxt_re_dev *rdev) +{ + struct bnxt_qplib_cc_param *cc_param = &rdev->cc_param; + + if (!is_qport_service_type_supported(rdev)) + bnxt_re_clear_dscp(rdev); + + if (_is_chip_p7(rdev->chip_ctx)) { + cc_param->mask = CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TOS_DSCP; + } else { + cc_param->mask = (CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_CC_MODE | + CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ENABLE_CC | + CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TOS_ECN); + + if (!is_qport_service_type_supported(rdev)) + cc_param->mask |= + (CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ALT_VLAN_PCP | + CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ALT_TOS_DSCP | + CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TOS_DSCP); + } + + cc_param->cur_mask = cc_param->mask; + + if (bnxt_qplib_modify_cc(&rdev->qplib_res, cc_param)) + dev_err(rdev_to_dev(rdev), "Failed to modify cc\n"); +} + +static int bnxt_re_setup_cc(struct bnxt_re_dev *rdev) +{ + struct bnxt_qplib_cc_param *cc_param = &rdev->cc_param; + int rc; + + cc_param->enable = 0x1; + cc_param->mask = (CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_CC_MODE | + CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ENABLE_CC | + CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TOS_ECN); + + if (!is_qport_service_type_supported(rdev)) + cc_param->mask |= + (CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ALT_VLAN_PCP | + CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ALT_TOS_DSCP | + CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TOS_DSCP); + + cc_param->cur_mask = cc_param->mask; + + rc = bnxt_qplib_modify_cc(&rdev->qplib_res, cc_param); + if (rc) { + dev_err(rdev_to_dev(rdev), "Failed to modify cc\n"); + return rc; + } + if (!is_qport_service_type_supported(rdev)) { + mutex_lock(&rdev->cc_lock); + rc = bnxt_re_setup_dscp(rdev); + mutex_unlock(&rdev->cc_lock); + if (rc) + goto clear; + } + + /* Reset the programming mask */ + cc_param->mask = 0; + if (cc_param->qp1_tos_dscp != cc_param->tos_dscp) { + cc_param->qp1_tos_dscp = cc_param->tos_dscp; + rc = bnxt_re_update_qp1_tos_dscp(rdev); + if (rc) { + dev_err(rdev_to_dev(rdev), "%s:Failed to modify QP1:%d", + __func__, rc); + goto clear; + } + } + + return 0; + +clear: + bnxt_re_clear_cc(rdev); + return rc; +} + +int bnxt_re_query_hwrm_dscp2pri(struct bnxt_re_dev *rdev, + struct bnxt_re_dscp2pri *d2p, u16 *count, + u16 target_id) +{ + struct hwrm_queue_dscp2pri_qcfg_input req = {}; + struct hwrm_queue_dscp2pri_qcfg_output resp; + struct bnxt_en_dev *en_dev = rdev->en_dev; + struct bnxt_re_dscp2pri *dscp2pri; + struct bnxt_fw_msg fw_msg = {}; + u16 in_count = *count; + dma_addr_t dma_handle; + int rc = 0, i; + u16 data_len; + u8 *kmem; + + data_len = *count * 
sizeof(*dscp2pri); + bnxt_re_init_hwrm_hdr((void *)&req, HWRM_QUEUE_DSCP2PRI_QCFG, target_id); + req.port_id = (target_id == 0xFFFF) ? en_dev->pf_port_id : 1; + + kmem = dma_zalloc_coherent(&en_dev->pdev->dev, data_len, &dma_handle, + GFP_KERNEL); + if (!kmem) { + dev_err(rdev_to_dev(rdev), + "dma_zalloc_coherent failure, length = %u\n", + (unsigned)data_len); + return -ENOMEM; + } + req.dest_data_addr = cpu_to_le64(dma_handle); + req.dest_data_buffer_size = cpu_to_le16(data_len); + bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp, + sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev)); + rc = bnxt_send_msg(en_dev, &fw_msg); + if (rc) + goto out; + + /* Upload the DSCP-MASK-PRI tuple(s) */ + dscp2pri = (struct bnxt_re_dscp2pri *)kmem; + for (i = 0; i < le16_to_cpu(resp.entry_cnt) && i < in_count; i++) { + d2p[i].dscp = dscp2pri->dscp; + d2p[i].mask = dscp2pri->mask; + d2p[i].pri = dscp2pri->pri; + dscp2pri++; + } + *count = le16_to_cpu(resp.entry_cnt); +out: + dma_free_coherent(&en_dev->pdev->dev, data_len, kmem, dma_handle); + return rc; +} + +int bnxt_re_prio_vlan_tx_update(struct bnxt_re_dev *rdev) +{ + /* Remove the VLAN from the GID entry */ + if (rdev->cc_param.disable_prio_vlan_tx) + rdev->qplib_res.prio = false; + else + rdev->qplib_res.prio = true; + + return bnxt_re_update_gid(rdev); +} + +int bnxt_re_set_hwrm_dscp2pri(struct bnxt_re_dev *rdev, + struct bnxt_re_dscp2pri *d2p, u16 count, + u16 target_id) +{ + struct hwrm_queue_dscp2pri_cfg_input req = {}; + struct bnxt_en_dev *en_dev = rdev->en_dev; + struct hwrm_queue_dscp2pri_cfg_output resp; + struct bnxt_re_dscp2pri *dscp2pri; + struct bnxt_fw_msg fw_msg = {}; + int i, rc, data_len = 3 * 256; + dma_addr_t dma_handle; + u8 *kmem; + + bnxt_re_init_hwrm_hdr((void *)&req, HWRM_QUEUE_DSCP2PRI_CFG, target_id); + req.port_id = (target_id == 0xFFFF) ? en_dev->pf_port_id : 1; + + kmem = dma_alloc_coherent(&en_dev->pdev->dev, data_len, &dma_handle, + GFP_KERNEL); + if (!kmem) { + dev_err(rdev_to_dev(rdev), + "dma_alloc_coherent failure, length = %u\n", + (unsigned)data_len); + return -ENOMEM; + } + req.src_data_addr = cpu_to_le64(dma_handle); + + /* Download the DSCP-MASK-PRI tuple(s) */ + dscp2pri = (struct bnxt_re_dscp2pri *)kmem; + for (i = 0; i < count; i++) { + dscp2pri->dscp = d2p[i].dscp; + dscp2pri->mask = d2p[i].mask; + dscp2pri->pri = d2p[i].pri; + dscp2pri++; + } + + req.entry_cnt = cpu_to_le16(count); + bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp, + sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev)); + rc = bnxt_send_msg(en_dev, &fw_msg); + dma_free_coherent(&en_dev->pdev->dev, data_len, kmem, dma_handle); + return rc; +} + +int bnxt_re_query_hwrm_qportcfg(struct bnxt_re_dev *rdev, + struct bnxt_re_tc_rec *tc_rec, u16 tid) +{ + u8 max_tc, tc, *qptr, *type_ptr0, *type_ptr1; + struct hwrm_queue_qportcfg_output resp = {0}; + struct hwrm_queue_qportcfg_input req = {0}; + struct bnxt_en_dev *en_dev = rdev->en_dev; + struct bnxt_fw_msg fw_msg = {}; + bool def_init = false; + u8 *tmp_type; + u8 cos_id; + int rc; + + bnxt_re_init_hwrm_hdr((void *)&req, HWRM_QUEUE_QPORTCFG, tid); + req.port_id = (tid == 0xFFFF) ? 
en_dev->pf_port_id : 1; + if (BNXT_EN_ASYM_Q(en_dev)) + req.flags = cpu_to_le32(QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX); + + bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp, + sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev)); + rc = bnxt_send_msg(en_dev, &fw_msg); + if (rc) + return rc; + + if (!resp.max_configurable_queues) + return -EINVAL; + + max_tc = resp.max_configurable_queues; + tc_rec->max_tc = max_tc; + + if (resp.queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_USE_PROFILE_TYPE) + tc_rec->serv_type_enabled = true; + + qptr = &resp.queue_id0; + type_ptr0 = &resp.queue_id0_service_profile_type; + type_ptr1 = &resp.queue_id1_service_profile_type; + for (tc = 0; tc < max_tc; tc++) { + tmp_type = tc ? type_ptr1 + (tc - 1) : type_ptr0; + + cos_id = *qptr++; + /* RoCE CoS queue is the first cos queue. + * For MP12 and MP17 order is 405 and 141015. + */ + if (is_bnxt_roce_queue(rdev, *qptr, *tmp_type)) { + tc_rec->cos_id_roce = cos_id; + tc_rec->tc_roce = tc; + } else if (is_bnxt_cnp_queue(rdev, *qptr, *tmp_type)) { + tc_rec->cos_id_cnp = cos_id; + tc_rec->tc_cnp = tc; + } else if (!def_init) { + def_init = true; + tc_rec->tc_def = tc; + tc_rec->cos_id_def = cos_id; + } + qptr++; + } + + return rc; +} + +int bnxt_re_hwrm_cos2bw_qcfg(struct bnxt_re_dev *rdev, u16 target_id, + struct bnxt_re_cos2bw_cfg *cfg) +{ + struct bnxt_en_dev *en_dev = rdev->en_dev; + struct hwrm_queue_cos2bw_qcfg_output resp; + struct hwrm_queue_cos2bw_qcfg_input req = {0}; + struct bnxt_fw_msg fw_msg = {}; + int rc, indx; + void *data; + + bnxt_re_init_hwrm_hdr((void *)&req, HWRM_QUEUE_COS2BW_QCFG, target_id); + req.port_id = (target_id == 0xFFFF) ? en_dev->pf_port_id : 1; + + bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp, + sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev)); + rc = bnxt_send_msg(en_dev, &fw_msg); + if (rc) + return rc; + data = &resp.queue_id0 + offsetof(struct bnxt_re_cos2bw_cfg, + queue_id); + for (indx = 0; indx < 8; indx++, data += (sizeof(cfg->cfg))) { + memcpy(&cfg->cfg, data, sizeof(cfg->cfg)); + if (indx == 0) + cfg->queue_id = resp.queue_id0; + cfg++; + } + + return rc; +} + +int bnxt_re_hwrm_cos2bw_cfg(struct bnxt_re_dev *rdev, u16 target_id, + struct bnxt_re_cos2bw_cfg *cfg) +{ + struct bnxt_en_dev *en_dev = rdev->en_dev; + struct hwrm_queue_cos2bw_cfg_input req = {0}; + struct hwrm_queue_cos2bw_cfg_output resp = {0}; + struct bnxt_fw_msg fw_msg = {}; + void *data; + int indx; + int rc; + + bnxt_re_init_hwrm_hdr((void *)&req, HWRM_QUEUE_COS2BW_CFG, target_id); + req.port_id = (target_id == 0xFFFF) ? 
en_dev->pf_port_id : 1; + + /* Chimp wants enable bit to retain previous + * config done by L2 driver + */ + for (indx = 0; indx < 8; indx++) { + if (cfg[indx].queue_id < 40) { + req.enables |= cpu_to_le32( + QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID << + indx); + } + + data = (char *)&req.unused_0 + indx * (sizeof(*cfg) - 4); + memcpy(data, &cfg[indx].queue_id, sizeof(*cfg) - 4); + if (indx == 0) { + req.queue_id0 = cfg[0].queue_id; + req.unused_0 = 0; + } + } + + memset(&resp, 0, sizeof(resp)); + bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp, + sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev)); + rc = bnxt_send_msg(en_dev, &fw_msg); + return rc; +} + +int bnxt_re_hwrm_pri2cos_qcfg(struct bnxt_re_dev *rdev, + struct bnxt_re_tc_rec *tc_rec, + u16 target_id) +{ + struct hwrm_queue_pri2cos_qcfg_output resp = {0}; + struct hwrm_queue_pri2cos_qcfg_input req = {}; + struct bnxt_en_dev *en_dev = rdev->en_dev; + struct bnxt_fw_msg fw_msg = {}; + u8 *pri2cos, queue_id; + int rc, i; + + tc_rec->prio_valid = 0; + tc_rec->roce_prio = 0; + tc_rec->cnp_prio = 0; + + bnxt_re_init_hwrm_hdr((void *)&req, HWRM_QUEUE_PRI2COS_QCFG, + target_id); + + req.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN); + + if (BNXT_EN_ASYM_Q(en_dev)) + req.flags |= cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX); + bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp, + sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev)); + rc = bnxt_send_msg(en_dev, &fw_msg); + if (rc) + return rc; + + pri2cos = &resp.pri0_cos_queue_id; + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + queue_id = pri2cos[i]; + if (queue_id == tc_rec->cos_id_cnp) { + tc_rec->cnp_prio = i; + tc_rec->prio_valid |= (1 << CNP_PRIO_VALID); + } else if (queue_id == tc_rec->cos_id_roce) { + tc_rec->roce_prio = i; + tc_rec->prio_valid |= (1 << ROCE_PRIO_VALID); + } + } + return rc; +} + +int bnxt_re_hwrm_pri2cos_cfg(struct bnxt_re_dev *rdev, + u16 target_id, u16 port_id, + u8 *cos_id_map, u8 pri_map) +{ + struct hwrm_queue_pri2cos_cfg_output resp = {0}; + struct hwrm_queue_pri2cos_cfg_input req = {}; + struct bnxt_en_dev *en_dev = rdev->en_dev; + struct bnxt_fw_msg fw_msg = {}; + u32 flags = 0; + u8 *pri2cos; + int rc = 0; + int i; + + bnxt_re_init_hwrm_hdr((void *)&req, HWRM_QUEUE_PRI2COS_CFG, target_id); + flags = (QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR | + QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN); + req.flags = cpu_to_le32(flags); + req.port_id = port_id; + + pri2cos = &req.pri0_cos_queue_id; + + for (i = 0; i < 8; i++) { + if (pri_map & (1 << i)) { + req.enables |= cpu_to_le32 + (QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID << i); + pri2cos[i] = cos_id_map[i]; + } + } + + bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp, + sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev)); + rc = bnxt_send_msg(en_dev, &fw_msg); + + return rc; +} + +int bnxt_re_cnp_pri2cos_cfg(struct bnxt_re_dev *rdev, u16 target_id, bool reset) +{ + struct bnxt_en_dev *en_dev = rdev->en_dev; + struct bnxt_re_tc_rec *tc_rec; + u8 cos_id_map[8] = {0}; + u8 pri_map = 0; + u16 port_id; + u8 cnp_pri; + + cnp_pri = rdev->cc_param.alt_vlan_pcp; + + if (target_id == 0xFFFF) { + port_id = en_dev->pf_port_id; + tc_rec = &rdev->tc_rec[0]; + } else { + port_id = 1; + tc_rec = &rdev->tc_rec[1]; + } + + pri_map |= (1 << cnp_pri); + if (reset) + cos_id_map[cnp_pri] = tc_rec->cos_id_def; + else + cos_id_map[cnp_pri] = tc_rec->cos_id_cnp; + + return bnxt_re_hwrm_pri2cos_cfg(rdev, target_id, port_id, + cos_id_map, pri_map); +} + +static void 
bnxt_re_put_stats_ctx(struct bnxt_re_dev *rdev) +{ + struct bnxt_qplib_ctx *hctx; + struct bnxt_qplib_res *res; + u16 tid = 0xffff; + + res = &rdev->qplib_res; + hctx = res->hctx; + + if (test_and_clear_bit(BNXT_RE_FLAG_STATS_CTX_ALLOC, &rdev->flags)) { + bnxt_re_net_stats_ctx_free(rdev, hctx->stats.fw_id, tid); + bnxt_qplib_free_stat_mem(res, &hctx->stats); + } +} + +static void bnxt_re_put_stats2_ctx(struct bnxt_re_dev *rdev) +{ + struct bnxt_qplib_ctx *hctx; + struct bnxt_qplib_res *res; + u16 tid; + + res = &rdev->qplib_res; + hctx = res->hctx; + + if (test_and_clear_bit(BNXT_RE_FLAG_STATS_CTX2_ALLOC, + &rdev->flags)) { + if (rdev->binfo) { + tid = PCI_FUNC(rdev->binfo->pdev2->devfn) + 1; + bnxt_re_net_stats_ctx_free(rdev, hctx->stats2.fw_id, + tid); + bnxt_qplib_free_stat_mem(res, &hctx->stats2); + } + } +} + +static int bnxt_re_get_stats_ctx(struct bnxt_re_dev *rdev) +{ + struct bnxt_qplib_ctx *hctx; + struct bnxt_qplib_res *res; + u16 tid = 0xffff; + int rc; + + res = &rdev->qplib_res; + hctx = res->hctx; + + rc = bnxt_qplib_alloc_stat_mem(res->pdev, rdev->chip_ctx, &hctx->stats); + if (rc) + return -ENOMEM; + rc = bnxt_re_net_stats_ctx_alloc(rdev, tid); + if (rc) + goto free_stat_mem; + set_bit(BNXT_RE_FLAG_STATS_CTX_ALLOC, &rdev->flags); + + return 0; + +free_stat_mem: + bnxt_qplib_free_stat_mem(res, &hctx->stats); + + return rc; +} + +static int bnxt_re_get_stats2_ctx(struct bnxt_re_dev *rdev) +{ + struct bnxt_qplib_ctx *hctx; + struct bnxt_qplib_res *res; + u16 tid; + int rc; + + if (!rdev->binfo) + return 0; + + res = &rdev->qplib_res; + hctx = res->hctx; + + rc = bnxt_qplib_alloc_stat_mem(res->pdev, rdev->chip_ctx, &hctx->stats2); + if (rc) + return -ENOMEM; + + tid = (PCI_FUNC(rdev->binfo->pdev2->devfn) + 1); + rc = bnxt_re_net_stats_ctx_alloc(rdev, tid); + if (rc) + goto free_stat_mem; + dev_dbg(rdev_to_dev(rdev), " LAG second stat context %x\n", + rdev->qplib_res.hctx->stats2.fw_id); + set_bit(BNXT_RE_FLAG_STATS_CTX2_ALLOC, &rdev->flags); + + return 0; + +free_stat_mem: + bnxt_qplib_free_stat_mem(res, &hctx->stats2); + + return rc; +} + +static int bnxt_re_update_dev_attr(struct bnxt_re_dev *rdev) +{ + int rc; + + rc = bnxt_qplib_get_dev_attr(&rdev->rcfw); + if (rc) + return rc; + if (!bnxt_re_check_min_attr(rdev)) + return -EINVAL; + return 0; +} + +static int bnxt_re_set_port_cnp_ets(struct bnxt_re_dev *rdev, u16 portid, bool reset) +{ + struct bnxt_re_cos2bw_cfg ets_cfg[8]; + struct bnxt_re_cos2bw_cfg *cnp_ets_cfg; + struct bnxt_re_tc_rec *tc_rec; + int indx, rc; + + rc = bnxt_re_hwrm_cos2bw_qcfg(rdev, portid, ets_cfg); + if (rc) + goto bail; + + tc_rec = (portid == 0xFFFF) ? &rdev->tc_rec[0] : &rdev->tc_rec[1]; + indx = tc_rec->cos_id_cnp - tc_rec->cos_id_roce; + cnp_ets_cfg = &ets_cfg[indx]; + cnp_ets_cfg->queue_id = tc_rec->cos_id_cnp; + cnp_ets_cfg->tsa = QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP; + cnp_ets_cfg->pri_lvl = 7; /* Max priority for CNP. */ + + /* Configure cnp ets to strict */ + rc = bnxt_re_hwrm_cos2bw_cfg(rdev, portid, ets_cfg); + if (rc) + goto bail; + + rc = bnxt_re_cnp_pri2cos_cfg(rdev, portid, reset); +bail: + return rc; +} + +static int bnxt_re_setup_port_cnp_cos(struct bnxt_re_dev *rdev, u16 portid, bool reset) +{ + int rc; + struct bnxt_re_tc_rec *tc_rec; + + /* Query CNP cos id */ + tc_rec = (portid == 0xFFFF) ? 
+ &rdev->tc_rec[0] : &rdev->tc_rec[1]; + rc = bnxt_re_query_hwrm_qportcfg(rdev, tc_rec, portid); + if (rc) + goto bail; + /* config ETS */ + rc = bnxt_re_set_port_cnp_ets(rdev, portid, reset); +bail: + return rc; +} + +int bnxt_re_setup_cnp_cos(struct bnxt_re_dev *rdev, bool reset) +{ + u16 portid; + int rc; + + portid = 0xFFFF; + rc = bnxt_re_setup_port_cnp_cos(rdev, portid, reset); + if (rc) { + dev_err(rdev_to_dev(rdev), + "Failed to setup cnp cos for pci function %d\n", + bnxt_re_dev_pcifn_id(rdev)); + goto bail; + } + + if (rdev->binfo) { + portid = 2; + rc = bnxt_re_setup_port_cnp_cos(rdev, portid, reset); + if (rc) + dev_err(rdev_to_dev(rdev), + "Failed to setup cnp cos for pci function %d\n", + bnxt_re_dev_pcifn_id(rdev)); + } + +bail: + return rc; +} + +static void bnxt_re_free_tbls(struct bnxt_re_dev *rdev) +{ + bnxt_qplib_clear_tbls(&rdev->qplib_res); + bnxt_qplib_free_tbls(&rdev->qplib_res); +} + +static int bnxt_re_alloc_init_tbls(struct bnxt_re_dev *rdev) +{ + int rc; + + rc = bnxt_qplib_alloc_tbls(&rdev->qplib_res); + if (rc) + return rc; + set_bit(BNXT_RE_FLAG_TBLS_ALLOCINIT, &rdev->flags); + + return 0; +} + +static void bnxt_re_clean_nqs(struct bnxt_re_dev *rdev) +{ + struct bnxt_qplib_nq *nq; + int i; + + if (!rdev->nqr->max_init) + return; + + for (i = (rdev->nqr->max_init - 1) ; i >= 0; i--) { + nq = &rdev->nqr->nq[i]; + bnxt_qplib_disable_nq(nq); + bnxt_re_net_ring_free(rdev, nq->ring_id); + bnxt_qplib_free_nq_mem(nq); + } + rdev->nqr->max_init = 0; +} + +static int bnxt_re_setup_nqs(struct bnxt_re_dev *rdev) +{ + struct bnxt_re_ring_attr rattr = {}; + struct bnxt_qplib_nq *nq; + int rc, i; + int depth; + u32 offt; + u16 vec; + + mutex_init(&rdev->nqr->load_lock); + + /* + * TODO: Optimize the depth based on the + * number of NQs. 
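+ * (Each NQ below is currently sized to the full BNXT_QPLIB_NQE_MAX_CNT regardless of how many NQs are created; scaling the depth with the NQ count is one possible follow-up, not part of this change.)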
+ */ + depth = BNXT_QPLIB_NQE_MAX_CNT; + for (i = 0; i < rdev->nqr->num_msix - 1; i++) { + nq = &rdev->nqr->nq[i]; + vec = rdev->nqr->msix_entries[i + 1].vector; + offt = rdev->nqr->msix_entries[i + 1].db_offset; + nq->hwq.max_elements = depth; + rc = bnxt_qplib_alloc_nq_mem(&rdev->qplib_res, nq); + if (rc) { + dev_err(rdev_to_dev(rdev), + "Failed to get mem for NQ %d, rc = 0x%x", + i, rc); + goto fail_mem; + } + + rattr.dma_arr = nq->hwq.pbl[PBL_LVL_0].pg_map_arr; + rattr.pages = nq->hwq.pbl[rdev->nqr->nq[i].hwq.level].pg_count; + rattr.type = bnxt_re_get_rtype(rdev); + rattr.mode = RING_ALLOC_REQ_INT_MODE_MSIX; + rattr.depth = nq->hwq.max_elements - 1; + rattr.lrid = rdev->nqr->msix_entries[i + 1].ring_idx; + + /* Set DBR pacing capability on the first NQ ring only */ + if (!i && bnxt_qplib_dbr_pacing_ext_en(rdev->chip_ctx) && !rdev->is_virtfn) + rattr.flags = RING_ALLOC_REQ_FLAGS_NQ_DBR_PACING; + else + rattr.flags = 0; + + rc = bnxt_re_net_ring_alloc(rdev, &rattr, &nq->ring_id); + if (rc) { + nq->ring_id = 0xffff; /* Invalid ring-id */ + dev_err(rdev_to_dev(rdev), + "Failed to get fw id for NQ %d, rc = 0x%x", + i, rc); + goto fail_ring; + } + + rc = bnxt_qplib_enable_nq(nq, i, vec, offt, + &bnxt_re_cqn_handler, + &bnxt_re_srqn_handler); + if (rc) { + dev_err(rdev_to_dev(rdev), + "Failed to enable NQ %d, rc = 0x%x", i, rc); + goto fail_en; + } + } + + rdev->nqr->max_init = i; + return 0; +fail_en: + /* *nq was i'th nq */ + bnxt_re_net_ring_free(rdev, nq->ring_id); +fail_ring: + bnxt_qplib_free_nq_mem(nq); +fail_mem: + rdev->nqr->max_init = i; + return rc; +} + +static void bnxt_re_sysfs_destroy_file(struct bnxt_re_dev *rdev) +{ +#ifndef HAVE_RDMA_SET_DEVICE_SYSFS_GROUP + int i; + + for (i = 0; i < ARRAY_SIZE(bnxt_re_attributes); i++) + device_remove_file(&rdev->ibdev.dev, bnxt_re_attributes[i]); +#endif +} + +static int bnxt_re_sysfs_create_file(struct bnxt_re_dev *rdev) +{ +#ifndef HAVE_RDMA_SET_DEVICE_SYSFS_GROUP + int i, j, rc = 0; + + for (i = 0; i < ARRAY_SIZE(bnxt_re_attributes); i++) { + rc = device_create_file(&rdev->ibdev.dev, + bnxt_re_attributes[i]); + if (rc) { + dev_err(rdev_to_dev(rdev), + "Failed to create IB sysfs with rc = 0x%x", rc); + /* Must clean up all created device files */ + for (j = 0; j < i; j++) + device_remove_file(&rdev->ibdev.dev, + bnxt_re_attributes[j]); + clear_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags); + ib_unregister_device(&rdev->ibdev); + return rc; + } + } +#endif + return 0; +} + +/* worker thread for polling periodic events. Now used for QoS programming*/ +static void bnxt_re_worker(struct work_struct *work) +{ + struct bnxt_re_dev *rdev = container_of(work, struct bnxt_re_dev, + worker.work); + u8 active_port_map = 0; + int rc; + + /* QoS is in 30s cadence for PFs*/ + if (!rdev->is_virtfn && !rdev->worker_30s--) { + bnxt_re_get_pri_and_update_gid(rdev); + rdev->worker_30s = 30; + } + /* Use trylock for bnxt_re_mutex as this can be + * held for long time by debugfs show path while issuing + * HWRMS. If the debugfs name update is not done in this + * iteration, the driver will check for the same in the + * next schedule of the worker i.e after 1 sec. 
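+ * Blocking on the mutex here could also deadlock with the remove path, which holds bnxt_re_mutex while it cancels this delayed work in bnxt_re_dev_uninit; the trylock avoids that.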
+ */ + if (mutex_trylock(&bnxt_re_mutex)) { + if (test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags)) + bnxt_re_rename_debugfs_entry(rdev); + mutex_unlock(&bnxt_re_mutex); + } + + if (rdev->binfo) { + if (!test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags)) + goto resched; + if (rtnl_trylock()) { + active_port_map = + bnxt_re_get_bond_link_status(rdev->binfo); + rtnl_unlock(); + if (rdev->binfo->active_port_map != active_port_map) { + rdev->binfo->active_port_map = active_port_map; + dev_info(rdev_to_dev(rdev), + "Updating lag device from worker active_port_map = 0x%x", + rdev->binfo->active_port_map); + bnxt_re_update_fw_lag_info(rdev->binfo, rdev, + true); + } + } + if (test_bit(BNXT_RE_FLAG_RECONFIG_SECONDARY_DEV_DCB, &rdev->flags)) { + rc = bnxt_re_setup_dcb(rdev, rdev->binfo->slave2, &rdev->tc_rec[1], 2); + if (!rc || rc != -EBUSY) + clear_bit(BNXT_RE_FLAG_RECONFIG_SECONDARY_DEV_DCB, &rdev->flags); + } + } + + if (!rdev->stats.stats_query_sec) + goto resched; + + if (test_bit(BNXT_RE_FLAG_ISSUE_CFA_FLOW_STATS, &rdev->flags) && + (rdev->is_virtfn || + !_is_ext_stats_supported(rdev->dev_attr->dev_cap_flags))) { + if (!(rdev->stats.stats_query_counter++ % + rdev->stats.stats_query_sec)) { + rc = bnxt_re_get_qos_stats(rdev); + if (rc && rc != -ENOMEM) + clear_bit(BNXT_RE_FLAG_ISSUE_CFA_FLOW_STATS, + &rdev->flags); + } + } + +resched: + schedule_delayed_work(&rdev->worker, msecs_to_jiffies(1000)); +} + +static int bnxt_re_alloc_dbr_sw_stats_mem(struct bnxt_re_dev *rdev) +{ + if (!(rdev->dbr_drop_recov || rdev->dbr_pacing)) + return 0; + + rdev->dbr_sw_stats = kzalloc(sizeof(*rdev->dbr_sw_stats), GFP_KERNEL); + if (!rdev->dbr_sw_stats) + return -ENOMEM; + + return 0; +} + +static void bnxt_re_free_dbr_sw_stats_mem(struct bnxt_re_dev *rdev) +{ + kfree(rdev->dbr_sw_stats); + rdev->dbr_sw_stats = NULL; +} + +static int bnxt_re_initialize_dbr_drop_recov(struct bnxt_re_dev *rdev) +{ + rdev->dbr_drop_recov_wq = + create_singlethread_workqueue("bnxt_re_dbr_drop_recov"); + if (!rdev->dbr_drop_recov_wq) { + dev_err(rdev_to_dev(rdev), "DBR Drop Recov wq alloc failed!"); + return -EINVAL; + } + rdev->dbr_drop_recov = true; + + /* Enable configfs setting dbr_drop_recov by default */ + rdev->user_dbr_drop_recov = true; + + rdev->user_dbr_drop_recov_timeout = BNXT_RE_DBR_RECOV_USERLAND_TIMEOUT; + return 0; +} + +static void bnxt_re_deinitialize_dbr_drop_recov(struct bnxt_re_dev *rdev) +{ + if (rdev->dbr_drop_recov_wq) { + flush_workqueue(rdev->dbr_drop_recov_wq); + destroy_workqueue(rdev->dbr_drop_recov_wq); + rdev->dbr_drop_recov_wq = NULL; + } + rdev->dbr_drop_recov = false; +} + +static int bnxt_re_initialize_dbr_pacing(struct bnxt_re_dev *rdev) +{ + int rc; + + /* Allocate a page for app use */ + rdev->dbr_page = (void *)__get_free_page(GFP_KERNEL); + if (!rdev->dbr_page) { + dev_err(rdev_to_dev(rdev), "DBR page allocation failed!"); + return -ENOMEM; + } + memset((u8 *)rdev->dbr_page, 0, PAGE_SIZE); + rdev->qplib_res.pacing_data = (struct bnxt_qplib_db_pacing_data *)rdev->dbr_page; + rc = bnxt_re_hwrm_dbr_pacing_qcfg(rdev); + if (rc) { + dev_err(rdev_to_dev(rdev), + "Failed to query dbr pacing config %d\n", rc); + goto fail; + } + /* Create a work queue for scheduling dbq event */ + rdev->dbq_wq = create_singlethread_workqueue("bnxt_re_dbq"); + if (!rdev->dbq_wq) { + dev_err(rdev_to_dev(rdev), "DBQ wq alloc failed!"); + rc = -ENOMEM; + goto fail; + } + /* MAP grc window 2 for reading db fifo depth */ + writel(rdev->chip_ctx->dbr_stat_db_fifo & BNXT_GRC_BASE_MASK, + rdev->en_dev->bar0 + 
BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); + rdev->dbr_db_fifo_reg_off = + (rdev->chip_ctx->dbr_stat_db_fifo & BNXT_GRC_OFFSET_MASK) + + 0x2000; + rdev->qplib_res.pacing_data->grc_reg_offset = rdev->dbr_db_fifo_reg_off; + + rdev->dbr_bar_addr = + pci_resource_start(rdev->qplib_res.pdev, 0) + + rdev->dbr_db_fifo_reg_off; + + /* Percentage of DB FIFO */ + rdev->dbq_watermark = BNXT_RE_PACING_DBQ_THRESHOLD; + rdev->pacing_en_int_th = BNXT_RE_PACING_EN_INT_THRESHOLD; + rdev->pacing_algo_th = BNXT_RE_PACING_ALGO_THRESHOLD; + rdev->dbq_pacing_time = BNXT_RE_DBR_INT_TIME; + rdev->dbr_def_do_pacing = BNXT_RE_DBR_DO_PACING_NO_CONGESTION; + rdev->do_pacing_save = rdev->dbr_def_do_pacing; + bnxt_re_set_default_pacing_data(rdev); + dev_dbg(rdev_to_dev(rdev), "Initialized db pacing\n"); + + return 0; +fail: + free_page((u64)rdev->dbr_page); + rdev->dbr_page = NULL; + return rc; +} + +static void bnxt_re_deinitialize_dbr_pacing(struct bnxt_re_dev *rdev) +{ + if (rdev->dbq_wq) + flush_workqueue(rdev->dbq_wq); + + cancel_work_sync(&rdev->dbq_fifo_check_work); + cancel_delayed_work_sync(&rdev->dbq_pacing_work); + + if (rdev->dbq_wq) { + destroy_workqueue(rdev->dbq_wq); + rdev->dbq_wq = NULL; + } + + if (rdev->dbr_page) + free_page((u64)rdev->dbr_page); + rdev->dbr_page = NULL; + rdev->dbr_pacing = false; +} + +/* enable_dbr_pacing needs to be done only for older FWs + * where host selects primary function. ie. pacing_ext + * flags is not set. + */ +int bnxt_re_enable_dbr_pacing(struct bnxt_re_dev *rdev) +{ + struct bnxt_qplib_nq *nq; + + nq = &rdev->nqr->nq[0]; + rdev->dbq_nq_id = nq->ring_id; + + if (!bnxt_qplib_dbr_pacing_ext_en(rdev->chip_ctx) && + bnxt_qplib_dbr_pacing_is_primary_pf(rdev->chip_ctx)) { + if (bnxt_re_hwrm_dbr_pacing_cfg(rdev, true)) + return -EIO; + + /* MAP grc window 8 for ARMing the NQ DBQ */ + writel(rdev->chip_ctx->dbr_aeq_arm_reg & + BNXT_GRC_BASE_MASK, + rdev->en_dev->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 28); + rdev->dbr_aeq_arm_reg_off = + (rdev->chip_ctx->dbr_aeq_arm_reg & + BNXT_GRC_OFFSET_MASK) + 0x8000; + writel(1, rdev->en_dev->bar0 + rdev->dbr_aeq_arm_reg_off); + } + + return 0; +} + +/* disable_dbr_pacing needs to be done only for older FWs + * where host selects primary function. ie. pacing_ext + * flags is not set. + */ + +int bnxt_re_disable_dbr_pacing(struct bnxt_re_dev *rdev) +{ + int rc = 0; + + if (!bnxt_qplib_dbr_pacing_ext_en(rdev->chip_ctx) && + bnxt_qplib_dbr_pacing_is_primary_pf(rdev->chip_ctx)) + rc = bnxt_re_hwrm_dbr_pacing_cfg(rdev, false); + + return rc; +} + +/* bnxt_re_ib_uninit - Uninitialize from IB stack + * @rdev - rdma device instance + * + * If Firmware is responding, stop user qps (moving qps to error state.). + * If Firmware is not responding (error case), start drain kernel qps + * Eventually, unregister device with IB stack. 
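+ * This helper is also called from the LAG create/destroy paths, which uninitialize the IB device before removing the underlying devices.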
+ * + */ +void bnxt_re_ib_uninit(struct bnxt_re_dev *rdev) +{ + if (test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags)) { + bnxt_re_stop_user_qps_nonfatal(rdev); + bnxt_re_drain_kernel_qps_fatal(rdev); + bnxt_re_sysfs_destroy_file(rdev); + ib_unregister_device(&rdev->ibdev); + clear_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags); + return; + } +} + +static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type) +{ + struct bnxt_qplib_dpi *kdpi; + int rc, wait_count = BNXT_RE_RES_FREE_WAIT_COUNT; + + bnxt_re_net_unregister_async_event(rdev); + +#ifdef IB_PEER_MEM_MOD_SUPPORT + if (rdev->peer_dev) { + ib_peer_mem_remove_device(rdev->peer_dev); + rdev->peer_dev = NULL; + } +#endif + bnxt_re_put_stats2_ctx(rdev); + if (test_and_clear_bit(BNXT_RE_FLAG_DEV_LIST_INITIALIZED, + &rdev->flags)) { + /* did the caller hold the lock? */ + list_del_rcu(&rdev->list); + } + + bnxt_re_uninit_resolve_wq(rdev); + bnxt_re_uninit_dcb_wq(rdev); + bnxt_re_uninit_aer_wq(rdev); + bnxt_re_deinitialize_dbr_drop_recov(rdev); + + if (bnxt_qplib_dbr_pacing_en(rdev->chip_ctx)) + (void)bnxt_re_disable_dbr_pacing(rdev); + + if (test_and_clear_bit(BNXT_RE_FLAG_WORKER_REG, &rdev->flags)) + cancel_delayed_work_sync(&rdev->worker); + + if (test_and_clear_bit(BNXT_RE_FLAG_PER_PORT_DEBUG_INFO, &rdev->flags)) + bnxt_re_debugfs_rem_port(rdev); + + bnxt_re_debugfs_rem_pdev(rdev); + + /* Wait for ULPs to release references */ + while (atomic_read(&rdev->stats.rsors.cq_count) && --wait_count) + usleep_range(500, 1000); + if (!wait_count) + dev_err(rdev_to_dev(rdev), + "CQ resources not freed by stack, count = 0x%x", + atomic_read(&rdev->stats.rsors.cq_count)); + + kdpi = &rdev->dpi_privileged; + if (kdpi->umdbr) { /* kernel DPI was allocated with success */ + (void)bnxt_qplib_dealloc_dpi(&rdev->qplib_res, kdpi); + /* + * Driver just need to know no command had failed + * during driver load sequence and below command is + * required indeed. Piggybacking dpi allocation status. 
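+ * In other words, a non-zero kdpi->umdbr is used as an indication that the load sequence completed, so the link aggregation mode reset below is only issued for a fully initialized bond device.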
+ */ + if (rdev->binfo) + bnxt_qplib_set_link_aggr_mode(&rdev->qplib_res, 0, + 0, 0, false, 0); + } + bnxt_re_hdbr_uninit(rdev); + + /* Protect the device uninitialization and start_irq/stop_irq L2 + * callbacks with rtnl lock to avoid race condition between these calls + */ + rtnl_lock(); + if (test_and_clear_bit(BNXT_RE_FLAG_SETUP_NQ, &rdev->flags)) + bnxt_re_clean_nqs(rdev); + rtnl_unlock(); + + if (test_and_clear_bit(BNXT_RE_FLAG_TBLS_ALLOCINIT, &rdev->flags)) + bnxt_re_free_tbls(rdev); + if (test_and_clear_bit(BNXT_RE_FLAG_RCFW_CHANNEL_INIT, &rdev->flags)) { + rc = bnxt_qplib_deinit_rcfw(&rdev->rcfw); + if (rc) + dev_warn(rdev_to_dev(rdev), + "Failed to deinitialize fw, rc = 0x%x", rc); + } + + bnxt_re_put_stats_ctx(rdev); + + if (test_and_clear_bit(BNXT_RE_FLAG_ALLOC_CTX, &rdev->flags)) + bnxt_qplib_free_hwctx(&rdev->qplib_res); + + rtnl_lock(); + if (test_and_clear_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags)) + bnxt_qplib_disable_rcfw_channel(&rdev->rcfw); + + if (rdev->dbr_pacing) + bnxt_re_deinitialize_dbr_pacing(rdev); + + bnxt_re_free_dbr_sw_stats_mem(rdev); + + if (test_and_clear_bit(BNXT_RE_FLAG_NET_RING_ALLOC, &rdev->flags)) + bnxt_re_net_ring_free(rdev, rdev->rcfw.creq.ring_id); + + if (test_and_clear_bit(BNXT_RE_FLAG_ALLOC_RCFW, &rdev->flags)) + bnxt_qplib_free_rcfw_channel(&rdev->qplib_res); + + rdev->nqr->num_msix = 0; + rtnl_unlock(); + + bnxt_re_free_nqr_mem(rdev); + bnxt_re_destroy_chip_ctx(rdev); + + if (op_type != BNXT_RE_PRE_RECOVERY_REMOVE) { + if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, + &rdev->flags)) + bnxt_unregister_dev(rdev->en_dev); + } +} + +static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type, u8 wqe_mode) +{ + struct bnxt_re_ring_attr rattr = {}; + struct bnxt_qplib_creq_ctx *creq; + int vec, offset; + int rc = 0; + + if (op_type != BNXT_RE_POST_RECOVERY_INIT) { + /* Registered a new RoCE device instance to netdev */ + rc = bnxt_re_register_netdev(rdev); + if (rc) + return -EINVAL; + } + set_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags); + + if (rdev->en_dev->ulp_tbl->msix_requested < BNXT_RE_MIN_MSIX) { + dev_err(rdev_to_dev(rdev), + "RoCE requires minimum 2 MSI-X vectors, but only %d reserved\n", + rdev->en_dev->ulp_tbl->msix_requested); + bnxt_unregister_dev(rdev->en_dev); + clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags); + return -EINVAL; + } + dev_dbg(rdev_to_dev(rdev), "Got %d MSI-X vectors\n", + rdev->en_dev->ulp_tbl->msix_requested); + + /* Check whether VF or PF */ + bnxt_re_get_sriov_func_type(rdev); + + rc = bnxt_re_setup_chip_ctx(rdev, wqe_mode); + if (rc) { + dev_err(rdev_to_dev(rdev), "Failed to get chip context rc 0x%x", rc); + bnxt_unregister_dev(rdev->en_dev); + clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags); + rc = -EINVAL; + return rc; + } + + rc = bnxt_re_alloc_nqr_mem(rdev); + if (rc) { + bnxt_re_destroy_chip_ctx(rdev); + bnxt_unregister_dev(rdev->en_dev); + clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags); + return rc; + } + + /* Protect the device initialization and start_irq/stop_irq L2 callbacks + * with rtnl lock to avoid race condition between these calls + */ + rtnl_lock(); + rdev->nqr->num_msix = rdev->en_dev->ulp_tbl->msix_requested; + memcpy(rdev->nqr->msix_entries, rdev->en_dev->msix_entries, + sizeof(struct bnxt_msix_entry) * rdev->nqr->num_msix); + + /* Establish RCFW Communication Channel to initialize the context + memory for the function and all child VFs */ + rc = bnxt_qplib_alloc_rcfw_channel(&rdev->qplib_res); + if (rc) { + dev_err(rdev_to_dev(rdev), + "Failed 
to alloc mem for rcfw, rc = %#x\n", rc); + goto release_rtnl; + } + set_bit(BNXT_RE_FLAG_ALLOC_RCFW, &rdev->flags); + + creq = &rdev->rcfw.creq; + rattr.dma_arr = creq->hwq.pbl[PBL_LVL_0].pg_map_arr; + rattr.pages = creq->hwq.pbl[creq->hwq.level].pg_count; + rattr.type = bnxt_re_get_rtype(rdev); + rattr.mode = RING_ALLOC_REQ_INT_MODE_MSIX; + rattr.depth = BNXT_QPLIB_CREQE_MAX_CNT - 1; + rattr.lrid = rdev->nqr->msix_entries[BNXT_RE_AEQ_IDX].ring_idx; + rc = bnxt_re_net_ring_alloc(rdev, &rattr, &creq->ring_id); + if (rc) { + creq->ring_id = 0xffff; + dev_err(rdev_to_dev(rdev), + "Failed to allocate CREQ fw id with rc = 0x%x", rc); + goto release_rtnl; + } + set_bit(BNXT_RE_FLAG_NET_RING_ALLOC, &rdev->flags); + + /* Program the NQ ID for DBQ notification */ + if (rdev->chip_ctx->modes.dbr_pacing_v0 || + bnxt_qplib_dbr_pacing_en(rdev->chip_ctx) || + bnxt_qplib_dbr_pacing_ext_en(rdev->chip_ctx)) { + rc = bnxt_re_initialize_dbr_pacing(rdev); + if (!rc) + rdev->dbr_pacing = true; + else + rdev->dbr_pacing = false; + dev_dbg(rdev_to_dev(rdev), "%s: initialize db pacing ret %d\n", + __func__, rc); + } + + vec = rdev->nqr->msix_entries[BNXT_RE_AEQ_IDX].vector; + offset = rdev->nqr->msix_entries[BNXT_RE_AEQ_IDX].db_offset; + rc = bnxt_qplib_enable_rcfw_channel(&rdev->rcfw, vec, offset, + &bnxt_re_aeq_handler); + if (rc) { + dev_err(rdev_to_dev(rdev), + "Failed to enable RCFW channel with rc = 0x%x", rc); + goto release_rtnl; + } + set_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags); + + rc = bnxt_re_update_dev_attr(rdev); + if (rc) + goto release_rtnl; + bnxt_re_limit_pf_res(rdev); + if (!rdev->is_virtfn && !_is_chip_gen_p5_p7(rdev->chip_ctx)) { + rc = bnxt_qplib_alloc_hwctx(&rdev->qplib_res); + if (rc) { + dev_err(rdev_to_dev(rdev), + "Failed to alloc hw contexts, rc = 0x%x", rc); + goto release_rtnl; + } + set_bit(BNXT_RE_FLAG_ALLOC_CTX, &rdev->flags); + } + + rc = bnxt_re_get_stats_ctx(rdev); + if (rc) + goto release_rtnl; + + rc = bnxt_qplib_init_rcfw(&rdev->rcfw, rdev->is_virtfn); + if (rc) { + dev_err(rdev_to_dev(rdev), + "Failed to initialize fw with rc = 0x%x", rc); + goto release_rtnl; + } + set_bit(BNXT_RE_FLAG_RCFW_CHANNEL_INIT, &rdev->flags); + + /* Based resource count on the 'new' device caps */ + rc = bnxt_re_update_dev_attr(rdev); + if (rc) + goto release_rtnl; + rc = bnxt_re_alloc_init_tbls(rdev); + if (rc) { + dev_err(rdev_to_dev(rdev), "tbls alloc-init failed rc = %#x", + rc); + goto release_rtnl; + } + rc = bnxt_re_setup_nqs(rdev); + if (rc) { + dev_err(rdev_to_dev(rdev), "NQs alloc-init failed rc = %#x\n", + rc); + if (rdev->nqr->max_init == 0) + goto release_rtnl; + + dev_warn(rdev_to_dev(rdev), + "expected nqs %d available nqs %d\n", + rdev->nqr->num_msix, rdev->nqr->max_init); + } + set_bit(BNXT_RE_FLAG_SETUP_NQ, &rdev->flags); + rtnl_unlock(); + + rc = bnxt_qplib_alloc_dpi(&rdev->qplib_res, &rdev->dpi_privileged, + rdev, BNXT_QPLIB_DPI_TYPE_KERNEL); + if (rc) + goto fail; + + rc = bnxt_re_hdbr_init(rdev); + if (rc) + goto fail; + + if (rdev->dbr_pacing) + bnxt_re_enable_dbr_pacing(rdev); + + if (rdev->chip_ctx->modes.dbr_drop_recov) + bnxt_re_initialize_dbr_drop_recov(rdev); + + rc = bnxt_re_alloc_dbr_sw_stats_mem(rdev); + if (rc) + goto fail; + + /* This block of code is needed for error recovery support */ + if (!rdev->is_virtfn) { + struct bnxt_re_tc_rec *tc_rec; + + tc_rec = &rdev->tc_rec[0]; + rc = bnxt_re_query_hwrm_qportcfg(rdev, tc_rec, 0xFFFF); + if (rc) { + dev_err(rdev_to_dev(rdev), + "Failed to query port config rc:%d", rc); + return rc; + } + + if (rdev->binfo) { 
+ tc_rec = &rdev->tc_rec[1]; + rc = bnxt_re_query_hwrm_qportcfg(rdev, tc_rec, 2); + if (rc) { + dev_err(rdev_to_dev(rdev), + "Failed to query port config(LAG) rc:%d", rc); + return rc; + } + } + /* Query f/w defaults of CC params */ + rc = bnxt_qplib_query_cc_param(&rdev->qplib_res, &rdev->cc_param); + if (rc) + dev_warn(rdev_to_dev(rdev), + "Failed to query CC defaults\n"); + if (_is_chip_gen_p5_p7(rdev->chip_ctx) && + !(rdev->qplib_res.en_dev->flags & BNXT_EN_FLAG_ROCE_VF_RES_MGMT)) + bnxt_re_vf_res_config(rdev); + } + INIT_DELAYED_WORK(&rdev->worker, bnxt_re_worker); + set_bit(BNXT_RE_FLAG_WORKER_REG, &rdev->flags); + schedule_delayed_work(&rdev->worker, msecs_to_jiffies(1000)); + + bnxt_re_init_dcb_wq(rdev); + bnxt_re_init_aer_wq(rdev); + bnxt_re_init_resolve_wq(rdev); + bnxt_re_debugfs_add_pdev(rdev); + list_add_tail_rcu(&rdev->list, &bnxt_re_dev_list); + set_bit(BNXT_RE_FLAG_DEV_LIST_INITIALIZED, &rdev->flags); + + rc = bnxt_re_get_stats2_ctx(rdev); + if (rc) + goto fail; + + return rc; +release_rtnl: + rtnl_unlock(); +fail: + bnxt_re_dev_uninit(rdev, BNXT_RE_COMPLETE_REMOVE); + + return rc; +} + +static int bnxt_re_ib_init(struct bnxt_re_dev *rdev) +{ + int rc = 0; + + rc = bnxt_re_register_ib(rdev); + if (rc) { + dev_err(rdev_to_dev(rdev), + "Register IB failed with rc = 0x%x", rc); + goto fail; + } + rc = bnxt_re_sysfs_create_file(rdev); + if (rc) { + bnxt_re_ib_uninit(rdev); + goto fail; + } + + set_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags); + set_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags); + set_bit(BNXT_RE_FLAG_ISSUE_CFA_FLOW_STATS, &rdev->flags); + bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_PORT_ACTIVE); + bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_GID_CHANGE); + + return rc; +fail: + bnxt_re_dev_uninit(rdev, BNXT_RE_COMPLETE_REMOVE); + return rc; +} + +/* wrapper for ib_init funcs */ +int _bnxt_re_ib_init(struct bnxt_re_dev *rdev) +{ + return bnxt_re_ib_init(rdev); +} + +/* wrapper for aux init funcs */ +int _bnxt_re_ib_init2(struct bnxt_re_dev *rdev) +{ + bnxt_re_ib_init_2(rdev); + return 0; /* add return for future proof */ +} + +static void bnxt_re_dev_unreg(struct bnxt_re_dev *rdev) +{ + bnxt_re_dev_dealloc(rdev); +} + +static int bnxt_re_check_bond_in_vf_parent(struct bnxt_en_dev *en_dev) +{ + struct bnxt_re_dev *rdev; + struct pci_dev *physfn; + + physfn = pci_physfn(en_dev->pdev); + rcu_read_lock(); + list_for_each_entry_rcu(rdev, &bnxt_re_dev_list, list) { + if (rdev->binfo && BNXT_EN_VF(en_dev)) { + if ((rdev->binfo->pdev1 == physfn) || + (rdev->binfo->pdev2 == physfn)) { + rcu_read_unlock(); + return 1; + } + } + } + rcu_read_unlock(); + return 0; +} + +static int bnxt_re_dev_reg(struct bnxt_re_dev **rdev, struct net_device *netdev, + struct bnxt_en_dev *en_dev) +{ + dev_dbg(NULL, "%s: netdev = %p\n", __func__, netdev); + + /* For Thor2, VF ROCE is not dependent on the PF Lag*/ + if (!BNXT_RE_CHIP_P7(en_dev->chip_num) && + bnxt_re_check_bond_in_vf_parent(en_dev)) { + dev_err(NULL, "RoCE disabled, LAG is configured on PFs\n"); + return -EINVAL; + } + + /* + * Note: + * The first argument to bnxt_re_dev_alloc() is 'netdev' and + * not 'realdev', since in the case of bonding we want to + * register the bonded virtual netdev (master) to the ib stack. + * And 'en_dev' (for L2/PCI communication) is the first slave + * device (PF0 on the card). + * In the case of a regular netdev, both netdev and the en_dev + * correspond to the same device. 
+ */ + *rdev = bnxt_re_dev_alloc(netdev, en_dev); + if (!*rdev) { + dev_err(NULL, "%s: netdev %p not handled", + ROCE_DRV_MODULE_NAME, netdev); + return -ENOMEM; + } + bnxt_re_hold(*rdev); + + return 0; +} + +static void bnxt_re_update_speed_for_bond(struct bnxt_re_dev *rdev) +{ + if (rdev->binfo->aggr_mode != + CMDQ_SET_LINK_AGGR_MODE_AGGR_MODE_ACTIVE_BACKUP) + rdev->espeed *= 2; +} + +void bnxt_re_get_link_speed(struct bnxt_re_dev *rdev) +{ +#ifdef HAVE_ETHTOOL_GLINKSETTINGS_25G + struct ethtool_link_ksettings lksettings; +#else + struct ethtool_cmd ecmd; +#endif + /* Using physical netdev for getting speed + * in case of bond devices. + * No change for normal interface. + */ + struct net_device *netdev = rdev->en_dev->net; + +#ifdef HAVE_ETHTOOL_GLINKSETTINGS_25G + if (netdev->ethtool_ops && netdev->ethtool_ops->get_link_ksettings) { + memset(&lksettings, 0, sizeof(lksettings)); + netdev->ethtool_ops->get_link_ksettings(netdev, &lksettings); + rdev->espeed = lksettings.base.speed; + } +#else + if (netdev->ethtool_ops && netdev->ethtool_ops->get_settings) { + memset(&ecmd, 0, sizeof(ecmd)); + netdev->ethtool_ops->get_settings(netdev, &ecmd); + rdev->espeed = ecmd.speed; + } +#endif + /* Store link speed as sl_espeed. espeed can change for bond device */ + rdev->sl_espeed = rdev->espeed; + if (rdev->binfo) + bnxt_re_update_speed_for_bond(rdev); +} + +static bool bnxt_re_bond_update_reqd(struct netdev_bonding_info *netdev_binfo, + struct bnxt_re_bond_info *binfo, + struct bnxt_re_dev *rdev, + struct net_device *sl_netdev) +{ + int rc = 0; + struct bnxt_re_bond_info tmp_binfo; + + memcpy(&tmp_binfo, binfo, sizeof(*binfo)); + + rc = bnxt_re_get_port_map(netdev_binfo, binfo, sl_netdev); + if (rc) { + dev_dbg(rdev_to_dev(binfo->rdev), + "%s: Error in receiving port state rdev = %p", + __func__, binfo->rdev); + return false; + } + + if (binfo->aggr_mode == tmp_binfo.aggr_mode && + binfo->active_port_map == tmp_binfo.active_port_map) { + dev_dbg(rdev_to_dev(binfo->rdev), + "%s: No need to update rdev=%p active_port_map=%#x", + __func__, binfo->rdev, binfo->active_port_map); + return false; + } + + return true; +} + +static int bnxt_re_update_fw_lag_info(struct bnxt_re_bond_info *binfo, + struct bnxt_re_dev *rdev, + bool bond_mode) +{ + struct bnxt_qplib_ctx *hctx; + int rc = 0; + + dev_dbg(rdev_to_dev(binfo->rdev), "%s: port_map for rdev=%p bond=%#x", + __func__, binfo->rdev, binfo->active_port_map); + + /* Send LAG info to BONO */ + hctx = rdev->qplib_res.hctx; + rc = bnxt_qplib_set_link_aggr_mode(&binfo->rdev->qplib_res, + binfo->aggr_mode, + BNXT_RE_MEMBER_PORT_MAP, + binfo->active_port_map, + bond_mode, hctx->stats2.fw_id); + + if (rc) + dev_err(rdev_to_dev(binfo->rdev), + "%s: setting link aggr mode rc = %d\n", __func__, rc); + + dev_info(rdev_to_dev(binfo->rdev), + "binfo->aggr_mode = %d binfo->active_port_map = 0x%x\n", + binfo->aggr_mode, binfo->active_port_map); + + return rc; +} + +void bnxt_re_remove_device(struct bnxt_re_dev *rdev, u8 op_type, + struct auxiliary_device *aux_dev) +{ + struct bnxt_re_en_dev_info *en_info; + struct bnxt_qplib_cmdq_ctx *cmdq; + struct bnxt_qplib_rcfw *rcfw; + + rcfw = &rdev->rcfw; + cmdq = &rcfw->cmdq; + if (test_bit(FIRMWARE_STALL_DETECTED, &cmdq->flags)) + set_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags); + + dev_dbg(rdev_to_dev(rdev), "%s: Removing rdev: %p\n", __func__, rdev); + if (test_and_clear_bit(BNXT_RE_FLAG_INIT_DCBX_CC_PARAM, &rdev->flags)) + bnxt_re_clear_dcbx_cc_param(rdev); + bnxt_re_dev_uninit(rdev, op_type); + en_info = 
auxiliary_get_drvdata(aux_dev); + if (en_info) { + rtnl_lock(); + en_info->rdev = NULL; + rtnl_unlock(); + if (op_type != BNXT_RE_PRE_RECOVERY_REMOVE) { + clear_bit(BNXT_RE_FLAG_EN_DEV_PRIMARY_DEV, &en_info->flags); + clear_bit(BNXT_RE_FLAG_EN_DEV_SECONDARY_DEV, &en_info->flags); + clear_bit(BNXT_RE_FLAG_EN_DEV_NETDEV_REG, &en_info->flags); + } + } + bnxt_re_dev_unreg(rdev); +} + +int bnxt_re_add_device(struct bnxt_re_dev **rdev, + struct net_device *netdev, + struct bnxt_re_bond_info *info, + u8 qp_mode, u8 op_type, u8 wqe_mode, + struct auxiliary_device *aux_dev) +{ + struct bnxt_re_en_dev_info *en_info, *en_info2 = NULL; + struct bnxt_en_dev *en_dev; + int rc = 0; + + en_info = auxiliary_get_drvdata(aux_dev); + en_dev = en_info->en_dev; + + rc = bnxt_re_dev_reg(rdev, netdev, en_dev); + if (rc) { + dev_dbg(NULL, "Failed to create add device for netdev %p\n", + netdev); + return rc; + } + + /* Set Bonding info for handling bond devices */ + (*rdev)->binfo = info; + /* Inherit gsi_qp_mode only if info is valid + * and qp_mode specified is invalid + */ + (*rdev)->gsi_ctx.gsi_qp_mode = (info && !qp_mode) ? + info->gsi_qp_mode : qp_mode; + wqe_mode = (info && wqe_mode == BNXT_QPLIB_WQE_MODE_INVALID) ? + info->wqe_mode : wqe_mode; + (*rdev)->adev = aux_dev; + /* Before updating the rdev pointer in bnxt_re_en_dev_info structure, + * take the rtnl lock to avoid accessing invalid rdev pointer from + * L2 ULP callbacks. This is applicable in all the places where rdev + * pointer is updated in bnxt_re_en_dev_info. + */ + rtnl_lock(); + en_info->rdev = *rdev; + /* + * If this is a bond interface, update second aux_dev's + * en_info->rdev also with this newly created rdev + */ + if (info) { + if (info->aux_dev2) + en_info2 = auxiliary_get_drvdata(info->aux_dev2); + + if (en_info2) + en_info2->rdev = *rdev; + } + rtnl_unlock(); + rc = bnxt_re_dev_init(*rdev, op_type, wqe_mode); + if (rc) { + (*rdev)->binfo = NULL; + bnxt_re_dev_unreg(*rdev); + *rdev = NULL; + } + dev_dbg(rdev_to_dev(*rdev), "%s: Adding rdev: %p\n", __func__, *rdev); + if (!rc) { + set_bit(BNXT_RE_FLAG_EN_DEV_NETDEV_REG, &en_info->flags); + } + return rc; +} + +struct bnxt_re_dev *bnxt_re_get_peer_pf(struct bnxt_re_dev *rdev) +{ + struct pci_dev *pdev_in = rdev->en_dev->pdev; + int tmp_bus_num, bus_num = pdev_in->bus->number; + int tmp_dev_num, dev_num = PCI_SLOT(pdev_in->devfn); + int tmp_func_num, func_num = PCI_FUNC(pdev_in->devfn); + struct bnxt_re_dev *tmp_rdev; + + rcu_read_lock(); + list_for_each_entry_rcu(tmp_rdev, &bnxt_re_dev_list, list) { + tmp_bus_num = tmp_rdev->en_dev->pdev->bus->number; + tmp_dev_num = PCI_SLOT(tmp_rdev->en_dev->pdev->devfn); + tmp_func_num = PCI_FUNC(tmp_rdev->en_dev->pdev->devfn); + + if (bus_num == tmp_bus_num && dev_num == tmp_dev_num && + func_num != tmp_func_num) { + rcu_read_unlock(); + return tmp_rdev; + } + } + rcu_read_unlock(); + return NULL; +} + +static struct bnxt_re_bond_info *bnxt_re_alloc_lag(struct bnxt_re_dev *rdev, + u8 gsi_mode, u8 wqe_mode) +{ + struct bnxt_re_bond_info *info = NULL; + struct bnxt_re_en_dev_info *en_info; + struct bnxt_re_dev *rdev_peer; + struct net_device *master; + int rc = 0; + + rtnl_lock(); + master = netdev_master_upper_dev_get(rdev->netdev); + if (!master) { + rtnl_unlock(); + return info; + } + + /* + * Hold the netdev ref count till the LAG creation to avoid freeing. + * LAG creation is in scheduled task, and there is a possibility of + * bond getting removed simultaneously. 
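+ * The reference is dropped via dev_put(master) at the exit label once LAG creation has either completed or been abandoned.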
+ */ + dev_hold(master); + rtnl_unlock(); + + rdev_peer = bnxt_re_get_peer_pf(rdev); + if (!rdev_peer) + goto exit; + + dev_dbg(rdev_to_dev(rdev), "%s: Slave1 rdev: %p\n", __func__, rdev); + dev_dbg(rdev_to_dev(rdev_peer), "%s: Slave2 rdev: %p\n", __func__, rdev_peer); + dev_dbg(rdev_to_dev(rdev), "%s: adev Slave1: %p\n", __func__, rdev->adev); + dev_dbg(rdev_to_dev(rdev_peer), "%s: adev Slave2: %p\n", + __func__, rdev_peer->adev); + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) + goto exit; + + info->master = master; + if (BNXT_RE_IS_PORT0(rdev)) { + info->slave1 = rdev->netdev; + info->slave2 = rdev_peer->netdev; + info->pdev1 = rdev->en_dev->pdev; + info->pdev2 = rdev_peer->en_dev->pdev; + info->gsi_qp_mode = rdev->gsi_ctx.gsi_qp_mode; + info->wqe_mode = rdev->chip_ctx->modes.wqe_mode; + info->aux_dev1 = rdev->adev; + info->aux_dev2 = rdev_peer->adev; + } else { + info->slave2 = rdev->netdev; + info->slave1 = rdev_peer->netdev; + info->pdev2 = rdev->en_dev->pdev; + info->pdev1 = rdev_peer->en_dev->pdev; + info->gsi_qp_mode = rdev_peer->gsi_ctx.gsi_qp_mode; + info->wqe_mode = rdev->chip_ctx->modes.wqe_mode; + info->aux_dev2 = rdev->adev; + info->aux_dev1 = rdev_peer->adev; + } + + bnxt_re_ib_uninit(rdev); + dev_dbg(rdev_to_dev(rdev), "Removing device 1\n"); + bnxt_re_put(rdev); + bnxt_re_remove_device(rdev, BNXT_RE_COMPLETE_REMOVE, rdev->adev); + bnxt_re_ib_uninit(rdev_peer); + dev_dbg(rdev_to_dev(rdev_peer), "Removing device 2\n"); + bnxt_re_remove_device(rdev_peer, BNXT_RE_COMPLETE_REMOVE, rdev_peer->adev); + + rc = bnxt_re_add_device(&info->rdev, info->master, info, + gsi_mode, BNXT_RE_COMPLETE_INIT, wqe_mode, + info->aux_dev1); + if (rc) { + info = ERR_PTR(-EIO); + goto exit; + } + + rc = bnxt_re_ib_init(info->rdev); + if (rc) { + bnxt_re_ib_uninit(info->rdev); + bnxt_re_remove_device(info->rdev, BNXT_RE_COMPLETE_REMOVE, + info->aux_dev1); + info = ERR_PTR(-EIO); + goto exit; + } + + bnxt_re_ib_init_2(info->rdev); + + dev_dbg(rdev_to_dev(info->rdev), "Added bond device rdev %p\n", + info->rdev); + rdev = info->rdev; + + en_info = auxiliary_get_drvdata(info->aux_dev1); + set_bit(BNXT_RE_FLAG_EN_DEV_PRIMARY_DEV, &en_info->flags); + + en_info = auxiliary_get_drvdata(info->aux_dev2); + set_bit(BNXT_RE_FLAG_EN_DEV_SECONDARY_DEV, &en_info->flags); + rtnl_lock(); + en_info->rdev = rdev; + /* Query link speed.. 
in rtnl context */ + bnxt_re_get_link_speed(info->rdev); + rtnl_unlock(); +exit: + dev_put(master); + return info; +} + +int bnxt_re_schedule_work(struct bnxt_re_dev *rdev, unsigned long event, + struct net_device *vlan_dev, + struct netdev_bonding_info *netdev_binfo, + struct bnxt_re_bond_info *binfo, + struct net_device *netdev, + struct auxiliary_device *adev) +{ + struct bnxt_re_work *re_work; + + /* Allocate for the deferred task */ + re_work = kzalloc(sizeof(*re_work), GFP_KERNEL); + if (!re_work) + return -ENOMEM; + + re_work->rdev = rdev; + re_work->event = event; + re_work->vlan_dev = vlan_dev; + re_work->adev = adev; + if (netdev_binfo) { + memcpy(&re_work->netdev_binfo, netdev_binfo, + sizeof(*netdev_binfo)); + re_work->binfo = binfo; + re_work->netdev = netdev; + } + INIT_WORK(&re_work->work, bnxt_re_task); + if (rdev) + atomic_inc(&rdev->sched_count); + re_work->netdev = netdev; + queue_work(bnxt_re_wq, &re_work->work); + + return 0; +} + +void bnxt_re_create_base_interface(struct bnxt_re_bond_info *binfo, bool primary) +{ + struct auxiliary_device *aux_dev; + struct bnxt_re_dev *rdev = NULL; + struct net_device *net_dev; + int rc = 0; + + if (primary) { + aux_dev = binfo->aux_dev1; + net_dev = binfo->slave1; + } else { + aux_dev = binfo->aux_dev2; + net_dev = binfo->slave2; + } + + rc = bnxt_re_add_device(&rdev, net_dev, NULL, + binfo->gsi_qp_mode, BNXT_RE_COMPLETE_INIT, + binfo->wqe_mode, aux_dev); + if (rc) { + dev_err(NULL, "Failed to add the interface"); + return; + } + dev_dbg(rdev_to_dev(rdev), "Added device netdev = %p", net_dev); + rc = bnxt_re_ib_init(rdev); + if (rc) + goto clean_dev; + + bnxt_re_ib_init_2(rdev); + /* Query link speed.. Already in rtnl context */ + rtnl_lock(); + bnxt_re_get_link_speed(rdev); + rtnl_unlock(); + return; +clean_dev: + bnxt_re_remove_device(rdev, BNXT_RE_COMPLETE_REMOVE, + aux_dev); +} + +int bnxt_re_get_slot_pf_count(struct bnxt_re_dev *rdev) +{ + struct pci_dev *pdev_in = rdev->en_dev->pdev; + int tmp_bus_num, bus_num = pdev_in->bus->number; + int tmp_dev_num, dev_num = PCI_SLOT(pdev_in->devfn); + struct bnxt_re_dev *tmp_rdev; + int pf_cnt = 0; + + rcu_read_lock(); + list_for_each_entry_rcu(tmp_rdev, &bnxt_re_dev_list, list) { + tmp_bus_num = tmp_rdev->en_dev->pdev->bus->number; + tmp_dev_num = PCI_SLOT(tmp_rdev->en_dev->pdev->devfn); + + if (bus_num == tmp_bus_num && dev_num == tmp_dev_num) + pf_cnt++; + } + rcu_read_unlock(); + return pf_cnt; +} + +void bnxt_re_destroy_lag(struct bnxt_re_dev **rdev) +{ + struct bnxt_re_dev *tmp_rdev = *rdev; + struct bnxt_re_en_dev_info *en_info; + struct bnxt_re_bond_info bkup_binfo; + struct auxiliary_device *aux_dev; + + aux_dev = tmp_rdev->adev; + dev_dbg(rdev_to_dev(tmp_rdev), "%s: LAG rdev: %p\n", __func__, tmp_rdev); + dev_dbg(rdev_to_dev(tmp_rdev), "%s: adev LAG: %p\n", __func__, aux_dev); + memcpy(&bkup_binfo, tmp_rdev->binfo, sizeof(*(tmp_rdev->binfo))); + dev_dbg(rdev_to_dev(tmp_rdev), "Destroying lag device\n"); + bnxt_re_update_fw_lag_info(tmp_rdev->binfo, tmp_rdev, false); + bnxt_re_ib_uninit(tmp_rdev); + bnxt_re_remove_device(tmp_rdev, BNXT_RE_COMPLETE_REMOVE, aux_dev); + dev_dbg(rdev_to_dev(tmp_rdev), "Destroying lag device DONE\n"); + *rdev = NULL; + + en_info = auxiliary_get_drvdata(bkup_binfo.aux_dev1); + clear_bit(BNXT_RE_FLAG_EN_DEV_PRIMARY_DEV, &en_info->flags); + en_info->binfo_valid = false; + + en_info = auxiliary_get_drvdata(bkup_binfo.aux_dev2); + clear_bit(BNXT_RE_FLAG_EN_DEV_SECONDARY_DEV, &en_info->flags); +} + +int bnxt_re_create_lag(ifbond *master, ifslave *slave, + 
struct netdev_bonding_info *netdev_binfo, + struct net_device *netdev, + struct bnxt_re_dev **rdev, + u8 gsi_mode, u8 wqe_mode) +{ + struct bnxt_re_bond_info *tmp_binfo = NULL; + struct net_device *real_dev; + struct bnxt_re_dev *tmp_rdev; + bool do_lag; + + real_dev = rdma_vlan_dev_real_dev(netdev); + if (!real_dev) + real_dev = netdev; + tmp_rdev = bnxt_re_from_netdev(real_dev); + if (!tmp_rdev) + return -EINVAL; + bnxt_re_hold(tmp_rdev); + + if (master && slave) { + do_lag = bnxt_re_is_lag_allowed(master, slave, tmp_rdev); + if (!do_lag) { + bnxt_re_put(tmp_rdev); + return -EINVAL; + } + dev_dbg(rdev_to_dev(tmp_rdev), "%s:do_lag = %d\n", + __func__, do_lag); + } + + tmp_binfo = bnxt_re_alloc_lag(tmp_rdev, gsi_mode, wqe_mode); + if (!tmp_binfo) { + bnxt_re_put(tmp_rdev); + return -ENOMEM; + } + /* + * EIO is returned only if new device creation failed. + * This means older rdev is destroyed already. + */ + + if (PTR_ERR(tmp_binfo) == -EIO) { + *rdev = NULL; + return -EIO; + } + + /* Current rdev already destroyed */ + dev_dbg(rdev_to_dev(tmp_binfo->rdev), "Scheduling for BOND info\n"); + *rdev = tmp_binfo->rdev; + /* Save netdev_bonding_info for non notifier contexts. */ + memcpy(&tmp_binfo->nbinfo, netdev_binfo, sizeof(*netdev_binfo)); + + return 0; +} + +static void bond_fill_ifbond(struct bonding *bond, struct ifbond *info) +{ + info->bond_mode = BOND_MODE(bond); + info->miimon = bond->params.miimon; + info->num_slaves = bond->slave_cnt; +} + +static void bond_fill_ifslave(struct slave *slave, struct ifslave *info) +{ + strcpy(info->slave_name, slave->dev->name); + info->link = slave->link; + info->state = bond_slave_state(slave); + info->link_failure_count = slave->link_failure_count; +} + +static int bnxt_re_check_and_create_bond(struct net_device *netdev) +{ + struct netdev_bonding_info binfo = {}; + struct bnxt_re_dev *rdev = NULL; + struct net_device *master; + struct list_head *iter; + struct bonding *bond; + struct slave *slave; + bool found = false; + int rc = -1; + + if (!netif_is_bond_slave(netdev)) + return 0; + + rtnl_lock(); + master = netdev_master_upper_dev_get(netdev); + rtnl_unlock(); + if (!master) + return rc; + + bond = netdev_priv(master); + bond_for_each_slave(bond, slave, iter) { + found = true; + break; + } + + if (found) { + bond_fill_ifslave(slave, &binfo.slave); + bond_fill_ifbond(slave->bond, &binfo.master); + + rc = bnxt_re_create_lag(&binfo.master, &binfo.slave, + &binfo, slave->dev, &rdev, + BNXT_RE_GSI_MODE_INVALID, + BNXT_QPLIB_WQE_MODE_INVALID); + } + return rc; +} + +static void bnxt_re_clear_dcbx_cc_param(struct bnxt_re_dev *rdev) +{ + struct bnxt_qplib_cc_param *cc_param = &rdev->cc_param; + struct bnxt_re_tc_rec *tc_rec; + int rc; + + if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags)) + return; + + cc_param->enable = 0; + cc_param->tos_ecn = 0; + + bnxt_re_clear_cc(rdev); + tc_rec = &rdev->tc_rec[0]; + + (void)bnxt_qplib_query_cc_param(&rdev->qplib_res, &rdev->cc_param); + rc = bnxt_re_hwrm_pri2cos_qcfg(rdev, tc_rec, -1); + if (!rc) + cc_param->roce_pri = tc_rec->roce_prio; + bnxt_re_clear_dcb(rdev, rdev->en_dev->net, tc_rec); + if (rdev->binfo) { + tc_rec = &rdev->tc_rec[1]; + bnxt_re_clear_dcb(rdev, rdev->binfo->slave2, tc_rec); + } + cc_param->alt_tos_dscp = 0; + cc_param->alt_vlan_pcp = 0; + cc_param->tos_dscp = 0; + cc_param->roce_pri = 0; + cc_param->qp1_tos_dscp = 0; +} + +int bnxt_re_init_dcbx_cc_param(struct bnxt_re_dev *rdev) +{ + struct bnxt_qplib_cc_param *cc_param = &rdev->cc_param; + struct bnxt_re_tc_rec *tc_rec; + int 
rc; + + /* + * Set the default dscp and pri values for RoCE and + * CNP. Also, set the default parameters for enabling CC + */ + cc_param->tos_ecn = 0x1; + cc_param->cc_mode = _is_chip_gen_p5_p7(rdev->chip_ctx) ? + CMDQ_MODIFY_ROCE_CC_CC_MODE_PROBABILISTIC_CC_MODE : + CMDQ_MODIFY_ROCE_CC_CC_MODE_DCTCP_CC_MODE; + + cc_param->alt_tos_dscp = BNXT_RE_DEFAULT_CNP_DSCP; + cc_param->alt_vlan_pcp = BNXT_RE_DEFAULT_CNP_PRI; + cc_param->tos_dscp = BNXT_RE_DEFAULT_ROCE_DSCP; + cc_param->roce_pri = BNXT_RE_DEFAULT_ROCE_PRI; + + tc_rec = &rdev->tc_rec[0]; + rc = bnxt_re_setup_dcb(rdev, rdev->en_dev->net, tc_rec, 0xFFFF); + if (rc) + return rc; + if (rdev->binfo) { + tc_rec = &rdev->tc_rec[1]; + rc = bnxt_re_setup_dcb(rdev, rdev->binfo->slave2, tc_rec, 2); + if (rc) { + if (rc == -EBUSY) + set_bit(BNXT_RE_FLAG_RECONFIG_SECONDARY_DEV_DCB, &rdev->flags); + else + goto clear_port0; + } + } + + /* CC is not enabled on non-p5 adapters at 10G speed */ + if (rdev->sl_espeed == SPEED_10000 && + !_is_chip_gen_p5_p7(rdev->chip_ctx)) + return 0; + + rc = bnxt_re_setup_cc(rdev); + if (rc) + goto clear_port1; + return 0; +clear_port1: + bnxt_re_clear_dcb(rdev, rdev->en_dev->net, &rdev->tc_rec[1]); +clear_port0: + bnxt_re_clear_dcb(rdev, rdev->en_dev->net, &rdev->tc_rec[0]); + return rc; +} + +/* Handle all deferred netevent tasks */ +static void bnxt_re_task(struct work_struct *work) +{ + struct netdev_bonding_info *netdev_binfo = NULL; + struct bnxt_re_bond_info bkup_binfo; + struct bnxt_re_work *re_work; + struct bonding *b_master; + struct bnxt_re_dev *rdev; + struct list_head *iter; + struct slave *b_slave; + int slave_count = 0; + ifbond *bond; + ifslave *secondary; + int rc; + + re_work = container_of(work, struct bnxt_re_work, work); + + mutex_lock(&bnxt_re_mutex); + rdev = re_work->rdev; + + /* + * If the previous rdev is deleted due to bond creation + * do not handle the event + */ + if (!bnxt_re_is_rdev_valid(rdev)) + goto exit; + + /* Ignore the event, if the device is not registered with IB stack. This + * is to avoid handling any event while the device is added/removed. + */ + if (rdev && !test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags)) { + dev_dbg(rdev_to_dev(rdev), "%s: Ignoring netdev event 0x%lx", + __func__, re_work->event); + goto done; + } + + /* Extra check to silence coverity. We shouldn't handle any event + * when rdev is NULL. 
+ */ + if (!rdev) + goto exit; + + dev_dbg(rdev_to_dev(rdev), "Scheduled work for event 0x%lx", + re_work->event); + + switch (re_work->event) { + case NETDEV_UP: + bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, + IB_EVENT_PORT_ACTIVE); + bnxt_re_net_register_async_event(rdev); + break; + + case NETDEV_DOWN: + bnxt_qplib_dbr_pacing_set_primary_pf(rdev->chip_ctx, 0); + bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, + IB_EVENT_PORT_ERR); + break; + + case NETDEV_CHANGE: + if (bnxt_re_get_link_state(rdev) == IB_PORT_DOWN) { + bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, + IB_EVENT_PORT_ERR); + break; + } else if (bnxt_re_get_link_state(rdev) == IB_PORT_ACTIVE) { + bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, + IB_EVENT_PORT_ACTIVE); + } + + /* temporarily disable the check for SR2 */ + if (!bnxt_qplib_query_cc_param(&rdev->qplib_res, + &rdev->cc_param) && + !_is_chip_p7(rdev->chip_ctx)) { + /* + * Disable CC for 10G speed + * for non-p5 devices + */ + if (rdev->sl_espeed == SPEED_10000 && + !_is_chip_gen_p5_p7(rdev->chip_ctx)) { + if (rdev->cc_param.enable) { + rdev->cc_param.enable = 0; + rdev->cc_param.tos_ecn = 0; + bnxt_re_clear_cc(rdev); + } + } else { + if (!rdev->cc_param.enable && + rdev->cc_param.admin_enable) + bnxt_re_setup_cc(rdev); + } + } + break; + + case NETDEV_UNREGISTER: + /* + * Below code will only execute for rdev->binfo. + * Do not post to upstream. Remove in next phase + */ + if (!rdev->binfo) + panic("Debug from %s %d\n", __func__, __LINE__); + + dev_dbg(rdev_to_dev(rdev), "%s: LAG rdev/adev: %p/%p\n", + __func__, rdev, rdev->adev); + memcpy(&bkup_binfo, rdev->binfo, sizeof(*(rdev->binfo))); + bnxt_re_destroy_lag(&rdev); + bnxt_re_create_base_interface(&bkup_binfo, true); + bnxt_re_create_base_interface(&bkup_binfo, false); + break; + + case NETDEV_BONDING_INFO: + netdev_binfo = &re_work->netdev_binfo; + bond = &netdev_binfo->master; + secondary = &netdev_binfo->slave; + if (rdev->binfo) { + memcpy(&bkup_binfo, rdev->binfo, sizeof(*(rdev->binfo))); + /* Change in bond state */ + dev_dbg(rdev_to_dev(rdev), + "Change in Bond state rdev = %p\n", rdev); + if (bond->num_slaves != 2) { + bnxt_re_destroy_lag(&rdev); + bnxt_re_create_base_interface(&bkup_binfo, true); + bnxt_re_create_base_interface(&bkup_binfo, false); + } + } else { + /* Check whether a BOND needs to be created or not; rdev will be valid. + * Pass gsi_mode invalid, we want to inherit from PF0 + */ + rc = bnxt_re_create_lag(bond, secondary, + netdev_binfo, re_work->netdev, &rdev, + BNXT_RE_GSI_MODE_INVALID, + BNXT_QPLIB_WQE_MODE_INVALID); + if (rc) + dev_warn(rdev_to_dev(rdev), "%s: failed to create lag %d\n", + __func__, rc); + goto exit; + } + break; + + case NETDEV_CHANGEINFODATA: + if (!netif_is_bond_master(re_work->netdev)) + break; + rtnl_lock(); + b_master = netdev_priv(re_work->netdev); + slave_count = 0; + bond_for_each_slave(b_master, b_slave, iter) + slave_count++; + rtnl_unlock(); + if (slave_count == 2) + break; + if (rdev->binfo) { + dev_dbg(rdev_to_dev(rdev), + "Delete lag %s since one interface is de-slaved\n", + rdev->dev_name); + memcpy(&bkup_binfo, rdev->binfo, sizeof(*(rdev->binfo))); + bnxt_re_destroy_lag(&rdev); + bnxt_re_create_base_interface(&bkup_binfo, true); + bnxt_re_create_base_interface(&bkup_binfo, false); + } + break; + + default: + break; + } +done: + if (rdev) { + /* memory barrier to guarantee task completion + * before decrementing sched count + */ + smp_mb__before_atomic(); + atomic_dec(&rdev->sched_count); + } +exit: + kfree(re_work); + mutex_unlock(&bnxt_re_mutex); +} + +/* + 
"Notifier chain callback can be invoked for the same chain from + different CPUs at the same time". + + For cases when the netdev is already present, our call to the + register_netdevice_notifier() will actually get the rtnl_lock() + before sending NETDEV_REGISTER and (if up) NETDEV_UP + events. + + But for cases when the netdev is not already present, the notifier + chain is subject to being invoked from different CPUs simultaneously. + + This is protected by the netdev_mutex. +*/ +static int bnxt_re_netdev_event(struct notifier_block *notifier, + unsigned long event, void *ptr) +{ + struct netdev_notifier_bonding_info *notifier_info = ptr; + struct netdev_bonding_info *netdev_binfo = NULL; + struct net_device *real_dev, *netdev; + struct bnxt_re_dev *rdev = NULL; + ifbond *master; + ifslave *slave; + + netdev = netdev_notifier_info_to_dev(ptr); + real_dev = rdma_vlan_dev_real_dev(netdev); + if (!real_dev) + real_dev = netdev; + /* In case of bonding, this will be the bond's rdev */ + rdev = bnxt_re_from_netdev(real_dev); + + netdev_dbg(netdev, "%s: Event = %s (0x%lx), rdev %s (real_dev %s)\n", + __func__, bnxt_re_netevent(event), event, + rdev ? rdev->netdev ? rdev->netdev->name : "->netdev = NULL" : "= NULL", + (real_dev == netdev) ? "= netdev" : real_dev->name); + + if (!rdev) + goto exit; + + if (!test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags)) + goto exit; + + bnxt_re_hold(rdev); + if (rdev->binfo && (rdev->binfo->slave1 == real_dev || + rdev->binfo->slave2 == real_dev)) { + dev_dbg(rdev_to_dev(rdev), + "Event %#lx on slave interface = %p\n", + event, rdev); + /* + * Handle NETDEV_CHANGE event, + * to update PFC/ECN settings on BONO + * without a shut/noshut + */ + if (event == NETDEV_CHANGE) + goto handle_event; + + if (event != NETDEV_BONDING_INFO) { + dev_dbg(rdev_to_dev(rdev), + "Ignoring event real_dev = %p master=%p slave1=%p slave2=%p\n", + real_dev, rdev->binfo->master, rdev->binfo->slave1, + rdev->binfo->slave2); + goto done; + } else { + dev_dbg(rdev_to_dev(rdev), + "Handle event real_dev = %p master=%p slave1=%p slave2=%p\n", + real_dev, rdev->binfo->master, rdev->binfo->slave1, + rdev->binfo->slave2); + netdev_binfo = &notifier_info->bonding_info; + goto handle_event; + } + } + + if (real_dev != netdev) { + switch (event) { + case NETDEV_UP: + bnxt_re_schedule_work(rdev, event, netdev, NULL, NULL, + NULL, NULL); + break; + case NETDEV_DOWN: + break; + default: + break; + } + goto done; + } +handle_event: + switch (event) { + case NETDEV_CHANGEADDR: + if (!_is_chip_gen_p5_p7(rdev->chip_ctx)) + bnxt_re_update_shadow_ah(rdev); + addrconf_addr_eui48((u8 *)&rdev->ibdev.node_guid, + rdev->netdev->dev_addr); + break; + + case NETDEV_CHANGE: + /* + * In the bonding case bono requires this + * HWRM to be sent after PFC/ECN config, + * avoiding a shut/noshut workaround. + * For other cases handle as usual + */ + if (rdev->binfo) { + u8 active_port_map; + /* Update the latest link information */ + active_port_map = bnxt_re_get_bond_link_status(rdev->binfo); + if (active_port_map) { + rdev->binfo->active_port_map = active_port_map; + bnxt_re_update_fw_lag_info(rdev->binfo, rdev, true); + } + } + bnxt_re_get_link_speed(rdev); + bnxt_re_schedule_work(rdev, event, NULL, NULL, NULL, NULL, NULL); + break; + + case NETDEV_CHANGEMTU: + /* + * In the bonding case bono requires this + * HWRM to be resent after an MTU change + */ + if (rdev && rdev->binfo && netif_is_bond_master(real_dev)) + bnxt_re_update_fw_lag_info(rdev->binfo, rdev, true); + goto done; + + case NETDEV_UNREGISTER: + /* netdev notifier will call 
NETDEV_UNREGISTER again later since + * we are still holding the reference to the netdev + */ + + /* + * Workaround to avoid ib_unregister hang. Check for module + * reference and don't free up the device if the reference + * is non-zero. Checking only for PF functions. + */ + + if (rdev && !rdev->is_virtfn && module_refcount(THIS_MODULE) > 0) { + dev_info(rdev_to_dev(rdev), + "bnxt_re:Unreg recvd when module refcnt > 0"); + dev_info(rdev_to_dev(rdev), + "bnxt_re:Close all apps using bnxt_re devs"); + dev_info(rdev_to_dev(rdev), + "bnxt_re:Remove the configfs entry created for the device"); + dev_info(rdev_to_dev(rdev), + "bnxt_re:Refer documentation for details"); + goto done; + } + + if (atomic_read(&rdev->sched_count) > 0) + goto done; + + /* + * Schedule ib device unregistration only for LAG. + */ + if (!rdev->unreg_sched && rdev->binfo) { + bnxt_re_schedule_work(rdev, NETDEV_UNREGISTER, + NULL, NULL, NULL, NULL, NULL); + rdev->unreg_sched = true; + goto done; + } + + break; + + case NETDEV_BONDING_INFO: + netdev_binfo = &notifier_info->bonding_info; + master = &netdev_binfo->master; + slave = &netdev_binfo->slave; + + dev_info(NULL, "Bonding Info Received: rdev: %p\n", rdev); + dev_info(NULL, "\tMaster: mode: %d num_slaves:%d\n", + master->bond_mode, master->num_slaves); + dev_info(NULL, "\tSlave: id: %d name:%s link:%d state:%d\n", + slave->slave_id, slave->slave_name, + slave->link, slave->state); + /* + * If the bond is already created and both slave interfaces are available, + * handle the link change as early as possible. Else, schedule it to + * the bnxt_re_task + */ + if (rdev->binfo && master->num_slaves == 2) { + /* Change in bond state */ + dev_dbg(rdev_to_dev(rdev), + "Change in Bond state rdev = %p\n", rdev); + if (slave->link == BOND_LINK_UP) { + dev_dbg(rdev_to_dev(rdev), + "LAG: Skip link up\n"); + dev_dbg(rdev_to_dev(rdev), + "LAG: Handle from worker\n"); + goto done; + } + dev_dbg(rdev_to_dev(rdev), "Updating lag device\n"); + if (bnxt_re_bond_update_reqd + (netdev_binfo, rdev->binfo, rdev, real_dev)) + bnxt_re_update_fw_lag_info + (rdev->binfo, rdev, true); + } else if (rdev->binfo || master->num_slaves == 2) { + bnxt_re_schedule_work(rdev, event, NULL, netdev_binfo, + NULL, real_dev, NULL); + } + break; + + case NETDEV_CHANGEINFODATA: + /* + * This event has to be handled to destroy the lag interface + * when a user de-slaves an interface from the bond + */ + bnxt_re_schedule_work(rdev, event, NULL, + NULL, NULL, real_dev, NULL); + break; + default: + break; + } +done: + if (rdev) + bnxt_re_put(rdev); +exit: + return NOTIFY_DONE; +} + +static struct notifier_block bnxt_re_netdev_notifier = { + .notifier_call = bnxt_re_netdev_event +}; + +#define BNXT_ADEV_NAME "bnxt_en" + +static int bnxt_re_suspend(struct auxiliary_device *adev, pm_message_t state) +{ + bnxt_re_stop(adev); + return 0; +} + +static int bnxt_re_resume(struct auxiliary_device *adev) +{ + bnxt_re_start(adev); + return 0; +} + +static void bnxt_re_remove_bond_interface(struct bnxt_re_dev *rdev, + struct auxiliary_device *adev, + bool primary_dev) +{ + struct bnxt_re_bond_info bkup_binfo; + + dev_dbg(rdev_to_dev(rdev), "%s: Removing adev: %p rdev %p\n", + __func__, adev, rdev); + memcpy(&bkup_binfo, rdev->binfo, sizeof(*rdev->binfo)); + bnxt_re_destroy_lag(&rdev); + if (!gmod_exit) { + if (primary_dev) + bnxt_re_create_base_interface(&bkup_binfo, false); + else + bnxt_re_create_base_interface(&bkup_binfo, true); + } + auxiliary_set_drvdata(adev, NULL); +} + +static void bnxt_re_remove_base_interface(struct bnxt_re_dev 
*rdev,
+					  struct auxiliary_device *adev)
+{
+	dev_dbg(rdev_to_dev(rdev), "%s: adev: %p\n", __func__, adev);
+
+	bnxt_re_ib_uninit(rdev);
+	bnxt_re_remove_device(rdev, BNXT_RE_COMPLETE_REMOVE, adev);
+	auxiliary_set_drvdata(adev, NULL);
+}
+
+/*
+ * bnxt_re_remove - Removes the roce aux device
+ * @adev - aux device pointer
+ *
+ * This function removes the roce device. It gets called in the
+ * module exit path and in the pci unbind path. If the rdev is a
+ * bond interface, it destroys the lag in the module exit path;
+ * in the pci unbind case it destroys the lag and recreates the
+ * other base interface. If the device was already removed in the
+ * error recovery path, it just unregisters with the L2 driver.
+ */
+static void bnxt_re_remove(struct auxiliary_device *adev)
+{
+	struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(adev);
+	struct bnxt_en_dev *en_dev;
+	struct bnxt_re_dev *rdev;
+	bool primary_dev = false;
+	bool secondary_dev = false;
+
+	if (!en_info)
+		return;
+
+	mutex_lock(&bnxt_re_mutex);
+	en_dev = en_info->en_dev;
+
+	rdev = en_info->rdev;
+
+	if (rdev && bnxt_re_is_rdev_valid(rdev)) {
+		if (pci_channel_offline(rdev->rcfw.pdev))
+			set_bit(ERR_DEVICE_DETACHED, &rdev->rcfw.cmdq.flags);
+		if (test_bit(BNXT_RE_FLAG_EN_DEV_PRIMARY_DEV, &en_info->flags))
+			primary_dev = true;
+		if (test_bit(BNXT_RE_FLAG_EN_DEV_SECONDARY_DEV, &en_info->flags))
+			secondary_dev = true;
+
+		/*
+		 * The en_dev_info of the primary and secondary devices holds
+		 * the same rdev pointer when LAG is configured; it is the rdev
+		 * of the bond interface.
+		 */
+		if (rdev->binfo) {
+			/* removal of bond primary or secondary interface */
+			bnxt_re_remove_bond_interface(rdev, adev, primary_dev);
+		} else if (!primary_dev && !secondary_dev) {
+			/* removal of non bond interface */
+			bnxt_re_remove_base_interface(rdev, adev);
+		} else {
+			/*
+			 * removal of bond primary/secondary interface. In this
+			 * case the bond device is already removed, so rdev->binfo
+			 * is NULL.
+ */ + auxiliary_set_drvdata(adev, NULL); + } + } else { + /* device is removed from ulp stop, unregister the net dev */ + if (test_bit(BNXT_RE_FLAG_EN_DEV_NETDEV_REG, &en_info->flags)) + bnxt_unregister_dev(en_dev); + } + kfree(en_info); + mutex_unlock(&bnxt_re_mutex); + return; +} + +static void bnxt_re_ib_init_2(struct bnxt_re_dev *rdev) +{ + int rc; + + rc = bnxt_re_get_device_stats(rdev); + if (rc) + dev_err(rdev_to_dev(rdev), + "Failed initial device stat query"); + + if (!rdev->is_virtfn) { + if (bnxt_re_init_dcbx_cc_param(rdev)) + dev_err(rdev_to_dev(rdev), + "Fail to initialize Flow control"); + else + set_bit(BNXT_RE_FLAG_INIT_DCBX_CC_PARAM, &rdev->flags); + } + bnxt_re_net_register_async_event(rdev); + +#ifdef IB_PEER_MEM_MOD_SUPPORT + rdev->peer_dev = ib_peer_mem_add_device(&rdev->ibdev); +#endif +} + +static int bnxt_re_probe(struct auxiliary_device *adev, + const struct auxiliary_device_id *id) +{ + struct bnxt_aux_priv *aux_priv = + container_of(adev, struct bnxt_aux_priv, aux_dev); + struct bnxt_re_en_dev_info *en_info; + struct bnxt_en_dev *en_dev = NULL; + struct bnxt_re_dev *rdev; + int rc = -ENODEV; + + if (aux_priv) + en_dev = aux_priv->edev; + + if (!en_dev) + return rc; + + if (en_dev->ulp_version != BNXT_ULP_VERSION) { + dev_err(NULL, "%s: probe error: bnxt_en ulp version magic %x is not compatible!\n", + ROCE_DRV_MODULE_NAME, en_dev->ulp_version); + return -EINVAL; + } + + en_info = kzalloc(sizeof(*en_info), GFP_KERNEL); + if (!en_info) + return -ENOMEM; + en_info->en_dev = en_dev; + + /* Use parents chip_type info in pre-init state to assign defaults */ + en_info->wqe_mode = BNXT_QPLIB_WQE_MODE_STATIC; + if (_is_chip_num_p7(en_dev->chip_num)) + en_info->wqe_mode = BNXT_QPLIB_WQE_MODE_VARIABLE; + + auxiliary_set_drvdata(adev, en_info); + + mutex_lock(&bnxt_re_mutex); + rc = bnxt_re_add_device(&rdev, en_dev->net, NULL, + BNXT_RE_GSI_MODE_ALL, + BNXT_RE_COMPLETE_INIT, + en_info->wqe_mode, + adev); + if (rc) { + kfree(en_info); + mutex_unlock(&bnxt_re_mutex); + return rc; + } + + rc = bnxt_re_ib_init(rdev); + if (rc) + goto err; + + bnxt_re_ib_init_2(rdev); + + dev_dbg(rdev_to_dev(rdev), "%s: adev: %p wqe_mode: %s\n", __func__, adev, + (en_info->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE) ? + "Variable" : "Static"); + + rc = bnxt_re_check_and_create_bond(rdev->netdev); + if (rc) + dev_warn(rdev_to_dev(rdev), "%s: failed to create lag. 
rc = %d", + __func__, rc); + mutex_unlock(&bnxt_re_mutex); + + return 0; + +err: + mutex_unlock(&bnxt_re_mutex); + bnxt_re_remove(adev); + + return rc; +} + +static const struct auxiliary_device_id bnxt_re_id_table[] = { + { .name = BNXT_ADEV_NAME ".rdma", }, + {}, +}; + +MODULE_DEVICE_TABLE(auxiliary, bnxt_re_id_table); + +static struct auxiliary_driver bnxt_re_driver = { + .name = "rdma", + .probe = bnxt_re_probe, + .remove = bnxt_re_remove, + .shutdown = bnxt_re_shutdown, + .suspend = bnxt_re_suspend, + .resume = bnxt_re_resume, + .id_table = bnxt_re_id_table, +}; + +static int __init bnxt_re_mod_init(void) +{ + int rc = 0; + + pr_info("%s: %s", ROCE_DRV_MODULE_NAME, version); + + bnxt_re_wq = create_singlethread_workqueue("bnxt_re"); + if (!bnxt_re_wq) + return -ENOMEM; + +#ifdef ENABLE_DEBUGFS + bnxt_re_debugfs_init(); +#endif +#ifdef HAVE_CONFIGFS_ENABLED + bnxt_re_configfs_init(); +#endif + rc = bnxt_re_register_netdevice_notifier(&bnxt_re_netdev_notifier); + if (rc) { + dev_err(NULL, "%s: Cannot register to netdevice_notifier", + ROCE_DRV_MODULE_NAME); + goto err_netdev; + } + + INIT_LIST_HEAD(&bnxt_re_dev_list); + + rc = auxiliary_driver_register(&bnxt_re_driver); + if (rc) { + pr_err("%s: Failed to register auxiliary driver\n", + ROCE_DRV_MODULE_NAME); + goto err_auxdrv; + } + + return 0; + +err_auxdrv: + bnxt_re_unregister_netdevice_notifier(&bnxt_re_netdev_notifier); + +err_netdev: +#ifdef HAVE_CONFIGFS_ENABLED + bnxt_re_configfs_exit(); +#endif +#ifdef ENABLE_DEBUGFS + bnxt_re_debugfs_remove(); +#endif + destroy_workqueue(bnxt_re_wq); + + return rc; +} + +static void __exit bnxt_re_mod_exit(void) +{ + gmod_exit = 1; + auxiliary_driver_unregister(&bnxt_re_driver); + + bnxt_re_unregister_netdevice_notifier(&bnxt_re_netdev_notifier); + +#ifdef HAVE_CONFIGFS_ENABLED + bnxt_re_configfs_exit(); +#endif +#ifdef ENABLE_DEBUGFS + bnxt_re_debugfs_remove(); +#endif + if (bnxt_re_wq) + destroy_workqueue(bnxt_re_wq); +} + +module_init(bnxt_re_mod_init); +module_exit(bnxt_re_mod_exit); diff --git a/bnxt_re-1.10.3-229.0.139.0/qplib_fp.c b/bnxt_re-1.10.3-229.0.139.0/qplib_fp.c new file mode 100644 index 0000000..7195306 --- /dev/null +++ b/bnxt_re-1.10.3-229.0.139.0/qplib_fp.c @@ -0,0 +1,3833 @@ +/* + * Copyright (c) 2015-2023, Broadcom. All rights reserved. The term + * Broadcom refers to Broadcom Inc. and/or its subsidiaries. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Author: Eddie Wai + * + * Description: Fast Path Operators + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "roce_hsi.h" + +#include "qplib_tlv.h" +#include "qplib_res.h" +#include "qplib_rcfw.h" +#include "qplib_sp.h" +#include "qplib_fp.h" +#include "compat.h" + +static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp); + +/* Flush list */ + +static void bnxt_re_legacy_cancel_phantom_processing(struct bnxt_qplib_qp *qp) +{ + qp->sq.condition = false; + qp->sq.legacy_send_phantom = false; + qp->sq.single = false; +} + +static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp) +{ + struct bnxt_qplib_cq *scq, *rcq; + + scq = qp->scq; + rcq = qp->rcq; + + if (!qp->sq.flushed) { + dev_dbg(&scq->hwq.pdev->dev, + "QPLIB: FP: Adding to SQ Flush list = %p", + qp); + bnxt_re_legacy_cancel_phantom_processing(qp); + list_add_tail(&qp->sq_flush, &scq->sqf_head); + qp->sq.flushed = true; + } + if (!qp->srq) { + if (!qp->rq.flushed) { + dev_dbg(&rcq->hwq.pdev->dev, + "QPLIB: FP: Adding to RQ Flush list = %p", + qp); + list_add_tail(&qp->rq_flush, &rcq->rqf_head); + qp->rq.flushed = true; + } + } +} + +static void bnxt_qplib_acquire_cq_flush_locks(struct bnxt_qplib_qp *qp) + __acquires(&qp->scq->flush_lock) __acquires(&qp->rcq->flush_lock) +{ + /* Interrupts are already disabled in calling functions */ + spin_lock(&qp->scq->flush_lock); + if (qp->scq == qp->rcq) + __acquire(&qp->rcq->flush_lock); + else + spin_lock(&qp->rcq->flush_lock); +} + +static void bnxt_qplib_release_cq_flush_locks(struct bnxt_qplib_qp *qp) + __releases(&qp->scq->flush_lock) __releases(&qp->rcq->flush_lock) +{ + if (qp->scq == qp->rcq) + __release(&qp->rcq->flush_lock); + else + spin_unlock(&qp->rcq->flush_lock); + spin_unlock(&qp->scq->flush_lock); +} + +void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp) +{ + + bnxt_qplib_acquire_cq_flush_locks(qp); + __bnxt_qplib_add_flush_qp(qp); + bnxt_qplib_release_cq_flush_locks(qp); +} + +static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp) +{ + if (qp->sq.flushed) { + qp->sq.flushed = false; + list_del(&qp->sq_flush); + } + if (!qp->srq) { + if (qp->rq.flushed) { + qp->rq.flushed = false; + list_del(&qp->rq_flush); + } + } +} + +void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp) +{ + + bnxt_qplib_acquire_cq_flush_locks(qp); + __clean_cq(qp->scq, (u64)(unsigned long)qp); + qp->sq.hwq.prod = 0; + qp->sq.hwq.cons = 0; + qp->sq.swq_start = 0; + qp->sq.swq_last = 0; + __clean_cq(qp->rcq, (u64)(unsigned long)qp); + qp->rq.hwq.prod = 0; + qp->rq.hwq.cons = 0; + qp->rq.swq_start = 0; + qp->rq.swq_last = 0; + + __bnxt_qplib_del_flush_qp(qp); + bnxt_qplib_release_cq_flush_locks(qp); +} + +static void bnxt_qpn_cqn_sched_task(struct work_struct *work) +{ + struct bnxt_qplib_nq_work *nq_work = + container_of(work, struct bnxt_qplib_nq_work, work); + + struct bnxt_qplib_cq *cq = nq_work->cq; + struct bnxt_qplib_nq *nq = nq_work->nq; + + if (cq && nq) { + spin_lock_bh(&cq->compl_lock); + if 
(nq->cqn_handler) { + dev_dbg(&nq->res->pdev->dev, + "%s:Trigger cq = %p event nq = %p\n", + __func__, cq, nq); + nq->cqn_handler(nq, cq); + } + spin_unlock_bh(&cq->compl_lock); + } + kfree(nq_work); +} + +/* NQ */ +static int bnxt_qplib_process_dbqn(struct bnxt_qplib_nq *nq, + struct nq_dbq_event *nqe) +{ + u32 db_xid, db_type, db_pfid, db_dpi; + + if ((nqe->event) != + NQ_DBQ_EVENT_EVENT_DBQ_THRESHOLD_EVENT) { + dev_warn(&nq->res->pdev->dev, + "QPLIB: DBQ event 0x%x not handled", nqe->event); + return -EINVAL; + } + db_type = le32_to_cpu(nqe->db_type_db_xid) & NQ_DBQ_EVENT_DB_TYPE_MASK + >> NQ_DBQ_EVENT_DB_TYPE_SFT; + db_xid = le32_to_cpu(nqe->db_type_db_xid) & NQ_DBQ_EVENT_DB_XID_MASK + >> NQ_DBQ_EVENT_DB_XID_SFT; + db_pfid = le16_to_cpu(nqe->db_pfid) & NQ_DBQ_EVENT_DB_DPI_MASK + >> NQ_DBQ_EVENT_DB_DPI_SFT; + db_dpi = le32_to_cpu(nqe->db_dpi) & NQ_DBQ_EVENT_DB_DPI_MASK + >> NQ_DBQ_EVENT_DB_DPI_SFT; + + dev_dbg(&nq->res->pdev->dev, + "QPLIB: DBQ notification xid 0x%x type 0x%x pfid 0x%x dpi 0x%x", + db_xid, db_type, db_pfid, db_dpi); + bnxt_re_schedule_dbq_event(nq->res); + return 0; +} + +static void bnxt_qplib_put_hdr_buf(struct pci_dev *pdev, + struct bnxt_qplib_hdrbuf *buf) +{ + dma_free_coherent(&pdev->dev, buf->len, buf->va, buf->dma_map); + kfree(buf); +} + +static void *bnxt_qplib_get_hdr_buf(struct pci_dev *pdev, u32 step, u32 cnt) +{ + struct bnxt_qplib_hdrbuf *hdrbuf; + u32 len; + + hdrbuf = kmalloc(sizeof(*hdrbuf), GFP_KERNEL); + if (!hdrbuf) + return NULL; + + len = ALIGN((step * cnt), PAGE_SIZE); + hdrbuf->va = dma_alloc_coherent(&pdev->dev, len, + &hdrbuf->dma_map, GFP_KERNEL); + if (!hdrbuf->va) + goto out; + + hdrbuf->len = len; + hdrbuf->step = step; + return hdrbuf; +out: + kfree(hdrbuf); + return NULL; +} + +void bnxt_qplib_free_hdr_buf(struct bnxt_qplib_res *res, + struct bnxt_qplib_qp *qp) +{ + if (qp->rq_hdr_buf) { + bnxt_qplib_put_hdr_buf(res->pdev, qp->rq_hdr_buf); + qp->rq_hdr_buf = NULL; + } + + if (qp->sq_hdr_buf) { + bnxt_qplib_put_hdr_buf(res->pdev, qp->sq_hdr_buf); + qp->sq_hdr_buf = NULL; + } +} + +int bnxt_qplib_alloc_hdr_buf(struct bnxt_qplib_res *res, + struct bnxt_qplib_qp *qp, u32 sstep, u32 rstep) +{ + struct pci_dev *pdev; + + pdev = res->pdev; + if (sstep) { + qp->sq_hdr_buf = bnxt_qplib_get_hdr_buf(pdev, sstep, + qp->sq.max_wqe); + if (!qp->sq_hdr_buf) { + dev_err(&pdev->dev, "QPLIB: Failed to get sq_hdr_buf"); + return -ENOMEM; + } + } + + if (rstep) { + qp->rq_hdr_buf = bnxt_qplib_get_hdr_buf(pdev, rstep, + qp->rq.max_wqe); + if (!qp->rq_hdr_buf) { + dev_err(&pdev->dev, "QPLIB: Failed to get rq_hdr_buf"); + goto err_put_sq_hdr_buf; + } + } + + return 0; +err_put_sq_hdr_buf: + bnxt_qplib_put_hdr_buf(res->pdev, qp->sq_hdr_buf); + qp->sq_hdr_buf = NULL; + return -ENOMEM; +} + +/** + * clean_nq - Invalidate cqe from given nq. + * @cq - Completion queue + * + * Traverse whole notification queue and invalidate any completion + * associated cq handler provided by caller. + * Note - This function traverse the hardware queue but do not update + * consumer index. Invalidated cqe(marked from this function) will be + * ignored from actual completion of notification queue. 
+ */ +static void clean_nq(struct bnxt_qplib_cq *cq) +{ + struct bnxt_qplib_hwq *nq_hwq = NULL; + struct bnxt_qplib_nq *nq = NULL; + struct nq_base *hw_nqe = NULL; + struct nq_cn *nqcne = NULL; + u32 peek_flags, peek_cons; + u64 q_handle; + u32 type; + int i; + + nq = cq->nq; + nq_hwq = &nq->hwq; + + spin_lock_bh(&nq_hwq->lock); + peek_flags = nq->nq_db.dbinfo.flags; + peek_cons = nq_hwq->cons; + for (i = 0; i < nq_hwq->max_elements; i++) { + hw_nqe = bnxt_qplib_get_qe(nq_hwq, peek_cons, NULL); + if (!NQE_CMP_VALID(hw_nqe, peek_flags)) + break; + + /* The valid test of the entry must be done first + * before reading any further. + */ + dma_rmb(); + type = le16_to_cpu(hw_nqe->info10_type) & + NQ_BASE_TYPE_MASK; + + /* Processing only NQ_BASE_TYPE_CQ_NOTIFICATION */ + if (type == NQ_BASE_TYPE_CQ_NOTIFICATION) { + nqcne = (struct nq_cn *)hw_nqe; + + q_handle = le32_to_cpu(nqcne->cq_handle_low); + q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high) << 32; + if (q_handle == (u64)cq) { + nqcne->cq_handle_low = 0; + nqcne->cq_handle_high = 0; + cq->cnq_events++; + } + } + bnxt_qplib_hwq_incr_cons(nq_hwq->max_elements, &peek_cons, + 1, &peek_flags); + } + spin_unlock_bh(&nq_hwq->lock); +} + +/* Wait for receiving all NQEs for this CQ. + * clean_nq is tried 100 times, each time clean_cq + * loops upto budget times. budget is based on the + * number of CQs shared by that NQ. So any NQE from + * CQ would be already in the NQ. + */ +static void __wait_for_all_nqes(struct bnxt_qplib_cq *cq, u16 cnq_events) +{ + u32 retry_cnt = 100; + u16 total_events; + + if (!cnq_events) { + clean_nq(cq); + return; + } + while (retry_cnt--) { + total_events = cq->cnq_events; + + /* Increment total_events by 1 if any CREQ event received with CQ notification */ + if (cq->is_cq_err_event) + total_events++; + + if (cnq_events == total_events) { + dev_dbg(&cq->nq->res->pdev->dev, + "QPLIB: NQ cleanup - Received all NQ events"); + return; + } + msleep(1); + clean_nq(cq); + } +} + +static void bnxt_qplib_service_nq( +#ifdef HAS_TASKLET_SETUP + struct tasklet_struct *t +#else + unsigned long data +#endif + ) +{ +#ifdef HAS_TASKLET_SETUP + struct bnxt_qplib_nq *nq = from_tasklet(nq, t, nq_tasklet); +#else + struct bnxt_qplib_nq *nq = (struct bnxt_qplib_nq *)data; +#endif + struct bnxt_qplib_hwq *nq_hwq = &nq->hwq; + int rc, budget = nq->budget; + struct bnxt_qplib_res *res; + struct bnxt_qplib_cq *cq; + struct pci_dev *pdev; + struct nq_base *nqe; + u32 hw_polled = 0; + u64 q_handle; + u32 type; + + res = nq->res; + pdev = res->pdev; + + spin_lock_bh(&nq_hwq->lock); + /* Service the NQ until empty or budget expired */ + while (budget--) { + nqe = bnxt_qplib_get_qe(nq_hwq, nq_hwq->cons, NULL); + if (!NQE_CMP_VALID(nqe, nq->nq_db.dbinfo.flags)) + break; + /* The valid test of the entry must be done first before + * reading any further. 
+ */ + dma_rmb(); + type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK; + switch (type) { + case NQ_BASE_TYPE_CQ_NOTIFICATION: + { + struct nq_cn *nqcne = (struct nq_cn *)nqe; + + q_handle = le32_to_cpu(nqcne->cq_handle_low); + q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high) << 32; + cq = (struct bnxt_qplib_cq *)q_handle; + if (!cq) + break; + cq->toggle = (le16_to_cpu(nqe->info10_type) & NQ_CN_TOGGLE_MASK) >> NQ_CN_TOGGLE_SFT; + cq->dbinfo.toggle = cq->toggle; + bnxt_qplib_armen_db(&cq->dbinfo, + DBC_DBC_TYPE_CQ_ARMENA); + spin_lock_bh(&cq->compl_lock); + atomic_set(&cq->arm_state, 0) ; + if (!nq->cqn_handler(nq, (cq))) + nq->stats.num_cqne_processed++; + else + dev_warn(&pdev->dev, + "QPLIB: cqn - type 0x%x not handled", + type); + cq->cnq_events++; + spin_unlock_bh(&cq->compl_lock); + break; + } + case NQ_BASE_TYPE_SRQ_EVENT: + { + struct bnxt_qplib_srq *srq; + struct nq_srq_event *nqsrqe = + (struct nq_srq_event *)nqe; + + q_handle = le32_to_cpu(nqsrqe->srq_handle_low); + q_handle |= (u64)le32_to_cpu(nqsrqe->srq_handle_high) << 32; + srq = (struct bnxt_qplib_srq *)q_handle; + srq->toggle = (le16_to_cpu(nqe->info10_type) & NQ_CN_TOGGLE_MASK) + >> NQ_CN_TOGGLE_SFT; + srq->dbinfo.toggle = srq->toggle; + bnxt_qplib_armen_db(&srq->dbinfo, + DBC_DBC_TYPE_SRQ_ARMENA); + if (!nq->srqn_handler(nq, + (struct bnxt_qplib_srq *)q_handle, + nqsrqe->event)) + nq->stats.num_srqne_processed++; + else + dev_warn(&pdev->dev, + "QPLIB: SRQ event 0x%x not handled", + nqsrqe->event); + break; + } + case NQ_BASE_TYPE_DBQ_EVENT: + rc = bnxt_qplib_process_dbqn(nq, + (struct nq_dbq_event *)nqe); + nq->stats.num_dbqne_processed++; + break; + default: + dev_warn(&pdev->dev, + "QPLIB: nqe with opcode = 0x%x not handled", + type); + break; + } + hw_polled++; + bnxt_qplib_hwq_incr_cons(nq_hwq->max_elements, &nq_hwq->cons, + 1, &nq->nq_db.dbinfo.flags); + } + nqe = bnxt_qplib_get_qe(nq_hwq, nq_hwq->cons, NULL); + if (!NQE_CMP_VALID(nqe, nq->nq_db.dbinfo.flags)) { + nq->stats.num_nq_rearm++; + bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, res->cctx, true); + } else if (nq->requested) { + /* Update the consumer index only and dont enable arm */ + bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, res->cctx, false); + nq->stats.num_tasklet_resched++; + tasklet_schedule(&nq->nq_tasklet); + } + dev_dbg(&pdev->dev, "QPLIB: cqn/srqn/dbqn "); + dev_dbg(&pdev->dev, + "QPLIB: serviced %llu/%llu/%llu budget 0x%x reaped 0x%x", + nq->stats.num_cqne_processed, nq->stats.num_srqne_processed, + nq->stats.num_dbqne_processed, budget, hw_polled); + dev_dbg(&pdev->dev, + "QPLIB: resched_cnt = %llu arm_count = %llu\n", + nq->stats.num_tasklet_resched, nq->stats.num_nq_rearm); + spin_unlock_bh(&nq_hwq->lock); +} + +/* bnxt_re_synchronize_nq - self polling notification queue. + * @nq - notification queue pointer + * + * This function will start polling entries of a given notification queue + * for all pending entries. + * This function is useful to synchronize notification entries while resources + * are going away. 
+ * + * + * Returns: Nothing + * + */ +void bnxt_re_synchronize_nq(struct bnxt_qplib_nq *nq) +{ + nq->budget = nq->hwq.max_elements; + bnxt_qplib_service_nq( +#ifdef HAS_TASKLET_SETUP + (struct tasklet_struct *)&nq->nq_tasklet +#else + (unsigned long)nq +#endif + ); +} + +static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance) +{ + struct bnxt_qplib_nq *nq = dev_instance; + struct bnxt_qplib_hwq *nq_hwq = &nq->hwq; + u32 sw_cons; + + /* Prefetch the NQ element */ + sw_cons = HWQ_CMP(nq_hwq->cons, nq_hwq); + prefetch(bnxt_qplib_get_qe(nq_hwq, sw_cons, NULL)); + + /* Fan out to CPU affinitized kthreads? */ + tasklet_schedule(&nq->nq_tasklet); + + return IRQ_HANDLED; +} + +void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill) +{ + struct bnxt_qplib_res *res; + + if (!nq->requested) + return; + + nq->requested = false; + res = nq->res; + /* Mask h/w interrupt */ + bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, res->cctx, false); + /* Sync with last running IRQ handler */ + synchronize_irq(nq->msix_vec); + irq_set_affinity_hint(nq->msix_vec, NULL); + free_irq(nq->msix_vec, nq); + kfree(nq->name); + nq->name = NULL; + + /* Cleanup Tasklet */ + if (kill) + tasklet_kill(&nq->nq_tasklet); + tasklet_disable(&nq->nq_tasklet); +} + +void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq) +{ + if (nq->cqn_wq) { + destroy_workqueue(nq->cqn_wq); + nq->cqn_wq = NULL; + } + /* Make sure the HW is stopped! */ + bnxt_qplib_nq_stop_irq(nq, true); + + nq->nq_db.reg.bar_reg = NULL; + nq->nq_db.db = NULL; + + nq->cqn_handler = NULL; + nq->srqn_handler = NULL; + nq->msix_vec = 0; +} + +int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx, + int msix_vector, bool need_init) +{ + struct bnxt_qplib_res *res; + int rc; + + res = nq->res; + if (nq->requested) + return -EFAULT; + + nq->msix_vec = msix_vector; + if (need_init) + compat_tasklet_init(&nq->nq_tasklet, bnxt_qplib_service_nq, + (unsigned long)nq); + else + tasklet_enable(&nq->nq_tasklet); + + nq->name = kasprintf(GFP_KERNEL, "bnxt_re-nq-%d@pci:%s", + nq_indx, pci_name(res->pdev)); + if (!nq->name) + return -ENOMEM; + rc = request_irq(nq->msix_vec, bnxt_qplib_nq_irq, 0, nq->name, nq); + if (rc) { + kfree(nq->name); + nq->name = NULL; + tasklet_disable(&nq->nq_tasklet); + return rc; + } + + cpumask_clear(&nq->mask); + cpumask_set_cpu(nq_indx, &nq->mask); + rc = irq_set_affinity_hint(nq->msix_vec, &nq->mask); + if (rc) + dev_warn(&res->pdev->dev, + "QPLIB: set affinity failed; vector: %d nq_idx: %d\n", + nq->msix_vec, nq_indx); + nq->requested = true; + bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, res->cctx, true); + + return rc; +} + +static void bnxt_qplib_map_nq_db(struct bnxt_qplib_nq *nq, u32 reg_offt) +{ + struct bnxt_qplib_reg_desc *dbreg; + struct bnxt_qplib_nq_db *nq_db; + struct bnxt_qplib_res *res; + + nq_db = &nq->nq_db; + res = nq->res; + dbreg = &res->dpi_tbl.ucreg; + + nq_db->reg.bar_id = dbreg->bar_id; + nq_db->reg.bar_base = dbreg->bar_base; + nq_db->reg.bar_reg = dbreg->bar_reg + reg_offt; + nq_db->reg.len = _is_chip_gen_p5_p7(res->cctx) ? 
sizeof(u64) : + sizeof(u32); + + nq_db->dbinfo.db = nq_db->reg.bar_reg; + nq_db->dbinfo.hwq = &nq->hwq; + nq_db->dbinfo.xid = nq->ring_id; + nq_db->dbinfo.seed = nq->ring_id; + nq_db->dbinfo.flags = 0; + spin_lock_init(&nq_db->dbinfo.lock); + nq_db->dbinfo.shadow_key = BNXT_QPLIB_DBR_KEY_INVALID; + nq_db->dbinfo.res = nq->res; + + return; +} + +int bnxt_qplib_enable_nq(struct bnxt_qplib_nq *nq, int nq_idx, + int msix_vector, int bar_reg_offset, + cqn_handler_t cqn_handler, + srqn_handler_t srqn_handler) +{ + struct pci_dev *pdev; + int rc; + + pdev = nq->res->pdev; + nq->cqn_handler = cqn_handler; + nq->srqn_handler = srqn_handler; + nq->load = 0; + mutex_init(&nq->lock); + + /* Have a task to schedule CQ notifiers in post send case */ + nq->cqn_wq = create_singlethread_workqueue("bnxt_qplib_nq"); + if (!nq->cqn_wq) + return -ENOMEM; + + bnxt_qplib_map_nq_db(nq, bar_reg_offset); + rc = bnxt_qplib_nq_start_irq(nq, nq_idx, msix_vector, true); + if (rc) { + dev_err(&pdev->dev, + "QPLIB: Failed to request irq for nq-idx %d", nq_idx); + goto fail; + } + dev_dbg(&pdev->dev, "QPLIB: NQ max = 0x%x", nq->hwq.max_elements); + + return 0; +fail: + bnxt_qplib_disable_nq(nq); + return rc; +} + +void bnxt_qplib_free_nq_mem(struct bnxt_qplib_nq *nq) +{ + if (nq->hwq.max_elements) { + bnxt_qplib_free_hwq(nq->res, &nq->hwq); + nq->hwq.max_elements = 0; + } +} + +int bnxt_qplib_alloc_nq_mem(struct bnxt_qplib_res *res, + struct bnxt_qplib_nq *nq) +{ + struct bnxt_qplib_hwq_attr hwq_attr = {}; + struct bnxt_qplib_sg_info sginfo = {}; + + nq->res = res; + if (!nq->hwq.max_elements || + nq->hwq.max_elements > BNXT_QPLIB_NQE_MAX_CNT) + nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT; + + sginfo.pgsize = PAGE_SIZE; + sginfo.pgshft = PAGE_SHIFT; + hwq_attr.res = res; + hwq_attr.sginfo = &sginfo; + hwq_attr.depth = nq->hwq.max_elements; + hwq_attr.stride = sizeof(struct nq_base); + hwq_attr.type = _get_hwq_type(res); + if (bnxt_qplib_alloc_init_hwq(&nq->hwq, &hwq_attr)) { + dev_err(&res->pdev->dev, "QPLIB: FP NQ allocation failed"); + return -ENOMEM; + } + nq->budget = 8; + return 0; +} + +/* SRQ */ +static int __qplib_destroy_srq(struct bnxt_qplib_rcfw *rcfw, + struct bnxt_qplib_srq *srq) +{ + struct creq_destroy_srq_resp resp = {}; + struct bnxt_qplib_cmdqmsg msg = {}; + struct cmdq_destroy_srq req = {}; + /* Configure the request */ + req.srq_cid = cpu_to_le32(srq->id); + bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_DESTROY_SRQ, + sizeof(req)); + bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), + sizeof(resp), 0); + return bnxt_qplib_rcfw_send_message(rcfw, &msg); +} + +int bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res, + struct bnxt_qplib_srq *srq) +{ + struct bnxt_qplib_rcfw *rcfw = res->rcfw; + int rc; + + rc = __qplib_destroy_srq(rcfw, srq); + if (rc) + return rc; + bnxt_qplib_free_hwq(res, &srq->hwq); + kfree(srq->swq); + return 0; +} + +int bnxt_qplib_create_srq(struct bnxt_qplib_res *res, + struct bnxt_qplib_srq *srq) +{ + struct bnxt_qplib_hwq_attr hwq_attr = {}; + struct bnxt_qplib_rcfw *rcfw = res->rcfw; + struct creq_create_srq_resp resp = {}; + struct bnxt_qplib_cmdqmsg msg = {}; + struct cmdq_create_srq req = {}; + u16 pg_sz_lvl = 0; + u16 srq_size; + u8 cmd_size; + int rc, idx; + + hwq_attr.res = res; + hwq_attr.sginfo = &srq->sginfo; + hwq_attr.depth = srq->max_wqe; + hwq_attr.stride = srq->wqe_size; + hwq_attr.type = HWQ_TYPE_QUEUE; + rc = bnxt_qplib_alloc_init_hwq(&srq->hwq, &hwq_attr); + if (rc) + goto exit; + /* Configure the request */ + req.dpi = cpu_to_le32(srq->dpi->dpi); 
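+	/*
+	 * Editor's note (descriptive sketch, not part of the original patch):
+	 * the fields filled in below describe the SRQ ring geometry to the
+	 * firmware. The ring size is clamped to a 16-bit value, and pg_size_lvl
+	 * packs both the base page size of the hardware queue and its PBL
+	 * indirection level.
+	 */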
+ req.srq_handle = cpu_to_le64(srq); + srq_size = min_t(u32, srq->hwq.depth, U16_MAX); + req.srq_size = cpu_to_le16(srq_size); + pg_sz_lvl |= (_get_base_pg_size(&srq->hwq) << + CMDQ_CREATE_SRQ_PG_SIZE_SFT); + pg_sz_lvl |= (srq->hwq.level & CMDQ_CREATE_SRQ_LVL_MASK); + req.pg_size_lvl = cpu_to_le16(pg_sz_lvl); + req.pbl = cpu_to_le64(_get_base_addr(&srq->hwq)); + req.pd_id = cpu_to_le32(srq->pd->id); + req.eventq_id = cpu_to_le16(srq->eventq_hw_ring_id); + if (srq->small_recv_wqe_sup) + req.srq_fwo = (srq->max_sge << CMDQ_CREATE_SRQ_SRQ_SGE_SFT) & + CMDQ_CREATE_SRQ_SRQ_SGE_MASK; + cmd_size = sizeof(req); + if (!_is_steering_tag_supported(res)) + cmd_size -= BNXT_RE_STEERING_TAG_SUPPORTED_CMD_SIZE; + + bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_CREATE_SRQ, + cmd_size); + bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, cmd_size, + sizeof(resp), 0); + rc = bnxt_qplib_rcfw_send_message(rcfw, &msg); + if (rc) + goto fail; + if (!srq->is_user) { + srq->swq = kcalloc(srq->hwq.depth, sizeof(*srq->swq), + GFP_KERNEL); + if (!srq->swq) + goto srq_fail; + srq->start_idx = 0; + srq->last_idx = srq->hwq.depth - 1; + for (idx = 0; idx < srq->hwq.depth; idx++) + srq->swq[idx].next_idx = idx + 1; + srq->swq[srq->last_idx].next_idx = -1; + } + + spin_lock_init(&srq->lock); + srq->id = le32_to_cpu(resp.xid); + srq->cctx = res->cctx; + srq->dbinfo.hwq = &srq->hwq; + srq->dbinfo.xid = srq->id; + srq->dbinfo.db = srq->dpi->dbr; + srq->dbinfo.max_slot = 1; + srq->dbinfo.priv_db = res->dpi_tbl.priv_db; + srq->dbinfo.flags = 0; + spin_lock_init(&srq->dbinfo.lock); + srq->dbinfo.shadow_key = BNXT_QPLIB_DBR_KEY_INVALID; + srq->dbinfo.shadow_key_arm_ena = BNXT_QPLIB_DBR_KEY_INVALID; + srq->dbinfo.res = res; + srq->dbinfo.seed = srq->id; + if (srq->threshold) + bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA); + srq->arm_req = false; + return 0; +srq_fail: + __qplib_destroy_srq(rcfw, srq); +fail: + bnxt_qplib_free_hwq(res, &srq->hwq); +exit: + return rc; +} + +int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res, + struct bnxt_qplib_srq *srq) +{ + struct bnxt_qplib_hwq *srq_hwq = &srq->hwq; + u32 avail = 0; + +#ifdef ENABLE_FP_SPINLOCK + spin_lock_irqsave(&srq_hwq->lock, flags); +#endif + avail = __bnxt_qplib_get_avail(srq_hwq); + if (avail <= srq->threshold) { + srq->arm_req = false; + bnxt_qplib_srq_arm_db(&srq->dbinfo); + } else { + /* Deferred arming */ + srq->arm_req = true; + } +#ifdef ENABLE_FP_SPINLOCK + spin_unlock_irqrestore(&srq_hwq->lock, flags); +#endif + return 0; +} + +int bnxt_qplib_query_srq(struct bnxt_qplib_res *res, + struct bnxt_qplib_srq *srq) +{ + struct bnxt_qplib_rcfw *rcfw = res->rcfw; + struct creq_query_srq_resp resp = {}; + struct bnxt_qplib_cmdqmsg msg = {}; + struct creq_query_srq_resp_sb *sb; + struct bnxt_qplib_rcfw_sbuf sbuf; + struct cmdq_query_srq req = {}; + int rc = 0; + + bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_QUERY_SRQ, + sizeof(req)); + sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS); + sbuf.sb = dma_zalloc_coherent(&rcfw->pdev->dev, sbuf.size, + &sbuf.dma_addr, GFP_KERNEL); + if (!sbuf.sb) + return -ENOMEM; + req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS; + req.srq_cid = cpu_to_le32(srq->id); + bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req), + sizeof(resp), 0); + rc = bnxt_qplib_rcfw_send_message(rcfw, &msg); + /* TODO: What to do with the query? 
*/ + dma_free_coherent(&rcfw->pdev->dev, sbuf.size, + sbuf.sb, sbuf.dma_addr); + + return rc; +} + +int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq, + struct bnxt_qplib_swqe *wqe) +{ + struct bnxt_qplib_hwq *srq_hwq = &srq->hwq; + struct sq_sge *hw_sge; + struct rq_wqe *srqe; + int i, rc = 0, next; + u32 avail; + + spin_lock(&srq_hwq->lock); + if (srq->start_idx == srq->last_idx) { + dev_err(&srq_hwq->pdev->dev, "QPLIB: FP: SRQ (0x%x) is full!", + srq->id); + rc = -EINVAL; + spin_unlock(&srq_hwq->lock); + goto done; + } + next = srq->start_idx; + srq->start_idx = srq->swq[next].next_idx; + spin_unlock(&srq_hwq->lock); + + srqe = bnxt_qplib_get_qe(srq_hwq, srq_hwq->prod, NULL); + memset(srqe, 0, srq->wqe_size); + /* Calculate wqe_size and data_len */ + for (i = 0, hw_sge = (struct sq_sge *)srqe->data; + i < wqe->num_sge; i++, hw_sge++) { + hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr); + hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey); + hw_sge->size = cpu_to_le32(wqe->sg_list[i].size); + } + srqe->wqe_type = wqe->type; + srqe->flags = wqe->flags; + srqe->wqe_size = wqe->num_sge + + ((offsetof(typeof(*srqe), data) + 15) >> 4); + if (!wqe->num_sge) + srqe->wqe_size++; + srqe->wr_id[0] = cpu_to_le32((u32)next); + srq->swq[next].wr_id = wqe->wr_id; + bnxt_qplib_hwq_incr_prod(&srq->dbinfo, srq_hwq, srq->dbinfo.max_slot); + /* retaining srq_hwq->cons for this logic actually the lock is only + * required to read srq_hwq->cons. + */ + spin_lock(&srq_hwq->lock); + avail = __bnxt_qplib_get_avail(srq_hwq); + spin_unlock(&srq_hwq->lock); + /* Ring DB */ + bnxt_qplib_ring_prod_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ); + if (srq->arm_req && avail <= srq->threshold) { + srq->arm_req = false; + bnxt_qplib_srq_arm_db(&srq->dbinfo); + } +done: + return rc; +} + +/* QP */ +static int __qplib_destroy_qp(struct bnxt_qplib_rcfw *rcfw, + struct bnxt_qplib_qp *qp) +{ + struct creq_destroy_qp_resp resp = {}; + struct bnxt_qplib_cmdqmsg msg = {}; + struct cmdq_destroy_qp req = {}; + + req.qp_cid = cpu_to_le32(qp->id); + bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_DESTROY_QP, + sizeof(req)); + bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), + sizeof(resp), 0); + return bnxt_qplib_rcfw_send_message(rcfw, &msg); +} + +static int bnxt_qplib_alloc_init_swq(struct bnxt_qplib_q *que) +{ + int indx; + + que->swq = kcalloc(que->max_sw_wqe, sizeof(*que->swq), GFP_KERNEL); + if (!que->swq) + return -ENOMEM; + + que->swq_start = 0; + que->swq_last = que->max_sw_wqe - 1; + for (indx = 0; indx < que->max_sw_wqe; indx++) + que->swq[indx].next_idx = indx + 1; + que->swq[que->swq_last].next_idx = 0; /* Make it circular */ + que->swq_last = 0; + + return 0; +} + +static struct bnxt_qplib_swq *bnxt_qplib_get_swqe(struct bnxt_qplib_q *que, + u32 *swq_idx) +{ + u32 idx; + + idx = que->swq_start; + if (swq_idx) + *swq_idx = idx; + return &que->swq[idx]; +} + +static void bnxt_qplib_swq_mod_start(struct bnxt_qplib_q *que, u32 idx) +{ + que->swq_start = que->swq[idx].next_idx; +} + +static u32 bnxt_qplib_get_stride(void) +{ + return sizeof(struct sq_sge); +} + +static u32 bnxt_qplib_get_depth(struct bnxt_qplib_q *que, u8 wqe_mode, bool is_sq) +{ + u32 slots; + + /* Queue depth is the number of slots. 
*/ + slots = (que->wqe_size * que->max_wqe) / bnxt_qplib_get_stride(); + /* For variable WQE mode, need to align the slots to 256 */ + if (wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE && is_sq) + slots = ALIGN(slots, BNXT_VAR_MAX_SLOT_ALIGN); + return slots; +} + +static u32 _set_sq_size(struct bnxt_qplib_q *que, u8 wqe_mode) +{ + /* For static wqe mode, sq_size is max_wqe. + * For variable wqe mode, sq_size is que depth. + */ + return (wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ? + que->max_wqe : bnxt_qplib_get_depth(que, wqe_mode, true); +} + +static u32 _set_sq_max_slot(u8 wqe_mode) +{ + /* for static mode index divisor is 8 */ + return (wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ? + sizeof(struct sq_send) / sizeof(struct sq_sge) : 1; +} + +static u32 _set_rq_max_slot(struct bnxt_qplib_q *que) +{ + return (que->wqe_size / sizeof(struct sq_sge)); +} + +int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) +{ + struct bnxt_qplib_hwq_attr hwq_attr = {}; + struct bnxt_qplib_rcfw *rcfw = res->rcfw; + struct creq_create_qp1_resp resp = {}; + struct bnxt_qplib_cmdqmsg msg = {}; + struct bnxt_qplib_q *sq = &qp->sq; + struct bnxt_qplib_q *rq = &qp->rq; + struct cmdq_create_qp1 req = {}; + struct bnxt_qplib_reftbl *tbl; + unsigned long flag; + u8 pg_sz_lvl = 0; + u32 qp_flags = 0; + int rc; + + /* General */ + req.type = qp->type; + req.dpi = cpu_to_le32(qp->dpi->dpi); + req.qp_handle = cpu_to_le64(qp->qp_handle); + /* SQ */ + hwq_attr.res = res; + hwq_attr.sginfo = &sq->sginfo; + hwq_attr.stride = bnxt_qplib_get_stride(); + hwq_attr.depth = bnxt_qplib_get_depth(sq, qp->wqe_mode, true); + hwq_attr.type = HWQ_TYPE_QUEUE; + rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr); + if (rc) + goto exit; + + req.sq_size = cpu_to_le32(_set_sq_size(sq, qp->wqe_mode)); + sq->max_sw_wqe = _set_sq_size(sq, qp->wqe_mode); + req.sq_pbl = cpu_to_le64(_get_base_addr(&sq->hwq)); + pg_sz_lvl = _get_base_pg_size(&sq->hwq) << + CMDQ_CREATE_QP1_SQ_PG_SIZE_SFT; + pg_sz_lvl |= ((sq->hwq.level & CMDQ_CREATE_QP1_SQ_LVL_MASK) << + CMDQ_CREATE_QP1_SQ_LVL_SFT); + req.sq_pg_size_sq_lvl = pg_sz_lvl; + req.sq_fwo_sq_sge = cpu_to_le16(((0 << CMDQ_CREATE_QP1_SQ_FWO_SFT) & + CMDQ_CREATE_QP1_SQ_FWO_MASK) | + (sq->max_sge & + CMDQ_CREATE_QP1_SQ_SGE_MASK)); + req.scq_cid = cpu_to_le32(qp->scq->id); + + /* RQ */ + if (!qp->srq) { + hwq_attr.res = res; + hwq_attr.sginfo = &rq->sginfo; + hwq_attr.stride = bnxt_qplib_get_stride(); + hwq_attr.depth = bnxt_qplib_get_depth(rq, qp->wqe_mode, false); + hwq_attr.type = HWQ_TYPE_QUEUE; + rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr); + if (rc) + goto fail_sq; + req.rq_size = cpu_to_le32(rq->max_wqe); + req.rq_pbl = cpu_to_le64(_get_base_addr(&rq->hwq)); + pg_sz_lvl = _get_base_pg_size(&rq->hwq) << + CMDQ_CREATE_QP1_RQ_PG_SIZE_SFT; + pg_sz_lvl |= ((rq->hwq.level & CMDQ_CREATE_QP1_RQ_LVL_MASK) << + CMDQ_CREATE_QP1_RQ_LVL_SFT); + req.rq_pg_size_rq_lvl = pg_sz_lvl; + req.rq_fwo_rq_sge = + cpu_to_le16(((0 << CMDQ_CREATE_QP1_RQ_FWO_SFT) & + CMDQ_CREATE_QP1_RQ_FWO_MASK) | + (rq->max_sge & + CMDQ_CREATE_QP1_RQ_SGE_MASK)); + } else { + /* SRQ */ + qp_flags |= CMDQ_CREATE_QP1_QP_FLAGS_SRQ_USED; + req.srq_cid = cpu_to_le32(qp->srq->id); + } + req.rcq_cid = cpu_to_le32(qp->rcq->id); + + qp_flags |= CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE; + req.qp_flags = cpu_to_le32(qp_flags); + req.pd_id = cpu_to_le32(qp->pd->id); + + bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_CREATE_QP1, + sizeof(req)); + bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), + sizeof(resp), 0); + 
rc = bnxt_qplib_rcfw_send_message(rcfw, &msg); + if (rc) + goto fail_rq; + + /* Store xid immediately so that we can destroy QP on error path */ + qp->id = le32_to_cpu(resp.xid); + + rc = bnxt_qplib_alloc_init_swq(sq); + if (rc) + goto sq_swq; + + if (!qp->srq) { + rc = bnxt_qplib_alloc_init_swq(rq); + if (rc) + goto rq_swq; + } + + qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET; + qp->cctx = res->cctx; + sq->dbinfo.hwq = &sq->hwq; + sq->dbinfo.xid = qp->id; + sq->dbinfo.db = qp->dpi->dbr; + sq->dbinfo.max_slot = _set_sq_max_slot(qp->wqe_mode); + sq->dbinfo.flags = 0; + spin_lock_init(&sq->dbinfo.lock); + sq->dbinfo.shadow_key = BNXT_QPLIB_DBR_KEY_INVALID; + sq->dbinfo.res = res; + if (rq->max_wqe) { + rq->dbinfo.hwq = &rq->hwq; + rq->dbinfo.xid = qp->id; + rq->dbinfo.db = qp->dpi->dbr; + rq->dbinfo.max_slot = _set_rq_max_slot(rq); + rq->dbinfo.flags = 0; + spin_lock_init(&rq->dbinfo.lock); + rq->dbinfo.shadow_key = BNXT_QPLIB_DBR_KEY_INVALID; + rq->dbinfo.res = res; + } + + tbl = &res->reftbl.qpref; + spin_lock_irqsave(&tbl->lock, flag); + tbl->rec[tbl->max].xid = qp->id; + tbl->rec[tbl->max].handle = qp; + spin_unlock_irqrestore(&tbl->lock, flag); + + return 0; +rq_swq: + kfree(sq->swq); +sq_swq: + __qplib_destroy_qp(rcfw, qp); +fail_rq: + bnxt_qplib_free_hwq(res, &rq->hwq); +fail_sq: + bnxt_qplib_free_hwq(res, &sq->hwq); +exit: + return rc; +} + +static void bnxt_qplib_init_psn_ptr(struct bnxt_qplib_qp *qp, int size) +{ + struct bnxt_qplib_hwq *sq_hwq; + struct bnxt_qplib_q *sq; + u64 fpsne, psn_pg; + u16 indx_pad = 0; + + sq = &qp->sq; + sq_hwq = &sq->hwq; + /* First psn entry */ + fpsne = (u64)bnxt_qplib_get_qe(sq_hwq, sq_hwq->depth, &psn_pg); + if (!IS_ALIGNED(fpsne, PAGE_SIZE)) + indx_pad = (fpsne & ~PAGE_MASK) / size; + sq_hwq->pad_pgofft = indx_pad; + sq_hwq->pad_pg = (u64 *)psn_pg; + sq_hwq->pad_stride = size; +} + +int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) +{ + struct bnxt_qplib_hwq_attr hwq_attr = {}; + struct bnxt_qplib_rcfw *rcfw = res->rcfw; + struct bnxt_qplib_sg_info sginfo = {}; + struct creq_create_qp_resp resp = {}; + struct bnxt_qplib_cmdqmsg msg = {}; + struct bnxt_qplib_q *sq = &qp->sq; + struct bnxt_qplib_q *rq = &qp->rq; + struct cmdq_create_qp req = {}; + struct bnxt_qplib_reftbl *tbl; + struct bnxt_qplib_hwq *xrrq; + int rc, req_size, psn_sz; + unsigned long flag; + u8 pg_sz_lvl = 0; + u32 qp_flags = 0; + u8 cmd_size; + u32 qp_idx; + u16 nsge; + u32 sqsz; + + qp->cctx = res->cctx; + if (res->dattr) + qp->dev_cap_flags = res->dattr->dev_cap_flags; + /* General */ + req.type = qp->type; + req.dpi = cpu_to_le32(qp->dpi->dpi); + req.qp_handle = cpu_to_le64(qp->qp_handle); + + /* SQ */ + if (qp->type == CMDQ_CREATE_QP_TYPE_RC) { + psn_sz = _is_chip_gen_p5_p7(qp->cctx) ? + sizeof(struct sq_psn_search_ext) : + sizeof(struct sq_psn_search); + if (BNXT_RE_HW_RETX(qp->dev_cap_flags)) { + psn_sz = sizeof(struct sq_msn_search); + qp->msn = 0; + } + } else { + psn_sz = 0; + } + + hwq_attr.res = res; + hwq_attr.sginfo = &sq->sginfo; + hwq_attr.stride = bnxt_qplib_get_stride(); + hwq_attr.depth = bnxt_qplib_get_depth(sq, qp->wqe_mode, true); + hwq_attr.aux_stride = psn_sz; + hwq_attr.aux_depth = (psn_sz) ? 
+ _set_sq_size(sq, qp->wqe_mode) : 0; + /* Update msn tbl size */ + if (BNXT_RE_HW_RETX(qp->dev_cap_flags) && psn_sz) { + if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) + hwq_attr.aux_depth = roundup_pow_of_two(_set_sq_size(sq, qp->wqe_mode)); + else + hwq_attr.aux_depth = roundup_pow_of_two(_set_sq_size(sq, qp->wqe_mode)) / 2; + qp->msn_tbl_sz = hwq_attr.aux_depth; + qp->msn = 0; + } + hwq_attr.type = HWQ_TYPE_QUEUE; + rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr); + if (rc) + goto exit; + + sqsz = _set_sq_size(sq, qp->wqe_mode); + /* Initialize the max_sw_wqe same as the slot for variable size wqe */ + sq->max_sw_wqe = sqsz; + /* 0xffff is the max sq size hw limits to */ + if (sqsz > BNXT_QPLIB_MAX_SQSZ) { + pr_err("QPLIB: FP: QP (0x%x) exceeds sq size %d", qp->id, sqsz); + goto fail_sq; + } + req.sq_size = cpu_to_le32(sqsz); + req.sq_pbl = cpu_to_le64(_get_base_addr(&sq->hwq)); + pg_sz_lvl = _get_base_pg_size(&sq->hwq) << + CMDQ_CREATE_QP_SQ_PG_SIZE_SFT; + pg_sz_lvl |= ((sq->hwq.level & CMDQ_CREATE_QP_SQ_LVL_MASK) << + CMDQ_CREATE_QP_SQ_LVL_SFT); + req.sq_pg_size_sq_lvl = pg_sz_lvl; + req.sq_fwo_sq_sge = cpu_to_le16(((0 << CMDQ_CREATE_QP_SQ_FWO_SFT) & + CMDQ_CREATE_QP_SQ_FWO_MASK) | + (sq->max_sge & + CMDQ_CREATE_QP_SQ_SGE_MASK)); + req.scq_cid = cpu_to_le32(qp->scq->id); + + /* RQ/SRQ */ + if (!qp->srq) { + hwq_attr.res = res; + hwq_attr.sginfo = &rq->sginfo; + hwq_attr.stride = bnxt_qplib_get_stride(); + hwq_attr.depth = bnxt_qplib_get_depth(rq, qp->wqe_mode, false); + hwq_attr.aux_stride = 0; + hwq_attr.aux_depth = 0; + hwq_attr.type = HWQ_TYPE_QUEUE; + rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr); + if (rc) + goto fail_sq; + req.rq_size = cpu_to_le32(rq->max_wqe); + req.rq_pbl = cpu_to_le64(_get_base_addr(&rq->hwq)); + pg_sz_lvl = _get_base_pg_size(&rq->hwq) << + CMDQ_CREATE_QP_RQ_PG_SIZE_SFT; + pg_sz_lvl |= ((rq->hwq.level & CMDQ_CREATE_QP_RQ_LVL_MASK) << + CMDQ_CREATE_QP_RQ_LVL_SFT); + req.rq_pg_size_rq_lvl = pg_sz_lvl; + nsge = (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ? 
+ res->dattr->max_qp_sges : rq->max_sge; + if (qp->small_recv_wqe_sup) + nsge = rq->max_sge; + req.rq_fwo_rq_sge = + cpu_to_le16(((0 << CMDQ_CREATE_QP_RQ_FWO_SFT) & + CMDQ_CREATE_QP_RQ_FWO_MASK) | + (nsge & CMDQ_CREATE_QP_RQ_SGE_MASK)); + } else { + qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_SRQ_USED; + req.srq_cid = cpu_to_le32(qp->srq->id); + } + req.rcq_cid = cpu_to_le32(qp->rcq->id); + + qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE; + qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED; + if (qp->sig_type) + qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION; + if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE) + qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_VARIABLE_SIZED_WQE_ENABLED; + if (res->cctx->modes.te_bypass) + qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_OPTIMIZED_TRANSMIT_ENABLED; + if (res->dattr && + bnxt_ext_stats_supported(qp->cctx, res->dattr->dev_cap_flags, res->is_vf)) + qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_ENABLED; + req.qp_flags = cpu_to_le32(qp_flags); + + /* ORRQ and IRRQ */ + if (psn_sz) { + xrrq = &qp->orrq; + xrrq->max_elements = + ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic); + req_size = xrrq->max_elements * + BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE + PAGE_SIZE - 1; + req_size &= ~(PAGE_SIZE - 1); + sginfo.pgsize = req_size; + sginfo.pgshft = PAGE_SHIFT; + + hwq_attr.res = res; + hwq_attr.sginfo = &sginfo; + hwq_attr.depth = xrrq->max_elements; + hwq_attr.stride = BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE; + hwq_attr.aux_stride = 0; + hwq_attr.aux_depth = 0; + hwq_attr.type = HWQ_TYPE_CTX; + rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr); + if (rc) + goto fail_rq; + req.orrq_addr = cpu_to_le64(_get_base_addr(xrrq)); + + xrrq = &qp->irrq; + xrrq->max_elements = IRD_LIMIT_TO_IRRQ_SLOTS( + qp->max_dest_rd_atomic); + req_size = xrrq->max_elements * + BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE + PAGE_SIZE - 1; + req_size &= ~(PAGE_SIZE - 1); + sginfo.pgsize = req_size; + hwq_attr.depth = xrrq->max_elements; + hwq_attr.stride = BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE; + rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr); + if (rc) + goto fail_orrq; + req.irrq_addr = cpu_to_le64(_get_base_addr(xrrq)); + } + req.pd_id = cpu_to_le32(qp->pd->id); + + cmd_size = sizeof(req); + if (res->cctx->hwrm_intf_ver < HWRM_VERSION_ROCE_QP_EXT_STATS_CTX_ID_VALID) + cmd_size -= BNXT_RE_CREATE_QP_EXT_STAT_CONTEXT_SIZE; + + if (!_is_qp_exp_mode_supported(res)) + cmd_size -= BNXT_RE_EXP_MODE_ENABLED_CMD_SIZE_CREATE_QP; + + if (!_is_steering_tag_supported(res)) + cmd_size -= BNXT_RE_STEERING_TAG_SUPPORTED_CMD_SIZE_CREATE_QP; + + bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_CREATE_QP, + cmd_size); + bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, cmd_size, + sizeof(resp), 0); + rc = bnxt_qplib_rcfw_send_message(rcfw, &msg); + if (rc) + goto fail; + + /* Store xid immediately so that we can destroy QP on error path */ + qp->id = le32_to_cpu(resp.xid); + + if (!qp->is_user) { + rc = bnxt_qplib_alloc_init_swq(sq); + if (rc) + goto swq_sq; + if (!qp->srq) { + rc = bnxt_qplib_alloc_init_swq(rq); + if (rc) + goto swq_rq; + } + if (psn_sz) + bnxt_qplib_init_psn_ptr(qp, psn_sz); + } + + qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET; + INIT_LIST_HEAD(&qp->sq_flush); + INIT_LIST_HEAD(&qp->rq_flush); + + sq->dbinfo.hwq = &sq->hwq; + sq->dbinfo.xid = qp->id; + sq->dbinfo.db = qp->dpi->dbr; + sq->dbinfo.max_slot = _set_sq_max_slot(qp->wqe_mode); + sq->dbinfo.flags = 0; + spin_lock_init(&sq->dbinfo.lock); + sq->dbinfo.shadow_key = BNXT_QPLIB_DBR_KEY_INVALID; + sq->dbinfo.res = res; + sq->dbinfo.seed = qp->id; + if (rq->max_wqe) { + 
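+		/*
+		 * Editor's note (descriptive sketch, not part of the original
+		 * patch): the RQ doorbell context is set up only when the QP
+		 * owns its receive queue; when an SRQ is attached the RQ
+		 * hardware queue is not allocated above and rq->max_wqe is
+		 * expected to be zero.
+		 */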
rq->dbinfo.hwq = &rq->hwq; + rq->dbinfo.xid = qp->id; + rq->dbinfo.db = qp->dpi->dbr; + rq->dbinfo.max_slot = _set_rq_max_slot(rq); + rq->dbinfo.flags = 0; + spin_lock_init(&rq->dbinfo.lock); + rq->dbinfo.shadow_key = BNXT_QPLIB_DBR_KEY_INVALID; + rq->dbinfo.res = res; + rq->dbinfo.seed = qp->id; + } + + tbl = &res->reftbl.qpref; + qp_idx = map_qp_id_to_tbl_indx(qp->id, tbl); + spin_lock_irqsave(&tbl->lock, flag); + tbl->rec[qp_idx].xid = qp->id; + tbl->rec[qp_idx].handle = qp; + spin_unlock_irqrestore(&tbl->lock, flag); + + return 0; +swq_rq: + kfree(sq->swq); +swq_sq: + __qplib_destroy_qp(rcfw, qp); +fail: + bnxt_qplib_free_hwq(res, &qp->irrq); +fail_orrq: + bnxt_qplib_free_hwq(res, &qp->orrq); +fail_rq: + bnxt_qplib_free_hwq(res, &rq->hwq); +fail_sq: + bnxt_qplib_free_hwq(res, &sq->hwq); +exit: + return rc; +} + +static void __filter_modify_flags(struct bnxt_qplib_qp *qp) +{ + switch (qp->cur_qp_state) { + case CMDQ_MODIFY_QP_NEW_STATE_RESET: + switch (qp->state) { + case CMDQ_MODIFY_QP_NEW_STATE_INIT: + break; + default: + break; + } + break; + case CMDQ_MODIFY_QP_NEW_STATE_INIT: + switch (qp->state) { + case CMDQ_MODIFY_QP_NEW_STATE_RTR: + /* INIT->RTR, configure the path_mtu to the default + 2048 if not being requested */ + if (!(qp->modify_flags & + CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)) { + qp->modify_flags |= + CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU; + qp->path_mtu = CMDQ_MODIFY_QP_PATH_MTU_MTU_2048; + } + qp->modify_flags &= + ~CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID; + /* Bono FW requires the max_dest_rd_atomic to be >= 1 */ + if (qp->max_dest_rd_atomic < 1) + qp->max_dest_rd_atomic = 1; + /* TODO: Bono FW 0.0.12.0+ does not allow SRC_MAC + modification */ + qp->modify_flags &= ~CMDQ_MODIFY_QP_MODIFY_MASK_SRC_MAC; + /* Bono FW 20.6.5 requires SGID_INDEX to be configured */ + if (!(qp->modify_flags & + CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)) { + qp->modify_flags |= + CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX; + qp->ah.sgid_index = 0; + } + break; + default: + break; + } + break; + case CMDQ_MODIFY_QP_NEW_STATE_RTR: + switch (qp->state) { + case CMDQ_MODIFY_QP_NEW_STATE_RTS: + /* Bono FW requires the max_rd_atomic to be >= 1 */ + if (qp->max_rd_atomic < 1) + qp->max_rd_atomic = 1; + /* TODO: Bono FW 0.0.12.0+ does not allow PKEY_INDEX, + DGID, FLOW_LABEL, SGID_INDEX, HOP_LIMIT, + TRAFFIC_CLASS, DEST_MAC, PATH_MTU, RQ_PSN, + MIN_RNR_TIMER, MAX_DEST_RD_ATOMIC, DEST_QP_ID + modification */ + qp->modify_flags &= + ~(CMDQ_MODIFY_QP_MODIFY_MASK_PKEY | + CMDQ_MODIFY_QP_MODIFY_MASK_DGID | + CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL | + CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX | + CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT | + CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS | + CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC | + CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU | + CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN | + CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER | + CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC | + CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID); + break; + default: + break; + } + break; + case CMDQ_MODIFY_QP_NEW_STATE_RTS: + break; + case CMDQ_MODIFY_QP_NEW_STATE_SQD: + break; + case CMDQ_MODIFY_QP_NEW_STATE_SQE: + break; + case CMDQ_MODIFY_QP_NEW_STATE_ERR: + break; + default: + break; + } +} + +int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) +{ + struct bnxt_qplib_rcfw *rcfw = res->rcfw; + struct creq_modify_qp_resp resp = {}; + struct bnxt_qplib_cmdqmsg msg = {}; + struct cmdq_modify_qp req = {}; + bool ppp_requested = false; + u32 temp32[4]; + u8 cmd_size; + u32 bmask; + int rc; + + /* Filter out the 
qp_attr_mask based on the state->new transition */ + __filter_modify_flags(qp); + bmask = qp->modify_flags; + req.modify_mask = cpu_to_le32(qp->modify_flags); + req.qp_cid = cpu_to_le32(qp->id); + if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_STATE) { + req.network_type_en_sqd_async_notify_new_state = + (qp->state & CMDQ_MODIFY_QP_NEW_STATE_MASK) | + (qp->en_sqd_async_notify == true ? + CMDQ_MODIFY_QP_EN_SQD_ASYNC_NOTIFY : 0); + if (__can_request_ppp(qp)) { + req.path_mtu_pingpong_push_enable = + CMDQ_MODIFY_QP_PINGPONG_PUSH_ENABLE; + req.pingpong_push_dpi = qp->ppp.dpi; + ppp_requested = true; + } + } + req.network_type_en_sqd_async_notify_new_state |= qp->nw_type; + + if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS) { + req.access = qp->access; + } + if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY) + req.pkey = cpu_to_le16(IB_DEFAULT_PKEY_FULL); + + if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_QKEY) { + req.qkey = cpu_to_le32(qp->qkey); + } + if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DGID) { + memcpy(temp32, qp->ah.dgid.data, sizeof(struct bnxt_qplib_gid)); + req.dgid[0] = cpu_to_le32(temp32[0]); + req.dgid[1] = cpu_to_le32(temp32[1]); + req.dgid[2] = cpu_to_le32(temp32[2]); + req.dgid[3] = cpu_to_le32(temp32[3]); + } + if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL) { + req.flow_label = cpu_to_le32(qp->ah.flow_label); + } + if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX) { + req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id[qp->ah.sgid_index]); + } + if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT) { + req.hop_limit = qp->ah.hop_limit; + } + if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS) { + req.traffic_class = qp->ah.traffic_class; + } + if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC) { + memcpy(req.dest_mac, qp->ah.dmac, 6); + } + if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU) { + req.path_mtu_pingpong_push_enable = qp->path_mtu; + } + if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT) { + req.timeout = qp->timeout; + } + if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT) { + req.retry_cnt = qp->retry_cnt; + } + if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY) { + req.rnr_retry = qp->rnr_retry; + } + if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER) { + req.min_rnr_timer = qp->min_rnr_timer; + } + if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN) { + req.rq_psn = cpu_to_le32(qp->rq.psn); + } + if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN) { + req.sq_psn = cpu_to_le32(qp->sq.psn); + } + if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC) { + req.max_rd_atomic = + ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic); + } + if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC) { + req.max_dest_rd_atomic = + IRD_LIMIT_TO_IRRQ_SLOTS(qp->max_dest_rd_atomic); + } + req.sq_size = cpu_to_le32(qp->sq.hwq.depth); + req.rq_size = cpu_to_le32(qp->rq.hwq.depth); + req.sq_sge = cpu_to_le16(qp->sq.max_sge); + req.rq_sge = cpu_to_le16(qp->rq.max_sge); + req.max_inline_data = cpu_to_le32(qp->max_inline_data); + if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID) + req.dest_qp_id = cpu_to_le32(qp->dest_qpn); + if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_ENABLE_CC) + req.enable_cc = cpu_to_le16(CMDQ_MODIFY_QP_ENABLE_CC); + if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TOS_ECN) + req.tos_dscp_tos_ecn = + ((qp->tos_ecn << CMDQ_MODIFY_QP_TOS_ECN_SFT) & + CMDQ_MODIFY_QP_TOS_ECN_MASK); + if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TOS_DSCP) + req.tos_dscp_tos_ecn |= + ((qp->tos_dscp << CMDQ_MODIFY_QP_TOS_DSCP_SFT) & + CMDQ_MODIFY_QP_TOS_DSCP_MASK); + req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id); + 
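+	/*
+	 * Editor's note (descriptive sketch, not part of the original patch):
+	 * the command length sent to firmware is trimmed below when the
+	 * interface version predates the extended-stats context id, or when
+	 * steering tags are not supported, so older firmware never sees the
+	 * trailing request fields.
+	 */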
bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), + sizeof(resp), 0); + msg.qp_state = qp->state; + + cmd_size = sizeof(req); + if (res->cctx->hwrm_intf_ver < HWRM_VERSION_ROCE_QP_EXT_STATS_CTX_ID_VALID) + cmd_size -= BNXT_RE_MODIFY_QP_EXT_STAT_CONTEXT_SIZE; + if (!_is_steering_tag_supported(res)) + cmd_size -= BNXT_RE_STEERING_TAG_SUPPORTED_CMD_SIZE_MODIFY_QP; + + bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_MODIFY_QP, + cmd_size); + rc = bnxt_qplib_rcfw_send_message(rcfw, &msg); + if (rc == -ETIMEDOUT && (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR)) { + qp->cur_qp_state = qp->state; + return 0; + } else if (rc) { + return rc; + } + if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_RTR) + qp->lag_src_mac = be32_to_cpu(resp.lag_src_mac); + + if (ppp_requested) + qp->ppp.st_idx_en = resp.pingpong_push_state_index_enabled; + + qp->cur_qp_state = qp->state; + return 0; +} + +int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) +{ + struct bnxt_qplib_rcfw *rcfw = res->rcfw; + struct creq_query_qp_resp resp = {}; + struct bnxt_qplib_cmdqmsg msg = {}; + struct bnxt_qplib_rcfw_sbuf sbuf; + struct creq_query_qp_resp_sb *sb; + struct cmdq_query_qp req = {}; + u32 temp32[4]; + int i, rc; + + sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS); + sbuf.sb = dma_zalloc_coherent(&rcfw->pdev->dev, sbuf.size, + &sbuf.dma_addr, GFP_KERNEL); + if (!sbuf.sb) + return -ENOMEM; + sb = sbuf.sb; + + bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_QUERY_QP, + sizeof(req)); + req.qp_cid = cpu_to_le32(qp->id); + req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS; + bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req), + sizeof(resp), 0); + rc = bnxt_qplib_rcfw_send_message(rcfw, &msg); + if (rc) + goto bail; + + /* Extract the context from the side buffer */ + qp->state = sb->en_sqd_async_notify_state & + CREQ_QUERY_QP_RESP_SB_STATE_MASK; + qp->cur_qp_state = qp->state; + qp->en_sqd_async_notify = sb->en_sqd_async_notify_state & + CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY ? 
+ true : false; + qp->access = sb->access; + qp->pkey_index = le16_to_cpu(sb->pkey); + qp->qkey = le32_to_cpu(sb->qkey); + + temp32[0] = le32_to_cpu(sb->dgid[0]); + temp32[1] = le32_to_cpu(sb->dgid[1]); + temp32[2] = le32_to_cpu(sb->dgid[2]); + temp32[3] = le32_to_cpu(sb->dgid[3]); + memcpy(qp->ah.dgid.data, temp32, sizeof(qp->ah.dgid.data)); + + qp->ah.flow_label = le32_to_cpu(sb->flow_label); + + qp->ah.sgid_index = 0; + for (i = 0; i < res->sgid_tbl.max; i++) { + if (res->sgid_tbl.hw_id[i] == le16_to_cpu(sb->sgid_index)) { + qp->ah.sgid_index = i; + break; + } + } + if (i == res->sgid_tbl.max) + dev_dbg(&res->pdev->dev, + "QPLIB: SGID not found qp->id = 0x%x sgid_index = 0x%x\n", + qp->id, le16_to_cpu(sb->sgid_index)); + + qp->ah.hop_limit = sb->hop_limit; + qp->ah.traffic_class = sb->traffic_class; + memcpy(qp->ah.dmac, sb->dest_mac, ETH_ALEN); + qp->ah.vlan_id = le16_to_cpu(sb->path_mtu_dest_vlan_id) & + CREQ_QUERY_QP_RESP_SB_VLAN_ID_MASK >> + CREQ_QUERY_QP_RESP_SB_VLAN_ID_SFT; + qp->path_mtu = le16_to_cpu(sb->path_mtu_dest_vlan_id) & + CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK; + qp->timeout = sb->timeout; + qp->retry_cnt = sb->retry_cnt; + qp->rnr_retry = sb->rnr_retry; + qp->min_rnr_timer = sb->min_rnr_timer; + qp->rq.psn = le32_to_cpu(sb->rq_psn); + qp->max_rd_atomic = ORRQ_SLOTS_TO_ORD_LIMIT(sb->max_rd_atomic); + qp->sq.psn = le32_to_cpu(sb->sq_psn); + qp->max_dest_rd_atomic = + IRRQ_SLOTS_TO_IRD_LIMIT(sb->max_dest_rd_atomic); + qp->sq.max_wqe = qp->sq.hwq.max_elements; + qp->rq.max_wqe = qp->rq.hwq.max_elements; + qp->sq.max_sge = le16_to_cpu(sb->sq_sge); + qp->rq.max_sge = le16_to_cpu(sb->rq_sge); + qp->max_inline_data = le32_to_cpu(sb->max_inline_data); + qp->dest_qpn = le32_to_cpu(sb->dest_qp_id); + memcpy(qp->smac, sb->src_mac, ETH_ALEN); + qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id); + qp->port_id = le16_to_cpu(sb->port_id); +bail: + dma_free_coherent(&rcfw->pdev->dev, sbuf.size, + sbuf.sb, sbuf.dma_addr); + return rc; +} + +static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp) +{ + struct bnxt_qplib_hwq *cq_hwq = &cq->hwq; + u32 peek_flags, peek_cons; + struct cq_base *hw_cqe; + int i; + + peek_flags = cq->dbinfo.flags; + peek_cons = cq_hwq->cons; + for (i = 0; i < cq_hwq->depth; i++) { + hw_cqe = bnxt_qplib_get_qe(cq_hwq, peek_cons, NULL); + if (CQE_CMP_VALID(hw_cqe, peek_flags)) { + dma_rmb(); + switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) { + case CQ_BASE_CQE_TYPE_REQ: + case CQ_BASE_CQE_TYPE_TERMINAL: + { + struct cq_req *cqe = (struct cq_req *)hw_cqe; + + if (qp == le64_to_cpu(cqe->qp_handle)) + cqe->qp_handle = 0; + break; + } + case CQ_BASE_CQE_TYPE_RES_RC: + case CQ_BASE_CQE_TYPE_RES_UD: + case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1: + { + struct cq_res_rc *cqe = (struct cq_res_rc *)hw_cqe; + + if (qp == le64_to_cpu(cqe->qp_handle)) + cqe->qp_handle = 0; + break; + } + default: + break; + } + } + bnxt_qplib_hwq_incr_cons(cq_hwq->depth, &peek_cons, + 1, &peek_flags); + } +} + +#ifdef ENABLE_FP_SPINLOCK +static unsigned long bnxt_qplib_lock_cqs(struct bnxt_qplib_qp *qp) +{ + unsigned long flags; + + spin_lock_irqsave(&qp->scq->hwq.lock, flags); + if (qp->rcq && qp->rcq != qp->scq) + spin_lock(&qp->rcq->hwq.lock); + + return flags; +} + +static void bnxt_qplib_unlock_cqs(struct bnxt_qplib_qp *qp, + unsigned long flags) +{ + if (qp->rcq && qp->rcq != qp->scq) + spin_unlock(&qp->rcq->hwq.lock); + spin_unlock_irqrestore(&qp->scq->hwq.lock, flags); +} +#endif + +int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res, + struct bnxt_qplib_qp *qp) +{ + struct 
bnxt_qplib_rcfw *rcfw = res->rcfw; + struct bnxt_qplib_reftbl *tbl; + unsigned long flags; + u32 qp_idx; + + tbl = &res->reftbl.qpref; + qp_idx = map_qp_id_to_tbl_indx(qp->id, tbl); + spin_lock_irqsave(&tbl->lock, flags); + tbl->rec[qp_idx].xid = BNXT_QPLIB_QP_ID_INVALID; + tbl->rec[qp_idx].handle = NULL; + spin_unlock_irqrestore(&tbl->lock, flags); + + return __qplib_destroy_qp(rcfw, qp); +} + +void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res, + struct bnxt_qplib_qp *qp) +{ + if (qp->irrq.max_elements) + bnxt_qplib_free_hwq(res, &qp->irrq); + if (qp->orrq.max_elements) + bnxt_qplib_free_hwq(res, &qp->orrq); + + if (!qp->is_user) + kfree(qp->rq.swq); + bnxt_qplib_free_hwq(res, &qp->rq.hwq); + + if (!qp->is_user) + kfree(qp->sq.swq); + bnxt_qplib_free_hwq(res, &qp->sq.hwq); +} + +void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp, + struct bnxt_qplib_sge *sge) +{ + struct bnxt_qplib_q *sq = &qp->sq; + struct bnxt_qplib_hdrbuf *buf; + u32 sw_prod; + + memset(sge, 0, sizeof(*sge)); + + buf = qp->sq_hdr_buf; + if (buf) { + sw_prod = sq->swq_start; + sge->addr = (dma_addr_t)(buf->dma_map + sw_prod * buf->step); + sge->lkey = 0xFFFFFFFF; + sge->size = buf->step; + return buf->va + sw_prod * sge->size; + } + return NULL; +} + +u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp) +{ + struct bnxt_qplib_q *rq = &qp->rq; + + return rq->swq_start; +} + +void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp, + struct bnxt_qplib_sge *sge) +{ + struct bnxt_qplib_q *rq = &qp->rq; + struct bnxt_qplib_hdrbuf *buf; + u32 sw_prod; + + memset(sge, 0, sizeof(*sge)); + + buf = qp->rq_hdr_buf; + if (buf) { + sw_prod = rq->swq_start; + sge->addr = (dma_addr_t)(buf->dma_map + sw_prod * buf->step); + sge->lkey = 0xFFFFFFFF; + sge->size = buf->step; + return buf->va + sw_prod * sge->size; + } + return NULL; +} + +/* Fill the MSN table into the next psn row */ +static void bnxt_qplib_fill_msn_search(struct bnxt_qplib_qp *qp, + struct bnxt_qplib_swqe *wqe, + struct bnxt_qplib_swq *swq) +{ + struct sq_msn_search *msns; + u32 start_psn, next_psn; + u16 start_idx; + + msns = (struct sq_msn_search *)swq->psn_search; + msns->start_idx_next_psn_start_psn = 0; + + start_psn = swq->start_psn; + next_psn = swq->next_psn; + start_idx = swq->slot_idx; + msns->start_idx_next_psn_start_psn |= + bnxt_re_update_msn_tbl(start_idx, next_psn, start_psn); + pr_debug("QP_LIB MSN %d START_IDX %u NEXT_PSN %u START_PSN %u", + qp->msn, + (u16) + cpu_to_le16(BNXT_RE_MSN_IDX(msns->start_idx_next_psn_start_psn)), + (u32) + cpu_to_le32(BNXT_RE_MSN_NPSN(msns->start_idx_next_psn_start_psn)), + (u32) + cpu_to_le32(BNXT_RE_MSN_SPSN(msns->start_idx_next_psn_start_psn))); + qp->msn++; + qp->msn %= qp->msn_tbl_sz; +} + +static void bnxt_qplib_fill_psn_search(struct bnxt_qplib_qp *qp, + struct bnxt_qplib_swqe *wqe, + struct bnxt_qplib_swq *swq) +{ + struct sq_psn_search_ext *psns_ext; + struct sq_psn_search *psns; + u32 flg_npsn; + u32 op_spsn; + + if (!swq->psn_search) + return; + + /* Handle MSN differently on cap flags */ + if (BNXT_RE_HW_RETX(qp->dev_cap_flags)) { + bnxt_qplib_fill_msn_search(qp, wqe, swq); + return; + } + psns = (struct sq_psn_search *)swq->psn_search; + psns_ext = (struct sq_psn_search_ext *)swq->psn_search; + + op_spsn = ((swq->start_psn << SQ_PSN_SEARCH_START_PSN_SFT) & + SQ_PSN_SEARCH_START_PSN_MASK); + op_spsn |= ((wqe->type << SQ_PSN_SEARCH_OPCODE_SFT) & + SQ_PSN_SEARCH_OPCODE_MASK); + flg_npsn = ((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) & + SQ_PSN_SEARCH_NEXT_PSN_MASK); + + if
(_is_chip_gen_p5_p7(qp->cctx)) { + psns_ext->opcode_start_psn = cpu_to_le32(op_spsn); + psns_ext->flags_next_psn = cpu_to_le32(flg_npsn); + psns_ext->start_slot_idx = cpu_to_le16(swq->slot_idx); + } else { + psns->opcode_start_psn = cpu_to_le32(op_spsn); + psns->flags_next_psn = cpu_to_le32(flg_npsn); + } +} + +static u16 _calc_ilsize(struct bnxt_qplib_swqe *wqe) +{ + u16 size = 0; + int indx; + + for (indx = 0; indx < wqe->num_sge; indx++) + size += wqe->sg_list[indx].size; + return size; +} + +static int bnxt_qplib_put_inline(struct bnxt_qplib_qp *qp, + struct bnxt_qplib_swqe *wqe, + u32 *sw_prod) +{ + struct bnxt_qplib_hwq *sq_hwq; + int len, t_len, offt = 0; + int t_cplen = 0, cplen; + bool pull_dst = true; + void *il_dst = NULL; + void *il_src = NULL; + int indx; + + sq_hwq = &qp->sq.hwq; + t_len = 0; + for (indx = 0; indx < wqe->num_sge; indx++) { + len = wqe->sg_list[indx].size; + il_src = (void *)wqe->sg_list[indx].addr; + t_len += len; + if (t_len > qp->max_inline_data) + return -ENOMEM; + while (len) { + if (pull_dst) { + pull_dst = false; + il_dst = bnxt_qplib_get_qe(sq_hwq, ((*sw_prod) % + sq_hwq->depth), NULL); + (*sw_prod)++; + t_cplen = 0; + offt = 0; + } + cplen = min_t(int, len, sizeof(struct sq_sge)); + cplen = min_t(int, cplen, + (sizeof(struct sq_sge) - offt)); + memcpy(il_dst, il_src, cplen); + t_cplen += cplen; + il_src += cplen; + il_dst += cplen; + offt += cplen; + len -= cplen; + if (t_cplen == sizeof(struct sq_sge)) + pull_dst = true; + } + } + + return t_len; +} + +static int bnxt_qplib_put_sges(struct bnxt_qplib_hwq *sq_hwq, + struct bnxt_qplib_sge *ssge, + u32 nsge, u32 *sw_prod) +{ + struct sq_sge *dsge; + int indx, len = 0; + + for (indx = 0; indx < nsge; indx++, (*sw_prod)++) { + dsge = bnxt_qplib_get_qe(sq_hwq, ((*sw_prod) % sq_hwq->depth), NULL); + dsge->va_or_pa = cpu_to_le64(ssge[indx].addr); + dsge->l_key = cpu_to_le32(ssge[indx].lkey); + dsge->size = cpu_to_le32(ssge[indx].size); + len += ssge[indx].size; +#ifdef ENABLE_DEBUG_SGE + dev_dbg(&sq_hwq->pdev->dev, + "QPLIB: FP: va/pa=0x%llx lkey=0x%x size=0x%x", + dsge->va_or_pa, dsge->l_key, dsge->size); +#endif + + } + return len; +} + +static u16 _calculate_wqe_byte(struct bnxt_qplib_qp *qp, + struct bnxt_qplib_swqe *wqe, u16 *wqe_byte) +{ + u16 wqe_size; + u32 ilsize; + u16 nsge; + + nsge = wqe->num_sge; + if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE) { + ilsize = _calc_ilsize(wqe); + wqe_size = (ilsize > qp->max_inline_data) ? + qp->max_inline_data : ilsize; + wqe_size = ALIGN(wqe_size, sizeof(struct sq_sge)); + } else { + wqe_size = nsge * sizeof(struct sq_sge); + } + /* Adding sq_send_hdr is a misnomer, for rq also hdr size is same. */ + wqe_size += sizeof(struct sq_send_hdr); + if (wqe_byte) + *wqe_byte = wqe_size; + return wqe_size / sizeof(struct sq_sge); +} + +static u16 _translate_q_full_delta(struct bnxt_qplib_q *que, u16 wqe_bytes) +{ + /* For Cu/Wh delta = 128, stride = 16, wqe_bytes = 128 + * For Gen-p5 B/C mode delta = 0, stride = 16, wqe_bytes = 128. + * For Gen-p5 delta = 0, stride = 16, 32 <= wqe_bytes <= 512. + * when 8916 is disabled. 
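+ * For example (illustrative, using the Cu/Wh numbers above): the returned delta is 128 * 128 / 16 = 1024 slots, while a Gen-p5 delta of 0 translates to 0 slots.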
+ */ + return (que->q_full_delta * wqe_bytes) / que->hwq.element_size; +} + +static void bnxt_qplib_pull_psn_buff(struct bnxt_qplib_qp *qp, struct bnxt_qplib_q *sq, + struct bnxt_qplib_swq *swq, bool hw_retx) +{ + struct bnxt_qplib_hwq *sq_hwq; + u32 pg_num, pg_indx; + void *buff; + u32 tail; + + sq_hwq = &sq->hwq; + if (!sq_hwq->pad_pg) + return; + + tail = swq->slot_idx / sq->dbinfo.max_slot; + if (hw_retx) { + /* For HW retx use qp msn index */ + tail = qp->msn; + tail %= qp->msn_tbl_sz; + } + pg_num = (tail + sq_hwq->pad_pgofft) / (PAGE_SIZE / sq_hwq->pad_stride); + pg_indx = (tail + sq_hwq->pad_pgofft) % (PAGE_SIZE / sq_hwq->pad_stride); + buff = (void *)(sq_hwq->pad_pg[pg_num] + pg_indx * sq_hwq->pad_stride); + /* the start ptr for buff is same ie after the SQ */ + swq->psn_search = buff; +} + +void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp) +{ + struct bnxt_qplib_q *sq = &qp->sq; + + bnxt_qplib_ring_prod_db(&sq->dbinfo, DBC_DBC_TYPE_SQ); +} + +int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp, + struct bnxt_qplib_swqe *wqe) +{ + struct bnxt_qplib_nq_work *nq_work = NULL; + int i, rc = 0, data_len = 0, pkt_num = 0; + struct bnxt_qplib_q *sq = &qp->sq; + struct bnxt_qplib_hwq *sq_hwq; + struct bnxt_qplib_swq *swq; + bool sch_handler = false; +#ifdef ENABLE_FP_SPINLOCK + unsigned long flags; +#endif + u16 slots_needed; + void *base_hdr; + bool msn_update; + void *ext_hdr; + __le32 temp32; + u16 qfd_slots; + u8 wqe_slots; + u16 wqe_size; + u32 sw_prod; + u32 wqe_idx; + + sq_hwq = &sq->hwq; +#ifdef ENABLE_FP_SPINLOCK + spin_lock_irqsave(&sq_hwq->lock, flags); +#endif + + if (qp->state != CMDQ_MODIFY_QP_NEW_STATE_RTS && + qp->state != CMDQ_MODIFY_QP_NEW_STATE_ERR) { + dev_err(&sq_hwq->pdev->dev, + "QPLIB: FP: QP (0x%x) is in the 0x%x state", + qp->id, qp->state); + rc = -EINVAL; + goto done; + } + + wqe_slots = _calculate_wqe_byte(qp, wqe, &wqe_size); + slots_needed = (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ? + sq->dbinfo.max_slot : wqe_slots; + qfd_slots = _translate_q_full_delta(sq, wqe_size); + if (bnxt_qplib_queue_full(sq_hwq, (slots_needed + qfd_slots))) { + dev_err(&sq_hwq->pdev->dev, + "QPLIB: FP: QP (0x%x) SQ is full!", qp->id); + dev_err(&sq_hwq->pdev->dev, + "QPLIB: prod = %#x cons = %#x qdepth = %#x delta = %#x slots = %#x", + HWQ_CMP(sq_hwq->prod, sq_hwq), + HWQ_CMP(sq_hwq->cons, sq_hwq), + sq_hwq->max_elements, qfd_slots, slots_needed); + dev_err(&sq_hwq->pdev->dev, + "QPLIB: phantom_wqe_cnt: %d phantom_cqe_cnt: %d\n", + sq->phantom_wqe_cnt, sq->phantom_cqe_cnt); + rc = -ENOMEM; + goto done; + } + + sw_prod = sq_hwq->prod; + swq = bnxt_qplib_get_swqe(sq, &wqe_idx); + swq->slot_idx = sw_prod; + bnxt_qplib_pull_psn_buff(qp, sq, swq, BNXT_RE_HW_RETX(qp->dev_cap_flags)); + + swq->wr_id = wqe->wr_id; + swq->type = wqe->type; + swq->flags = wqe->flags; + swq->slots = slots_needed; + swq->start_psn = sq->psn & BTH_PSN_MASK; + if (qp->sig_type || wqe->flags & BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP) + swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP; + + dev_dbg(&sq_hwq->pdev->dev, + "QPLIB: FP: QP(0x%x) post SQ wr_id[%d] = 0x%llx", + qp->id, wqe_idx, swq->wr_id); + if (qp->cur_qp_state == CMDQ_MODIFY_QP_NEW_STATE_ERR) { + sch_handler = true; + dev_dbg(&sq_hwq->pdev->dev, + "%s Error QP. 
Scheduling for poll_cq\n", __func__); + goto queue_err; + } + + base_hdr = bnxt_qplib_get_qe(sq_hwq, sw_prod, NULL); + sw_prod++; + ext_hdr = bnxt_qplib_get_qe(sq_hwq, (sw_prod % sq_hwq->depth), NULL); + sw_prod++; + memset(base_hdr, 0, sizeof(struct sq_sge)); + memset(ext_hdr, 0, sizeof(struct sq_sge)); + + if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE) + data_len = bnxt_qplib_put_inline(qp, wqe, &sw_prod); + else + data_len = bnxt_qplib_put_sges(sq_hwq, wqe->sg_list, + wqe->num_sge, &sw_prod); + if (data_len < 0) + goto queue_err; + /* Make sure we update MSN table only for wired wqes */ + msn_update = true; + /* Specifics */ + switch (wqe->type) { + case BNXT_QPLIB_SWQE_TYPE_SEND: + if (qp->type == CMDQ_CREATE_QP_TYPE_RAW_ETHERTYPE || + qp->type == CMDQ_CREATE_QP1_TYPE_GSI) { + /* Assemble info for Raw Ethertype QPs */ + struct sq_send_raweth_qp1_hdr *sqe = base_hdr; + struct sq_raw_ext_hdr *ext_sqe = ext_hdr; + + sqe->wqe_type = wqe->type; + sqe->flags = wqe->flags; + sqe->wqe_size = wqe_slots; + sqe->cfa_action = cpu_to_le16(wqe->rawqp1.cfa_action); + sqe->lflags = cpu_to_le16(wqe->rawqp1.lflags); + sqe->length = cpu_to_le32(data_len); + ext_sqe->cfa_meta = cpu_to_le32((wqe->rawqp1.cfa_meta & + SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_MASK) << + SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_SFT); + + dev_dbg(&sq_hwq->pdev->dev, + "QPLIB: FP: RAW/QP1 Send WQE:\n" + "\twqe_type = 0x%x\n" + "\tflags = 0x%x\n" + "\twqe_size = 0x%x\n" + "\tlflags = 0x%x\n" + "\tcfa_action = 0x%x\n" + "\tlength = 0x%x\n" + "\tcfa_meta = 0x%x", + sqe->wqe_type, sqe->flags, sqe->wqe_size, + sqe->lflags, sqe->cfa_action, + sqe->length, ext_sqe->cfa_meta); + break; + } + fallthrough; + case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM: + fallthrough; + case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV: + { + struct sq_send_hdr *sqe = base_hdr; + struct sq_ud_ext_hdr *ext_sqe = ext_hdr; + + sqe->wqe_type = wqe->type; + sqe->flags = wqe->flags; + sqe->wqe_size = wqe_slots; + sqe->inv_key_or_imm_data = cpu_to_le32(wqe->send.inv_key); + if (qp->type == CMDQ_CREATE_QP_TYPE_UD || + qp->type == CMDQ_CREATE_QP_TYPE_GSI) { + sqe->q_key = cpu_to_le32(wqe->send.q_key); + sqe->length = cpu_to_le32(data_len); + ext_sqe->dst_qp = cpu_to_le32( + wqe->send.dst_qp & SQ_SEND_DST_QP_MASK); + ext_sqe->avid = cpu_to_le32(wqe->send.avid & + SQ_SEND_AVID_MASK); + sq->psn = (sq->psn + 1) & BTH_PSN_MASK; + msn_update = false; + } else { + sqe->length = cpu_to_le32(data_len); + if (qp->mtu) + pkt_num = (data_len + qp->mtu - 1) / qp->mtu; + if (!pkt_num) + pkt_num = 1; + sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK; + } + dev_dbg(&sq_hwq->pdev->dev, + "QPLIB: FP: Send WQE:\n" + "\twqe_type = 0x%x\n" + "\tflags = 0x%x\n" + "\twqe_size = 0x%x\n" + "\tinv_key/immdata = 0x%x\n" + "\tq_key = 0x%x\n" + "\tdst_qp = 0x%x\n" + "\tlength = 0x%x\n" + "\tavid = 0x%x", + sqe->wqe_type, sqe->flags, sqe->wqe_size, + sqe->inv_key_or_imm_data, sqe->q_key, ext_sqe->dst_qp, + sqe->length, ext_sqe->avid); + break; + } + case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE: + /* fall-thru */ + case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM: + /* fall-thru */ + case BNXT_QPLIB_SWQE_TYPE_RDMA_READ: + { + struct sq_rdma_hdr *sqe = base_hdr; + struct sq_rdma_ext_hdr *ext_sqe = ext_hdr; + + sqe->wqe_type = wqe->type; + sqe->flags = wqe->flags; + sqe->wqe_size = wqe_slots; + sqe->imm_data = cpu_to_le32(wqe->rdma.inv_key); + sqe->length = cpu_to_le32((u32)data_len); + ext_sqe->remote_va = cpu_to_le64(wqe->rdma.remote_va); + ext_sqe->remote_key = cpu_to_le32(wqe->rdma.r_key); + if (qp->mtu) + pkt_num = (data_len + 
qp->mtu - 1) / qp->mtu; + if (!pkt_num) + pkt_num = 1; + sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK; + + dev_dbg(&sq_hwq->pdev->dev, + "QPLIB: FP: RDMA WQE:\n" + "\twqe_type = 0x%x\n" + "\tflags = 0x%x\n" + "\twqe_size = 0x%x\n" + "\timmdata = 0x%x\n" + "\tlength = 0x%x\n" + "\tremote_va = 0x%llx\n" + "\tremote_key = 0x%x", + sqe->wqe_type, sqe->flags, sqe->wqe_size, + sqe->imm_data, sqe->length, ext_sqe->remote_va, + ext_sqe->remote_key); + break; + } + case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP: + /* fall-thru */ + case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD: + { + struct sq_atomic_hdr *sqe = base_hdr; + struct sq_atomic_ext_hdr *ext_sqe = ext_hdr; + + sqe->wqe_type = wqe->type; + sqe->flags = wqe->flags; + sqe->remote_key = cpu_to_le32(wqe->atomic.r_key); + sqe->remote_va = cpu_to_le64(wqe->atomic.remote_va); + ext_sqe->swap_data = cpu_to_le64(wqe->atomic.swap_data); + ext_sqe->cmp_data = cpu_to_le64(wqe->atomic.cmp_data); + if (qp->mtu) + pkt_num = (data_len + qp->mtu - 1) / qp->mtu; + if (!pkt_num) + pkt_num = 1; + sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK; + break; + } + case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV: + { + struct sq_localinvalidate_hdr *sqe = base_hdr; + + sqe->wqe_type = wqe->type; + sqe->flags = wqe->flags; + sqe->inv_l_key = cpu_to_le32(wqe->local_inv.inv_l_key); + + dev_dbg(&sq_hwq->pdev->dev, + "QPLIB: FP: LOCAL INV WQE:\n" + "\twqe_type = 0x%x\n" + "\tflags = 0x%x\n" + "\tinv_l_key = 0x%x", + sqe->wqe_type, sqe->flags, sqe->inv_l_key); + msn_update = false; + break; + } + case BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR: + { + struct sq_fr_pmr_hdr *sqe = base_hdr; + struct sq_fr_pmr_ext_hdr *ext_sqe = ext_hdr; + + sqe->wqe_type = wqe->type; + sqe->flags = wqe->flags; + sqe->access_cntl = wqe->frmr.access_cntl | + SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE; + sqe->zero_based_page_size_log = + (wqe->frmr.pg_sz_log & SQ_FR_PMR_PAGE_SIZE_LOG_MASK) << + SQ_FR_PMR_PAGE_SIZE_LOG_SFT | + (wqe->frmr.zero_based == true ? 
SQ_FR_PMR_ZERO_BASED : 0); + sqe->l_key = cpu_to_le32(wqe->frmr.l_key); + /* TODO: OFED only provides length of MR up to 32-bits for FRMR */ + temp32 = cpu_to_le32(wqe->frmr.length); + memcpy(sqe->length, &temp32, sizeof(wqe->frmr.length)); + sqe->numlevels_pbl_page_size_log = + ((wqe->frmr.pbl_pg_sz_log << + SQ_FR_PMR_PBL_PAGE_SIZE_LOG_SFT) & + SQ_FR_PMR_PBL_PAGE_SIZE_LOG_MASK) | + ((wqe->frmr.levels << SQ_FR_PMR_NUMLEVELS_SFT) & + SQ_FR_PMR_NUMLEVELS_MASK); + if (!wqe->frmr.levels && !wqe->frmr.pbl_ptr) { + ext_sqe->pblptr = cpu_to_le64(wqe->frmr.page_list[0]); + } else { + for (i = 0; i < wqe->frmr.page_list_len; i++) + wqe->frmr.pbl_ptr[i] = cpu_to_le64( + wqe->frmr.page_list[i] | + PTU_PTE_VALID); + ext_sqe->pblptr = cpu_to_le64(wqe->frmr.pbl_dma_ptr); + } + ext_sqe->va = cpu_to_le64(wqe->frmr.va); + dev_dbg(&sq_hwq->pdev->dev, + "QPLIB: FP: FRMR WQE:\n" + "\twqe_type = 0x%x\n" + "\tflags = 0x%x\n" + "\taccess_cntl = 0x%x\n" + "\tzero_based_page_size_log = 0x%x\n" + "\tl_key = 0x%x\n" + "\tlength = 0x%x\n" + "\tnumlevels_pbl_page_size_log = 0x%x\n" + "\tpblptr = 0x%llx\n" + "\tva = 0x%llx", + sqe->wqe_type, sqe->flags, sqe->access_cntl, + sqe->zero_based_page_size_log, sqe->l_key, + *(u32 *)sqe->length, sqe->numlevels_pbl_page_size_log, + ext_sqe->pblptr, ext_sqe->va); + msn_update = false; + break; + } + case BNXT_QPLIB_SWQE_TYPE_BIND_MW: + { + struct sq_bind_hdr *sqe = base_hdr; + struct sq_bind_ext_hdr *ext_sqe = ext_hdr; + + sqe->wqe_type = wqe->type; + sqe->flags = wqe->flags; + sqe->access_cntl = wqe->bind.access_cntl; + sqe->mw_type_zero_based = wqe->bind.mw_type | + (wqe->bind.zero_based == true ? SQ_BIND_ZERO_BASED : 0); + sqe->parent_l_key = cpu_to_le32(wqe->bind.parent_l_key); + sqe->l_key = cpu_to_le32(wqe->bind.r_key); + ext_sqe->va = cpu_to_le64(wqe->bind.va); + ext_sqe->length_lo = cpu_to_le32(wqe->bind.length); + dev_dbg(&sq_hwq->pdev->dev, + "QPLIB: FP: BIND WQE:\n" + "\twqe_type = 0x%x\n" + "\tflags = 0x%x\n" + "\taccess_cntl = 0x%x\n" + "\tmw_type_zero_based = 0x%x\n" + "\tparent_l_key = 0x%x\n" + "\tl_key = 0x%x\n" + "\tva = 0x%llx\n" + "\tlength = 0x%x", + sqe->wqe_type, sqe->flags, sqe->access_cntl, + sqe->mw_type_zero_based, sqe->parent_l_key, + sqe->l_key, sqe->va, ext_sqe->length_lo); + msn_update = false; + break; + } + default: + /* Bad wqe, return error */ + rc = -EINVAL; + goto done; + } + /* + * Ensure we update MSN table only for wired WQEs only. 
+ * Free entry for all other NICS psn, psn_ext + */ + if (!BNXT_RE_HW_RETX(qp->dev_cap_flags) || msn_update) { + swq->next_psn = sq->psn & BTH_PSN_MASK; + bnxt_qplib_fill_psn_search(qp, wqe, swq); + } + +#ifdef ENABLE_DEBUG_SGE + for (i = 0, hw_sge = (struct sq_sge *)hw_sq_send_hdr->data; + i < wqe->num_sge; i++, hw_sge++) + dev_dbg(&sq_hwq->pdev->dev, + "QPLIB: FP: va/pa=0x%llx lkey=0x%x size=0x%x", + hw_sge->va_or_pa, hw_sge->l_key, hw_sge->size); +#endif +queue_err: + bnxt_qplib_swq_mod_start(sq, wqe_idx); + bnxt_qplib_hwq_incr_prod(&sq->dbinfo, sq_hwq, swq->slots); + qp->wqe_cnt++; +done: +#ifdef ENABLE_FP_SPINLOCK + spin_unlock_irqrestore(&sq_hwq->lock, flags); +#endif + + if (sch_handler) { + nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC); + if (nq_work) { + nq_work->cq = qp->scq; + nq_work->nq = qp->scq->nq; + INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task); + queue_work(qp->scq->nq->cqn_wq, &nq_work->work); + } else { + dev_err(&sq->hwq.pdev->dev, + "QPLIB: FP: Failed to allocate SQ nq_work!"); + rc = -ENOMEM; + } + } + return rc; +} + +void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp) +{ + struct bnxt_qplib_q *rq = &qp->rq; + + bnxt_qplib_ring_prod_db(&rq->dbinfo, DBC_DBC_TYPE_RQ); +} + +void bnxt_re_handle_cqn(struct bnxt_qplib_cq *cq) +{ + struct bnxt_qplib_nq *nq; + + if (!(cq && cq->nq)) + return; + + nq = cq->nq; + spin_lock_bh(&cq->compl_lock); + if (nq->cqn_handler) { + dev_dbg(&nq->res->pdev->dev, + "%s:Trigger cq = %p event nq = %p\n", + __func__, cq, nq); + nq->cqn_handler(nq, cq); + } + spin_unlock_bh(&cq->compl_lock); +} + +int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp, + struct bnxt_qplib_swqe *wqe) +{ + struct bnxt_qplib_nq_work *nq_work = NULL; + struct bnxt_qplib_q *rq = &qp->rq; + struct bnxt_qplib_hwq *rq_hwq; + struct bnxt_qplib_swq *swq; + bool sch_handler = false; + struct rq_wqe_hdr *base_hdr; + struct rq_ext_hdr *ext_hdr; + struct sq_sge *dsge; +#ifdef ENABLE_FP_SPINLOCK + unsigned long flags; +#endif + u8 wqe_slots; + u32 wqe_idx; + u32 sw_prod; + int rc = 0; + + rq_hwq = &rq->hwq; +#ifdef ENABLE_FP_SPINLOCK + spin_lock_irqsave(&rq_hwq->lock, flags); +#endif + if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_RESET) { + dev_err(&rq_hwq->pdev->dev, + "QPLIB: FP: QP (0x%x) is in the 0x%x state", + qp->id, qp->state); + rc = -EINVAL; + goto done; + } + + wqe_slots = _calculate_wqe_byte(qp, wqe, NULL); + if (bnxt_qplib_queue_full(rq_hwq, rq->dbinfo.max_slot)) { + dev_err(&rq_hwq->pdev->dev, + "QPLIB: FP: QP (0x%x) RQ is full!", qp->id); + rc = -EINVAL; + goto done; + } + + swq = bnxt_qplib_get_swqe(rq, &wqe_idx); + swq->wr_id = wqe->wr_id; + swq->slots = rq->dbinfo.max_slot; + dev_dbg(&rq_hwq->pdev->dev, + "QPLIB: FP: post RQ wr_id[%d] = 0x%llx", + wqe_idx, swq->wr_id); + if (qp->cur_qp_state == CMDQ_MODIFY_QP_NEW_STATE_ERR) { + sch_handler = true; + dev_dbg(&rq_hwq->pdev->dev, "%s Error QP. 
Sched a flushed cmpl\n", + __func__); + goto queue_err; + } + + sw_prod = rq_hwq->prod; + base_hdr = bnxt_qplib_get_qe(rq_hwq, sw_prod, NULL); + sw_prod++; + ext_hdr = bnxt_qplib_get_qe(rq_hwq, (sw_prod % rq_hwq->depth), NULL); + sw_prod++; + memset(base_hdr, 0, sizeof(struct sq_sge)); + memset(ext_hdr, 0, sizeof(struct sq_sge)); + + if (!wqe->num_sge) { + dsge = bnxt_qplib_get_qe(rq_hwq, (sw_prod % rq_hwq->depth), NULL); + dsge->size = 0; + wqe_slots++; + } else { + bnxt_qplib_put_sges(rq_hwq, wqe->sg_list, wqe->num_sge, &sw_prod); + } + base_hdr->wqe_type = wqe->type; + base_hdr->flags = wqe->flags; + base_hdr->wqe_size = wqe_slots; + base_hdr->wr_id[0] = cpu_to_le32(wqe_idx); +queue_err: + bnxt_qplib_swq_mod_start(rq, wqe_idx); + bnxt_qplib_hwq_incr_prod(&rq->dbinfo, &rq->hwq, swq->slots); +done: +#ifdef ENABLE_FP_SPINLOCK + spin_unlock_irqrestore(&rq->hwq.lock, flags); +#endif + if (sch_handler) { + nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC); + if (nq_work) { + nq_work->cq = qp->rcq; + nq_work->nq = qp->rcq->nq; + INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task); + queue_work(qp->rcq->nq->cqn_wq, &nq_work->work); + } else { + dev_err(&rq->hwq.pdev->dev, + "QPLIB: FP: Failed to allocate RQ nq_work!"); + rc = -ENOMEM; + } + } + return rc; +} + +/* CQ */ +int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq) +{ + struct bnxt_qplib_hwq_attr hwq_attr = {}; + struct bnxt_qplib_rcfw *rcfw = res->rcfw; + struct creq_create_cq_resp resp = {}; + struct bnxt_qplib_cmdqmsg msg = {}; + struct cmdq_create_cq req = {}; + struct bnxt_qplib_reftbl *tbl; + unsigned long flag; + u32 pg_sz_lvl = 0; + u8 cmd_size; + int rc; + + if (!cq->dpi) { + dev_err(&rcfw->pdev->dev, + "QPLIB: FP: CREATE_CQ failed due to NULL DPI"); + return -EINVAL; + } + + hwq_attr.res = res; + hwq_attr.depth = cq->max_wqe; + hwq_attr.stride = sizeof(struct cq_base); + hwq_attr.type = HWQ_TYPE_QUEUE; + hwq_attr.sginfo = &cq->sginfo; + rc = bnxt_qplib_alloc_init_hwq(&cq->hwq, &hwq_attr); + if (rc) + return rc; + + cmd_size = sizeof(req); + if (!_is_steering_tag_supported(res)) + cmd_size -= BNXT_RE_STEERING_TAG_SUPPORTED_CMD_SIZE; + + bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_CREATE_CQ, + cmd_size); + req.dpi = cpu_to_le32(cq->dpi->dpi); + req.cq_handle = cpu_to_le64(cq->cq_handle); + if (res->cctx->modes.hdbr_enabled) + req.flags |= + cpu_to_le16(CMDQ_CREATE_CQ_FLAGS_DISABLE_CQ_OVERFLOW_DETECTION); + + req.cq_size = cpu_to_le32(cq->max_wqe); + req.pbl = cpu_to_le64(_get_base_addr(&cq->hwq)); + pg_sz_lvl = _get_base_pg_size(&cq->hwq) << CMDQ_CREATE_CQ_PG_SIZE_SFT; + pg_sz_lvl |= ((cq->hwq.level & CMDQ_CREATE_CQ_LVL_MASK) << + CMDQ_CREATE_CQ_LVL_SFT); + req.pg_size_lvl = cpu_to_le32(pg_sz_lvl); + + req.cq_fco_cnq_id = cpu_to_le32( + (cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) << + CMDQ_CREATE_CQ_CNQ_ID_SFT); + bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, cmd_size, + sizeof(resp), 0); + rc = bnxt_qplib_rcfw_send_message(rcfw, &msg); + if (rc) + goto fail; + cq->id = le32_to_cpu(resp.xid); + cq->period = BNXT_QPLIB_QUEUE_START_PERIOD; + init_waitqueue_head(&cq->waitq); + INIT_LIST_HEAD(&cq->sqf_head); + INIT_LIST_HEAD(&cq->rqf_head); + spin_lock_init(&cq->flush_lock); + spin_lock_init(&cq->compl_lock); + + /* init dbinfo */ + cq->cctx = res->cctx; + cq->dbinfo.hwq = &cq->hwq; + cq->dbinfo.xid = cq->id; + cq->dbinfo.db = cq->dpi->dbr; + cq->dbinfo.priv_db = res->dpi_tbl.priv_db; + cq->dbinfo.flags = 0; + cq->dbinfo.toggle = 0; + cq->dbinfo.res = res; + cq->dbinfo.seed = cq->id; + 
spin_lock_init(&cq->dbinfo.lock); + cq->dbinfo.shadow_key = BNXT_QPLIB_DBR_KEY_INVALID; + cq->dbinfo.shadow_key_arm_ena = BNXT_QPLIB_DBR_KEY_INVALID; + + tbl = &res->reftbl.cqref; + spin_lock_irqsave(&tbl->lock, flag); + tbl->rec[GET_TBL_INDEX(cq->id, tbl)].xid = cq->id; + tbl->rec[GET_TBL_INDEX(cq->id, tbl)].handle = cq; + spin_unlock_irqrestore(&tbl->lock, flag); + + bnxt_qplib_armen_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMENA); + return 0; + +fail: + bnxt_qplib_free_hwq(res, &cq->hwq); + return rc; +} + +int bnxt_qplib_modify_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq) +{ + /* TODO: Modify CQ threshold are passed to the HW via DBR */ + return 0; +} + +void bnxt_qplib_resize_cq_complete(struct bnxt_qplib_res *res, + struct bnxt_qplib_cq *cq) +{ + bnxt_qplib_free_hwq(res, &cq->hwq); + memcpy(&cq->hwq, &cq->resize_hwq, sizeof(cq->hwq)); + /* Reset only the cons bit in the flags */ + cq->dbinfo.flags &= ~(1UL << BNXT_QPLIB_FLAG_EPOCH_CONS_SHIFT); + + /* Tell HW to switch over to the new CQ */ + if (!cq->resize_hwq.is_user) + bnxt_qplib_cq_coffack_db(&cq->dbinfo); +} + +int bnxt_qplib_resize_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq, + int new_cqes) +{ + struct bnxt_qplib_hwq_attr hwq_attr = {}; + struct bnxt_qplib_rcfw *rcfw = res->rcfw; + struct creq_resize_cq_resp resp = {}; + struct bnxt_qplib_cmdqmsg msg = {}; + struct cmdq_resize_cq req = {}; + u32 pgsz = 0, lvl = 0, nsz = 0; + struct bnxt_qplib_pbl *pbl; + u16 count = -1; + int rc; + + bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_RESIZE_CQ, + sizeof(req)); + + hwq_attr.sginfo = &cq->sginfo; + hwq_attr.res = res; + hwq_attr.depth = new_cqes; + hwq_attr.stride = sizeof(struct cq_base); + hwq_attr.type = HWQ_TYPE_QUEUE; + rc = bnxt_qplib_alloc_init_hwq(&cq->resize_hwq, &hwq_attr); + if (rc) + return rc; + + dev_dbg(&rcfw->pdev->dev, "QPLIB: FP: %s: pbl_lvl: %d\n", __func__, + cq->resize_hwq.level); + req.cq_cid = cpu_to_le32(cq->id); + pbl = &cq->resize_hwq.pbl[PBL_LVL_0]; + pgsz = ((pbl->pg_size == ROCE_PG_SIZE_4K ? CMDQ_RESIZE_CQ_PG_SIZE_PG_4K : + pbl->pg_size == ROCE_PG_SIZE_8K ? CMDQ_RESIZE_CQ_PG_SIZE_PG_8K : + pbl->pg_size == ROCE_PG_SIZE_64K ? CMDQ_RESIZE_CQ_PG_SIZE_PG_64K : + pbl->pg_size == ROCE_PG_SIZE_2M ? CMDQ_RESIZE_CQ_PG_SIZE_PG_2M : + pbl->pg_size == ROCE_PG_SIZE_8M ? CMDQ_RESIZE_CQ_PG_SIZE_PG_8M : + pbl->pg_size == ROCE_PG_SIZE_1G ? 
CMDQ_RESIZE_CQ_PG_SIZE_PG_1G : + CMDQ_RESIZE_CQ_PG_SIZE_PG_4K) & CMDQ_RESIZE_CQ_PG_SIZE_MASK); + lvl = (cq->resize_hwq.level << CMDQ_RESIZE_CQ_LVL_SFT) & + CMDQ_RESIZE_CQ_LVL_MASK; + nsz = (new_cqes << CMDQ_RESIZE_CQ_NEW_CQ_SIZE_SFT) & + CMDQ_RESIZE_CQ_NEW_CQ_SIZE_MASK; + req.new_cq_size_pg_size_lvl = cpu_to_le32(nsz|pgsz|lvl); + req.new_pbl = cpu_to_le64(pbl->pg_map_arr[0]); + + if (!cq->resize_hwq.is_user) + set_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags); + + bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), + sizeof(resp), 0); + rc = bnxt_qplib_rcfw_send_message(rcfw, &msg); + if (rc) + goto fail; + + if (!cq->resize_hwq.is_user) { +wait: + /* Wait here for the HW to switch the CQ over */ + if (wait_event_interruptible_timeout(cq->waitq, + !test_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags), + msecs_to_jiffies(CQ_RESIZE_WAIT_TIME_MS)) == + -ERESTARTSYS && count--) + goto wait; + + if (test_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags)) { + dev_err(&rcfw->pdev->dev, + "QPLIB: FP: RESIZE_CQ timed out"); + rc = -ETIMEDOUT; + goto fail; + } + + bnxt_qplib_resize_cq_complete(res, cq); + } + + return 0; +fail: + if (!cq->resize_hwq.is_user) { + bnxt_qplib_free_hwq(res, &cq->resize_hwq); + clear_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags); + } + return rc; +} + +void bnxt_qplib_free_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq) +{ + bnxt_qplib_free_hwq(res, &cq->hwq); +} + +static void bnxt_qplib_sync_cq(struct bnxt_qplib_cq *cq) +{ + struct bnxt_qplib_nq *nq = cq->nq; + /* Flush any pending work and synchronize irq */ + flush_workqueue(cq->nq->cqn_wq); + mutex_lock(&nq->lock); + if (nq->requested) + synchronize_irq(nq->msix_vec); + mutex_unlock(&nq->lock); +} + +int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq) +{ + struct bnxt_qplib_rcfw *rcfw = res->rcfw; + struct creq_destroy_cq_resp resp = {}; + struct bnxt_qplib_cmdqmsg msg = {}; + struct cmdq_destroy_cq req = {}; + struct bnxt_qplib_reftbl *tbl; + u16 total_cnq_events; + unsigned long flag; + int rc; + + tbl = &res->reftbl.cqref; + spin_lock_irqsave(&tbl->lock, flag); + tbl->rec[GET_TBL_INDEX(cq->id, tbl)].handle = NULL; + tbl->rec[GET_TBL_INDEX(cq->id, tbl)].xid = 0; + spin_unlock_irqrestore(&tbl->lock, flag); + + bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_DESTROY_CQ, + sizeof(req)); + + req.cq_cid = cpu_to_le32(cq->id); + bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), + sizeof(resp), 0); + rc = bnxt_qplib_rcfw_send_message(rcfw, &msg); + if (rc) + return rc; + + total_cnq_events = le16_to_cpu(resp.total_cnq_events); + dev_dbg(&rcfw->pdev->dev, + "%s: cq_id = 0x%x cq = 0x%p resp.total_cnq_events = 0x%x\n", + __func__, cq->id, cq, total_cnq_events); + __wait_for_all_nqes(cq, total_cnq_events); + bnxt_qplib_sync_cq(cq); + bnxt_qplib_free_hwq(res, &cq->hwq); + return 0; +} + +static int __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp, + struct bnxt_qplib_cqe **pcqe, int *budget) +{ + struct bnxt_qplib_cqe *cqe; + u32 start, last; + int rc = 0; + + /* Now complete all outstanding SQEs with FLUSHED_ERR */ + start = sq->swq_start; + cqe = *pcqe; + while (*budget) { + last = sq->swq_last; + if (start == last) { + break; + } + /* Skip the FENCE WQE completions */ + if (sq->swq[last].wr_id == BNXT_QPLIB_FENCE_WRID) { + bnxt_re_legacy_cancel_phantom_processing(qp); + goto skip_compl; + } + + memset(cqe, 0, sizeof(*cqe)); + cqe->status = CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR; + cqe->opcode = CQ_BASE_CQE_TYPE_REQ; + cqe->qp_handle = (u64)qp; + cqe->wr_id = sq->swq[last].wr_id; 
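+ /* The fabricated flush CQE reuses the original wr_id together with the FLUSHED_ERR status set above, so the consumer can identify which work request was flushed. */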
+ cqe->src_qp = qp->id; + cqe->type = sq->swq[last].type; + dev_dbg(&sq->hwq.pdev->dev, + "QPLIB: FP: CQ Processed terminal Req "); + dev_dbg(&sq->hwq.pdev->dev, + "QPLIB: wr_id[%d] = 0x%llx with status 0x%x", + last, cqe->wr_id, cqe->status); + cqe++; + (*budget)--; +skip_compl: + bnxt_qplib_hwq_incr_cons(sq->hwq.depth, + &sq->hwq.cons, + sq->swq[last].slots, + &sq->dbinfo.flags); + sq->swq_last = sq->swq[last].next_idx; + } + *pcqe = cqe; + if (!*budget && sq->swq_last != start) + /* Out of budget */ + rc = -EAGAIN; + dev_dbg(&sq->hwq.pdev->dev, "QPLIB: FP: Flush SQ rc = 0x%x", rc); + + return rc; +} + +static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp, + struct bnxt_qplib_cqe **pcqe, int *budget) +{ + struct bnxt_qplib_cqe *cqe; + u32 start, last; + int opcode = 0; + int rc = 0; + + switch (qp->type) { + case CMDQ_CREATE_QP1_TYPE_GSI: + opcode = CQ_BASE_CQE_TYPE_RES_RAWETH_QP1; + break; + case CMDQ_CREATE_QP_TYPE_RC: + opcode = CQ_BASE_CQE_TYPE_RES_RC; + break; + case CMDQ_CREATE_QP_TYPE_UD: + opcode = CQ_BASE_CQE_TYPE_RES_UD; + break; + } + + /* Flush the rest of the RQ */ + start = rq->swq_start; + cqe = *pcqe; + while (*budget) { + last = rq->swq_last; + if (last == start) + break; + memset(cqe, 0, sizeof(*cqe)); + cqe->status = + CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR; + cqe->opcode = opcode; + cqe->qp_handle = (u64)qp; + cqe->wr_id = rq->swq[last].wr_id; + dev_dbg(&rq->hwq.pdev->dev, "QPLIB: FP: CQ Processed Res RC "); + dev_dbg(&rq->hwq.pdev->dev, + "QPLIB: rq[%d] = 0x%llx with status 0x%x", + last, cqe->wr_id, cqe->status); + cqe++; + (*budget)--; + bnxt_qplib_hwq_incr_cons(rq->hwq.depth, + &rq->hwq.cons, + rq->swq[last].slots, + &rq->dbinfo.flags); + rq->swq_last = rq->swq[last].next_idx; + } + *pcqe = cqe; + if (!*budget && rq->swq_last != start) + /* Out of budget */ + rc = -EAGAIN; + + dev_dbg(&rq->hwq.pdev->dev, "QPLIB: FP: Flush RQ rc = 0x%x", rc); + return rc; +} + +void bnxt_qplib_mark_qp_error(void *qp_handle) +{ + struct bnxt_qplib_qp *qp = qp_handle; + + if (!qp) + return; + + /* Must block new posting of SQ and RQ */ + qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_ERR; + qp->state = qp->cur_qp_state; + + /* Add qp to flush list of the CQ */ + if (!qp->is_user) + bnxt_qplib_add_flush_qp(qp); +} + +/* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive) + * CQE is track from sw_cq_cons to max_element but valid only if VALID=1 + */ +static int bnxt_re_legacy_do_wa9060(struct bnxt_qplib_qp *qp, + struct bnxt_qplib_cq *cq, + u32 cq_cons, u32 swq_last, + u32 cqe_sq_cons) +{ + struct bnxt_qplib_q *sq = &qp->sq; + struct bnxt_qplib_swq *swq; + u32 peek_sw_cq_cons, peek_sq_cons_idx, peek_flags; + struct cq_terminal *peek_term_hwcqe; + struct cq_req *peek_req_hwcqe; + struct bnxt_qplib_qp *peek_qp; + struct bnxt_qplib_q *peek_sq; + struct cq_base *peek_hwcqe; + int i, rc = 0; + + /* Check for the psn_search marking before completing */ + swq = &sq->swq[swq_last]; + if (swq->psn_search && + le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) { + /* Unmark */ + swq->psn_search->flags_next_psn = cpu_to_le32 + (le32_to_cpu(swq->psn_search->flags_next_psn) + & ~0x80000000); + dev_dbg(&cq->hwq.pdev->dev, + "FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n", + cq_cons, qp->id, swq_last, cqe_sq_cons); + sq->condition = true; + sq->legacy_send_phantom = true; + + /* TODO: Only ARM if the previous SQE is ARMALL */ + bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMALL); + + rc = -EAGAIN; + goto out; + } + if 
(sq->condition == true) { + /* Peek at the completions */ + peek_flags = cq->dbinfo.flags; + peek_sw_cq_cons = cq_cons; + i = cq->hwq.depth; + while (i--) { + peek_hwcqe = bnxt_qplib_get_qe(&cq->hwq, + peek_sw_cq_cons, NULL); + /* If the next hwcqe is VALID */ + if (CQE_CMP_VALID(peek_hwcqe, peek_flags)) { + /* If the next hwcqe is a REQ */ + dma_rmb(); + switch (peek_hwcqe->cqe_type_toggle & + CQ_BASE_CQE_TYPE_MASK) { + case CQ_BASE_CQE_TYPE_REQ: + peek_req_hwcqe = (struct cq_req *) + peek_hwcqe; + peek_qp = (struct bnxt_qplib_qp *) + le64_to_cpu( + peek_req_hwcqe->qp_handle); + peek_sq = &peek_qp->sq; + peek_sq_cons_idx = + ((le16_to_cpu( + peek_req_hwcqe->sq_cons_idx) + - 1) % sq->max_wqe); + /* If the hwcqe's sq's wr_id matches */ + if (peek_sq == sq && + sq->swq[peek_sq_cons_idx].wr_id == + BNXT_QPLIB_FENCE_WRID) { + /* Unbreak only if the phantom + comes back */ + dev_dbg(&cq->hwq.pdev->dev, + "FP: Process Req qp=0x%x current sq cons sw=0x%x cqe=0x%x", + qp->id, swq_last, + cqe_sq_cons); + sq->condition = false; + sq->single = true; + sq->phantom_cqe_cnt++; + dev_dbg(&cq->hwq.pdev->dev, + "qp %#x condition restored at peek cq_cons=%#x sq_cons_idx %#x, phantom_cqe_cnt: %d unmark\n", + peek_qp->id, + peek_sw_cq_cons, + peek_sq_cons_idx, + sq->phantom_cqe_cnt); + rc = 0; + goto out; + } + break; + + case CQ_BASE_CQE_TYPE_TERMINAL: + /* In case the QP has gone into the + error state */ + peek_term_hwcqe = (struct cq_terminal *) + peek_hwcqe; + peek_qp = (struct bnxt_qplib_qp *) + le64_to_cpu( + peek_term_hwcqe->qp_handle); + if (peek_qp == qp) { + sq->condition = false; + rc = 0; + goto out; + } + break; + default: + break; + } + /* Valid but not the phantom, so keep looping */ + } else { + /* Not valid yet, just exit and wait */ + rc = -EINVAL; + goto out; + } + bnxt_qplib_hwq_incr_cons(cq->hwq.depth, + &peek_sw_cq_cons, + 1, &peek_flags); + } + dev_err(&cq->hwq.pdev->dev, + "Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x", + cq_cons, qp->id, swq_last, cqe_sq_cons); + rc = -EINVAL; + } +out: + return rc; +} + +static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq, + struct cq_req *hwcqe, + struct bnxt_qplib_cqe **pcqe, int *budget, + u32 cq_cons, struct bnxt_qplib_qp **lib_qp) +{ + struct bnxt_qplib_qp *qp; + struct bnxt_qplib_q *sq; + struct bnxt_qplib_cqe *cqe; + u32 cqe_sq_cons; +#ifdef ENABLE_FP_SPINLOCK + unsigned long flags; +#endif + struct bnxt_qplib_swq *swq; + int rc = 0; + + qp = (struct bnxt_qplib_qp *)le64_to_cpu(hwcqe->qp_handle); + dev_dbg(&cq->hwq.pdev->dev, "FP: Process Req qp=0x%p", qp); + if (!qp) { + dev_err(&cq->hwq.pdev->dev, + "QPLIB: FP: Process Req qp is NULL"); + return -EINVAL; + } + sq = &qp->sq; + + cqe_sq_cons = le16_to_cpu(hwcqe->sq_cons_idx) % sq->max_sw_wqe; +#ifdef ENABLE_FP_SPINLOCK + spin_lock_irqsave(&sq->hwq.lock, flags); +#endif + if (qp->sq.flushed) { + dev_dbg(&cq->hwq.pdev->dev, + "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp); + goto done; + } + + /* Require to walk the sq's swq to fabricate CQEs for all previously + * signaled SWQEs due to CQE aggregation from the current sq cons + * to the cqe_sq_cons + */ + cqe = *pcqe; + while (*budget) { + if (sq->swq_last == cqe_sq_cons) + /* Done */ + break; + + swq = &sq->swq[sq->swq_last]; + memset(cqe, 0, sizeof(*cqe)); + cqe->opcode = CQ_BASE_CQE_TYPE_REQ; + cqe->qp_handle = (u64)qp; + cqe->src_qp = qp->id; + cqe->wr_id = swq->wr_id; + + if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID) + goto skip; + + cqe->type = swq->type; + + /* For the last CQE, check for status. 
For errors, regardless + * of the request being signaled or not, it must complete with + * the hwcqe error status + */ + if (swq->next_idx == cqe_sq_cons && + hwcqe->status != CQ_REQ_STATUS_OK) { + cqe->status = hwcqe->status; + dev_err(&cq->hwq.pdev->dev, + "QPLIB: FP: CQ Processed Req "); + dev_err(&cq->hwq.pdev->dev, + "QPLIB: QP 0x%x wr_id[%d] = 0x%llx vendor type 0x%x with vendor status 0x%x", + cqe->src_qp, sq->swq_last, cqe->wr_id, cqe->type, cqe->status); + cqe++; + (*budget)--; + bnxt_qplib_mark_qp_error(qp); + } else { + /* Before we complete, do WA 9060 */ + if (!_is_chip_gen_p5_p7(qp->cctx)) { + if (bnxt_re_legacy_do_wa9060(qp, cq, cq_cons, + sq->swq_last, + cqe_sq_cons)) { + *lib_qp = qp; + goto out; + } + } + if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) { + + dev_dbg(&cq->hwq.pdev->dev, + "QPLIB: FP: CQ Processed Req "); + dev_dbg(&cq->hwq.pdev->dev, + "QPLIB: wr_id[%d] = 0x%llx ", + sq->swq_last, cqe->wr_id); + dev_dbg(&cq->hwq.pdev->dev, + "QPLIB: with status 0x%x", cqe->status); + cqe->status = CQ_REQ_STATUS_OK; + cqe++; + (*budget)--; + } + } +skip: + bnxt_qplib_hwq_incr_cons(sq->hwq.depth, &sq->hwq.cons, + swq->slots, &sq->dbinfo.flags); + sq->swq_last = swq->next_idx; + if (sq->single == true) + break; + } +out: + *pcqe = cqe; + if (sq->swq_last != cqe_sq_cons) { + /* Out of budget */ + rc = -EAGAIN; + goto done; + } + /* Back to normal completion mode only after it has completed all of + the WC for this CQE */ + sq->single = false; +done: +#ifdef ENABLE_FP_SPINLOCK + spin_unlock_irqrestore(&sq->hwq.lock, flags); +#endif + return rc; +} + +static void bnxt_qplib_release_srqe(struct bnxt_qplib_srq *srq, u32 tag) +{ + spin_lock(&srq->hwq.lock); + srq->swq[srq->last_idx].next_idx = (int)tag; + srq->last_idx = (int)tag; + srq->swq[srq->last_idx].next_idx = -1; + bnxt_qplib_hwq_incr_cons(srq->hwq.depth, &srq->hwq.cons, + srq->dbinfo.max_slot, &srq->dbinfo.flags); + spin_unlock(&srq->hwq.lock); +} + +static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq, + struct cq_res_rc *hwcqe, + struct bnxt_qplib_cqe **pcqe, + int *budget) +{ + struct bnxt_qplib_srq *srq; + struct bnxt_qplib_cqe *cqe; + struct bnxt_qplib_qp *qp; + struct bnxt_qplib_q *rq; +#ifdef ENABLE_FP_SPINLOCK + unsigned long flags; +#endif + u32 wr_id_idx; + int rc = 0; + + qp = (struct bnxt_qplib_qp *)le64_to_cpu(hwcqe->qp_handle); + if (!qp) { + dev_err(&cq->hwq.pdev->dev, "QPLIB: process_cq RC qp is NULL"); + return -EINVAL; + } + if (qp->rq.flushed) { + dev_dbg(&cq->hwq.pdev->dev, + "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp); + goto done; + } + + cqe = *pcqe; + cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK; + cqe->length = le32_to_cpu(hwcqe->length); + cqe->invrkey = le32_to_cpu(hwcqe->imm_data_or_inv_r_key); + cqe->mr_handle = le64_to_cpu(hwcqe->mr_handle); + cqe->flags = le16_to_cpu(hwcqe->flags); + cqe->status = hwcqe->status; + cqe->qp_handle = (u64)(unsigned long)qp; + + wr_id_idx = le32_to_cpu(hwcqe->srq_or_rq_wr_id) & + CQ_RES_RC_SRQ_OR_RQ_WR_ID_MASK; + if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) { + srq = qp->srq; + if (!srq) { + dev_err(&cq->hwq.pdev->dev, + "QPLIB: FP: SRQ used but not defined??"); + return -EINVAL; + } + if (wr_id_idx > srq->hwq.depth - 1) { + dev_err(&cq->hwq.pdev->dev, + "QPLIB: FP: CQ Process RC "); + dev_err(&cq->hwq.pdev->dev, + "QPLIB: wr_id idx 0x%x exceeded SRQ max 0x%x", + wr_id_idx, srq->hwq.depth); + return -EINVAL; + } + cqe->wr_id = srq->swq[wr_id_idx].wr_id; + bnxt_qplib_release_srqe(srq, wr_id_idx); + dev_dbg(&srq->hwq.pdev->dev, + "QPLIB: 
FP: CQ Processed RC SRQ wr_id[%d] = 0x%llx", + wr_id_idx, cqe->wr_id); + cqe++; + (*budget)--; + *pcqe = cqe; + } else { + rq = &qp->rq; + if (wr_id_idx > (rq->max_wqe - 1)) { + dev_err(&cq->hwq.pdev->dev, + "QPLIB: FP: CQ Process RC "); + dev_err(&cq->hwq.pdev->dev, + "QPLIB: wr_id idx 0x%x exceeded RQ max 0x%x", + wr_id_idx, rq->hwq.depth); + return -EINVAL; + } + if (wr_id_idx != rq->swq_last) + return -EINVAL; +#ifdef ENABLE_FP_SPINLOCK + spin_lock_irqsave(&rq->hwq.lock, flags); +#endif + + cqe->wr_id = rq->swq[rq->swq_last].wr_id; + dev_dbg(&cq->hwq.pdev->dev, + "QPLIB: FP: CQ Processed RC RQ wr_id[%d] = 0x%llx", + rq->swq_last, cqe->wr_id); + cqe++; + (*budget)--; + bnxt_qplib_hwq_incr_cons(rq->hwq.depth, &rq->hwq.cons, + rq->swq[rq->swq_last].slots, + &rq->dbinfo.flags); + rq->swq_last = rq->swq[rq->swq_last].next_idx; + *pcqe = cqe; + + if (hwcqe->status != CQ_RES_RC_STATUS_OK) + bnxt_qplib_mark_qp_error(qp); + +#ifdef ENABLE_FP_SPINLOCK + spin_unlock_irqrestore(&rq->hwq.lock, flags); +#endif + } +done: + return rc; +} + +static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq, + struct cq_res_ud_v2 *hwcqe, + struct bnxt_qplib_cqe **pcqe, + int *budget) +{ + struct bnxt_qplib_srq *srq; + struct bnxt_qplib_cqe *cqe; + struct bnxt_qplib_qp *qp; + struct bnxt_qplib_q *rq; +#ifdef ENABLE_FP_SPINLOCK + unsigned long flags; +#endif + u32 wr_id_idx; + int rc = 0; + u16 *smac; + + qp = (struct bnxt_qplib_qp *)le64_to_cpu(hwcqe->qp_handle); + if (!qp) { + dev_err(&cq->hwq.pdev->dev, "QPLIB: process_cq UD qp is NULL"); + return -EINVAL; + } + if (qp->rq.flushed) { + dev_dbg(&cq->hwq.pdev->dev, + "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp); + goto done; + } + cqe = *pcqe; + cqe->opcode = hwcqe->cqe_type_toggle & CQ_RES_UD_V2_CQE_TYPE_MASK; + cqe->length = le32_to_cpu((hwcqe->length & CQ_RES_UD_V2_LENGTH_MASK)); + cqe->cfa_meta = le16_to_cpu(hwcqe->cfa_metadata0); + /* V2 format has metadata1 */ + cqe->cfa_meta |= (((le32_to_cpu(hwcqe->src_qp_high_srq_or_rq_wr_id) & + CQ_RES_UD_V2_CFA_METADATA1_MASK) >> + CQ_RES_UD_V2_CFA_METADATA1_SFT) << + BNXT_QPLIB_META1_SHIFT); + cqe->invrkey = le32_to_cpu(hwcqe->imm_data); + cqe->flags = le16_to_cpu(hwcqe->flags); + cqe->status = hwcqe->status; + cqe->qp_handle = (u64)(unsigned long)qp; + smac = (u16 *)cqe->smac; + smac[2] = ntohs(le16_to_cpu(hwcqe->src_mac[0])); + smac[1] = ntohs(le16_to_cpu(hwcqe->src_mac[1])); + smac[0] = ntohs(le16_to_cpu(hwcqe->src_mac[2])); + wr_id_idx = le32_to_cpu(hwcqe->src_qp_high_srq_or_rq_wr_id) + & CQ_RES_UD_V2_SRQ_OR_RQ_WR_ID_MASK; + cqe->src_qp = le16_to_cpu(hwcqe->src_qp_low) | + ((le32_to_cpu( + hwcqe->src_qp_high_srq_or_rq_wr_id) & + CQ_RES_UD_V2_SRC_QP_HIGH_MASK) >> 8); + + if (cqe->flags & CQ_RES_UD_V2_FLAGS_SRQ) { + srq = qp->srq; + if (!srq) { + dev_err(&cq->hwq.pdev->dev, + "QPLIB: FP: SRQ used but not defined??"); + return -EINVAL; + } + if (wr_id_idx > srq->hwq.depth - 1) { + dev_err(&cq->hwq.pdev->dev, + "QPLIB: FP: CQ Process UD "); + dev_err(&cq->hwq.pdev->dev, + "QPLIB: wr_id idx 0x%x exceeded SRQ max 0x%x", + wr_id_idx, srq->hwq.depth); + return -EINVAL; + } + cqe->wr_id = srq->swq[wr_id_idx].wr_id; + bnxt_qplib_release_srqe(srq, wr_id_idx); + dev_dbg(&cq->hwq.pdev->dev, + "QPLIB: FP: CQ Processed UD SRQ wr_id[%d] = 0x%llx", + wr_id_idx, cqe->wr_id); + cqe++; + (*budget)--; + *pcqe = cqe; + } else { + rq = &qp->rq; + if (wr_id_idx > (rq->max_wqe - 1)) { + dev_err(&cq->hwq.pdev->dev, + "QPLIB: FP: CQ Process UD "); + dev_err(&cq->hwq.pdev->dev, + "QPLIB: wr_id idx 0x%x exceeded RQ max 0x%x", + 
wr_id_idx, rq->hwq.depth); + return -EINVAL; + } + if (rq->swq_last != wr_id_idx) + return -EINVAL; + +#ifdef ENABLE_FP_SPINLOCK + spin_lock_irqsave(&rq->hwq.lock, flags); +#endif + cqe->wr_id = rq->swq[rq->swq_last].wr_id; + dev_dbg(&cq->hwq.pdev->dev, + "QPLIB: FP: CQ Processed UD RQ wr_id[%d] = 0x%llx", + rq->swq_last, cqe->wr_id); + cqe++; + (*budget)--; + bnxt_qplib_hwq_incr_cons(rq->hwq.depth, &rq->hwq.cons, + rq->swq[rq->swq_last].slots, + &rq->dbinfo.flags); + rq->swq_last = rq->swq[rq->swq_last].next_idx; + *pcqe = cqe; + + if (hwcqe->status != CQ_RES_UD_V2_STATUS_OK) + bnxt_qplib_mark_qp_error(qp); + +#ifdef ENABLE_FP_SPINLOCK + spin_unlock_irqrestore(&rq->hwq.lock, flags); +#endif + } +done: + return rc; +} + +bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq) +{ + + struct cq_base *hw_cqe; + unsigned long flags; + bool rc = true; + + spin_lock_irqsave(&cq->hwq.lock, flags); + hw_cqe = bnxt_qplib_get_qe(&cq->hwq, cq->hwq.cons, NULL); + + /* Check for Valid bit. If the CQE is valid, return false */ + rc = !CQE_CMP_VALID(hw_cqe, cq->dbinfo.flags); + spin_unlock_irqrestore(&cq->hwq.lock, flags); + return rc; +} + +static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq, + struct cq_res_raweth_qp1 *hwcqe, + struct bnxt_qplib_cqe **pcqe, + int *budget) +{ + struct bnxt_qplib_qp *qp; + struct bnxt_qplib_q *rq; + struct bnxt_qplib_srq *srq; + struct bnxt_qplib_cqe *cqe; + u32 wr_id_idx; +#ifdef ENABLE_FP_SPINLOCK + unsigned long flags; +#endif + int rc = 0; + + qp = (struct bnxt_qplib_qp *)le64_to_cpu(hwcqe->qp_handle); + if (!qp) { + dev_err(&cq->hwq.pdev->dev, + "QPLIB: process_cq Raw/QP1 qp is NULL"); + return -EINVAL; + } + if (qp->rq.flushed) { + dev_dbg(&cq->hwq.pdev->dev, + "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp); + goto done; + } + cqe = *pcqe; + cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK; + cqe->flags = le16_to_cpu(hwcqe->flags); + cqe->qp_handle = (u64)(unsigned long)qp; + + wr_id_idx = le32_to_cpu(hwcqe->raweth_qp1_payload_offset_srq_or_rq_wr_id) + & CQ_RES_RAWETH_QP1_SRQ_OR_RQ_WR_ID_MASK; + cqe->src_qp = qp->id; + if (qp->id == 1 && !cqe->length) { + /* Add workaround for the length misdetection */ + cqe->length = 296; + } else { + cqe->length = le16_to_cpu(hwcqe->length); + } + cqe->pkey_index = qp->pkey_index; + memcpy(cqe->smac, qp->smac, 6); + + cqe->raweth_qp1_flags = le16_to_cpu(hwcqe->raweth_qp1_flags); + cqe->raweth_qp1_flags2 = le32_to_cpu(hwcqe->raweth_qp1_flags2); + cqe->raweth_qp1_metadata = le32_to_cpu(hwcqe->raweth_qp1_metadata); + + dev_dbg(&cq->hwq.pdev->dev, + "QPLIB: raweth_qp1_flags = 0x%x raweth_qp1_flags2 = 0x%x\n", + cqe->raweth_qp1_flags, cqe->raweth_qp1_flags2); + + if (cqe->flags & CQ_RES_RAWETH_QP1_FLAGS_SRQ_SRQ) { + srq = qp->srq; + if (!srq) { + dev_err(&cq->hwq.pdev->dev, + "QPLIB: FP: SRQ used but not defined??"); + return -EINVAL; + } + if (wr_id_idx > srq->hwq.depth - 1) { + dev_err(&cq->hwq.pdev->dev, + "QPLIB: FP: CQ Process Raw/QP1 "); + dev_err(&cq->hwq.pdev->dev, + "QPLIB: wr_id idx 0x%x exceeded SRQ max 0x%x", + wr_id_idx, srq->hwq.depth); + return -EINVAL; + } +#ifdef ENABLE_FP_SPINLOCK + spin_lock_irqsave(&srq->hwq.lock, flags); +#endif + cqe->wr_id = srq->swq[wr_id_idx].wr_id; + dev_dbg(&cq->hwq.pdev->dev, + "QPLIB: FP: CQ Processed Raw/QP1 SRQ "); + dev_dbg(&cq->hwq.pdev->dev, + "QPLIB: wr_id[%d] = 0x%llx with status = 0x%x", + wr_id_idx, cqe->wr_id, hwcqe->status); + cqe++; + (*budget)--; + srq->hwq.cons++; + *pcqe = cqe; +#ifdef ENABLE_FP_SPINLOCK + 
spin_unlock_irqrestore(&srq->hwq.lock, flags); +#endif + } else { + rq = &qp->rq; + if (wr_id_idx > (rq->max_wqe - 1)) { + dev_err(&cq->hwq.pdev->dev, + "QPLIB: FP: CQ Process Raw/QP1 RQ wr_id "); + dev_err(&cq->hwq.pdev->dev, + "QPLIB: ix 0x%x exceeded RQ max 0x%x", + wr_id_idx, rq->max_wqe); + return -EINVAL; + } + if (wr_id_idx != rq->swq_last) + return -EINVAL; +#ifdef ENABLE_FP_SPINLOCK + spin_lock_irqsave(&rq->hwq.lock, flags); +#endif + cqe->wr_id = rq->swq[rq->swq_last].wr_id; + dev_dbg(&cq->hwq.pdev->dev, + "QPLIB: FP: CQ Processed Raw/QP1 RQ "); + dev_dbg(&cq->hwq.pdev->dev, + "QPLIB: wr_id[%d] = 0x%llx with status = 0x%x", + wr_id_idx, cqe->wr_id, hwcqe->status); + cqe++; + (*budget)--; + bnxt_qplib_hwq_incr_cons(rq->hwq.depth, &rq->hwq.cons, + rq->swq[wr_id_idx].slots, + &rq->dbinfo.flags); + rq->swq_last = rq->swq[rq->swq_last].next_idx; + *pcqe = cqe; + + if (hwcqe->status != CQ_RES_RC_STATUS_OK) + bnxt_qplib_mark_qp_error(qp); + +#ifdef ENABLE_FP_SPINLOCK + spin_unlock_irqrestore(&rq->hwq.lock, flags); +#endif + } +done: + return rc; +} + +static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq, + struct cq_terminal *hwcqe, + struct bnxt_qplib_cqe **pcqe, + int *budget) +{ + struct bnxt_qplib_q *sq, *rq; + struct bnxt_qplib_cqe *cqe; + struct bnxt_qplib_qp *qp; +#ifdef ENABLE_FP_SPINLOCK + unsigned long flags; +#endif + u32 cqe_cons; + int rc = 0; + + /* Check the Status */ + if (hwcqe->status != CQ_TERMINAL_STATUS_OK) + dev_warn(&cq->hwq.pdev->dev, + "QPLIB: FP: CQ Process Terminal Error status = 0x%x", + hwcqe->status); + + qp = (struct bnxt_qplib_qp *)le64_to_cpu(hwcqe->qp_handle); + if (!qp) + return -EINVAL; + dev_dbg(&cq->hwq.pdev->dev, + "QPLIB: FP: CQ Process terminal for qp (0x%x)", qp->id); + + /* Terminal CQE requires all posted RQEs to complete with FLUSHED_ERR + * from the current rq->cons to the rq->prod regardless what the + * rq->cons the terminal CQE indicates. + */ + bnxt_qplib_mark_qp_error(qp); + + sq = &qp->sq; + rq = &qp->rq; + + cqe_cons = le16_to_cpu(hwcqe->sq_cons_idx); + if (cqe_cons == 0xFFFF) + goto do_rq; + + cqe_cons %= sq->max_wqe; +#ifdef ENABLE_FP_SPINLOCK + spin_lock_irqsave(&sq->hwq.lock, flags); +#endif + if (qp->sq.flushed) { + dev_dbg(&cq->hwq.pdev->dev, + "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp); + goto sq_done; + } + + /* Terminal CQE can also include aggregated successful CQEs prior. 
+ So we must complete all CQEs from the current sq's cons to the + cq_cons with status OK */ + cqe = *pcqe; + while (*budget) { + /*sw_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);*/ + if (sq->swq_last == cqe_cons) + break; + if (sq->swq[sq->swq_last].flags & SQ_SEND_FLAGS_SIGNAL_COMP) { + memset(cqe, 0, sizeof(*cqe)); + cqe->status = CQ_REQ_STATUS_OK; + cqe->opcode = CQ_BASE_CQE_TYPE_REQ; + cqe->qp_handle = (u64)qp; + cqe->src_qp = qp->id; + cqe->wr_id = sq->swq[sq->swq_last].wr_id; + cqe->type = sq->swq[sq->swq_last].type; + dev_dbg(&cq->hwq.pdev->dev, + "QPLIB: FP: CQ Processed terminal Req "); + dev_dbg(&cq->hwq.pdev->dev, + "QPLIB: wr_id[%d] = 0x%llx with status 0x%x", + sq->swq_last, cqe->wr_id, cqe->status); + cqe++; + (*budget)--; + } + bnxt_qplib_hwq_incr_cons(sq->hwq.depth, &sq->hwq.cons, + sq->swq[sq->swq_last].slots, + &sq->dbinfo.flags); + sq->swq_last = sq->swq[sq->swq_last].next_idx; + } + *pcqe = cqe; + if (!*budget && sq->swq_last != cqe_cons) { + /* Out of budget */ + rc = -EAGAIN; + goto sq_done; + } +sq_done: +#ifdef ENABLE_FP_SPINLOCK + spin_unlock_irqrestore(&sq->hwq.lock, flags); +#endif + if (rc) + return rc; +do_rq: + cqe_cons = le16_to_cpu(hwcqe->rq_cons_idx); + if (cqe_cons == 0xFFFF) { + goto done; + } else if (cqe_cons > (rq->max_wqe - 1)) { + dev_err(&cq->hwq.pdev->dev, + "QPLIB: FP: CQ Processed terminal "); + dev_err(&cq->hwq.pdev->dev, + "QPLIB: reported rq_cons_idx 0x%x exceeds max 0x%x", + cqe_cons, rq->hwq.depth); + goto done; + } +#ifdef ENABLE_FP_SPINLOCK + spin_lock_irqsave(&rq->hwq.lock, flags); +#endif + if (qp->rq.flushed) { + dev_dbg(&cq->hwq.pdev->dev, + "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp); + rc = 0; + goto rq_done; + } + +rq_done: +#ifdef ENABLE_FP_SPINLOCK + spin_unlock_irqrestore(&rq->hwq.lock, flags); +#endif +done: + return rc; +} + +static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq, + struct cq_cutoff *hwcqe) +{ + /* Check the Status */ + if (hwcqe->status != CQ_CUTOFF_STATUS_OK) { + dev_err(&cq->hwq.pdev->dev, + "QPLIB: FP: CQ Process Cutoff Error status = 0x%x", + hwcqe->status); + return -EINVAL; + } + clear_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags); + wake_up_interruptible(&cq->waitq); + + dev_dbg(&cq->hwq.pdev->dev, "QPLIB: FP: CQ Processed Cutoff"); + return 0; +} + +int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq, + struct bnxt_qplib_cqe *cqe, + int num_cqes) +{ + struct bnxt_qplib_qp *qp = NULL; + u32 budget = num_cqes; + unsigned long flags; + + spin_lock_irqsave(&cq->flush_lock, flags); + list_for_each_entry(qp, &cq->sqf_head, sq_flush) { + dev_dbg(&cq->hwq.pdev->dev, + "QPLIB: FP: Flushing SQ QP= %p", + qp); + __flush_sq(&qp->sq, qp, &cqe, &budget); + } + + list_for_each_entry(qp, &cq->rqf_head, rq_flush) { + dev_dbg(&cq->hwq.pdev->dev, + "QPLIB: FP: Flushing RQ QP= %p", + qp); + __flush_rq(&qp->rq, qp, &cqe, &budget); + } + spin_unlock_irqrestore(&cq->flush_lock, flags); + + return num_cqes - budget; +} + +int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe, + int num_cqes, struct bnxt_qplib_qp **lib_qp) +{ + struct cq_base *hw_cqe; + u32 hw_polled = 0; + int budget, rc = 0; + u8 type; + +#ifdef ENABLE_FP_SPINLOCK + unsigned long flags; + spin_lock_irqsave(&cq->hwq.lock, flags); +#endif + budget = num_cqes; + + while (budget) { + hw_cqe = bnxt_qplib_get_qe(&cq->hwq, cq->hwq.cons, NULL); + + /* Check for Valid bit */ + if (!CQE_CMP_VALID(hw_cqe, cq->dbinfo.flags)) + break; + + /* The valid test of the entry must be done first before + * reading any further. 
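+ * The dma_rmb() below enforces that ordering before any other CQE fields are read.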
+ */ + dma_rmb(); + /* From the device's respective CQE format to qplib_wc*/ + type = hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK; + switch (type) { + case CQ_BASE_CQE_TYPE_REQ: + rc = bnxt_qplib_cq_process_req(cq, + (struct cq_req *)hw_cqe, &cqe, &budget, + cq->hwq.cons, lib_qp); + break; + case CQ_BASE_CQE_TYPE_RES_RC: + rc = bnxt_qplib_cq_process_res_rc(cq, + (struct cq_res_rc *)hw_cqe, &cqe, + &budget); + break; + case CQ_BASE_CQE_TYPE_RES_UD: + rc = bnxt_qplib_cq_process_res_ud(cq, + (struct cq_res_ud_v2 *)hw_cqe, + &cqe, &budget); + break; + case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1: + rc = bnxt_qplib_cq_process_res_raweth_qp1(cq, + (struct cq_res_raweth_qp1 *) + hw_cqe, &cqe, &budget); + break; + case CQ_BASE_CQE_TYPE_TERMINAL: + rc = bnxt_qplib_cq_process_terminal(cq, + (struct cq_terminal *)hw_cqe, + &cqe, &budget); + break; + case CQ_BASE_CQE_TYPE_CUT_OFF: + bnxt_qplib_cq_process_cutoff(cq, + (struct cq_cutoff *)hw_cqe); + /* Done processing this CQ */ + goto exit; + default: + dev_err(&cq->hwq.pdev->dev, + "QPLIB: process_cq unknown type 0x%lx", + hw_cqe->cqe_type_toggle & + CQ_BASE_CQE_TYPE_MASK); + rc = -EINVAL; + break; + } + if (rc < 0) { + dev_dbg(&cq->hwq.pdev->dev, + "QPLIB: process_cqe rc = 0x%x", rc); + if (rc == -EAGAIN) + break; + /* Error while processing the CQE, just skip to the + next one */ + if (type != CQ_BASE_CQE_TYPE_TERMINAL) + dev_err(&cq->hwq.pdev->dev, + "QPLIB: process_cqe error rc = 0x%x", + rc); + } + hw_polled++; + bnxt_qplib_hwq_incr_cons(cq->hwq.depth, &cq->hwq.cons, + 1, &cq->dbinfo.flags); + } + if (hw_polled) + bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ); +exit: +#ifdef ENABLE_FP_SPINLOCK + spin_unlock_irqrestore(&cq->hwq.lock, flags); +#endif + return num_cqes - budget; +} + +void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type) +{ +#ifdef ENABLE_FP_SPINLOCK + unsigned long flags; + + spin_lock_irqsave(&cq->hwq.lock, flags); +#endif + cq->dbinfo.toggle = cq->toggle; + if (arm_type) + bnxt_qplib_ring_db(&cq->dbinfo, arm_type); + /* Using cq->arm_state variable to track whether to issue cq handler */ + atomic_set(&cq->arm_state, 1); +#ifdef ENABLE_FP_SPINLOCK + spin_unlock_irqrestore(&cq->hwq.lock, flags); +#endif +} + +void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp) +{ + flush_workqueue(qp->scq->nq->cqn_wq); + if (qp->scq != qp->rcq) + flush_workqueue(qp->rcq->nq->cqn_wq); +} diff --git a/bnxt_re-1.10.3-229.0.139.0/qplib_fp.h b/bnxt_re-1.10.3-229.0.139.0/qplib_fp.h new file mode 100644 index 0000000..a0f1459 --- /dev/null +++ b/bnxt_re-1.10.3-229.0.139.0/qplib_fp.h @@ -0,0 +1,651 @@ +/* + * Copyright (c) 2015-2023, Broadcom. All rights reserved. The term + * Broadcom refers to Broadcom Inc. and/or its subsidiaries. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Author: Eddie Wai + * + * Description: Fast Path Operators (header) + */ + +#ifndef __BNXT_QPLIB_FP_H__ +#define __BNXT_QPLIB_FP_H__ + +/* Temp header structures for SQ */ +struct sq_ud_ext_hdr { + __le32 dst_qp; + __le32 avid; + __le64 rsvd; +}; + +struct sq_raw_ext_hdr { + __le32 cfa_meta; + __le32 rsvd0; + __le64 rsvd1; +}; + +struct sq_rdma_ext_hdr { + __le64 remote_va; + __le32 remote_key; + __le32 rsvd; +}; + +struct sq_atomic_ext_hdr { + __le64 swap_data; + __le64 cmp_data; +}; + +struct sq_fr_pmr_ext_hdr { + __le64 pblptr; + __le64 va; +}; + +struct sq_bind_ext_hdr { + __le64 va; + __le32 length_lo; + __le32 length_hi; +}; + +struct rq_ext_hdr { + __le64 rsvd1; + __le64 rsvd2; +}; + +#define BNXT_QPLIB_ETHTYPE_ROCEV1 0x8915 + +struct bnxt_qplib_srq { + struct bnxt_qplib_pd *pd; + struct bnxt_qplib_dpi *dpi; + struct bnxt_qplib_chip_ctx *cctx; + struct bnxt_qplib_cq *cq; + struct bnxt_qplib_swq *swq; + struct bnxt_qplib_hwq hwq; + struct bnxt_qplib_db_info dbinfo; + struct bnxt_qplib_sg_info sginfo; + u64 srq_handle; + u32 id; + u16 wqe_size; + u32 max_wqe; + u32 max_sge; + u32 threshold; + bool arm_req; + int start_idx; + int last_idx; + u16 eventq_hw_ring_id; + bool is_user; + bool small_recv_wqe_sup; + u8 toggle; + spinlock_t lock; +}; + +struct bnxt_qplib_sge { + u64 addr; + u32 size; + u32 lkey; +}; + +/* + * Buffer space for ETH(14), IP or GRH(40), UDP header(8) + * and ib_bth + ib_deth (20). 
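+ * (14 + 40 + 8 + 20 = 82 bytes.)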
+ * Max required is 82 when RoCE V2 is enabled + */ + +/* + * RoCE V1 (38 bytes needed) + * +------------+----------+--------+--------+-------+ + * |Eth-hdr(14B)| GRH (40B)|bth+deth| Mad | iCRC | + * | | supplied | 20B |payload | 4B | + * | | by user |supplied| 256B | | + * | | mad | |by user | | + * | | | | | | + * | sge 1 | sge 2 | sge 3 | sge 4 | sge 5 | + * +------------+----------+--------+--------+-------+ + */ + +/* + * RoCE V2-IPv4 (46 Bytes needed) + * +------------+----------+--------+--------+-------+ + * |Eth-hdr(14B)| IP-hdr |UDP-hdr | Mad | iCRC | + * | | supplied | 8B |payload | 4B | + * | | by user |bth+deth| 256B | | + * | | mad lower| 20B |supplied| | + * | | 20B out | (sge 3)|by user | | + * | | of 40B | | | | + * | | grh space| | | | + * | sge 1 | sge 2 | sge 3 | sge 4 | sge 5 | + * +------------+----------+--------+--------+-------+ + */ + +/* + * RoCE V2-IPv6 (46 Bytes needed) + * +------------+----------+--------+--------+-------+ + * |Eth-hdr(14B)| IPv6 |UDP-hdr | Mad | iCRC | + * | | supplied | 8B |payload | 4B | + * | | by user |bth+deth| 256B | | + * | | mad lower| 20B |supplied| | + * | | 40 bytes | |by user | | + * | | grh space| | | | + * | | | | | | + * | sge 1 | sge 2 | sge 3 | sge 4 | sge 5 | + * +------------+----------+--------+--------+-------+ + */ + +#define BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE 74 +#define BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2 86 +#define BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE 46 +#define BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE 14 +#define BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2 512 +#define BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4 20 +#define BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6 40 +#define BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE 20 +#define BNXT_QPLIB_MAX_SQSZ 0xFFFF +/* TODO modify the length to 334 */ +struct bnxt_qplib_hdrbuf { + dma_addr_t dma_map; + void *va; + u32 len; + u32 step; +}; + +struct bnxt_qplib_swq { + u64 wr_id; + int next_idx; + u8 type; + u8 flags; + u32 start_psn; + u32 next_psn; + u32 slot_idx; + u8 slots; + /* WIP: make it void * to handle legacy also */ + struct sq_psn_search *psn_search; + void *inline_data; +}; + +struct bnxt_qplib_swqe { + /* General */ +#define BNXT_QPLIB_FENCE_WRID 0x46454E43 /* "FENC" */ +#define BNXT_QPLIB_QP1_DUMMY_WRID 0x44554D59 /* "DUMY" */ + u64 wr_id; + u8 reqs_type; + u8 type; +#define BNXT_QPLIB_SWQE_TYPE_SEND 0 +#define BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM 1 +#define BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV 2 +#define BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE 4 +#define BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM 5 +#define BNXT_QPLIB_SWQE_TYPE_RDMA_READ 6 +#define BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP 8 +#define BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD 11 +#define BNXT_QPLIB_SWQE_TYPE_LOCAL_INV 12 +#define BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR 13 +#define BNXT_QPLIB_SWQE_TYPE_REG_MR 13 +#define BNXT_QPLIB_SWQE_TYPE_BIND_MW 14 +#define BNXT_QPLIB_SWQE_TYPE_RECV 128 +#define BNXT_QPLIB_SWQE_TYPE_RECV_RDMA_IMM 129 + u8 flags; +#define BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP (1 << 0) +#define BNXT_QPLIB_SWQE_FLAGS_RD_ATOMIC_FENCE (1 << 1) +#define BNXT_QPLIB_SWQE_FLAGS_UC_FENCE (1 << 2) +#define BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT (1 << 3) +#define BNXT_QPLIB_SWQE_FLAGS_INLINE (1 << 4) + struct bnxt_qplib_sge *sg_list; + int num_sge; + + union { + /* Send, with imm, inval key */ + struct { + union { + __be32 imm_data; + u32 inv_key; + }; + u32 q_key; + u32 dst_qp; + u16 avid; + } send; + + /* Send Raw Ethernet and QP1 */ + struct { + u16 lflags; + u16 cfa_action; + u32 cfa_meta; + } rawqp1; + + /* RDMA write, with imm, read */ + struct { + union { + 
__be32 imm_data; + u32 inv_key; + }; + u64 remote_va; + u32 r_key; + } rdma; + + /* Atomic cmp/swap, fetch/add */ + struct { + u64 remote_va; + u32 r_key; + u64 swap_data; + u64 cmp_data; + } atomic; + + /* Local Invalidate */ + struct { + u32 inv_l_key; + } local_inv; + + /* FR-PMR */ + struct { + u8 access_cntl; + u8 pg_sz_log; + bool zero_based; + u32 l_key; + u32 length; + u8 pbl_pg_sz_log; +#define BNXT_QPLIB_SWQE_PAGE_SIZE_4K 0 +#define BNXT_QPLIB_SWQE_PAGE_SIZE_8K 1 +#define BNXT_QPLIB_SWQE_PAGE_SIZE_64K 4 +#define BNXT_QPLIB_SWQE_PAGE_SIZE_256K 6 +#define BNXT_QPLIB_SWQE_PAGE_SIZE_1M 8 +#define BNXT_QPLIB_SWQE_PAGE_SIZE_2M 9 +#define BNXT_QPLIB_SWQE_PAGE_SIZE_4M 10 +#define BNXT_QPLIB_SWQE_PAGE_SIZE_1G 18 + u8 levels; +#define PAGE_SHIFT_4K 12 + __le64 *pbl_ptr; + dma_addr_t pbl_dma_ptr; + u64 *page_list; + u16 page_list_len; + u64 va; + } frmr; + + /* Bind */ + struct { + u8 access_cntl; +#define BNXT_QPLIB_BIND_SWQE_ACCESS_LOCAL_WRITE (1 << 0) +#define BNXT_QPLIB_BIND_SWQE_ACCESS_REMOTE_READ (1 << 1) +#define BNXT_QPLIB_BIND_SWQE_ACCESS_REMOTE_WRITE (1 << 2) +#define BNXT_QPLIB_BIND_SWQE_ACCESS_REMOTE_ATOMIC (1 << 3) +#define BNXT_QPLIB_BIND_SWQE_ACCESS_WINDOW_BIND (1 << 4) + bool zero_based; + u8 mw_type; + u32 parent_l_key; + u32 r_key; + u64 va; + u32 length; + } bind; + }; +}; + +struct bnxt_qplib_q { + struct bnxt_qplib_swq *swq; + struct bnxt_qplib_db_info dbinfo; + struct bnxt_qplib_sg_info sginfo; + struct bnxt_qplib_hwq hwq; + u32 max_wqe; + u16 max_sge; + u16 wqe_size; + u16 q_full_delta; + u16 max_sw_wqe; + u32 psn; + bool condition; + bool single; + bool legacy_send_phantom; + u32 phantom_wqe_cnt; + u32 phantom_cqe_cnt; + u32 next_cq_cons; + bool flushed; + u32 swq_start; + u32 swq_last; +}; + +#define BNXT_QPLIB_PPP_REQ 0x1 +#define BNXT_QPLIB_PPP_ST_IDX_SHIFT 0x1 + +struct bnxt_qplib_ppp { + u32 dpi; + u8 req; + u8 st_idx_en; +}; + +struct bnxt_qplib_qp { + struct bnxt_qplib_pd *pd; + struct bnxt_qplib_dpi *dpi; + struct bnxt_qplib_chip_ctx *cctx; + u64 qp_handle; +#define BNXT_QPLIB_QP_ID_INVALID 0xFFFFFFFF + u32 id; + u8 type; + u8 sig_type; + u8 wqe_mode; + u8 state; + u8 cur_qp_state; + u8 is_user; + bool small_recv_wqe_sup; + u64 modify_flags; + u32 max_inline_data; + u32 mtu; + u32 path_mtu; + bool en_sqd_async_notify; + u16 pkey_index; + u32 qkey; + u32 dest_qp_id; + u8 access; + u8 timeout; + u8 retry_cnt; + u8 rnr_retry; + u64 wqe_cnt; + u32 min_rnr_timer; + u32 max_rd_atomic; + u32 max_dest_rd_atomic; + u32 dest_qpn; + u8 smac[6]; + u16 vlan_id; + u8 nw_type; + u16 port_id; + struct bnxt_qplib_ah ah; + struct bnxt_qplib_ppp ppp; + +#define BTH_PSN_MASK ((1 << 24) - 1) + /* SQ */ + struct bnxt_qplib_q sq; + /* RQ */ + struct bnxt_qplib_q rq; + /* SRQ */ + struct bnxt_qplib_srq *srq; + /* CQ */ + struct bnxt_qplib_cq *scq; + struct bnxt_qplib_cq *rcq; + /* IRRQ and ORRQ */ + struct bnxt_qplib_hwq irrq; + struct bnxt_qplib_hwq orrq; + /* Header buffer for QP1 */ + struct bnxt_qplib_hdrbuf *sq_hdr_buf; + struct bnxt_qplib_hdrbuf *rq_hdr_buf; + + /* ToS */ + u8 tos_ecn; + u8 tos_dscp; + /* To track the SQ and RQ flush list */ + struct list_head sq_flush; + struct list_head rq_flush; + /* 4 bytes of QP's scrabled mac received from FW */ + u32 lag_src_mac; + u32 msn; + u32 msn_tbl_sz; + /* get devflags in PI code */ + u16 dev_cap_flags; +}; + +#define CQE_CMP_VALID(hdr, pass) \ + (!!((hdr)->cqe_type_toggle & CQ_BASE_TOGGLE) == \ + !(pass & BNXT_QPLIB_FLAG_EPOCH_CONS_MASK)) + +static inline u32 __bnxt_qplib_get_avail(struct bnxt_qplib_hwq *hwq) +{ + int cons, 
prod, avail; + + /* False full is possible retrying post-send makes sense */ + cons = hwq->cons; + prod = hwq->prod; + avail = cons - prod; + if (cons <= prod) + avail += hwq->depth; + return avail; +} + +static inline bool bnxt_qplib_queue_full(struct bnxt_qplib_hwq *hwq, u8 slots) +{ + return __bnxt_qplib_get_avail(hwq) <= slots; +} + +struct bnxt_qplib_cqe { + u8 status; + u8 type; + u8 opcode; + u32 length; + /* Lower 16 is cfa_metadata0, Upper 16 is cfa_metadata1 */ + u32 cfa_meta; +#define BNXT_QPLIB_META1_SHIFT 16 +#define BNXT_QPLIB_CQE_CFA_META1_VALID 0x80000UL + u64 wr_id; + union { + __be32 immdata; + u32 invrkey; + }; + u64 qp_handle; + u64 mr_handle; + u16 flags; + u8 smac[6]; + u32 src_qp; + u16 raweth_qp1_flags; + u16 raweth_qp1_errors; + u16 raweth_qp1_cfa_code; + u32 raweth_qp1_flags2; + u32 raweth_qp1_metadata; + u8 raweth_qp1_payload_offset; + u16 pkey_index; +}; + +#define BNXT_QPLIB_QUEUE_START_PERIOD 0x01 +struct bnxt_qplib_cq { + struct bnxt_qplib_dpi *dpi; + struct bnxt_qplib_chip_ctx *cctx; + struct bnxt_qplib_nq *nq; + struct bnxt_qplib_db_info dbinfo; + struct bnxt_qplib_sg_info sginfo; + struct bnxt_qplib_hwq hwq; + struct bnxt_qplib_hwq resize_hwq; + struct list_head sqf_head; + struct list_head rqf_head; + u32 max_wqe; + u32 id; + u16 count; + u16 period; + u32 cnq_hw_ring_id; + u64 cq_handle; + atomic_t arm_state; +#define CQ_RESIZE_WAIT_TIME_MS 500 + unsigned long flags; +#define CQ_FLAGS_RESIZE_IN_PROG 1 + wait_queue_head_t waitq; + spinlock_t flush_lock; /* lock flush queue list */ + spinlock_t compl_lock; /* synch CQ handlers */ + u16 cnq_events; + bool is_cq_err_event; + u8 toggle; +}; + +#define BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE sizeof(struct xrrq_irrq) +#define BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE sizeof(struct xrrq_orrq) +#define IRD_LIMIT_TO_IRRQ_SLOTS(x) (2 * x + 2) +#define IRRQ_SLOTS_TO_IRD_LIMIT(s) ((s >> 1) - 1) +#define ORD_LIMIT_TO_ORRQ_SLOTS(x) (x + 1) +#define ORRQ_SLOTS_TO_ORD_LIMIT(s) (s - 1) + +#define NQE_CMP_VALID(hdr, pass) \ + (!!(le32_to_cpu((hdr)->info63_v[0]) & NQ_BASE_V) == \ + !(pass & BNXT_QPLIB_FLAG_EPOCH_CONS_MASK)) + +#define BNXT_QPLIB_NQE_MAX_CNT (128 * 1024) + +/* MSN table print macros for debugging */ +#define BNXT_RE_MSN_IDX(m) (((m) & SQ_MSN_SEARCH_START_IDX_MASK) >> \ + SQ_MSN_SEARCH_START_IDX_SFT) +#define BNXT_RE_MSN_NPSN(m) (((m) & SQ_MSN_SEARCH_NEXT_PSN_MASK) >> \ + SQ_MSN_SEARCH_NEXT_PSN_SFT) +#define BNXT_RE_MSN_SPSN(m) (((m) & SQ_MSN_SEARCH_START_PSN_MASK) >> \ + SQ_MSN_SEARCH_START_PSN_SFT) +#define BNXT_MSN_TBLE_SGE 6 /* For Thor2 SGE should be 6 to pass address sanity by HW */ + +struct bnxt_qplib_nq_stats { + u64 num_dbqne_processed; + u64 num_srqne_processed; + u64 num_cqne_processed; + u64 num_tasklet_resched; + u64 num_nq_rearm; +}; + +struct bnxt_qplib_nq_db { + struct bnxt_qplib_reg_desc reg; + void __iomem *db; + struct bnxt_qplib_db_info dbinfo; +}; + +typedef int (*cqn_handler_t)(struct bnxt_qplib_nq *nq, + struct bnxt_qplib_cq *cq); +typedef int (*srqn_handler_t)(struct bnxt_qplib_nq *nq, + struct bnxt_qplib_srq *srq, u8 event); + +struct bnxt_qplib_nq { + struct bnxt_qplib_res *res; + struct bnxt_qplib_hwq hwq; + struct bnxt_qplib_nq_db nq_db; + + char *name; + u16 ring_id; + int msix_vec; + cpumask_t mask; + struct tasklet_struct nq_tasklet; + bool requested; + int budget; + u32 load; + struct mutex lock; + + cqn_handler_t cqn_handler; + srqn_handler_t srqn_handler; + struct workqueue_struct *cqn_wq; + struct bnxt_qplib_nq_stats stats; +}; + +struct bnxt_qplib_nq_work { + struct work_struct work; + 
struct bnxt_qplib_nq *nq; + struct bnxt_qplib_cq *cq; +}; + +static inline dma_addr_t +bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp, u32 index) +{ + struct bnxt_qplib_hdrbuf *buf; + + buf = qp->rq_hdr_buf; + return (buf->dma_map + index * buf->step); +} + +void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill); +void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq); +int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx, + int msix_vector, bool need_init); +int bnxt_qplib_enable_nq(struct bnxt_qplib_nq *nq, int nq_idx, + int msix_vector, int bar_reg_offset, + cqn_handler_t cqn_handler, + srqn_handler_t srq_handler); +int bnxt_qplib_create_srq(struct bnxt_qplib_res *res, + struct bnxt_qplib_srq *srq); +int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res, + struct bnxt_qplib_srq *srq); +int bnxt_qplib_query_srq(struct bnxt_qplib_res *res, + struct bnxt_qplib_srq *srq); +int bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res, + struct bnxt_qplib_srq *srq); +int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq, + struct bnxt_qplib_swqe *wqe); +int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp); +int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp); +int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp); +int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp); +int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp); +void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp); +void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp); +void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp, + struct bnxt_qplib_sge *sge); +void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp, + struct bnxt_qplib_sge *sge); +u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp); +void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp); +int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp, + struct bnxt_qplib_swqe *wqe); +void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp); +int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp, + struct bnxt_qplib_swqe *wqe); +int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq); +int bnxt_qplib_modify_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq); +int bnxt_qplib_resize_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq, + int new_cqes); +void bnxt_qplib_resize_cq_complete(struct bnxt_qplib_res *res, + struct bnxt_qplib_cq *cq); +int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq); +void bnxt_qplib_free_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq); +int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe, + int num, struct bnxt_qplib_qp **qp); +bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq); +void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type); +void bnxt_qplib_free_nq_mem(struct bnxt_qplib_nq *nq); +int bnxt_qplib_alloc_nq_mem(struct bnxt_qplib_res *res, + struct bnxt_qplib_nq *nq); +void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp); +void bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp); +int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq, + struct bnxt_qplib_cqe *cqe, + int num_cqes); +void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp); +void bnxt_qplib_free_hdr_buf(struct bnxt_qplib_res *res, + struct bnxt_qplib_qp *qp); +int bnxt_qplib_alloc_hdr_buf(struct bnxt_qplib_res *res, + struct bnxt_qplib_qp *qp, u32 slen, u32 rlen); +void 
bnxt_re_synchronize_nq(struct bnxt_qplib_nq *nq); + +static inline bool __can_request_ppp(struct bnxt_qplib_qp *qp) +{ + bool can_request = false; + + if (qp->cur_qp_state == CMDQ_MODIFY_QP_NEW_STATE_RESET && + qp->state == CMDQ_MODIFY_QP_NEW_STATE_INIT && + qp->ppp.req && + !(qp->ppp.st_idx_en & + CREQ_MODIFY_QP_RESP_PINGPONG_PUSH_ENABLED)) + can_request = true; + return can_request; +} + +/* MSN table update inlin */ +static inline uint64_t bnxt_re_update_msn_tbl(uint32_t st_idx, uint32_t npsn, uint32_t start_psn) +{ + return cpu_to_le64((((u64)(st_idx) << SQ_MSN_SEARCH_START_IDX_SFT) & + SQ_MSN_SEARCH_START_IDX_MASK) | + (((u64)(npsn) << SQ_MSN_SEARCH_NEXT_PSN_SFT) & + SQ_MSN_SEARCH_NEXT_PSN_MASK) | + (((start_psn) << SQ_MSN_SEARCH_START_PSN_SFT) & + SQ_MSN_SEARCH_START_PSN_MASK)); +} + +void bnxt_re_schedule_dbq_event(struct bnxt_qplib_res *res); +#endif diff --git a/bnxt_re-1.10.3-229.0.139.0/qplib_rcfw.c b/bnxt_re-1.10.3-229.0.139.0/qplib_rcfw.c new file mode 100644 index 0000000..c21d165 --- /dev/null +++ b/bnxt_re-1.10.3-229.0.139.0/qplib_rcfw.c @@ -0,0 +1,1429 @@ +/* + * Copyright (c) 2015-2023, Broadcom. All rights reserved. The term + * Broadcom refers to Broadcom Inc. and/or its subsidiaries. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Author: Eddie Wai + * + * Description: RDMA Controller HW interface + */ + +#include +#include +#include +#include +#include +#include + +#include "roce_hsi.h" + +#include "qplib_tlv.h" +#include "qplib_res.h" +#include "qplib_sp.h" +#include "qplib_rcfw.h" +#include "compat.h" +#include "bnxt_re.h" + +static void bnxt_qplib_service_creq( +#ifdef HAS_TASKLET_SETUP + struct tasklet_struct *t +#else + unsigned long data +#endif + ); + +int __check_cmdq_stall(struct bnxt_qplib_rcfw *rcfw, + u32 *cur_prod, u32 *cur_cons) +{ + struct bnxt_qplib_cmdq_ctx *cmdq; + + cmdq = &rcfw->cmdq; + + if (*cur_prod == cmdq->hwq.prod && + *cur_cons == cmdq->hwq.cons) + /* No activity on CMDQ or CREQ. 
FW down */ + return -ETIMEDOUT; + + *cur_prod = cmdq->hwq.prod; + *cur_cons = cmdq->hwq.cons; + return 0; +} + +static int bnxt_qplib_map_rc(u8 opcode) +{ + switch (opcode) { + case CMDQ_BASE_OPCODE_DESTROY_QP: + case CMDQ_BASE_OPCODE_DESTROY_SRQ: + case CMDQ_BASE_OPCODE_DESTROY_CQ: + case CMDQ_BASE_OPCODE_DEALLOCATE_KEY: + case CMDQ_BASE_OPCODE_DEREGISTER_MR: + case CMDQ_BASE_OPCODE_DELETE_GID: + case CMDQ_BASE_OPCODE_DESTROY_QP1: + case CMDQ_BASE_OPCODE_DESTROY_AH: + case CMDQ_BASE_OPCODE_DEINITIALIZE_FW: + case CMDQ_BASE_OPCODE_MODIFY_ROCE_CC: + case CMDQ_BASE_OPCODE_SET_LINK_AGGR_MODE: + return 0; + default: + return -ETIMEDOUT; + } +} + +/** + * bnxt_re_is_fw_stalled - Check firmware health + * @rcfw: rcfw channel instance of rdev + * @cookie: cookie to track the command + * + * If firmware has not responded any rcfw command within + * rcfw->max_timeout, consider firmware as stalled. + * + * Returns: + * 0 if firmware is responding + * -ENODEV if firmware is not responding + */ +static int bnxt_re_is_fw_stalled(struct bnxt_qplib_rcfw *rcfw, u16 cookie) +{ + struct bnxt_qplib_cmdq_ctx *cmdq; + struct bnxt_qplib_crsqe *crsqe; + + crsqe = &rcfw->crsqe_tbl[cookie]; + cmdq = &rcfw->cmdq; + + if (time_after(jiffies, cmdq->last_seen + + (rcfw->max_timeout * HZ))) { + dev_warn_ratelimited(&rcfw->pdev->dev, + "%s: FW STALL Detected. cmdq[%#x]=%#x waited (%d > %d) msec active %d ", + __func__, cookie, crsqe->opcode, + jiffies_to_msecs(jiffies - cmdq->last_seen), + rcfw->max_timeout * 1000, + crsqe->is_in_used); + return -ENODEV; + } + + return 0; +} +/** + * __wait_for_resp - Don't hold the cpu context and wait for response + * @rcfw: rcfw channel instance of rdev + * @cookie: cookie to track the command + * + * Wait for command completion in sleepable context. + * + * Returns: + * 0 if command is completed by firmware. + * Non zero error code for rest of the case. + */ +static int __wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie) +{ + struct bnxt_qplib_cmdq_ctx *cmdq; + struct bnxt_qplib_crsqe *crsqe; + unsigned long issue_time; + int ret; + + cmdq = &rcfw->cmdq; + issue_time = jiffies; + crsqe = &rcfw->crsqe_tbl[cookie]; + + do { + if (RCFW_NO_FW_ACCESS(rcfw)) + return bnxt_qplib_map_rc(crsqe->opcode); + if (test_bit(FIRMWARE_STALL_DETECTED, &cmdq->flags)) + return -ETIMEDOUT; + + /* Non zero means command completed */ + ret = wait_event_timeout(cmdq->waitq, + !crsqe->is_in_used || + RCFW_NO_FW_ACCESS(rcfw), + msecs_to_jiffies(rcfw->max_timeout * 1000)); + + if (!crsqe->is_in_used) + return 0; + /* + * Take care if interrupt miss or other cases like DBR drop + */ + +#ifdef HAS_TASKLET_SETUP + bnxt_qplib_service_creq(&rcfw->creq.creq_tasklet); +#else + bnxt_qplib_service_creq((unsigned long)rcfw); +#endif + dev_warn_ratelimited(&rcfw->pdev->dev, + "Non-Blocking QPLIB: cmdq[%#x]=%#x waited (%u) msec bit %d", + cookie, crsqe->opcode, + jiffies_to_msecs(jiffies - issue_time), + crsqe->is_in_used); + + if (!crsqe->is_in_used) + return 0; + + ret = bnxt_re_is_fw_stalled(rcfw, cookie); + if (ret) + return ret; + + } while (true); +}; + +/** + * __block_for_resp - hold the cpu context and wait for response + * @rcfw: rcfw channel instance of rdev + * @cookie: cookie to track the command + * + * This function will hold the cpu (non-sleepable context) and + * wait for command completion. Maximum holding interval is 8 second. + * + * Returns: + * -ETIMEOUT if command is not completed in specific time interval. + * 0 if command is completed by firmware. 
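+ *
+ * The loop below busy-polls in 1 usec steps and services the CREQ
+ * inline, since the completion interrupt may be routed to this same
+ * CPU.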
+ */ +static int __block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie) +{ + struct bnxt_qplib_cmdq_ctx *cmdq = &rcfw->cmdq; + struct bnxt_qplib_crsqe *crsqe; + unsigned long issue_time = 0; + + issue_time = jiffies; + crsqe = &rcfw->crsqe_tbl[cookie]; + + do { + /* TBD - Can we merge below ?? */ + if (RCFW_NO_FW_ACCESS(rcfw)) + return bnxt_qplib_map_rc(crsqe->opcode); + if (test_bit(FIRMWARE_STALL_DETECTED, &cmdq->flags)) + return -ETIMEDOUT; + + udelay(1); + + /* Below call is must since there can be a deadlock + * if interrupt is mapped to the same cpu + */ +#ifdef HAS_TASKLET_SETUP + bnxt_qplib_service_creq(&rcfw->creq.creq_tasklet); +#else + bnxt_qplib_service_creq((unsigned long)rcfw); +#endif + if (!crsqe->is_in_used) + return 0; + + } while (time_before(jiffies, issue_time + (8 * HZ))); + + dev_info_ratelimited(&rcfw->pdev->dev, + "Blocking QPLIB: cmdq[%#x]=%#x taken (%u) msec", + cookie, crsqe->opcode, + jiffies_to_msecs(jiffies - issue_time)); + + return -ETIMEDOUT; +}; + +/* __send_message_no_waiter - get cookie and post the message. + * @rcfw: rcfw channel instance of rdev + * @msg: qplib message internal + * + * This function will just post and don't bother about completion. + * Current design of this function is - + * user must hold the completion queue hwq->lock. + * user must have used existing completion and free the resources. + * this function will not check queue full condition. + * this function will explicitly set is_waiter_alive=false. + * current use case is - send destroy_ah if create_ah is return + * after waiter of create_ah is lost. It can be extended for other + * use case as well. + * + * Returns: Nothing + * + */ +static void __send_message_no_waiter(struct bnxt_qplib_rcfw *rcfw, + struct bnxt_qplib_cmdqmsg *msg) +{ + struct bnxt_qplib_cmdq_ctx *cmdq = &rcfw->cmdq; + struct bnxt_qplib_hwq *cmdq_hwq = &cmdq->hwq; + struct bnxt_qplib_crsqe *crsqe; + struct bnxt_qplib_cmdqe *cmdqe; + u32 sw_prod, cmdq_prod, bsize; + u16 cookie; + u8 *preq; + + cookie = cmdq->seq_num & RCFW_MAX_COOKIE_VALUE; + __set_cmdq_base_cookie(msg->req, msg->req_sz, cpu_to_le16(cookie)); + crsqe = &rcfw->crsqe_tbl[cookie]; + + /* Set cmd_size in terms of 16B slots in req. 
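+ * The value returned here is consumed as a byte count by the copy
+ * loop below, which fills one BNXT_QPLIB_CMDQE_UNITS (16 byte) slot
+ * per iteration; a 64-byte command, for example, occupies four cmdqe
+ * slots.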
*/ + bsize = bnxt_qplib_set_cmd_slots(msg->req); + /* GET_CMD_SIZE would return number of slots in either case of tlv + * and non-tlv commands after call to bnxt_qplib_set_cmd_slots() + */ + crsqe->send_timestamp = jiffies; + crsqe->is_internal_cmd = true; + crsqe->is_waiter_alive = false; + crsqe->is_in_used = true; + crsqe->req_size = __get_cmdq_base_cmd_size(msg->req, msg->req_sz); + + preq = (u8 *)msg->req; + do { + /* Locate the next cmdq slot */ + sw_prod = HWQ_CMP(cmdq_hwq->prod, cmdq_hwq); + cmdqe = bnxt_qplib_get_qe(cmdq_hwq, sw_prod, NULL); + /* Copy a segment of the req cmd to the cmdq */ + memset(cmdqe, 0, sizeof(*cmdqe)); + memcpy(cmdqe, preq, min_t(u32, bsize, sizeof(*cmdqe))); + preq += min_t(u32, bsize, sizeof(*cmdqe)); + bsize -= min_t(u32, bsize, sizeof(*cmdqe)); + cmdq_hwq->prod++; + } while (bsize > 0); + cmdq->seq_num++; + + cmdq_prod = cmdq_hwq->prod & 0xFFFF; + atomic_inc(&rcfw->timeout_send); + /* ring CMDQ DB */ + wmb(); + writel(cmdq_prod, cmdq->cmdq_mbox.prod); + writel(RCFW_CMDQ_TRIG_VAL, cmdq->cmdq_mbox.db); +} + +static int __send_message(struct bnxt_qplib_rcfw *rcfw, + struct bnxt_qplib_cmdqmsg *msg) +{ + u32 bsize, free_slots, required_slots; + struct bnxt_qplib_cmdq_ctx *cmdq; + struct bnxt_qplib_crsqe *crsqe; + struct bnxt_qplib_cmdqe *cmdqe; + struct bnxt_qplib_hwq *cmdq_hwq; + u32 sw_prod, cmdq_prod; + struct pci_dev *pdev; + unsigned long flags; + u16 cookie; + u8 opcode; + u8 *preq; + + cmdq = &rcfw->cmdq; + cmdq_hwq = &cmdq->hwq; + pdev = rcfw->pdev; + opcode = __get_cmdq_base_opcode(msg->req, msg->req_sz); + + /* Cmdq are in 16-byte units, each request can consume 1 or more + cmdqe */ + spin_lock_irqsave(&cmdq_hwq->lock, flags); + required_slots = bnxt_qplib_get_cmd_slots(msg->req); + free_slots = HWQ_FREE_SLOTS(cmdq_hwq); + cookie = cmdq->seq_num & RCFW_MAX_COOKIE_VALUE; + crsqe = &rcfw->crsqe_tbl[cookie]; + + if (required_slots >= free_slots) { + dev_info_ratelimited(&pdev->dev, + "QPLIB: RCFW: CMDQ is full req/free %d/%d!", + required_slots, free_slots); + rcfw->cmdq_full_dbg++; + spin_unlock_irqrestore(&cmdq_hwq->lock, flags); + return -EAGAIN; + } + + if (crsqe->is_in_used) + panic("QPLIB: Cookie was not requested %d\n", + cookie); + + if (msg->block) + cookie |= RCFW_CMD_IS_BLOCKING; + __set_cmdq_base_cookie(msg->req, msg->req_sz, cpu_to_le16(cookie)); + + /* Set cmd_size in terms of 16B slots in req. 
*/ + bsize = bnxt_qplib_set_cmd_slots(msg->req); + /* GET_CMD_SIZE would return number of slots in either case of tlv + * and non-tlv commands after call to bnxt_qplib_set_cmd_slots() + */ + crsqe->send_timestamp = jiffies; + crsqe->free_slots = free_slots; + crsqe->resp = (struct creq_qp_event *)msg->resp; + crsqe->resp->cookie = cpu_to_le16(cookie); + crsqe->is_internal_cmd = false; + crsqe->is_waiter_alive = true; + crsqe->is_in_used = true; + crsqe->opcode = opcode; + crsqe->requested_qp_state = msg->qp_state; + + crsqe->req_size = __get_cmdq_base_cmd_size(msg->req, msg->req_sz); + if (__get_cmdq_base_resp_size(msg->req, msg->req_sz) && msg->sb) { + struct bnxt_qplib_rcfw_sbuf *sbuf = msg->sb; + + __set_cmdq_base_resp_addr(msg->req, msg->req_sz, + cpu_to_le64(sbuf->dma_addr)); + __set_cmdq_base_resp_size(msg->req, msg->req_sz, + ALIGN(sbuf->size, BNXT_QPLIB_CMDQE_UNITS) / + BNXT_QPLIB_CMDQE_UNITS); + } + + preq = (u8 *)msg->req; + do { + /* Locate the next cmdq slot */ + sw_prod = HWQ_CMP(cmdq_hwq->prod, cmdq_hwq); + cmdqe = bnxt_qplib_get_qe(cmdq_hwq, sw_prod, NULL); + /* Copy a segment of the req cmd to the cmdq */ + memset(cmdqe, 0, sizeof(*cmdqe)); + memcpy(cmdqe, preq, min_t(u32, bsize, sizeof(*cmdqe))); + preq += min_t(u32, bsize, sizeof(*cmdqe)); + bsize -= min_t(u32, bsize, sizeof(*cmdqe)); + cmdq_hwq->prod++; + } while (bsize > 0); + cmdq->seq_num++; + + cmdq_prod = cmdq_hwq->prod & 0xFFFF; + if (test_bit(FIRMWARE_FIRST_FLAG, &cmdq->flags)) { + /* The very first doorbell write + * is required to set this flag + * which prompts the FW to reset + * its internal pointers + */ + cmdq_prod |= BIT(FIRMWARE_FIRST_FLAG); + clear_bit(FIRMWARE_FIRST_FLAG, &cmdq->flags); + } + /* ring CMDQ DB */ + wmb(); + writel(cmdq_prod, cmdq->cmdq_mbox.prod); + writel(RCFW_CMDQ_TRIG_VAL, cmdq->cmdq_mbox.db); + + dev_dbg(&pdev->dev, "QPLIB: RCFW sent request with 0x%x 0x%x 0x%x", + cmdq_prod, cmdq_hwq->prod, crsqe->req_size); + dev_dbg(&pdev->dev, + "QPLIB: opcode 0x%x with cookie 0x%x at cmdq/crsq 0x%p/0x%p", + opcode, + __get_cmdq_base_cookie(msg->req, msg->req_sz), + cmdqe, crsqe); + spin_unlock_irqrestore(&cmdq_hwq->lock, flags); + /* Return the CREQ response pointer */ + return 0; +} + +/** + * __poll_for_resp - self poll completion for rcfw command + * @rcfw: rcfw channel instance of rdev + * @cookie: cookie to track the command + * + * It works same as __wait_for_resp except this function will + * do self polling in sort interval since interrupt is disabled. + * This function can not be called from non-sleepable context. + * + * Returns: + * -ETIMEOUT if command is not completed in specific time interval. + * 0 if command is completed by firmware. 
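+ *
+ * Polls roughly once per millisecond and checks for a firmware stall
+ * once rcfw->max_timeout seconds elapse without a completion.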
+ */ +static int __poll_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie) +{ + struct bnxt_qplib_cmdq_ctx *cmdq = &rcfw->cmdq; + struct bnxt_qplib_crsqe *crsqe; + unsigned long issue_time; + int ret; + + issue_time = jiffies; + crsqe = &rcfw->crsqe_tbl[cookie]; + + do { + if (RCFW_NO_FW_ACCESS(rcfw)) + return bnxt_qplib_map_rc(crsqe->opcode); + if (test_bit(FIRMWARE_STALL_DETECTED, &cmdq->flags)) + return -ETIMEDOUT; + + usleep_range(1000, 1001); + +#ifdef HAS_TASKLET_SETUP + bnxt_qplib_service_creq(&rcfw->creq.creq_tasklet); +#else + bnxt_qplib_service_creq((unsigned long)rcfw); +#endif + if (!crsqe->is_in_used) + return 0; + + if (jiffies_to_msecs(jiffies - issue_time) > + (rcfw->max_timeout * 1000)) { + dev_info_ratelimited(&rcfw->pdev->dev, + "Self Polling QPLIB: cmdq[%#x]=%#x taken (%u) msec", + cookie, crsqe->opcode, + jiffies_to_msecs(jiffies - issue_time)); + ret = bnxt_re_is_fw_stalled(rcfw, cookie); + if (ret) + return ret; + } + } while (true); + +}; + +static int __send_message_basic_sanity(struct bnxt_qplib_rcfw *rcfw, + struct bnxt_qplib_cmdqmsg *msg, u8 opcode) +{ + struct bnxt_qplib_cmdq_ctx *cmdq; + + cmdq = &rcfw->cmdq; + + /* Prevent posting if f/w is not in a state to process */ + if (RCFW_NO_FW_ACCESS(rcfw)) + return -ENXIO; + + if (test_bit(FIRMWARE_STALL_DETECTED, &cmdq->flags)) + return -ETIMEDOUT; + + if (test_bit(FIRMWARE_INITIALIZED_FLAG, &cmdq->flags) && + opcode == CMDQ_BASE_OPCODE_INITIALIZE_FW) { + dev_err(&rcfw->pdev->dev, "QPLIB: RCFW already initialized!"); + return -EINVAL; + } + + if (!test_bit(FIRMWARE_INITIALIZED_FLAG, &cmdq->flags) && + (opcode != CMDQ_BASE_OPCODE_QUERY_FUNC && + opcode != CMDQ_BASE_OPCODE_INITIALIZE_FW && + opcode != CMDQ_BASE_OPCODE_QUERY_VERSION)) { + dev_err(&rcfw->pdev->dev, + "QPLIB: RCFW not initialized, reject opcode 0x%x", + opcode); + return -ENOTSUPP; + } + + return 0; +} + +/* This function will just post and do not bother about completion */ +static void __destroy_timedout_ah(struct bnxt_qplib_rcfw *rcfw, + struct creq_create_ah_resp *create_ah_resp) +{ + struct bnxt_qplib_cmdqmsg msg = {}; + struct cmdq_destroy_ah req = {}; + + bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_DESTROY_AH, + sizeof(req)); + req.ah_cid = create_ah_resp->xid; + msg.req = (struct cmdq_base *)&req; + msg.req_sz = sizeof(req); + __send_message_no_waiter(rcfw, &msg); + dev_info_ratelimited(&rcfw->pdev->dev, + "From %s: ah_cid = %d timeout_send %d\n", __func__, + req.ah_cid, + atomic_read(&rcfw->timeout_send)); +} + +/** + * __bnxt_qplib_rcfw_send_message - qplib interface to send + * and complete rcfw command. + * @rcfw: rcfw channel instance of rdev + * @msg: qplib message internal + * + * This function does not account shadow queue depth. It will send + * all the command unconditionally as long as send queue is not full. + * + * Returns: + * 0 if command completed by firmware. + * Non zero if the command is not completed by firmware. + */ +int __bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw, + struct bnxt_qplib_cmdqmsg *msg) +{ + struct bnxt_qplib_crsqe *crsqe; + struct creq_qp_event *event; + unsigned long flags; + u16 cookie; + int rc = 0; + u8 opcode; + + opcode = __get_cmdq_base_opcode(msg->req, msg->req_sz); + + rc = __send_message_basic_sanity(rcfw, msg, opcode); + if (rc) + return rc == -ENXIO ? 
bnxt_qplib_map_rc(opcode) : rc; + + rc = __send_message(rcfw, msg); + if (rc) + return rc; + + cookie = le16_to_cpu(__get_cmdq_base_cookie(msg->req, + msg->req_sz)) & RCFW_MAX_COOKIE_VALUE; + + /* TBD - __poll_for_resp may not be needed + * if sync irq/tasklet is handled correctly. + */ + if (msg->block) + rc = __block_for_resp(rcfw, cookie); + else if (atomic_read(&rcfw->rcfw_intr_enabled)) + rc = __wait_for_resp(rcfw, cookie); + else + rc = __poll_for_resp(rcfw, cookie); + + if (rc) { + /* First check if it is FW stall. + * Use hwq.lock to avoid race with actual completion. + */ + spin_lock_irqsave(&rcfw->cmdq.hwq.lock, flags); + crsqe = &rcfw->crsqe_tbl[cookie]; + crsqe->is_waiter_alive = false; + if (rc == -ENODEV) + set_bit(FIRMWARE_STALL_DETECTED, &rcfw->cmdq.flags); + spin_unlock_irqrestore(&rcfw->cmdq.hwq.lock, flags); + + return -ETIMEDOUT; + } + + event = (struct creq_qp_event *)msg->resp; + if (event->status) { + /* failed with status */ + dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x status %d", + cookie, opcode, event->status); + rc = -EFAULT; + /* + * Workaround to avoid errors in the stack during bond + * creation and deletion. + * Disable error returned for ADD_GID/DEL_GID + */ + if (opcode == CMDQ_BASE_OPCODE_ADD_GID || + opcode == CMDQ_BASE_OPCODE_DELETE_GID) + rc = 0; + } + + return rc; +} + +/** + * bnxt_qplib_rcfw_send_message - qplib interface to send + * and complete rcfw command. + * @rcfw: rcfw channel instance of rdev + * @msg: qplib message internal + * + * Driver interact with Firmware through rcfw channel/slow path in two ways. + * a. Blocking rcfw command send. In this path, driver cannot hold + * the context for longer period since it is holding cpu until + * command is not completed. + * b. Non-blocking rcfw command send. In this path, driver can hold the + * context for longer period. There may be many pending command waiting + * for completion because of non-blocking nature. + * + * Driver will use shadow queue depth. Current queue depth of 8K + * (due to size of rcfw message it can be actual ~4K rcfw outstanding) + * is not optimal for rcfw command processing in firmware. + * RCFW_CMD_NON_BLOCKING_SHADOW_QD is defined as 64. + * Restrict at max 64 Non-Blocking rcfw commands. + * Do not allow more than 64 non-blocking command to the Firmware. + * Allow all blocking commands until there is no queue full. + * + * Returns: + * 0 if command completed by firmware. + * Non zero if the command is not completed by firmware. 
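+ *
+ * A typical caller (see bnxt_qplib_deinit_rcfw() below) prepares the
+ * command with bnxt_qplib_rcfw_cmd_prep(), wraps the request and
+ * response buffers with bnxt_qplib_fill_cmdqmsg(), and then calls
+ * this function.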
+ */ +int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw, + struct bnxt_qplib_cmdqmsg *msg) +{ + int ret; + + if (!msg->block) { + down(&rcfw->rcfw_inflight); + ret = __bnxt_qplib_rcfw_send_message(rcfw, msg); + up(&rcfw->rcfw_inflight); + } else { + ret = __bnxt_qplib_rcfw_send_message(rcfw, msg); + } + + return ret; +} + +static void bnxt_re_add_perf_stats(struct bnxt_qplib_rcfw *rcfw, + struct bnxt_qplib_crsqe *crsqe) +{ + u32 latency_msec, dest_stats_id; + u64 *dest_stats_ptr = NULL; + + latency_msec = jiffies_to_msecs(rcfw->cmdq.last_seen - + crsqe->send_timestamp); + if (latency_msec/1000 < RCFW_MAX_LATENCY_SEC_SLAB_INDEX) + rcfw->rcfw_lat_slab_sec[latency_msec/1000]++; + + if (!rcfw->sp_perf_stats_enabled) + return; + + if (latency_msec < RCFW_MAX_LATENCY_MSEC_SLAB_INDEX) + rcfw->rcfw_lat_slab_msec[latency_msec]++; + + switch (crsqe->opcode) { + case CMDQ_BASE_OPCODE_CREATE_QP: + dest_stats_id = rcfw->qp_create_stats_id++; + dest_stats_id = dest_stats_id % RCFW_MAX_STAT_INDEX; + dest_stats_ptr = &rcfw->qp_create_stats[dest_stats_id]; + break; + case CMDQ_BASE_OPCODE_DESTROY_QP: + dest_stats_id = rcfw->qp_destroy_stats_id++; + dest_stats_id = dest_stats_id % RCFW_MAX_STAT_INDEX; + dest_stats_ptr = &rcfw->qp_destroy_stats[dest_stats_id]; + break; + case CMDQ_BASE_OPCODE_REGISTER_MR: + dest_stats_id = rcfw->mr_create_stats_id++; + dest_stats_id = dest_stats_id % RCFW_MAX_STAT_INDEX; + dest_stats_ptr = &rcfw->mr_create_stats[dest_stats_id]; + break; + case CMDQ_BASE_OPCODE_DEREGISTER_MR: + case CMDQ_BASE_OPCODE_DEALLOCATE_KEY: + dest_stats_id = rcfw->mr_destroy_stats_id++; + dest_stats_id = dest_stats_id % RCFW_MAX_STAT_INDEX; + dest_stats_ptr = &rcfw->mr_destroy_stats[dest_stats_id]; + break; + case CMDQ_BASE_OPCODE_MODIFY_QP: + if (crsqe->requested_qp_state != IB_QPS_ERR) + break; + dest_stats_id = rcfw->qp_modify_stats_id++; + dest_stats_id = dest_stats_id % RCFW_MAX_STAT_INDEX; + dest_stats_ptr = &rcfw->qp_modify_stats[dest_stats_id]; + break; + default: + break; + } + if (dest_stats_ptr) + *dest_stats_ptr = max_t(unsigned long, + (rcfw->cmdq.last_seen - crsqe->send_timestamp), 1); + +} + +/* Completions */ +static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw, + struct creq_qp_event *event, + u32 *num_wait) +{ + struct bnxt_qplib_hwq *cmdq_hwq = &rcfw->cmdq.hwq; + struct creq_cq_error_notification *cqerr; + struct creq_qp_error_notification *qperr; + struct bnxt_qplib_crsqe *crsqe; + struct bnxt_qplib_reftbl *tbl; + struct bnxt_qplib_qp *qp; + struct bnxt_qplib_cq *cq; + u16 cookie, blocked = 0; + struct pci_dev *pdev; + bool is_waiter_alive; + unsigned long flags; + u32 wait_cmds = 0; + u32 xid, qp_idx; + u32 req_size; + int rc = 0; + + pdev = rcfw->pdev; + switch (event->event) { + case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION: + tbl = &rcfw->res->reftbl.qpref; + qperr = (struct creq_qp_error_notification *)event; + xid = le32_to_cpu(qperr->xid); + qp_idx = map_qp_id_to_tbl_indx(xid, tbl); + spin_lock(&tbl->lock); + qp = tbl->rec[qp_idx].handle; + if (!qp) { + spin_unlock(&tbl->lock); + break; + } + bnxt_qplib_mark_qp_error(qp); + rc = rcfw->creq.aeq_handler(rcfw, event, qp); + spin_unlock(&tbl->lock); + /* + * Keeping these prints as debug to avoid flooding of log + * messages during modify QP to error state by applications + */ + dev_dbg(&pdev->dev, "QPLIB: QP Error encountered!"); + dev_dbg(&pdev->dev, + "QPLIB: qpid 0x%x, req_err=0x%x, resp_err=0x%x\n", + xid, qperr->req_err_state_reason, + qperr->res_err_state_reason); + break; + case 
CREQ_QP_EVENT_EVENT_CQ_ERROR_NOTIFICATION: + tbl = &rcfw->res->reftbl.cqref; + cqerr = (struct creq_cq_error_notification *)event; + xid = le32_to_cpu(cqerr->xid); + spin_lock(&tbl->lock); + cq = tbl->rec[GET_TBL_INDEX(xid, tbl)].handle; + if (!cq) { + spin_unlock(&tbl->lock); + break; + } + rc = rcfw->creq.aeq_handler(rcfw, event, cq); + spin_unlock(&tbl->lock); + dev_dbg(&pdev->dev, "QPLIB: CQ error encountered!"); + break; + default: + /* + * Command Response + * cmdq hwq lock needs to be acquired to synchronize + * the command send and completion reaping. This function + * is always called with creq hwq lock held. So there is no + * chance of deadlock here as the locking is in correct sequence. + * Using the nested variant of spin_lock to annotate + */ + spin_lock_irqsave_nested(&cmdq_hwq->lock, flags, + SINGLE_DEPTH_NESTING); + cookie = le16_to_cpu(event->cookie); + blocked = cookie & RCFW_CMD_IS_BLOCKING; + cookie &= RCFW_MAX_COOKIE_VALUE; + + crsqe = &rcfw->crsqe_tbl[cookie]; + + bnxt_re_add_perf_stats(rcfw, crsqe); + + if (WARN_ONCE(test_bit(FIRMWARE_STALL_DETECTED, + &rcfw->cmdq.flags), + "QPLIB: Unreponsive rcfw channel detected.!!")) { + dev_info(&pdev->dev, "rcfw timedout: cookie = %#x," + " latency_msec = %d free_slots = %d", cookie, + jiffies_to_msecs(rcfw->cmdq.last_seen - + crsqe->send_timestamp), + crsqe->free_slots); + spin_unlock_irqrestore(&cmdq_hwq->lock, flags); + return rc; + } + + if (crsqe->is_internal_cmd && !event->status) + atomic_dec(&rcfw->timeout_send); + + if (crsqe->is_waiter_alive) { + if (crsqe->resp) { + memcpy(crsqe->resp, event, sizeof(*event)); + /* + * Insert write memory barrier so that memcopy + * on crsqe->resp buffer is flushed + */ + smp_wmb(); + } + if (!blocked) + wait_cmds++; + } + + req_size = crsqe->req_size; + is_waiter_alive = crsqe->is_waiter_alive; + + crsqe->req_size = 0; + if (!crsqe->is_waiter_alive) + crsqe->resp = NULL; + crsqe->is_in_used = false; + /* Consumer is updated so that __send_message_no_waiter + * can never see queue full. + * It is safe since we are still holding cmdq_hwq->lock. + */ + cmdq_hwq->cons += req_size; + + /* This is a case to handle below scenario - + * Create AH is completed successfully by firmware, + * but completion took more time and driver already lost + * the context of create_ah from caller. + * We have already return failure for create_ah verbs, + * so let's destroy the same address vector since it is + * no more used in stack. We don't care about completion + * in __send_message_no_waiter. + * If destroy_ah is failued by firmware, there will be AH + * resource leak and relatively not critical + unlikely + * scenario. Current design is not to handle such case. 
+ */ + if (!is_waiter_alive && !event->status && + event->event == CREQ_QP_EVENT_EVENT_CREATE_AH) + __destroy_timedout_ah(rcfw, + (struct creq_create_ah_resp *) + event); + + spin_unlock_irqrestore(&cmdq_hwq->lock, flags); + } + *num_wait += wait_cmds; + return rc; +} + +/* SP - CREQ Completion handlers */ +static void bnxt_qplib_service_creq( +#ifdef HAS_TASKLET_SETUP + struct tasklet_struct *t +#else + unsigned long data +#endif + ) +{ +#ifdef HAS_TASKLET_SETUP + struct bnxt_qplib_rcfw *rcfw = from_tasklet(rcfw, t, creq.creq_tasklet); +#else + + struct bnxt_qplib_rcfw *rcfw = (struct bnxt_qplib_rcfw *)data; +#endif + struct bnxt_qplib_creq_ctx *creq = &rcfw->creq; + struct bnxt_qplib_res *res; + u32 type, budget = CREQ_ENTRY_POLL_BUDGET; + struct bnxt_qplib_hwq *creq_hwq = &creq->hwq; + struct creq_base *creqe; + struct pci_dev *pdev; + unsigned long flags; + u32 num_wakeup = 0; + int rc; + + pdev = rcfw->pdev; + res = rcfw->res; + /* Service the CREQ until empty */ + spin_lock_irqsave(&creq_hwq->lock, flags); + while (budget > 0) { + if (RCFW_NO_FW_ACCESS(rcfw)) { + spin_unlock_irqrestore(&creq_hwq->lock, flags); + return; + } + creqe = bnxt_qplib_get_qe(creq_hwq, creq_hwq->cons, NULL); + if (!CREQ_CMP_VALID(creqe, creq->creq_db.dbinfo.flags)) + break; + /* The valid test of the entry must be done first before + * reading any further. + */ + dma_rmb(); + type = creqe->type & CREQ_BASE_TYPE_MASK; + rcfw->cmdq.last_seen = jiffies; + + switch (type) { + case CREQ_BASE_TYPE_QP_EVENT: + bnxt_qplib_process_qp_event + (rcfw,(struct creq_qp_event *)creqe, + &num_wakeup); + creq->stats.creq_qp_event_processed++; + break; + case CREQ_BASE_TYPE_FUNC_EVENT: + rc = rcfw->creq.aeq_handler(rcfw, creqe, NULL); + if (rc) + dev_warn(&pdev->dev, + "QPLIB: async event type = 0x%x not handled", + type); + creq->stats.creq_func_event_processed++; + break; + default: + if (type != ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT) { + dev_warn(&pdev->dev, + "QPLIB: op_event = 0x%x not handled", + type); + } + break; + } + budget--; + bnxt_qplib_hwq_incr_cons(creq_hwq->max_elements, &creq_hwq->cons, + 1, &creq->creq_db.dbinfo.flags); + } + if (budget == CREQ_ENTRY_POLL_BUDGET && + !CREQ_CMP_VALID(creqe, creq->creq_db.dbinfo.flags)) { + /* No completions received during this poll. Enable interrupt now */ + bnxt_qplib_ring_nq_db(&creq->creq_db.dbinfo, res->cctx, true); + creq->stats.creq_arm_count++; + dev_dbg(&pdev->dev, "QPLIB: Num of Func (0x%llx) ", + creq->stats.creq_func_event_processed); + dev_dbg(&pdev->dev, "QPLIB: QP (0x%llx) events processed", + creq->stats.creq_qp_event_processed); + dev_dbg(&pdev->dev, "QPLIB: Armed:%#llx resched:%#llx ", + creq->stats.creq_arm_count, + creq->stats.creq_tasklet_schedule_count); + } else if (creq->requested) { + /* + * To reduce the number of interrupts from HW, + * reschedule the tasklet instead of + * enabling interrupts. Ring doorbell to update + * consumer index. 
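+ * The doorbell write below passes false as the last argument, so the
+ * CREQ consumer index is updated without re-arming the interrupt;
+ * the re-arming write (true) is only issued in the branch above,
+ * when a poll drains no new entries.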
+ */ + bnxt_qplib_ring_nq_db(&creq->creq_db.dbinfo, res->cctx, false); + tasklet_schedule(&creq->creq_tasklet); + creq->stats.creq_tasklet_schedule_count++; + } + spin_unlock_irqrestore(&creq_hwq->lock, flags); + if (num_wakeup) + wake_up_nr(&rcfw->cmdq.waitq, num_wakeup); +} + +static irqreturn_t bnxt_qplib_creq_irq(int irq, void *dev_instance) +{ + struct bnxt_qplib_rcfw *rcfw = dev_instance; + + bnxt_qplib_service_creq( +#ifdef HAS_TASKLET_SETUP + &rcfw->creq.creq_tasklet +#else + (unsigned long)rcfw +#endif + ); + return IRQ_HANDLED; +} + +/* RCFW */ +int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw) +{ + struct creq_deinitialize_fw_resp resp = {}; + struct cmdq_deinitialize_fw req = {}; + struct bnxt_qplib_cmdqmsg msg = {}; + int rc; + + bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_DEINITIALIZE_FW, + sizeof(req)); + bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, + sizeof(req), sizeof(resp), 0); + rc = bnxt_qplib_rcfw_send_message(rcfw, &msg); + if (rc) + return rc; + clear_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->cmdq.flags); + return 0; +} + +int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw, int is_virtfn) +{ + u32 drv_ver_maj, drv_ver_min, drv_ver_upd, drv_ver_patch; + struct creq_initialize_fw_resp resp = {}; + struct cmdq_initialize_fw req = {}; + struct bnxt_qplib_cmdqmsg msg = {}; + struct bnxt_qplib_chip_ctx *cctx; + struct bnxt_qplib_ctx *hctx; + struct bnxt_qplib_res *res; + struct bnxt_qplib_hwq *hwq; + u8 cmd_size; + int rc; + + res = rcfw->res; + cctx = res->cctx; + hctx = res->hctx; + + cmd_size = sizeof(req); + if (!_is_drv_ver_reg_supported(res->dattr->dev_cap_ext_flags)) + cmd_size -= BNXT_RE_INIT_FW_DRV_VER_SUPPORT_CMD_SIZE; + + bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_INITIALIZE_FW, + cmd_size); + /* Supply (log-base-2-of-host-page-size - base-page-shift) + * to bono to adjust the doorbell page sizes. + */ + req.log2_dbr_pg_size = cpu_to_le16(PAGE_SHIFT - + RCFW_DBR_BASE_PAGE_SHIFT); + /* + * VFs need not setup the HW context area, PF + * shall setup this area for VF. 
Skipping the + * HW programming + */ + if (is_virtfn || _is_chip_gen_p5_p7(cctx)) + goto skip_ctx_setup; + + hwq = &hctx->qp_ctx.hwq; + req.qpc_page_dir = cpu_to_le64(_get_base_addr(hwq)); + req.number_of_qp = cpu_to_le32(hwq->max_elements); + req.qpc_pg_size_qpc_lvl = (_get_pte_pg_size(hwq) << + CMDQ_INITIALIZE_FW_QPC_PG_SIZE_SFT) | + (u8)hwq->level; + + hwq = &hctx->mrw_ctx.hwq; + req.mrw_page_dir = cpu_to_le64(_get_base_addr(hwq)); + req.number_of_mrw = cpu_to_le32(hwq->max_elements); + req.mrw_pg_size_mrw_lvl = (_get_pte_pg_size(hwq) << + CMDQ_INITIALIZE_FW_MRW_PG_SIZE_SFT) | + (u8)hwq->level; + + hwq = &hctx->srq_ctx.hwq; + req.srq_page_dir = cpu_to_le64(_get_base_addr(hwq)); + req.number_of_srq = cpu_to_le32(hwq->max_elements); + req.srq_pg_size_srq_lvl = (_get_pte_pg_size(hwq) << + CMDQ_INITIALIZE_FW_SRQ_PG_SIZE_SFT) | + (u8)hwq->level; + + hwq = &hctx->cq_ctx.hwq; + req.cq_page_dir = cpu_to_le64(_get_base_addr(hwq)); + req.number_of_cq = cpu_to_le32(hwq->max_elements); + req.cq_pg_size_cq_lvl = (_get_pte_pg_size(hwq) << + CMDQ_INITIALIZE_FW_CQ_PG_SIZE_SFT) | + (u8)hwq->level; + + hwq = &hctx->tim_ctx.hwq; + req.tim_page_dir = cpu_to_le64(_get_base_addr(hwq)); + req.tim_pg_size_tim_lvl = (_get_pte_pg_size(hwq) << + CMDQ_INITIALIZE_FW_TIM_PG_SIZE_SFT) | + (u8)hwq->level; + hwq = &hctx->tqm_ctx.pde; + req.tqm_page_dir = cpu_to_le64(_get_base_addr(hwq)); + req.tqm_pg_size_tqm_lvl = (_get_pte_pg_size(hwq) << + CMDQ_INITIALIZE_FW_TQM_PG_SIZE_SFT) | + (u8)hwq->level; +skip_ctx_setup: + if (BNXT_RE_HW_RETX(res->dattr->dev_cap_flags)) + req.flags |= CMDQ_INITIALIZE_FW_FLAGS_HW_REQUESTER_RETX_SUPPORTED; + + if (_is_drv_ver_reg_supported(res->dattr->dev_cap_ext_flags)) { + req.flags |= CMDQ_INITIALIZE_FW_FLAGS_DRV_VERSION; + if (sscanf(ROCE_DRV_MODULE_VERSION, "%u.%u.%u.%u\n", + &drv_ver_maj, &drv_ver_min, &drv_ver_upd, + &drv_ver_patch) == 4) { + req.drv_hsi_ver_maj = HWRM_VERSION_MAJOR; + req.drv_hsi_ver_min = HWRM_VERSION_MINOR; + req.drv_hsi_ver_upd = HWRM_VERSION_UPDATE; + req.drv_build_ver_maj = drv_ver_maj; + req.drv_build_ver_min = drv_ver_min; + req.drv_build_ver_upd = drv_ver_upd; + req.drv_build_ver_patch = drv_ver_patch; + } + } + + if (res->en_dev->flags & BNXT_EN_FLAG_ROCE_VF_RES_MGMT) + req.flags |= CMDQ_INITIALIZE_FW_FLAGS_L2_VF_RESOURCE_MGMT; + req.stat_ctx_id = cpu_to_le32(hctx->stats.fw_id); + bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, + cmd_size, sizeof(resp), 0); + rc = bnxt_qplib_rcfw_send_message(rcfw, &msg); + if (rc) + return rc; + set_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->cmdq.flags); + + return 0; +} + +void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_res *res) +{ + struct bnxt_qplib_rcfw *rcfw = res->rcfw; + + vfree(rcfw->rcfw_lat_slab_msec); + rcfw->rcfw_lat_slab_msec = NULL; + vfree(rcfw->qp_create_stats); + rcfw->qp_create_stats = NULL; + vfree(rcfw->qp_destroy_stats); + rcfw->qp_destroy_stats = NULL; + vfree(rcfw->mr_create_stats); + rcfw->mr_create_stats = NULL; + vfree(rcfw->mr_destroy_stats); + rcfw->mr_destroy_stats = NULL; + vfree(rcfw->qp_modify_stats); + rcfw->qp_modify_stats = NULL; + rcfw->sp_perf_stats_enabled = false; + + kfree(rcfw->crsqe_tbl); + rcfw->crsqe_tbl = NULL; + + bnxt_qplib_free_hwq(res, &rcfw->cmdq.hwq); + bnxt_qplib_free_hwq(res, &rcfw->creq.hwq); + rcfw->pdev = NULL; +} + +int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res) +{ + struct bnxt_qplib_hwq_attr hwq_attr = {}; + struct bnxt_qplib_rcfw *rcfw = res->rcfw; + struct bnxt_qplib_sg_info sginfo = {}; + struct bnxt_qplib_cmdq_ctx *cmdq; + struct bnxt_qplib_creq_ctx 
*creq; + + rcfw->pdev = res->pdev; + rcfw->res = res; + cmdq = &rcfw->cmdq; + creq = &rcfw->creq; + + sginfo.pgsize = PAGE_SIZE; + sginfo.pgshft = PAGE_SHIFT; + + hwq_attr.sginfo = &sginfo; + hwq_attr.res = rcfw->res; + hwq_attr.depth = BNXT_QPLIB_CREQE_MAX_CNT; + hwq_attr.stride = BNXT_QPLIB_CREQE_UNITS; + hwq_attr.type = _get_hwq_type(res); + + if (bnxt_qplib_alloc_init_hwq(&creq->hwq, &hwq_attr)) { + dev_err(&rcfw->pdev->dev, + "QPLIB: HW channel CREQ allocation failed"); + return -ENOMEM; + } + + sginfo.pgsize = BNXT_QPLIB_CMDQE_PAGE_SIZE; + hwq_attr.depth = BNXT_QPLIB_CMDQE_MAX_CNT & 0x7FFFFFFF; + hwq_attr.stride = BNXT_QPLIB_CMDQE_UNITS; + hwq_attr.type = HWQ_TYPE_CTX; + if (bnxt_qplib_alloc_init_hwq(&cmdq->hwq, &hwq_attr)) { + dev_err(&rcfw->pdev->dev, + "QPLIB: HW channel CMDQ allocation failed"); + goto fail_free_creq_hwq; + } + + rcfw->crsqe_tbl = kcalloc(cmdq->hwq.max_elements, + sizeof(*rcfw->crsqe_tbl), GFP_KERNEL); + if (!rcfw->crsqe_tbl) { + dev_err(&rcfw->pdev->dev, + "QPLIB: HW channel CRSQ allocation failed"); + goto fail_free_cmdq_hwq; + } + + rcfw->max_timeout = res->cctx->hwrm_cmd_max_timeout; + + rcfw->sp_perf_stats_enabled = false; + rcfw->rcfw_lat_slab_msec = vzalloc(sizeof(u32) * + RCFW_MAX_LATENCY_MSEC_SLAB_INDEX); + rcfw->qp_create_stats = vzalloc(sizeof(u64) * RCFW_MAX_STAT_INDEX); + rcfw->qp_destroy_stats = vzalloc(sizeof(u64) * RCFW_MAX_STAT_INDEX); + rcfw->mr_create_stats = vzalloc(sizeof(u64) * RCFW_MAX_STAT_INDEX); + rcfw->mr_destroy_stats = vzalloc(sizeof(u64) * RCFW_MAX_STAT_INDEX); + rcfw->qp_modify_stats = vzalloc(sizeof(u64) * RCFW_MAX_STAT_INDEX); + + if (rcfw->rcfw_lat_slab_msec && + rcfw->qp_create_stats && + rcfw->qp_destroy_stats && + rcfw->mr_create_stats && + rcfw->mr_destroy_stats && + rcfw->qp_modify_stats) + rcfw->sp_perf_stats_enabled = true; + + return 0; +fail_free_cmdq_hwq: + bnxt_qplib_free_hwq(res, &rcfw->cmdq.hwq); +fail_free_creq_hwq: + bnxt_qplib_free_hwq(res, &rcfw->creq.hwq); + return -ENOMEM; +} + +void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw, bool kill) +{ + struct bnxt_qplib_creq_ctx *creq; + struct bnxt_qplib_res *res; + + creq = &rcfw->creq; + res = rcfw->res; + + if (!creq->requested) + return; + + creq->requested = false; + /* Mask h/w interrupts */ + bnxt_qplib_ring_nq_db(&creq->creq_db.dbinfo, res->cctx, false); + /* Sync with last running IRQ-handler */ + synchronize_irq(creq->msix_vec); + free_irq(creq->msix_vec, rcfw); + kfree(creq->irq_name); + creq->irq_name = NULL; + /* rcfw_intr_enabled should not be greater than 1. Debug + * print to check if that is the case + */ + if (atomic_read(&rcfw->rcfw_intr_enabled) > 1) { + dev_err(&rcfw->pdev->dev, + "%s: rcfw->rcfw_intr_enabled = 0x%x", __func__, + atomic_read(&rcfw->rcfw_intr_enabled)); + } + atomic_set(&rcfw->rcfw_intr_enabled, 0); + rcfw->num_irq_stopped++; + /* Cleanup Tasklet */ + if (kill) + tasklet_kill(&creq->creq_tasklet); + tasklet_disable(&creq->creq_tasklet); +} + +void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw) +{ + struct bnxt_qplib_creq_ctx *creq; + struct bnxt_qplib_cmdq_ctx *cmdq; + + creq = &rcfw->creq; + cmdq = &rcfw->cmdq; + /* Make sure the HW channel is stopped! 
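+ * bnxt_qplib_rcfw_stop_irq(rcfw, true) masks CREQ notifications,
+ * frees the MSI-X vector and kills the tasklet before the command
+ * mailbox mapping is unwound below.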
*/ + bnxt_qplib_rcfw_stop_irq(rcfw, true); + + creq->creq_db.reg.bar_reg = NULL; + creq->creq_db.db = NULL; + + if (cmdq->cmdq_mbox.reg.bar_reg) { + iounmap(cmdq->cmdq_mbox.reg.bar_reg); + cmdq->cmdq_mbox.reg.bar_reg = NULL; + cmdq->cmdq_mbox.prod = NULL; + cmdq->cmdq_mbox.db = NULL; + } + + creq->aeq_handler = NULL; + creq->msix_vec = 0; +} + +int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector, + bool need_init) +{ + struct bnxt_qplib_creq_ctx *creq; + struct bnxt_qplib_res *res; + int rc; + + creq = &rcfw->creq; + res = rcfw->res; + + if (creq->requested) + return -EFAULT; + + creq->msix_vec = msix_vector; + if (need_init) + compat_tasklet_init(&creq->creq_tasklet, + bnxt_qplib_service_creq, + (unsigned long)rcfw); + else + tasklet_enable(&creq->creq_tasklet); + + creq->irq_name = kasprintf(GFP_KERNEL, "bnxt_re-creq@pci:%s", + pci_name(res->pdev)); + if (!creq->irq_name) + return -ENOMEM; + rc = request_irq(creq->msix_vec, bnxt_qplib_creq_irq, 0, + creq->irq_name, rcfw); + if (rc) { + kfree(creq->irq_name); + creq->irq_name = NULL; + tasklet_disable(&creq->creq_tasklet); + return rc; + } + creq->requested = true; + + bnxt_qplib_ring_nq_db(&creq->creq_db.dbinfo, res->cctx, true); + + rcfw->num_irq_started++; + /* Debug print to check rcfw interrupt enable/disable is invoked + * out of sequence + */ + if (atomic_read(&rcfw->rcfw_intr_enabled) > 0) { + dev_err(&rcfw->pdev->dev, + "%s: rcfw->rcfw_intr_enabled = 0x%x", __func__, + atomic_read(&rcfw->rcfw_intr_enabled)); + } + atomic_inc(&rcfw->rcfw_intr_enabled); + return 0; +} + +static int bnxt_qplib_map_cmdq_mbox(struct bnxt_qplib_rcfw *rcfw) +{ + struct bnxt_qplib_cmdq_mbox *mbox; + resource_size_t bar_reg; + struct pci_dev *pdev; + + pdev = rcfw->pdev; + mbox = &rcfw->cmdq.cmdq_mbox; + + mbox->reg.bar_id = RCFW_COMM_PCI_BAR_REGION; + mbox->reg.len = RCFW_COMM_SIZE; + mbox->reg.bar_base = pci_resource_start(pdev, mbox->reg.bar_id); + if (!mbox->reg.bar_base) { + dev_err(&pdev->dev, + "QPLIB: CMDQ BAR region %d resc start is 0!\n", + mbox->reg.bar_id); + return -ENOMEM; + } + + bar_reg = mbox->reg.bar_base + RCFW_COMM_BASE_OFFSET; + mbox->reg.len = RCFW_COMM_SIZE; + mbox->reg.bar_reg = ioremap(bar_reg, mbox->reg.len); + if (!mbox->reg.bar_reg) { + dev_err(&pdev->dev, + "QPLIB: CMDQ BAR region %d mapping failed\n", + mbox->reg.bar_id); + return -ENOMEM; + } + + mbox->prod = (void __iomem *)((char *)mbox->reg.bar_reg + + RCFW_PF_VF_COMM_PROD_OFFSET); + mbox->db = (void __iomem *)((char *)mbox->reg.bar_reg + + RCFW_COMM_TRIG_OFFSET); + return 0; +} + +static int bnxt_qplib_map_creq_db(struct bnxt_qplib_rcfw *rcfw, u32 reg_offt) +{ + struct bnxt_qplib_creq_db *creq_db; + struct bnxt_qplib_reg_desc *dbreg; + struct bnxt_qplib_res *res; + + res = rcfw->res; + creq_db = &rcfw->creq.creq_db; + dbreg = &res->dpi_tbl.ucreg; + + creq_db->reg.bar_id = dbreg->bar_id; + creq_db->reg.bar_base = dbreg->bar_base; + creq_db->reg.bar_reg = dbreg->bar_reg + reg_offt; + creq_db->reg.len = _is_chip_gen_p5_p7(res->cctx) ? 
sizeof(u64) : + sizeof(u32); + + creq_db->dbinfo.db = creq_db->reg.bar_reg; + creq_db->dbinfo.hwq = &rcfw->creq.hwq; + creq_db->dbinfo.xid = rcfw->creq.ring_id; + creq_db->dbinfo.seed = rcfw->creq.ring_id; + creq_db->dbinfo.flags = 0; + spin_lock_init(&creq_db->dbinfo.lock); + creq_db->dbinfo.shadow_key = BNXT_QPLIB_DBR_KEY_INVALID; + creq_db->dbinfo.res = rcfw->res; + + return 0; +} + +static void bnxt_qplib_start_rcfw(struct bnxt_qplib_rcfw *rcfw) +{ + struct bnxt_qplib_cmdq_ctx *cmdq; + struct bnxt_qplib_creq_ctx *creq; + struct bnxt_qplib_cmdq_mbox *mbox; + struct cmdq_init init = {0}; + + cmdq = &rcfw->cmdq; + creq = &rcfw->creq; + mbox = &cmdq->cmdq_mbox; + + init.cmdq_pbl = cpu_to_le64(cmdq->hwq.pbl[PBL_LVL_0].pg_map_arr[0]); + init.cmdq_size_cmdq_lvl = cpu_to_le16( + ((BNXT_QPLIB_CMDQE_MAX_CNT << CMDQ_INIT_CMDQ_SIZE_SFT) & + CMDQ_INIT_CMDQ_SIZE_MASK) | + ((cmdq->hwq.level << CMDQ_INIT_CMDQ_LVL_SFT) & + CMDQ_INIT_CMDQ_LVL_MASK)); + init.creq_ring_id = cpu_to_le16(creq->ring_id); + /* Write to the Bono mailbox register */ + __iowrite32_copy(mbox->reg.bar_reg, &init, sizeof(init) / 4); +} + +int bnxt_qplib_enable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw, + int msix_vector, + int cp_bar_reg_off, + aeq_handler_t aeq_handler) +{ + struct bnxt_qplib_cmdq_ctx *cmdq; + struct bnxt_qplib_creq_ctx *creq; + int rc; + + cmdq = &rcfw->cmdq; + creq = &rcfw->creq; + + /* Clear to defaults */ + cmdq->seq_num = 0; + set_bit(FIRMWARE_FIRST_FLAG, &cmdq->flags); + init_waitqueue_head(&cmdq->waitq); + + creq->stats.creq_qp_event_processed = 0; + creq->stats.creq_func_event_processed = 0; + creq->aeq_handler = aeq_handler; + + rc = bnxt_qplib_map_cmdq_mbox(rcfw); + if (rc) + return rc; + + rc = bnxt_qplib_map_creq_db(rcfw, cp_bar_reg_off); + if (rc) + return rc; + + rc = bnxt_qplib_rcfw_start_irq(rcfw, msix_vector, true); + if (rc) { + dev_err(&rcfw->pdev->dev, + "QPLIB: Failed to request IRQ for CREQ rc = 0x%x", rc); + bnxt_qplib_disable_rcfw_channel(rcfw); + return rc; + } + + /* TBD - shadow qd 32 or 64 is good for 2.23 firmware. + * Can be improved based on firmware requirement. + */ + + rcfw->curr_shadow_qd = min_not_zero(cmdq_shadow_qd, + (unsigned int)RCFW_CMD_NON_BLOCKING_SHADOW_QD); + sema_init(&rcfw->rcfw_inflight, rcfw->curr_shadow_qd); + dev_dbg(&rcfw->pdev->dev, + "Perf Debug: shadow qd %d", rcfw->curr_shadow_qd); + bnxt_qplib_start_rcfw(rcfw); + + return 0; +} diff --git a/bnxt_re-1.10.3-229.0.139.0/qplib_rcfw.h b/bnxt_re-1.10.3-229.0.139.0/qplib_rcfw.h new file mode 100644 index 0000000..8339258 --- /dev/null +++ b/bnxt_re-1.10.3-229.0.139.0/qplib_rcfw.h @@ -0,0 +1,300 @@ +/* + * Copyright (c) 2015-2023, Broadcom. All rights reserved. The term + * Broadcom refers to Broadcom Inc. and/or its subsidiaries. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Author: Eddie Wai + * + * Description: RDMA Controller HW interface (header) + */ + +#ifndef __BNXT_QPLIB_RCFW_H__ +#define __BNXT_QPLIB_RCFW_H__ + +#include +#include "qplib_tlv.h" + +#define RCFW_CMDQ_TRIG_VAL 1 +#define RCFW_COMM_PCI_BAR_REGION 0 +#define RCFW_COMM_CONS_PCI_BAR_REGION 2 +#define RCFW_COMM_BASE_OFFSET 0x600 +#define RCFW_PF_VF_COMM_PROD_OFFSET 0xc +#define RCFW_COMM_TRIG_OFFSET 0x100 +#define RCFW_COMM_SIZE 0x104 + +#define RCFW_DBR_PCI_BAR_REGION 2 +#define RCFW_DBR_BASE_PAGE_SHIFT 12 +#define RCFW_MAX_LATENCY_SEC_SLAB_INDEX 128 +#define RCFW_MAX_LATENCY_MSEC_SLAB_INDEX 3000 +#define RCFW_MAX_STAT_INDEX 0xFFFF +#define RCFW_FW_STALL_MAX_TIMEOUT 40 + +extern unsigned int cmdq_shadow_qd; +/* Cmdq contains a fix number of a 16-Byte slots */ +struct bnxt_qplib_cmdqe { + u8 data[16]; +}; +#define BNXT_QPLIB_CMDQE_UNITS sizeof(struct bnxt_qplib_cmdqe) + +static inline void bnxt_qplib_rcfw_cmd_prep(void *r, u8 opcode, u8 cmd_size) +{ + struct cmdq_base *req = r; + + req->opcode = opcode; + req->cmd_size = cmd_size; +} + +/* Shadow queue depth for non blocking command */ +#define RCFW_CMD_NON_BLOCKING_SHADOW_QD 64 +#define RCFW_CMD_DEV_ERR_CHECK_TIME_MS 1000 /* 1 Second time out*/ +#define RCFW_ERR_RETRY_COUNT (RCFW_CMD_WAIT_TIME_MS / RCFW_CMD_DEV_ERR_CHECK_TIME_MS) + +/* CMDQ elements */ +#define BNXT_QPLIB_CMDQE_MAX_CNT 8192 +#define BNXT_QPLIB_CMDQE_BYTES (BNXT_QPLIB_CMDQE_MAX_CNT * \ + BNXT_QPLIB_CMDQE_UNITS) +#define BNXT_QPLIB_CMDQE_NPAGES ((BNXT_QPLIB_CMDQE_BYTES % \ + PAGE_SIZE) ? \ + ((BNXT_QPLIB_CMDQE_BYTES / \ + PAGE_SIZE) + 1) : \ + (BNXT_QPLIB_CMDQE_BYTES / \ + PAGE_SIZE)) +#define BNXT_QPLIB_CMDQE_PAGE_SIZE (BNXT_QPLIB_CMDQE_NPAGES * \ + PAGE_SIZE) + +#define RCFW_MAX_OUTSTANDING_CMD BNXT_QPLIB_CMDQE_MAX_CNT +#define RCFW_MAX_COOKIE_VALUE (BNXT_QPLIB_CMDQE_MAX_CNT - 1) +#define RCFW_CMD_IS_BLOCKING 0x8000 +#define RCFW_NO_FW_ACCESS(rcfw) \ + (test_bit(ERR_DEVICE_DETACHED, &(rcfw)->cmdq.flags) || \ + pci_channel_offline((rcfw)->pdev)) + +/* Get the number of command units required for the req. 
The + * function returns correct value only if called before + * setting using bnxt_qplib_set_cmd_slots + */ +static inline u32 bnxt_qplib_get_cmd_slots(struct cmdq_base *req) +{ + u32 cmd_units = 0; + + if (HAS_TLV_HEADER(req)) { + struct roce_tlv *tlv_req = (struct roce_tlv *)req; + cmd_units = tlv_req->total_size; + } else { + cmd_units = (req->cmd_size + BNXT_QPLIB_CMDQE_UNITS - 1) / + BNXT_QPLIB_CMDQE_UNITS; + } + return cmd_units; +} + +/* Set the cmd_size to a factor of CMDQE unit */ +static inline u32 bnxt_qplib_set_cmd_slots(struct cmdq_base *req) +{ + u32 cmd_byte = 0; + + if (HAS_TLV_HEADER(req)) { + struct roce_tlv *tlv_req = (struct roce_tlv *)req; + cmd_byte = tlv_req->total_size * BNXT_QPLIB_CMDQE_UNITS; + } else { + cmd_byte = req->cmd_size; + req->cmd_size = (req->cmd_size + BNXT_QPLIB_CMDQE_UNITS - 1) / + BNXT_QPLIB_CMDQE_UNITS; + } + + return cmd_byte; +} + +/* CREQ */ +/* Allocate 1 per QP for async error notification for now */ +#define BNXT_QPLIB_CREQE_MAX_CNT (64 * 1024) +#define BNXT_QPLIB_CREQE_UNITS 16 /* 16-Bytes per prod unit */ + +#define CREQ_CMP_VALID(hdr, pass) \ + (!!((hdr)->v & CREQ_BASE_V) == \ + !(pass & BNXT_QPLIB_FLAG_EPOCH_CONS_MASK)) + +#define CREQ_ENTRY_POLL_BUDGET 8 + +typedef int (*aeq_handler_t)(struct bnxt_qplib_rcfw *, void *, void *); + +struct bnxt_qplib_crsqe { + struct creq_qp_event *resp; + u32 req_size; + bool is_waiter_alive; + bool is_internal_cmd; + bool is_in_used; + + /* Free slots at the time of submission */ + u32 free_slots; + unsigned long send_timestamp; + u8 opcode; + u8 requested_qp_state; +}; + +struct bnxt_qplib_rcfw_sbuf { + void *sb; + dma_addr_t dma_addr; + u32 size; +}; + +#define BNXT_QPLIB_OOS_COUNT_MASK 0xFFFFFFFF + +#define FIRMWARE_INITIALIZED_FLAG (0) +#define FIRMWARE_FIRST_FLAG (31) +#define FIRMWARE_STALL_DETECTED (3) +#define ERR_DEVICE_DETACHED (4) +struct bnxt_qplib_cmdq_mbox { + struct bnxt_qplib_reg_desc reg; + void __iomem *prod; + void __iomem *db; +}; + +struct bnxt_qplib_cmdq_ctx { + struct bnxt_qplib_hwq hwq; + struct bnxt_qplib_cmdq_mbox cmdq_mbox; + wait_queue_head_t waitq; + unsigned long flags; + unsigned long last_seen; + u32 seq_num; +}; + +struct bnxt_qplib_creq_db { + struct bnxt_qplib_reg_desc reg; + void __iomem *db; + struct bnxt_qplib_db_info dbinfo; +}; + +struct bnxt_qplib_creq_stat { + u64 creq_arm_count; + u64 creq_tasklet_schedule_count; + u64 creq_qp_event_processed; + u64 creq_func_event_processed; +}; + +struct bnxt_qplib_creq_ctx { + struct bnxt_qplib_hwq hwq; + struct bnxt_qplib_creq_db creq_db; + struct bnxt_qplib_creq_stat stats; + struct tasklet_struct creq_tasklet; + aeq_handler_t aeq_handler; + char *irq_name; + int msix_vec; + u16 ring_id; + bool requested; /*irq handler installed */ +}; + +/* RCFW Communication Channels */ +#define BNXT_QPLIB_RCFW_SEND_RETRY_COUNT 4000 +struct bnxt_qplib_rcfw { + struct pci_dev *pdev; + struct bnxt_qplib_res *res; + struct bnxt_qplib_cmdq_ctx cmdq; + struct bnxt_qplib_creq_ctx creq; + struct bnxt_qplib_crsqe *crsqe_tbl; + u32 rcfw_lat_slab_sec[RCFW_MAX_LATENCY_SEC_SLAB_INDEX]; + + /* Slow path Perf Stats */ + u32 *rcfw_lat_slab_msec; + u64 *qp_create_stats; + u64 *qp_destroy_stats; + u64 *qp_modify_stats; + u64 *mr_create_stats; + u64 *mr_destroy_stats; + u32 qp_create_stats_id; + u32 qp_destroy_stats_id; + u32 qp_modify_stats_id; + u32 mr_create_stats_id; + u32 mr_destroy_stats_id; + bool sp_perf_stats_enabled; + /* odd place to have following members. 
*/ + bool init_oos_stats; + /* cached from chip cctx for quick reference in slow path */ + u16 max_timeout; + u64 oos_prev; + u32 num_irq_stopped; + u32 num_irq_started; + u32 poll_in_intr_en; + u32 poll_in_intr_dis; + atomic_t rcfw_intr_enabled; + u32 cmdq_full_dbg; + struct semaphore rcfw_inflight; + unsigned int curr_shadow_qd; + atomic_t timeout_send; +}; + +struct bnxt_qplib_cmdqmsg { + struct cmdq_base *req; + struct creq_base *resp; + void *sb; + u32 req_sz; + u32 res_sz; + u8 block; + /* TBD - xid can be used in future for generic tracking */ + u8 qp_state; +}; + +static inline void bnxt_qplib_fill_cmdqmsg(struct bnxt_qplib_cmdqmsg *msg, + void *req, void *resp, void *sb, + u32 req_sz, u32 res_sz, u8 block) +{ + msg->req = req; + msg->resp = resp; + msg->sb = sb; + msg->req_sz = req_sz; + msg->res_sz = res_sz; + msg->block = block; +} + +void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_res *res); +int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res); +void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw, bool kill); +void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw); +int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector, + bool need_init); +int bnxt_qplib_enable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw, + int msix_vector, + int cp_bar_reg_off, + aeq_handler_t aeq_handler); + +struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf( + struct bnxt_qplib_rcfw *rcfw, + u32 size); +void bnxt_qplib_rcfw_free_sbuf(struct bnxt_qplib_rcfw *rcfw, + struct bnxt_qplib_rcfw_sbuf *sbuf); +int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw, + struct bnxt_qplib_cmdqmsg *msg); + +int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw); +int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw, int is_virtfn); +void bnxt_qplib_mark_qp_error(void *qp_handle); +int __check_cmdq_stall(struct bnxt_qplib_rcfw *rcfw, + u32 *cur_prod, u32 *cur_cons); +#endif diff --git a/bnxt_re-1.10.3-229.0.139.0/qplib_res.c b/bnxt_re-1.10.3-229.0.139.0/qplib_res.c new file mode 100644 index 0000000..3aa19f0 --- /dev/null +++ b/bnxt_re-1.10.3-229.0.139.0/qplib_res.c @@ -0,0 +1,1224 @@ +/* + * Copyright (c) 2015-2023, Broadcom. All rights reserved. The term + * Broadcom refers to Broadcom Inc. and/or its subsidiaries. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Author: Eddie Wai + * + * Description: QPLib resource manager + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "roce_hsi.h" +#include "qplib_res.h" +#include "qplib_sp.h" +#include "qplib_rcfw.h" +#include "compat.h" + +inline bool _is_alloc_mr_unified(struct bnxt_qplib_dev_attr *dattr) +{ + return dattr->dev_cap_flags & + CREQ_QUERY_FUNC_RESP_SB_MR_REGISTER_ALLOC; +} + +/* PBL */ +static void __free_pbl(struct bnxt_qplib_res *res, + struct bnxt_qplib_pbl *pbl, bool is_umem) +{ + struct pci_dev *pdev; + int i; + + pdev = res->pdev; + if (is_umem == false) { + for (i = 0; i < pbl->pg_count; i++) { + if (pbl->pg_arr[i]) { + dma_free_coherent(&pdev->dev, pbl->pg_size, + (void *)((u64)pbl->pg_arr[i] & + PAGE_MASK), + pbl->pg_map_arr[i]); + } + else + dev_warn(&pdev->dev, + "QPLIB: PBL free pg_arr[%d] empty?!", + i); + pbl->pg_arr[i] = NULL; + } + } + + if (pbl->pg_arr) { + vfree(pbl->pg_arr); + pbl->pg_arr = NULL; + } + if (pbl->pg_map_arr) { + vfree(pbl->pg_map_arr); + pbl->pg_map_arr = NULL; + } + pbl->pg_count = 0; + pbl->pg_size = 0; +} + +#if !defined(HAVE_RDMA_UMEM_FOR_EACH_DMA_BLOCK) && !defined(HAVE_FOR_EACH_SG_DMA_PAGE) +struct qplib_sg { + dma_addr_t pg_map_arr; + u32 size; +}; + +static int __fill_user_dma_pages(struct bnxt_qplib_pbl *pbl, + struct bnxt_qplib_sg_info *sginfo) +{ + int sg_indx, pg_indx, tmp_size, offset; + struct qplib_sg *tmp_sg = NULL; + struct scatterlist *sg; + u64 pmask, addr; + + tmp_sg = vzalloc(sginfo->nmap * sizeof(struct qplib_sg)); + if (!tmp_sg) + return -ENOMEM; + + pmask = BIT_ULL(sginfo->pgshft) - 1; + sg_indx = 0; + for_each_sg(sginfo->sghead, sg, sginfo->nmap, sg_indx) { + tmp_sg[sg_indx].pg_map_arr = sg_dma_address(sg); + tmp_sg[sg_indx].size = sg_dma_len(sg); + } + pg_indx = 0; + for (sg_indx = 0; sg_indx < sginfo->nmap; sg_indx++) { + tmp_size = tmp_sg[sg_indx].size; + offset = 0; + while (tmp_size > 0) { + addr = tmp_sg[sg_indx].pg_map_arr + offset; + if ((!sg_indx && !pg_indx) || !(addr & pmask)) { + pbl->pg_map_arr[pg_indx] = addr &(~pmask); + pbl->pg_count++; + pg_indx++; + } + offset += sginfo->pgsize; + tmp_size -= sginfo->pgsize; + } + } + + vfree(tmp_sg); + return 0; +} +#endif + +static int bnxt_qplib_fill_user_dma_pages(struct bnxt_qplib_pbl *pbl, + struct bnxt_qplib_sg_info *sginfo) +{ + int rc = 0; + +#ifdef HAVE_RDMA_UMEM_FOR_EACH_DMA_BLOCK + struct ib_block_iter biter; + int pg_indx = 0; + + rdma_umem_for_each_dma_block(sginfo->umem, &biter, sginfo->pgsize) { + pbl->pg_map_arr[pg_indx] = rdma_block_iter_dma_address(&biter); + pbl->pg_arr[pg_indx] = NULL; + pbl->pg_count++; + pg_indx++; + } +#else +#ifdef HAVE_FOR_EACH_SG_DMA_PAGE + struct sg_dma_page_iter sg_iter; + int indx = 0, pg_indx = 0; + u64 pmask, addr; + + /* TODO: Use rdma block iterator. 
*/ + pmask = BIT_ULL(sginfo->pgshft) - 1; + for_each_sg_dma_page(sginfo->sghead, &sg_iter, sginfo->nmap, 0) { + addr = sg_page_iter_dma_address(&sg_iter); + if (!indx || !(addr & pmask)) { + pbl->pg_map_arr[pg_indx] = (addr & (~pmask)); + pbl->pg_arr[pg_indx] = NULL; + pbl->pg_count++; + pg_indx++; + } + indx++; + } +#else + rc = __fill_user_dma_pages(pbl, sginfo); +#endif +#endif + return rc; +} + +static int __alloc_pbl(struct bnxt_qplib_res *res, struct bnxt_qplib_pbl *pbl, + struct bnxt_qplib_sg_info *sginfo) +{ + struct pci_dev *pdev; + bool is_umem = false; + int i; + + if (sginfo->nopte) + return 0; + + pdev = res->pdev; + /* page ptr arrays */ + pbl->pg_arr = vmalloc_array(sginfo->npages, sizeof(void *)); + if (!pbl->pg_arr) + return -ENOMEM; + + pbl->pg_map_arr = vmalloc_array(sginfo->npages, sizeof(dma_addr_t)); + if (!pbl->pg_map_arr) { + vfree(pbl->pg_arr); + return -ENOMEM; + } + pbl->pg_count = 0; + pbl->pg_size = sginfo->pgsize; +#ifndef HAVE_RDMA_UMEM_FOR_EACH_DMA_BLOCK + if (!sginfo->sghead) { +#else + if (!sginfo->umem) { +#endif + for (i = 0; i < sginfo->npages; i++) { + pbl->pg_arr[i] = dma_zalloc_coherent(&pdev->dev, + pbl->pg_size, + &pbl->pg_map_arr[i], + GFP_KERNEL); + if (!pbl->pg_arr[i]) + goto fail; + pbl->pg_count++; + } + } else { + is_umem = true; + if (bnxt_qplib_fill_user_dma_pages(pbl, sginfo)) + goto fail; + } + + return 0; +fail: + __free_pbl(res, pbl, is_umem); + return -ENOMEM; +} + +/* HWQ */ +void bnxt_qplib_free_hwq(struct bnxt_qplib_res *res, + struct bnxt_qplib_hwq *hwq) +{ + int i; + + if (!hwq->max_elements) + return; + if (hwq->level >= PBL_LVL_MAX) + return; + + for (i = 0; i < hwq->level + 1; i++) { + if (i == hwq->level) + __free_pbl(res, &hwq->pbl[i], hwq->is_user); + else + __free_pbl(res, &hwq->pbl[i], false); + } + + hwq->level = PBL_LVL_MAX; + hwq->max_elements = 0; + hwq->element_size = 0; + hwq->prod = hwq->cons = 0; + hwq->cp_bit = 0; +} + +/* All HWQs are power of 2 in size */ +int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq, + struct bnxt_qplib_hwq_attr *hwq_attr) +{ + u32 npages = 0, depth, stride, aux_pages = 0; + dma_addr_t *src_phys_ptr, **dst_virt_ptr; + struct bnxt_qplib_sg_info sginfo = {}; + u32 aux_size = 0, npbl, npde; + void *umem; + struct bnxt_qplib_res *res; + u32 aux_slots, pg_size; + struct pci_dev *pdev; + int i, rc, lvl; + + res = hwq_attr->res; + pdev = res->pdev; +#ifndef HAVE_RDMA_UMEM_FOR_EACH_DMA_BLOCK + umem = hwq_attr->sginfo->sghead; +#else + umem = hwq_attr->sginfo->umem; +#endif + pg_size = hwq_attr->sginfo->pgsize; + hwq->level = PBL_LVL_MAX; + + depth = roundup_pow_of_two(hwq_attr->depth); + stride = roundup_pow_of_two(hwq_attr->stride); + if (hwq_attr->aux_depth) { + aux_slots = hwq_attr->aux_depth; + aux_size = roundup_pow_of_two(hwq_attr->aux_stride); + aux_pages = (aux_slots * aux_size) / pg_size; + if ((aux_slots * aux_size) % pg_size) + aux_pages++; + } + + if (!umem) { + hwq->is_user = false; + npages = (depth * stride) / pg_size + aux_pages; + if ((depth * stride) % pg_size) + npages++; + if (!npages) + return -EINVAL; + hwq_attr->sginfo->npages = npages; + } else { + hwq->is_user = true; + npages = hwq_attr->sginfo->npages; + npages = (npages * (u64)pg_size) / + BIT_ULL(hwq_attr->sginfo->pgshft); + if ((hwq_attr->sginfo->npages * (u64)pg_size) % + BIT_ULL(hwq_attr->sginfo->pgshft)) + npages++; + } +#ifdef ENABLE_DEBUG_SGE + dev_dbg(&pdev->dev, "QPLIB: Alloc HWQ slots 0x%x size 0x%x pages 0x%x", + slots, size, pages); +#endif + if (npages == MAX_PBL_LVL_0_PGS && 
!hwq_attr->sginfo->nopte) { + /* This request is Level 0, map PTE */ + rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], hwq_attr->sginfo); + if (rc) + goto fail; + hwq->level = PBL_LVL_0; +#ifdef ENABLE_DEBUG_SGE + dev_dbg(&pdev->dev, "QPLIB: PBL_LVL_0 DMA=0x%llx", + hwq->pbl[PBL_LVL_0].pg_map_arr[0]); +#endif + goto done; + } + + if (npages >= MAX_PBL_LVL_0_PGS) { + if (npages > MAX_PBL_LVL_1_PGS) { + u32 flag = (hwq_attr->type == HWQ_TYPE_L2_CMPL) ? + 0 : PTU_PTE_VALID; + /* 2 levels of indirection */ + npbl = npages >> MAX_PBL_LVL_1_PGS_SHIFT; + if (npages % BIT(MAX_PBL_LVL_1_PGS_SHIFT)) + npbl++; + npde = npbl >> MAX_PDL_LVL_SHIFT; + if(npbl % BIT(MAX_PDL_LVL_SHIFT)) + npde++; + /* Alloc PDE pages */ + sginfo.pgsize = npde * PAGE_SIZE; + sginfo.npages = 1; + rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], &sginfo); + + /* Alloc PBL pages */ + sginfo.npages = npbl; + sginfo.pgsize = PAGE_SIZE; + rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_1], &sginfo); + if (rc) + goto fail; + /* Fill PDL with PBL page pointers */ + dst_virt_ptr = + (dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr; + src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr; + for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++) + dst_virt_ptr[0][i] = src_phys_ptr[i] | flag; + /* Alloc or init PTEs */ + rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_2], + hwq_attr->sginfo); + if (rc) + goto fail; + hwq->level = PBL_LVL_2; + if (hwq_attr->sginfo->nopte) + goto done; +#ifdef ENABLE_DEBUG_SGE + dev_dbg(&pdev->dev, "QPLIB: PBL_LVL_1 alloc "); + dev_dbg(&pdev->dev, + "QPLIB: PBL_LVL_1.pg_count = 0x%x aux_pages=0x%x", + hwq->pbl[PBL_LVL_1].pg_count, aux_pages); +#endif + /* Fill PBLs with PTE pointers */ + dst_virt_ptr = + (dma_addr_t **)hwq->pbl[PBL_LVL_1].pg_arr; + src_phys_ptr = hwq->pbl[PBL_LVL_2].pg_map_arr; + for (i = 0; i < hwq->pbl[PBL_LVL_2].pg_count; i++) { + dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] = + src_phys_ptr[i] | PTU_PTE_VALID; +#ifdef ENABLE_DEBUG_SGE + dev_dbg(&pdev->dev, + "QPLIB: dst_virt_ptr[%d] = 0x%llx", i, + dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)]); +#endif + } + if (hwq_attr->type == HWQ_TYPE_QUEUE) { + /* Find the last pg of the size */ + i = hwq->pbl[PBL_LVL_2].pg_count; + dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |= + PTU_PTE_LAST; + if (i > 1) + dst_virt_ptr[PTR_PG(i - 2)] + [PTR_IDX(i - 2)] |= + PTU_PTE_NEXT_TO_LAST; + } +#ifdef ENABLE_DEBUG_SGE + dev_dbg(&pdev->dev, "QPLIB: PBL_LVL_2 alloc "); + dev_dbg(&pdev->dev, + "QPLIB: PBL_LVL_2.pg_count = 0x%x aux_pages=0x%x", + hwq->pbl[PBL_LVL_2].pg_count, aux_pages); + for (i = 0; i < hwq->pbl[PBL_LVL_2].pg_count; i++) + dev_dbg(&pdev->dev, + "QPLIB: dst_virt_ptr[%d] = 0x%llx", i, + dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)]); +#endif + } else { /* pages < 512 npbl = 1, npde = 0 */ + u32 flag = (hwq_attr->type == HWQ_TYPE_L2_CMPL) ? 
+ 0 : PTU_PTE_VALID; + + /* 1 level of indirection */ + npbl = npages >> MAX_PBL_LVL_1_PGS_SHIFT; + if (npages % BIT(MAX_PBL_LVL_1_PGS_SHIFT)) + npbl++; + sginfo.npages = npbl; + sginfo.pgsize = PAGE_SIZE; + /* Alloc PBL page */ + rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], &sginfo); + if (rc) + goto fail; + /* Alloc or init PTEs */ + rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_1], + hwq_attr->sginfo); + if (rc) + goto fail; + hwq->level = PBL_LVL_1; + if (hwq_attr->sginfo->nopte) + goto done; +#ifdef ENABLE_DEBUG_SGE + dev_dbg(&pdev->dev, "QPLIB: PBL_LVL_1 alloc "); + dev_dbg(&pdev->dev, + "QPLIB: PBL_LVL_1.pg_count = 0x%x aux_pages=0x%x", + hwq->pbl[PBL_LVL_1].pg_count, aux_pages); +#endif + /* Fill PBL with PTE pointers */ + dst_virt_ptr = + (dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr; + src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr; + for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++) + dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] = + src_phys_ptr[i] | flag; + if (hwq_attr->type == HWQ_TYPE_QUEUE) { + /* Find the last pg of the size */ + i = hwq->pbl[PBL_LVL_1].pg_count; + dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |= + PTU_PTE_LAST; + if (i > 1) + dst_virt_ptr[PTR_PG(i - 2)] + [PTR_IDX(i - 2)] |= + PTU_PTE_NEXT_TO_LAST; + } +#ifdef ENABLE_DEBUG_SGE + for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++) + dev_dbg(&pdev->dev, + "QPLIB: dst_virt_ptr[%d] = 0x%llx", + i, dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)]); +#endif + } + } +done: + hwq->prod = 0; + hwq->cons = 0; + hwq->pdev = pdev; + hwq->depth = hwq_attr->depth; + hwq->max_elements = depth; + hwq->element_size = stride; + hwq->qe_ppg = (pg_size/stride); + + if (hwq->level >= PBL_LVL_MAX) + goto fail; + /* For direct access to the elements */ + lvl = hwq->level; + if (hwq_attr->sginfo->nopte && hwq->level) + lvl = hwq->level - 1; + hwq->pbl_ptr = hwq->pbl[lvl].pg_arr; + hwq->pbl_dma_ptr = hwq->pbl[lvl].pg_map_arr; + spin_lock_init(&hwq->lock); + + return 0; +fail: + bnxt_qplib_free_hwq(res, hwq); + return -ENOMEM; +} + +/* Context Tables */ +void bnxt_qplib_free_hwctx(struct bnxt_qplib_res *res) +{ + struct bnxt_qplib_ctx *hctx; + int i; + + hctx = res->hctx; + bnxt_qplib_free_hwq(res, &hctx->qp_ctx.hwq); + bnxt_qplib_free_hwq(res, &hctx->mrw_ctx.hwq); + bnxt_qplib_free_hwq(res, &hctx->srq_ctx.hwq); + bnxt_qplib_free_hwq(res, &hctx->cq_ctx.hwq); + bnxt_qplib_free_hwq(res, &hctx->tim_ctx.hwq); + for (i = 0; i < MAX_TQM_ALLOC_REQ; i++) + bnxt_qplib_free_hwq(res, &hctx->tqm_ctx.qtbl[i]); + /* restore original pde level before destroy */ + hctx->tqm_ctx.pde.level = hctx->tqm_ctx.pde_level; + bnxt_qplib_free_hwq(res, &hctx->tqm_ctx.pde); +} + +static int bnxt_qplib_alloc_tqm_rings(struct bnxt_qplib_res *res, + struct bnxt_qplib_ctx *hctx) +{ + struct bnxt_qplib_hwq_attr hwq_attr = {}; + struct bnxt_qplib_sg_info sginfo = {}; + struct bnxt_qplib_tqm_ctx *tqmctx; + int rc = 0; + int i; + + tqmctx = &hctx->tqm_ctx; + + sginfo.pgsize = PAGE_SIZE; + sginfo.pgshft = PAGE_SHIFT; + hwq_attr.sginfo = &sginfo; + hwq_attr.res = res; + hwq_attr.type = HWQ_TYPE_CTX; + hwq_attr.depth = 512; + hwq_attr.stride = sizeof(u64); + /* Alloc pdl buffer */ + rc = bnxt_qplib_alloc_init_hwq(&tqmctx->pde, &hwq_attr); + if (rc) + return rc; + /* Save original pdl level */ + tqmctx->pde_level = tqmctx->pde.level; + + hwq_attr.stride = 1; + for (i = 0; i < MAX_TQM_ALLOC_REQ; i++) { + if (!tqmctx->qcount[i]) + continue; + hwq_attr.depth = hctx->qp_ctx.max * tqmctx->qcount[i]; + rc = bnxt_qplib_alloc_init_hwq(&tqmctx->qtbl[i], &hwq_attr); + if (rc) + return rc; + } + + return 0; +} + 
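+/*
+ * Layout note: the PDE table allocated above reserves
+ * MAX_TQM_ALLOC_BLK_SIZE (8) entries per TQM allocation request, so
+ * ring "i" has its page pointers written starting at PDE index
+ * i * MAX_TQM_ALLOC_BLK_SIZE.  bnxt_qplib_map_tqm_pgtbl() below fills
+ * those entries: a 2-level ring contributes its PBL_LVL_1 page
+ * addresses, any other ring contributes its single PBL_LVL_0 page
+ * address, each OR'ed with PTU_PTE_VALID; pde.level is then updated
+ * from the first populated ring so the chip walks the right number
+ * of indirection levels.
+ */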
+static void bnxt_qplib_map_tqm_pgtbl(struct bnxt_qplib_tqm_ctx *ctx) +{ + struct bnxt_qplib_hwq *qtbl_hwq; + dma_addr_t *dma_ptr; + __le64 **pbl_ptr, *ptr; + int i, j, k; + int fnz_idx = -1; + int pg_count; + + pbl_ptr = (__le64 **)ctx->pde.pbl_ptr; + + for (i = 0, j = 0; i < MAX_TQM_ALLOC_REQ; + i++, j += MAX_TQM_ALLOC_BLK_SIZE) { + qtbl_hwq = &ctx->qtbl[i]; + if (!qtbl_hwq->max_elements) + continue; + if (fnz_idx == -1) + fnz_idx = i; /* first non-zero index */ + switch (qtbl_hwq->level) { + case PBL_LVL_2: + pg_count = qtbl_hwq->pbl[PBL_LVL_1].pg_count; + for (k = 0; k < pg_count; k++) { + ptr = &pbl_ptr[PTR_PG(j + k)][PTR_IDX(j + k)]; + dma_ptr = &qtbl_hwq->pbl[PBL_LVL_1].pg_map_arr[k]; + *ptr = cpu_to_le64(*dma_ptr | PTU_PTE_VALID); + } + break; + case PBL_LVL_1: + case PBL_LVL_0: + default: + ptr = &pbl_ptr[PTR_PG(j)][PTR_IDX(j)]; + *ptr = cpu_to_le64(qtbl_hwq->pbl[PBL_LVL_0].pg_map_arr[0] | + PTU_PTE_VALID); + break; + } + } + if (fnz_idx == -1) + fnz_idx = 0; + /* update pde level as per page table programming */ + ctx->pde.level = (ctx->qtbl[fnz_idx].level == PBL_LVL_2) ? PBL_LVL_2 : + ctx->qtbl[fnz_idx].level + 1; +} + +static int bnxt_qplib_setup_tqm_rings(struct bnxt_qplib_res *res, + struct bnxt_qplib_ctx *hctx) +{ + int rc; + + rc = bnxt_qplib_alloc_tqm_rings(res, hctx); + if (rc) + return rc; + + bnxt_qplib_map_tqm_pgtbl(&hctx->tqm_ctx); + + return 0; +} + +/* + * Routine: bnxt_qplib_alloc_hwctx + * Description: + * Context tables are memories which are used by the chip. + * The 6 tables defined are: + * QPC ctx - holds QP states + * MRW ctx - holds memory region and window + * SRQ ctx - holds shared RQ states + * CQ ctx - holds completion queue states + * TQM ctx - holds Tx Queue Manager context + * TIM ctx - holds timer context + * Depending on the size of the tbl requested, either a 1 Page Buffer List + * or a 1-to-2-stage indirection Page Directory List + 1 PBL is used + * instead. 
+ * Table might be employed as follows: + * For 0 < ctx size <= 1 PAGE, 0 level of ind is used + * For 1 PAGE < ctx size <= 512 entries size, 1 level of ind is used + * For 512 < ctx size <= MAX, 2 levels of ind is used + * Returns: + * 0 if success, else -ERRORS + */ +int bnxt_qplib_alloc_hwctx(struct bnxt_qplib_res *res) +{ + struct bnxt_qplib_hwq_attr hwq_attr = {}; + struct bnxt_qplib_sg_info sginfo = {}; + struct bnxt_qplib_ctx *hctx; + struct bnxt_qplib_hwq *hwq; + int rc = 0; + + hctx = res->hctx; + /* QPC Tables */ + sginfo.pgsize = PAGE_SIZE; + sginfo.pgshft = PAGE_SHIFT; + hwq_attr.sginfo = &sginfo; + + hwq_attr.res = res; + hwq_attr.depth = hctx->qp_ctx.max; + hwq_attr.stride = BNXT_QPLIB_MAX_QP_CTX_ENTRY_SIZE; + hwq_attr.type = HWQ_TYPE_CTX; + hwq = &hctx->qp_ctx.hwq; + rc = bnxt_qplib_alloc_init_hwq(hwq, &hwq_attr); + if (rc) + goto fail; + + /* MRW Tables */ + hwq_attr.depth = hctx->mrw_ctx.max; + hwq_attr.stride = BNXT_QPLIB_MAX_MRW_CTX_ENTRY_SIZE; + hwq = &hctx->mrw_ctx.hwq; + rc = bnxt_qplib_alloc_init_hwq(hwq, &hwq_attr); + if (rc) + goto fail; + + /* SRQ Tables */ + hwq_attr.depth = hctx->srq_ctx.max; + hwq_attr.stride = BNXT_QPLIB_MAX_SRQ_CTX_ENTRY_SIZE; + hwq = &hctx->srq_ctx.hwq; + rc = bnxt_qplib_alloc_init_hwq(hwq, &hwq_attr); + if (rc) + goto fail; + + /* CQ Tables */ + hwq_attr.depth = hctx->cq_ctx.max; + hwq_attr.stride = BNXT_QPLIB_MAX_CQ_CTX_ENTRY_SIZE; + hwq = &hctx->cq_ctx.hwq; + rc = bnxt_qplib_alloc_init_hwq(hwq, &hwq_attr); + if (rc) + goto fail; + + /* TQM Buffer */ + rc = bnxt_qplib_setup_tqm_rings(res, hctx); + if (rc) + goto fail; + /* TIM Buffer */ + hwq_attr.depth = hctx->qp_ctx.max * 16; + hwq_attr.stride = 1; + hwq = &hctx->tim_ctx.hwq; + rc = bnxt_qplib_alloc_init_hwq(hwq, &hwq_attr); + if (rc) + goto fail; + + return 0; +fail: + bnxt_qplib_free_hwctx(res); + return rc; +} + +static void bnxt_qplib_free_sgid_tbl(struct bnxt_qplib_res *res) +{ + struct bnxt_qplib_sgid_tbl *sgid_tbl; + + sgid_tbl = &res->sgid_tbl; + + kfree(sgid_tbl->tbl); + sgid_tbl->tbl = NULL; + kfree(sgid_tbl->hw_id); + sgid_tbl->hw_id = NULL; + kfree(sgid_tbl->ctx); + sgid_tbl->ctx = NULL; + kfree(sgid_tbl->vlan); + sgid_tbl->vlan = NULL; + sgid_tbl->max = 0; + sgid_tbl->active = 0; +} + +static void bnxt_qplib_free_reftbls(struct bnxt_qplib_res *res) +{ + struct bnxt_qplib_reftbl *tbl; + + tbl = &res->reftbl.srqref; + vfree(tbl->rec); + + tbl = &res->reftbl.cqref; + vfree(tbl->rec); + + tbl = &res->reftbl.qpref; + vfree(tbl->rec); +} + +static int bnxt_qplib_alloc_reftbl(struct bnxt_qplib_reftbl *tbl, u32 max) +{ + tbl->max = max; + tbl->rec = vzalloc(sizeof(*tbl->rec) * max); + if (!tbl->rec) + return -ENOMEM; + spin_lock_init(&tbl->lock); + return 0; +} + +static int bnxt_qplib_alloc_reftbls(struct bnxt_qplib_res *res, + struct bnxt_qplib_dev_attr *dattr) +{ + u32 max_cq = BNXT_QPLIB_MAX_CQ_COUNT; + struct bnxt_qplib_reftbl *tbl; + u32 res_cnt; + int rc; + + /* + * Allocating one extra entry to hold QP1 info. + * Store QP1 info at the last entry of the table. 
+ * Decrement the tbl->max by one so that modulo + * operation to get the qp table index from qp id + * returns any value between 0 and max_qp-1 + */ + res_cnt = max_t(u32, BNXT_QPLIB_MAX_QPC_COUNT + 1, dattr->max_qp); + tbl = &res->reftbl.qpref; + rc = bnxt_qplib_alloc_reftbl(tbl, res_cnt); + if (rc) + return rc; + tbl->max--; + + if (_is_chip_gen_p5_p7(res->cctx)) + max_cq = BNXT_QPLIB_MAX_CQ_COUNT_P5; + res_cnt = max_t(u32, max_cq, dattr->max_cq); + tbl = &res->reftbl.cqref; + rc = bnxt_qplib_alloc_reftbl(tbl, res_cnt); + if (rc) + goto free_qpref_tbl; + + res_cnt = max_t(u32, BNXT_QPLIB_MAX_SRQC_COUNT, dattr->max_cq); + tbl = &res->reftbl.srqref; + rc = bnxt_qplib_alloc_reftbl(tbl, BNXT_QPLIB_MAX_SRQC_COUNT); + if (rc) + goto free_cqref_tbl; + + return 0; +free_cqref_tbl: + tbl = &res->reftbl.cqref; + vfree(tbl->rec); +free_qpref_tbl: + tbl = &res->reftbl.qpref; + vfree(tbl->rec); + return rc; +} + +static int bnxt_qplib_alloc_sgid_tbl(struct bnxt_qplib_res *res, u16 max) +{ + struct bnxt_qplib_sgid_tbl *sgid_tbl; + u32 i; + + sgid_tbl = &res->sgid_tbl; + + sgid_tbl->tbl = kcalloc(max, sizeof(*sgid_tbl->tbl), GFP_KERNEL); + if (!sgid_tbl->tbl) + return -ENOMEM; + + sgid_tbl->hw_id = kcalloc(max, sizeof(u32), GFP_KERNEL); + if (!sgid_tbl->hw_id) + goto free_tbl; + + sgid_tbl->ctx = kcalloc(max, sizeof(void *), GFP_KERNEL); + if (!sgid_tbl->ctx) + goto free_hw_id; + + sgid_tbl->vlan = kcalloc(max, sizeof(u8), GFP_KERNEL); + if (!sgid_tbl->vlan) + goto free_ctx; + + sgid_tbl->max = max; + + for (i = 0; i < sgid_tbl->max; i++) + sgid_tbl->tbl[i].vlan_id = 0xffff; + memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max); + return 0; +free_ctx: + kfree(sgid_tbl->ctx); +free_hw_id: + kfree(sgid_tbl->hw_id); +free_tbl: + kfree(sgid_tbl->tbl); + return -ENOMEM; +}; + +static void bnxt_qplib_cleanup_sgid_tbl(struct bnxt_qplib_res *res, + struct bnxt_qplib_sgid_tbl *sgid_tbl) +{ + int i; + + for (i = 0; i < sgid_tbl->max; i++) { + if (memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero, + sizeof(bnxt_qplib_gid_zero))) + bnxt_qplib_del_sgid(sgid_tbl, &sgid_tbl->tbl[i].gid, + sgid_tbl->tbl[i].vlan_id, true); + } + memset(sgid_tbl->tbl, 0, sizeof(*sgid_tbl->tbl) * sgid_tbl->max); + memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max); + memset(sgid_tbl->vlan, 0, sizeof(u8) * sgid_tbl->max); + sgid_tbl->active = 0; +} + +/* PDs */ +int bnxt_qplib_alloc_pd(struct bnxt_qplib_res *res, struct bnxt_qplib_pd *pd) +{ + u32 bit_num; + struct bnxt_qplib_pd_tbl *pdt = &res->pd_tbl; + + mutex_lock(&res->pd_tbl_lock); + bit_num = find_first_bit(pdt->tbl, pdt->max); + if (bit_num == pdt->max - 1) {/* Last bit is reserved */ + mutex_unlock(&res->pd_tbl_lock); + return -ENOMEM; + } + + /* Found unused PD */ + clear_bit(bit_num, pdt->tbl); + pd->id = bit_num; + + mutex_unlock(&res->pd_tbl_lock); + return 0; +} + +int bnxt_qplib_dealloc_pd(struct bnxt_qplib_res *res, + struct bnxt_qplib_pd_tbl *pdt, + struct bnxt_qplib_pd *pd) +{ + mutex_lock(&res->pd_tbl_lock); + if (test_and_set_bit(pd->id, pdt->tbl)) { + dev_warn(&res->pdev->dev, "Freeing an unused PD? pdn = %d", + pd->id); + mutex_unlock(&res->pd_tbl_lock); + return -EINVAL; + } + /* Reset to reserved pdid. 
*/ + pd->id = pdt->max - 1; + + mutex_unlock(&res->pd_tbl_lock); + return 0; +} + +static void bnxt_qplib_free_pd_tbl(struct bnxt_qplib_pd_tbl *pdt) +{ + kfree(pdt->tbl); + pdt->tbl = NULL; + pdt->max = 0; +} + +static int bnxt_qplib_alloc_pd_tbl(struct bnxt_qplib_res *res, u32 max) +{ + struct bnxt_qplib_pd_tbl *pdt; + u32 bytes; + + pdt = &res->pd_tbl; + + max++; /* One extra for reserved pdid. */ + bytes = DIV_ROUND_UP(max, 8); + + if (!bytes) + bytes = 1; + pdt->tbl = kmalloc(bytes, GFP_KERNEL); + if (!pdt->tbl) { + dev_err(&res->pdev->dev, + "QPLIB: PD tbl allocation failed for size = %d", bytes); + return -ENOMEM; + } + pdt->max = max; + memset((u8 *)pdt->tbl, 0xFF, bytes); + mutex_init(&res->pd_tbl_lock); + + return 0; +} + +/* DPIs */ +int bnxt_qplib_alloc_dpi(struct bnxt_qplib_res *res, + struct bnxt_qplib_dpi *dpi, + void *app, enum bnxt_qplib_dpi_type type) +{ + struct bnxt_qplib_dpi_tbl *dpit = &res->dpi_tbl; + struct bnxt_qplib_reg_desc *reg; + u32 bit_num; + u64 umaddr; + int rc = 0; + + if (type == BNXT_QPLIB_DPI_TYPE_KERNEL) { + /* + * Priviledged dbr was already mapped at bar base off + * ucreg.offset. It is sharing the same normal DB page + * with L2 driver. Here we only need to initialize it. + */ + dpi->umdbr = dpit->ucreg.bar_base + dpit->ucreg.offset; + dpi->dbr = dpit->priv_db; + dpi->dpi = dpit->ucreg.offset / PAGE_SIZE; + dpi->type = type; + return 0; + } + + reg = &dpit->wcreg; + mutex_lock(&res->dpi_tbl_lock); + if (!dpit->tbl || + (type == BNXT_QPLIB_DPI_TYPE_WC && + BNXT_RE_PPP_ENABLED(res->cctx) && !dpit->avail_ppp)) { + rc = -ENOMEM; + goto exit; + } + bit_num = find_first_bit(dpit->tbl, dpit->max); + if (bit_num >= dpit->max) { + rc = -ENOMEM; + goto exit; + } + /* Found unused DPI */ + clear_bit(bit_num, dpit->tbl); + dpit->app_tbl[bit_num] = app; + dpi->bit = bit_num; + dpi->dpi = bit_num + (reg->offset - dpit->ucreg.offset) / PAGE_SIZE; + umaddr = reg->bar_base + reg->offset + bit_num * PAGE_SIZE; + dpi->umdbr = umaddr; + if (type == BNXT_QPLIB_DPI_TYPE_WC) { + dpi->dbr = ioremap_wc(umaddr, PAGE_SIZE); + if (BNXT_RE_PPP_ENABLED(res->cctx) && dpi->dbr) + dpit->avail_ppp--; + } else { + dpi->dbr = ioremap(umaddr, PAGE_SIZE); + } + if (!dpi->dbr) { + dev_err(&res->pdev->dev, "QPLIB: DB remap failed, type = %d\n", + type); + rc = -ENOMEM; + /* Cleanup the dpi->tbl on failure */ + set_bit(bit_num, dpit->tbl); + dpit->app_tbl[bit_num] = NULL; + goto exit; + } + dpi->type = type; +exit: + mutex_unlock(&res->dpi_tbl_lock); + return rc; +} + +int bnxt_qplib_dealloc_dpi(struct bnxt_qplib_res *res, + struct bnxt_qplib_dpi *dpi) +{ + struct bnxt_qplib_dpi_tbl *dpit = &res->dpi_tbl; + int rc = 0; + + if (dpi->type == BNXT_QPLIB_DPI_TYPE_KERNEL) { + memset(dpi, 0, sizeof(*dpi)); + return 0; + } + + mutex_lock(&res->dpi_tbl_lock); + if (dpi->bit >= dpit->max) { + dev_warn(&res->pdev->dev, + "Invalid DPI? dpi = %d, bit = %d\n", + dpi->dpi, dpi->bit); + rc = -EINVAL; + goto fail; + } + + if (dpi->dpi) { + if (dpi->type == BNXT_QPLIB_DPI_TYPE_WC && + BNXT_RE_PPP_ENABLED(res->cctx) && dpi->dbr) + dpit->avail_ppp++; + pci_iounmap(res->pdev, dpi->dbr); + } + + if (test_and_set_bit(dpi->bit, dpit->tbl)) { + dev_warn(&res->pdev->dev, + "Freeing an unused DPI? 
dpi = %d, bit = %d\n", + dpi->dpi, dpi->bit); + rc = -EINVAL; + goto fail; + } + if (dpit->app_tbl) + dpit->app_tbl[dpi->bit] = NULL; + memset(dpi, 0, sizeof(*dpi)); +fail: + mutex_unlock(&res->dpi_tbl_lock); + return rc; +} + +static void bnxt_qplib_free_dpi_tbl(struct bnxt_qplib_dpi_tbl *dpit) +{ + kfree(dpit->tbl); + kfree(dpit->app_tbl); + dpit->tbl = NULL; + dpit->app_tbl = NULL; + dpit->max = 0; +} + +static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res *res, + struct bnxt_qplib_dev_attr *dev_attr) +{ + struct bnxt_qplib_dpi_tbl *dpit; + struct bnxt_qplib_reg_desc *reg; + unsigned long bar_len; + u32 dbr_offset; + u32 bytes; + + dpit = &res->dpi_tbl; + reg = &dpit->wcreg; + + if (!_is_chip_gen_p5_p7(res->cctx)) { + /* Offest should come from L2 driver */ + dbr_offset = dev_attr->l2_db_size; + dpit->ucreg.offset = dbr_offset; + dpit->wcreg.offset = dbr_offset; + } + + bar_len = pci_resource_len(res->pdev, reg->bar_id); + if (reg->offset >= bar_len) { + dev_warn(&res->pdev->dev, "No PCI resource reserved for RoCE apps.\n"); + dpit->app_tbl = NULL; + dpit->tbl = NULL; + dpit->max = 0; + dpit->avail_ppp = 0; + goto done; + } + + dpit->max = (bar_len - reg->offset) / PAGE_SIZE; + if (dev_attr->max_dpi) + dpit->max = min_t(u32, dpit->max, dev_attr->max_dpi); + + dpit->app_tbl = kzalloc(dpit->max * sizeof(void*), GFP_KERNEL); + if (!dpit->app_tbl) { + dev_err(&res->pdev->dev, + "QPLIB: DPI app tbl allocation failed"); + return -ENOMEM; + } + + bytes = (dpit->max + 7) >> 3; + dpit->tbl = kmalloc(bytes, GFP_KERNEL); + if (!dpit->tbl) { + kfree(dpit->app_tbl); + dev_err(&res->pdev->dev, + "QPLIB: DPI tbl allocation failed for size = %d", + bytes); + return -ENOMEM; + } + + memset((u8 *)dpit->tbl, 0xFF, bytes); + /* + * Allocating the 512 extended PPP pages is based on first + * come, first served policy. Any function could use number + * of pages from 0 to all. 
+ */ + if (BNXT_RE_PPP_ENABLED(res->cctx)) + dpit->avail_ppp = BNXT_QPLIB_MAX_EXTENDED_PPP_PAGES; + +done: + mutex_init(&res->dpi_tbl_lock); + dpit->priv_db = dpit->ucreg.bar_reg + dpit->ucreg.offset; + + return 0; +} + +/* Stats */ +void bnxt_qplib_free_stat_mem(struct bnxt_qplib_res *res, + struct bnxt_qplib_stats *stats) +{ + struct pci_dev *pdev; + + pdev = res->pdev; + if (stats->dma) + dma_free_coherent(&pdev->dev, stats->size, + stats->dma, stats->dma_map); + + memset(stats, 0, sizeof(*stats)); + stats->fw_id = -1; +} + +int bnxt_qplib_alloc_stat_mem(struct pci_dev *pdev, + struct bnxt_qplib_chip_ctx *cctx, + struct bnxt_qplib_stats *stats) +{ + memset(stats, 0, sizeof(*stats)); + stats->fw_id = -1; + stats->size = cctx->hw_stats_size; + stats->dma = dma_alloc_coherent(&pdev->dev, stats->size, + &stats->dma_map, GFP_KERNEL); + if (!stats->dma) { + dev_err(&pdev->dev, "QPLIB: Stats DMA allocation failed"); + return -ENOMEM; + } + return 0; +} + +/* Resource */ +int bnxt_qplib_stop_res(struct bnxt_qplib_res *res) +{ + struct bnxt_qplib_rcfw *rcfw = res->rcfw; + struct creq_stop_func_resp resp = {}; + struct bnxt_qplib_cmdqmsg msg = {}; + struct cmdq_stop_func req = {}; + int rc; + + bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_STOP_FUNC, + sizeof(req)); + bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), + sizeof(resp), 0); + rc = bnxt_qplib_rcfw_send_message(rcfw, &msg); + return rc; +} + +void bnxt_qplib_clear_tbls(struct bnxt_qplib_res *res) +{ + bnxt_qplib_cleanup_sgid_tbl(res, &res->sgid_tbl); +} + +void bnxt_qplib_free_tbls(struct bnxt_qplib_res *res) +{ + bnxt_qplib_free_sgid_tbl(res); + bnxt_qplib_free_pd_tbl(&res->pd_tbl); + bnxt_qplib_free_dpi_tbl(&res->dpi_tbl); + bnxt_qplib_free_reftbls(res); +} + +int bnxt_qplib_alloc_tbls(struct bnxt_qplib_res *res) +{ + struct bnxt_qplib_dev_attr *dev_attr; + int rc = 0; + + dev_attr = res->dattr; + + rc = bnxt_qplib_alloc_reftbls(res, dev_attr); + if (rc) + return rc; + + rc = bnxt_qplib_alloc_sgid_tbl(res, dev_attr->max_sgid); + if (rc) + goto free_reftbls; + + rc = bnxt_qplib_alloc_pd_tbl(res, dev_attr->max_pd); + if (rc) + goto free_sgidtbl; + + rc = bnxt_qplib_alloc_dpi_tbl(res, dev_attr); + if (rc) + goto free_pdtbl; + + return 0; +free_pdtbl: + bnxt_qplib_free_pd_tbl(&res->pd_tbl); +free_sgidtbl: + bnxt_qplib_free_sgid_tbl(res); +free_reftbls: + bnxt_qplib_free_reftbls(res); + return rc; +} + +void bnxt_qplib_unmap_db_bar(struct bnxt_qplib_res *res) +{ + struct bnxt_qplib_reg_desc *reg; + + reg = &res->dpi_tbl.ucreg; + if (reg->bar_reg) + pci_iounmap(res->pdev, reg->bar_reg); + reg->bar_reg = NULL; + reg->bar_base = 0; + reg->len = 0; + reg->bar_id = 0; /* Zero? 
or ff */ +} + +int bnxt_qplib_map_db_bar(struct bnxt_qplib_res *res) +{ + struct bnxt_qplib_reg_desc *ucreg; + struct bnxt_qplib_reg_desc *wcreg; + + wcreg = &res->dpi_tbl.wcreg; + wcreg->bar_id = RCFW_DBR_PCI_BAR_REGION; + wcreg->bar_base = pci_resource_start(res->pdev, wcreg->bar_id); + /* No need to set the wcreg->len here */ + + ucreg = &res->dpi_tbl.ucreg; + ucreg->bar_id = RCFW_DBR_PCI_BAR_REGION; + ucreg->bar_base = pci_resource_start(res->pdev, ucreg->bar_id); + ucreg->len = ucreg->offset + PAGE_SIZE; + if (!ucreg->len) { + dev_err(&res->pdev->dev, "QPLIB: invalid dbr length %d", + (int)ucreg->len); + return -EINVAL; + } + ucreg->bar_reg = ioremap(ucreg->bar_base, ucreg->len); + if (!ucreg->bar_reg) { + dev_err(&res->pdev->dev, "privileged dpi map failed!"); + return -ENOMEM; + } + + return 0; +} + +int bnxt_qplib_enable_atomic_ops_to_root(struct pci_dev *dev, bool is_virtfn) +{ + u16 ctl2; + + if (is_virtfn) + return -EOPNOTSUPP; + + if(pci_enable_atomic_ops_to_root(dev, PCI_EXP_DEVCAP2_ATOMIC_COMP32) && + pci_enable_atomic_ops_to_root(dev, PCI_EXP_DEVCAP2_ATOMIC_COMP64)) + return -EOPNOTSUPP; + + pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &ctl2); + return !(ctl2 & PCI_EXP_DEVCTL2_ATOMIC_REQ); +} diff --git a/bnxt_re-1.10.3-229.0.139.0/qplib_res.h b/bnxt_re-1.10.3-229.0.139.0/qplib_res.h new file mode 100644 index 0000000..d49d903 --- /dev/null +++ b/bnxt_re-1.10.3-229.0.139.0/qplib_res.h @@ -0,0 +1,984 @@ +/* + * Copyright (c) 2015-2023, Broadcom. All rights reserved. The term + * Broadcom refers to Broadcom Inc. and/or its subsidiaries. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * Author: Eddie Wai + * + * Description: QPLib resource manager (header) + */ + +#ifndef __BNXT_QPLIB_RES_H__ +#define __BNXT_QPLIB_RES_H__ + +#include "bnxt_dbr.h" +#include "bnxt_ulp.h" + +extern const struct bnxt_qplib_gid bnxt_qplib_gid_zero; + +#define CHIP_NUM_57508 0x1750 +#define CHIP_NUM_57504 0x1751 +#define CHIP_NUM_57502 0x1752 +#define CHIP_NUM_58818 0xd818 +#define CHIP_NUM_57608 0x1760 + +#define BNXT_QPLIB_MAX_QPC_COUNT (64 * 1024) +#define BNXT_QPLIB_MAX_SRQC_COUNT (64 * 1024) +#define BNXT_QPLIB_MAX_CQ_COUNT (64 * 1024) +#define BNXT_QPLIB_MAX_CQ_COUNT_P5 (128 * 1024) + +/* TODO:Remove this temporary define once HSI change is merged */ +#define BNXT_QPLIB_DBR_VALID (0x1UL << 26) +#define BNXT_QPLIB_DBR_EPOCH_SHIFT 24 +#define BNXT_QPLIB_DBR_TOGGLE_SHIFT 25 + +#define BNXT_QPLIB_DBR_PF_DB_OFFSET 0x10000 +#define BNXT_QPLIB_DBR_VF_DB_OFFSET 0x4000 + +#define BNXT_QPLIB_DBR_KEY_INVALID -1 + +enum bnxt_qplib_wqe_mode { + BNXT_QPLIB_WQE_MODE_STATIC = 0x00, + BNXT_QPLIB_WQE_MODE_VARIABLE = 0x01, + BNXT_QPLIB_WQE_MODE_INVALID = 0x02 +}; + +#define BNXT_RE_PUSH_MODE_NONE 0 +#define BNXT_RE_PUSH_MODE_WCB 1 +#define BNXT_RE_PUSH_MODE_PPP 2 +#define BNXT_RE_PUSH_ENABLED(mode) ((mode) == BNXT_RE_PUSH_MODE_WCB ||\ + (mode) == BNXT_RE_PUSH_MODE_PPP) +#define BNXT_RE_PPP_ENABLED(cctx) ((cctx)->modes.db_push_mode ==\ + BNXT_RE_PUSH_MODE_PPP) + +struct bnxt_qplib_drv_modes { + u8 wqe_mode; + u8 te_bypass; + u8 db_push_mode; + /* To control advanced cc params display in configfs */ + u8 cc_pr_mode; + /* Other modes to follow here e.g. GSI QP mode */ + u8 dbr_pacing; + u8 dbr_pacing_ext; + u8 dbr_drop_recov; + u8 dbr_primary_pf; + u8 dbr_pacing_v0; + u8 hdbr_enabled; + u8 steering_tag_supported; + u8 express_mode_supported; +}; + +struct bnxt_qplib_chip_ctx { + u16 chip_num; + u8 chip_rev; + u8 chip_metal; + u64 hwrm_intf_ver; + struct bnxt_qplib_drv_modes modes; + u32 dbr_stat_db_fifo; + u32 dbr_aeq_arm_reg; + u32 dbr_throttling_reg; + u16 hw_stats_size; + u16 hwrm_cmd_max_timeout; +}; + +static inline bool _is_chip_num_p7(u16 chip_num) +{ + return (chip_num == CHIP_NUM_58818 || + chip_num == CHIP_NUM_57608); +} + +static inline bool _is_chip_p7(struct bnxt_qplib_chip_ctx *cctx) +{ + return _is_chip_num_p7(cctx->chip_num); +} + +/* SR2 is Gen P5 */ +static inline bool _is_chip_gen_p5(struct bnxt_qplib_chip_ctx *cctx) +{ + return (cctx->chip_num == CHIP_NUM_57508 || + cctx->chip_num == CHIP_NUM_57504 || + cctx->chip_num == CHIP_NUM_57502); +} + +static inline bool _is_chip_gen_p5_p7(struct bnxt_qplib_chip_ctx *cctx) +{ + return (_is_chip_gen_p5(cctx) || _is_chip_p7(cctx)); +} + +static inline bool _is_wqe_mode_variable(struct bnxt_qplib_chip_ctx *cctx) +{ + return cctx->modes.wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE; +} + +struct bnxt_qplib_db_pacing_data { + u32 do_pacing; + u32 pacing_th; + u32 dev_err_state; + u32 alarm_th; + u32 grc_reg_offset; + u32 fifo_max_depth; + u32 fifo_room_mask; + u8 fifo_room_shift; +}; + +static inline u8 bnxt_qplib_dbr_pacing_en(struct bnxt_qplib_chip_ctx *cctx) +{ + return cctx->modes.dbr_pacing; +} + +static inline u8 bnxt_qplib_dbr_pacing_ext_en(struct bnxt_qplib_chip_ctx *cctx) +{ + return cctx->modes.dbr_pacing_ext; +} + +static inline u8 bnxt_qplib_dbr_pacing_is_primary_pf(struct bnxt_qplib_chip_ctx *cctx) +{ + return cctx->modes.dbr_primary_pf; +} + +static inline void bnxt_qplib_dbr_pacing_set_primary_pf + (struct bnxt_qplib_chip_ctx *cctx, u8 val) +{ + cctx->modes.dbr_primary_pf = val; +} + +/* Defines for handling the HWRM version check */ 
+#define HWRM_VERSION_DEV_ATTR_MAX_DPI 0x1000A0000000D +#define HWRM_VERSION_ROCE_STATS_FN_ID 0x1000A00000045 +#define HWRM_VERSION_ROCE_QP_EXT_STATS_CTX_ID_VALID 0x1000A00020098 + +#define PTR_CNT_PER_PG (PAGE_SIZE / sizeof(void *)) +#define PTR_MAX_IDX_PER_PG (PTR_CNT_PER_PG - 1) +#define PTR_PG(x) (((x) & ~PTR_MAX_IDX_PER_PG) / PTR_CNT_PER_PG) +#define PTR_IDX(x) ((x) & PTR_MAX_IDX_PER_PG) + +#define HWQ_CMP(idx, hwq) ((idx) & ((hwq)->max_elements - 1)) +#define HWQ_FREE_SLOTS(hwq) (hwq->max_elements - \ + ((HWQ_CMP(hwq->prod, hwq)\ + - HWQ_CMP(hwq->cons, hwq))\ + & (hwq->max_elements - 1))) +enum bnxt_qplib_hwq_type { + HWQ_TYPE_CTX, + HWQ_TYPE_QUEUE, + HWQ_TYPE_L2_CMPL, + HWQ_TYPE_MR +}; + +#define MAX_PBL_LVL_0_PGS 1 +#define MAX_PBL_LVL_1_PGS 512 +#define MAX_PBL_LVL_1_PGS_SHIFT 9 +#define MAX_PDL_LVL_SHIFT 9 + +enum bnxt_qplib_pbl_lvl { + PBL_LVL_0, + PBL_LVL_1, + PBL_LVL_2, + PBL_LVL_MAX +}; + +#define ROCE_PG_SIZE_4K (4 * 1024) +#define ROCE_PG_SIZE_8K (8 * 1024) +#define ROCE_PG_SIZE_64K (64 * 1024) +#define ROCE_PG_SIZE_2M (2 * 1024 * 1024) +#define ROCE_PG_SIZE_8M (8 * 1024 * 1024) +#define ROCE_PG_SIZE_1G (1024 * 1024 * 1024) +enum bnxt_qplib_hwrm_pg_size { + BNXT_QPLIB_HWRM_PG_SIZE_4K = 0, + BNXT_QPLIB_HWRM_PG_SIZE_8K = 1, + BNXT_QPLIB_HWRM_PG_SIZE_64K = 2, + BNXT_QPLIB_HWRM_PG_SIZE_2M = 3, + BNXT_QPLIB_HWRM_PG_SIZE_8M = 4, + BNXT_QPLIB_HWRM_PG_SIZE_1G = 5, +}; + +struct bnxt_qplib_reg_desc { + u8 bar_id; + resource_size_t bar_base; + unsigned long offset; + void __iomem *bar_reg; + size_t len; +}; + +struct bnxt_qplib_pbl { + u32 pg_count; + u32 pg_size; + void **pg_arr; + dma_addr_t *pg_map_arr; +}; + +struct bnxt_qplib_sg_info { +#ifndef HAVE_RDMA_UMEM_FOR_EACH_DMA_BLOCK + struct scatterlist *sghead; + u32 nmap; +#else + struct ib_umem *umem; +#endif + u32 npages; + u32 pgshft; + u32 pgsize; + bool nopte; +}; + +struct bnxt_qplib_hwq_attr { + struct bnxt_qplib_res *res; + struct bnxt_qplib_sg_info *sginfo; + enum bnxt_qplib_hwq_type type; + u32 depth; + u32 stride; + u32 aux_stride; + u32 aux_depth; +}; + +struct bnxt_qplib_hwq { + struct pci_dev *pdev; + spinlock_t lock; + struct bnxt_qplib_pbl pbl[PBL_LVL_MAX]; + enum bnxt_qplib_pbl_lvl level; /* 0, 1, or 2 */ + void **pbl_ptr; /* ptr for easy access + to the PBL entries */ + dma_addr_t *pbl_dma_ptr; /* ptr for easy access + to the dma_addr */ + u32 max_elements; + u32 depth; /* original requested depth */ + u16 element_size; /* Size of each entry */ + u16 qe_ppg; /* queue entry per page */ + + u32 prod; /* raw */ + u32 cons; /* raw */ + u8 cp_bit; + u8 is_user; + u64 *pad_pg; + u32 pad_stride; + u32 pad_pgofft; +}; + +struct bnxt_qplib_db_info { + void __iomem *db; + void __iomem *priv_db; + struct bnxt_qplib_hwq *hwq; + struct bnxt_qplib_res *res; + u32 xid; + u32 max_slot; + u32 flags; + u8 toggle; + spinlock_t lock; + u64 shadow_key; + u64 shadow_key_arm_ena; + /* DB copy Thor2 recovery */ + __le64 *dbc; /* offset 0 of the DB copy block */ + int ktbl_idx; + void *app; + u8 dbc_dt; + u32 seed; /* For DB pacing */ +}; + +enum bnxt_qplib_db_info_flags_mask { + BNXT_QPLIB_FLAG_EPOCH_CONS_SHIFT = 0x0UL, + BNXT_QPLIB_FLAG_EPOCH_PROD_SHIFT = 0x1UL, + BNXT_QPLIB_FLAG_EPOCH_CONS_MASK = 0x1UL, + BNXT_QPLIB_FLAG_EPOCH_PROD_MASK = 0x2UL, +}; + +enum bnxt_qplib_db_epoch_flag_shift { + BNXT_QPLIB_DB_EPOCH_CONS_SHIFT = BNXT_QPLIB_DBR_EPOCH_SHIFT, + BNXT_QPLIB_DB_EPOCH_PROD_SHIFT = (BNXT_QPLIB_DBR_EPOCH_SHIFT - 1) +}; + +/* Tables */ +struct bnxt_qplib_pd_tbl { + unsigned long *tbl; + u32 max; +}; + +struct bnxt_qplib_sgid_tbl { + 
struct bnxt_qplib_gid_info *tbl; + u16 *hw_id; + u16 max; + u16 active; + void *ctx; + bool *vlan; +}; + +enum bnxt_qplib_dpi_type { + BNXT_QPLIB_DPI_TYPE_KERNEL = 0, + BNXT_QPLIB_DPI_TYPE_UC = 1, + BNXT_QPLIB_DPI_TYPE_WC = 2 +}; + +struct bnxt_qplib_dpi { + u32 dpi; + u32 bit; + u64 umdbr; + void __iomem *dbr; + enum bnxt_qplib_dpi_type type; +}; + +#define BNXT_QPLIB_MAX_EXTENDED_PPP_PAGES 512 +struct bnxt_qplib_dpi_tbl { + void **app_tbl; + unsigned long *tbl; + u16 max; + u16 avail_ppp; + struct bnxt_qplib_reg_desc ucreg; /* Hold entire DB bar. */ + struct bnxt_qplib_reg_desc wcreg; + void __iomem *priv_db; +}; + +struct bnxt_qplib_stats { + dma_addr_t dma_map; + void *dma; + u32 size; + u32 fw_id; +}; + +struct bnxt_qplib_vf_res { + u32 max_qp; + u32 max_mrw; + u32 max_srq; + u32 max_cq; + u32 max_gid; +}; + +#define BNXT_QPLIB_MAX_QP_CTX_ENTRY_SIZE 448 +#define BNXT_QPLIB_MAX_SRQ_CTX_ENTRY_SIZE 64 +#define BNXT_QPLIB_MAX_CQ_CTX_ENTRY_SIZE 64 +#define BNXT_QPLIB_MAX_MRW_CTX_ENTRY_SIZE 128 + +#define MAX_TQM_ALLOC_REQ 48 +#define MAX_TQM_ALLOC_BLK_SIZE 8 +struct bnxt_qplib_tqm_ctx { + struct bnxt_qplib_hwq pde; + enum bnxt_qplib_pbl_lvl pde_level; /* Original level */ + struct bnxt_qplib_hwq qtbl[MAX_TQM_ALLOC_REQ]; + u8 qcount[MAX_TQM_ALLOC_REQ]; +}; + +struct bnxt_qplib_hctx { + struct bnxt_qplib_hwq hwq; + u32 max; +}; + +struct bnxt_qplib_refrec { + void *handle; + u32 xid; +}; + +struct bnxt_qplib_reftbl { + struct bnxt_qplib_refrec *rec; + u32 max; + spinlock_t lock; /* reftbl lock */ +}; + +struct bnxt_qplib_reftbls { + struct bnxt_qplib_reftbl qpref; + struct bnxt_qplib_reftbl cqref; + struct bnxt_qplib_reftbl srqref; +}; + +#define GET_TBL_INDEX(id, tbl) ((id) % (((tbl)->max) - 1)) +static inline u32 map_qp_id_to_tbl_indx(u32 qid, struct bnxt_qplib_reftbl *tbl) +{ + return (qid == 1) ? tbl->max : GET_TBL_INDEX(qid, tbl); +} + +/* + * This structure includes the number of various roce resource table sizes + * actually allocated by the driver. May be less than the maximums the firmware + * allows if the driver imposes lower limits than the firmware. 
+ */ +struct bnxt_qplib_ctx { + struct bnxt_qplib_hctx qp_ctx; + struct bnxt_qplib_hctx mrw_ctx; + struct bnxt_qplib_hctx srq_ctx; + struct bnxt_qplib_hctx cq_ctx; + struct bnxt_qplib_hctx tim_ctx; + struct bnxt_qplib_tqm_ctx tqm_ctx; + + struct bnxt_qplib_stats stats; + struct bnxt_qplib_stats stats2; + struct bnxt_qplib_vf_res vf_res; +}; + +struct bnxt_qplib_res { + struct pci_dev *pdev; + struct bnxt_qplib_chip_ctx *cctx; + struct bnxt_qplib_dev_attr *dattr; + struct bnxt_qplib_ctx *hctx; + struct net_device *netdev; + struct bnxt_en_dev *en_dev; + + struct bnxt_qplib_rcfw *rcfw; + + struct bnxt_qplib_pd_tbl pd_tbl; + struct mutex pd_tbl_lock; + struct bnxt_qplib_sgid_tbl sgid_tbl; + struct bnxt_qplib_dpi_tbl dpi_tbl; + struct mutex dpi_tbl_lock; + struct bnxt_qplib_reftbls reftbl; + bool prio; + bool is_vf; + struct bnxt_qplib_db_pacing_data *pacing_data; +}; + +struct bnxt_qplib_query_stats_info { + u32 function_id; + u8 collection_id; + bool vf_valid; +}; + +struct bnxt_qplib_query_qp_info { + u32 function_id; + u32 num_qps; + u32 start_index; + bool vf_valid; +}; + +struct bnxt_qplib_query_fn_info { + bool vf_valid; + u32 host; + u32 filter; +}; + +#define to_bnxt_qplib(ptr, type, member) \ + container_of(ptr, type, member) + +struct bnxt_qplib_pd; +struct bnxt_qplib_dev_attr; + +bool _is_alloc_mr_unified(struct bnxt_qplib_dev_attr *dattr); +void bnxt_qplib_free_hwq(struct bnxt_qplib_res *res, + struct bnxt_qplib_hwq *hwq); +int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq, + struct bnxt_qplib_hwq_attr *hwq_attr); +int bnxt_qplib_alloc_pd(struct bnxt_qplib_res *res, + struct bnxt_qplib_pd *pd); +int bnxt_qplib_dealloc_pd(struct bnxt_qplib_res *res, + struct bnxt_qplib_pd_tbl *pd_tbl, + struct bnxt_qplib_pd *pd); +int bnxt_qplib_alloc_dpi(struct bnxt_qplib_res *res, + struct bnxt_qplib_dpi *dpi, + void *app, enum bnxt_qplib_dpi_type type); +int bnxt_qplib_dealloc_dpi(struct bnxt_qplib_res *res, + struct bnxt_qplib_dpi *dpi); +int bnxt_qplib_stop_res(struct bnxt_qplib_res *res); +void bnxt_qplib_clear_tbls(struct bnxt_qplib_res *res); +void bnxt_qplib_init_tbls(struct bnxt_qplib_res *res); +void bnxt_qplib_free_tbls(struct bnxt_qplib_res *res); +int bnxt_qplib_alloc_tbls(struct bnxt_qplib_res *res); +void bnxt_qplib_free_hwctx(struct bnxt_qplib_res *res); +int bnxt_qplib_alloc_hwctx(struct bnxt_qplib_res *res); +int bnxt_qplib_alloc_stat_mem(struct pci_dev *pdev, + struct bnxt_qplib_chip_ctx *cctx, + struct bnxt_qplib_stats *stats); +void bnxt_qplib_free_stat_mem(struct bnxt_qplib_res *res, + struct bnxt_qplib_stats *stats); + +int bnxt_qplib_map_db_bar(struct bnxt_qplib_res *res); +void bnxt_qplib_unmap_db_bar(struct bnxt_qplib_res *res); +int bnxt_qplib_enable_atomic_ops_to_root(struct pci_dev *dev, bool is_virtfn); + +static inline void *bnxt_qplib_get_qe(struct bnxt_qplib_hwq *hwq, + u32 indx, u64 *pg) +{ + u32 pg_num, pg_idx; + + pg_num = (indx / hwq->qe_ppg); + pg_idx = (indx % hwq->qe_ppg); + if (pg) + *pg = (u64)&hwq->pbl_ptr[pg_num]; + return (void *)(hwq->pbl_ptr[pg_num] + hwq->element_size * pg_idx); +} + +static inline void bnxt_qplib_hwq_incr_prod(struct bnxt_qplib_db_info *dbinfo, + struct bnxt_qplib_hwq *hwq, u32 cnt) +{ + /* move prod and update toggle/epoch if wrap around */ + hwq->prod += cnt; + if (hwq->prod >= hwq->depth) { + hwq->prod %= hwq->depth; + dbinfo->flags ^= 1UL << BNXT_QPLIB_FLAG_EPOCH_PROD_SHIFT; + } +} + +static inline void bnxt_qplib_hwq_incr_cons(u32 max_elements, u32 *cons, + u32 cnt, u32 *dbinfo_flags) +{ + /* move cons and update 
toggle/epoch if wrap around */ + *cons += cnt; + if (*cons >= max_elements) { + *cons %= max_elements; + *dbinfo_flags ^= 1UL << BNXT_QPLIB_FLAG_EPOCH_CONS_SHIFT; + } +} + +static inline u8 _get_pte_pg_size(struct bnxt_qplib_hwq *hwq) +{ + u8 pg_size = BNXT_QPLIB_HWRM_PG_SIZE_4K; + struct bnxt_qplib_pbl *pbl; + + pbl = &hwq->pbl[hwq->level]; + switch (pbl->pg_size) { + case ROCE_PG_SIZE_4K: pg_size = BNXT_QPLIB_HWRM_PG_SIZE_4K; + break; + case ROCE_PG_SIZE_8K: pg_size = BNXT_QPLIB_HWRM_PG_SIZE_8K; + break; + case ROCE_PG_SIZE_64K: pg_size = BNXT_QPLIB_HWRM_PG_SIZE_64K; + break; + case ROCE_PG_SIZE_2M: pg_size = BNXT_QPLIB_HWRM_PG_SIZE_2M; + break; + case ROCE_PG_SIZE_8M: pg_size = BNXT_QPLIB_HWRM_PG_SIZE_8M; + break; + case ROCE_PG_SIZE_1G: pg_size = BNXT_QPLIB_HWRM_PG_SIZE_1G; + break; + default: + break; + } + return pg_size; +} + +static inline u64 _get_base_addr(struct bnxt_qplib_hwq *hwq) +{ + return hwq->pbl[PBL_LVL_0].pg_map_arr[0]; +} + +static inline u8 _get_base_pg_size(struct bnxt_qplib_hwq *hwq) +{ + u8 pg_size = BNXT_QPLIB_HWRM_PG_SIZE_4K; + struct bnxt_qplib_pbl *pbl; + + pbl = &hwq->pbl[PBL_LVL_0]; + switch (pbl->pg_size) { + case ROCE_PG_SIZE_4K: pg_size = BNXT_QPLIB_HWRM_PG_SIZE_4K; + break; + case ROCE_PG_SIZE_8K: pg_size = BNXT_QPLIB_HWRM_PG_SIZE_8K; + break; + case ROCE_PG_SIZE_64K: pg_size = BNXT_QPLIB_HWRM_PG_SIZE_64K; + break; + case ROCE_PG_SIZE_2M: pg_size = BNXT_QPLIB_HWRM_PG_SIZE_2M; + break; + case ROCE_PG_SIZE_8M: pg_size = BNXT_QPLIB_HWRM_PG_SIZE_8M; + break; + case ROCE_PG_SIZE_1G: pg_size = BNXT_QPLIB_HWRM_PG_SIZE_1G; + break; + default: + break; + } + return pg_size; +} + +static inline enum bnxt_qplib_hwq_type _get_hwq_type(struct bnxt_qplib_res *res) +{ + return _is_chip_gen_p5_p7(res->cctx) ? HWQ_TYPE_QUEUE : HWQ_TYPE_L2_CMPL; +} + +static inline bool _is_ext_stats_supported(u16 dev_cap_flags) +{ + return dev_cap_flags & + CREQ_QUERY_FUNC_RESP_SB_EXT_STATS; +} + +static inline int bnxt_ext_stats_supported(struct bnxt_qplib_chip_ctx *ctx, + u16 flags, bool virtfn) +{ + /* ext stats supported if cap flag is set AND is a PF OR a Thor2 VF */ + return (_is_ext_stats_supported(flags) && + ((virtfn && _is_chip_p7(ctx)) || (!virtfn))); +} + +static inline bool _is_hw_retx_supported(u16 dev_cap_flags) +{ + return dev_cap_flags & + (CREQ_QUERY_FUNC_RESP_SB_HW_REQUESTER_RETX_ENABLED | + CREQ_QUERY_FUNC_RESP_SB_HW_RESPONDER_RETX_ENABLED); +} + +static inline bool _is_drv_ver_reg_supported(u8 dev_cap_ext_flags) +{ + return dev_cap_ext_flags & + CREQ_QUERY_FUNC_RESP_SB_DRV_VERSION_RGTR_SUPPORTED; +} + +static inline bool _is_small_recv_wqe_supported(u8 dev_cap_ext_flags) +{ + return dev_cap_ext_flags & + CREQ_QUERY_FUNC_RESP_SB_CREATE_SRQ_SGE_SUPPORTED; +} + +#define BNXT_RE_INIT_FW_DRV_VER_SUPPORT_CMD_SIZE 16 + +#define BNXT_RE_CREATE_QP_EXT_STAT_CONTEXT_SIZE 8 +#define BNXT_RE_MODIFY_QP_EXT_STAT_CONTEXT_SIZE 8 + +#define BNXT_RE_EXP_MODE_ENABLED_CMD_SIZE_CREATE_QP 4 +#define BNXT_RE_STEERING_TAG_SUPPORTED_CMD_SIZE_CREATE_QP 4 +#define BNXT_RE_STEERING_TAG_SUPPORTED_CMD_SIZE_MODIFY_QP 8 +#define BNXT_RE_STEERING_TAG_SUPPORTED_CMD_SIZE 16 +static inline bool _is_steering_tag_supported(struct bnxt_qplib_res *res) +{ + return res->cctx->modes.steering_tag_supported; +} + +static inline bool _is_qp_exp_mode_supported(struct bnxt_qplib_res *res) +{ + return res->cctx->modes.express_mode_supported; +} + +/* Disable HW_RETX */ +#define BNXT_RE_HW_RETX(a) _is_hw_retx_supported((a)) + +static inline bool _is_cqe_v2_supported(u16 dev_cap_flags) +{ + return dev_cap_flags & + 
CREQ_QUERY_FUNC_RESP_SB_CQE_V2; +} + +static inline void bnxt_qplib_do_pacing(struct bnxt_qplib_db_info *info) +{ + struct bnxt_qplib_db_pacing_data *pacing_data; + struct bnxt_qplib_res *res = info->res; + + pacing_data = res->pacing_data; + if (pacing_data && pacing_data->do_pacing) + bnxt_do_pacing(res->en_dev->bar0, res->en_dev->en_dbr, &info->seed, + pacing_data->pacing_th, pacing_data->do_pacing); +} + +static inline void bnxt_qplib_ring_db32(struct bnxt_qplib_db_info *info, + bool arm) +{ + u32 key = 0; + + key = info->hwq->cons | (CMPL_DOORBELL_IDX_VALID | + (CMPL_DOORBELL_KEY_CMPL & CMPL_DOORBELL_KEY_MASK)); + if (!arm) + key |= CMPL_DOORBELL_MASK; + /* memory barrier */ + wmb(); + writel(key, info->db); +} + +#define HDBR_DBC_DEBUG_TRACE (0x1ULL << 59) +#define HDBR_DBC_OFFSET_SQ 0 +#define HDBR_DBC_OFFSET_RQ 0 +#define HDBR_DBC_OFFSET_SRQ 0 +#define HDBR_DBC_OFFSET_SRQ_ARMENA 1 +#define HDBR_DBC_OFFSET_SRQ_ARM 2 +#define HDBR_DBC_OFFSET_CQ 0 +#define HDBR_DBC_OFFSET_CQ_ARMENA 1 +#define HDBR_DBC_OFFSET_CQ_ARMSE 2 +#define HDBR_DBC_OFFSET_CQ_ARMALL 2 +#define HDBR_DBC_OFFSET_CQ_CUTOFF_ACK 3 + +static inline int bnxt_re_hdbr_get_dbc_offset(u32 type) +{ + switch (type) { + case DBC_DBC_TYPE_SQ: + return HDBR_DBC_OFFSET_SQ; + + case DBC_DBC_TYPE_RQ: + return HDBR_DBC_OFFSET_RQ; + + case DBC_DBC_TYPE_SRQ: + return HDBR_DBC_OFFSET_SRQ; + + case DBC_DBC_TYPE_SRQ_ARMENA: + return HDBR_DBC_OFFSET_SRQ_ARMENA; + + case DBC_DBC_TYPE_SRQ_ARM: + return HDBR_DBC_OFFSET_SRQ_ARM; + + case DBC_DBC_TYPE_CQ: + return HDBR_DBC_OFFSET_CQ; + + case DBC_DBC_TYPE_CQ_ARMENA: + return HDBR_DBC_OFFSET_CQ_ARMENA; + + case DBC_DBC_TYPE_CQ_ARMSE: + return HDBR_DBC_OFFSET_CQ_ARMSE; + + case DBC_DBC_TYPE_CQ_ARMALL: + return HDBR_DBC_OFFSET_CQ_ARMALL; + + case DBC_DBC_TYPE_CQ_CUTOFF_ACK: + return HDBR_DBC_OFFSET_CQ_CUTOFF_ACK; + } + + return -1; +} + +static inline void bnxt_re_hdbr_db_copy(struct bnxt_qplib_db_info *info, u64 key) +{ + int offset; + + if (!info->dbc) + return; + offset = bnxt_re_hdbr_get_dbc_offset((u32)(key >> 32) & DBC_DBC_TYPE_MASK); + if (offset < 0) + return; + if (info->dbc_dt) + key |= HDBR_DBC_DEBUG_TRACE; + *(info->dbc + offset) = cpu_to_le64(key); + wmb(); /* Sync DB copy before it is written into HW */ +} + +#define BNXT_QPLIB_INIT_DBHDR(xid, type, indx, toggle) \ + (((u64)(((xid) & DBC_DBC_XID_MASK) | DBC_DBC_PATH_ROCE | \ + (type) | BNXT_QPLIB_DBR_VALID) << 32) | (indx) | \ + (((u32)(toggle)) << (BNXT_QPLIB_DBR_TOGGLE_SHIFT))) + +static inline void bnxt_qplib_write_db(struct bnxt_qplib_db_info *info, + u64 key, void __iomem *db, + u64 *shadow_key) +{ + unsigned long flags; + + spin_lock_irqsave(&info->lock, flags); + bnxt_qplib_do_pacing(info); + *shadow_key = key; + bnxt_re_hdbr_db_copy(info, key); + writeq(key, db); + spin_unlock_irqrestore(&info->lock, flags); +} + +static inline void __replay_writeq(u64 key, void __iomem *db) +{ + /* No need to replay uninitialised shadow_keys */ + if (key != BNXT_QPLIB_DBR_KEY_INVALID) + writeq(key, db); +} + +static inline void bnxt_qplib_replay_db(struct bnxt_qplib_db_info *info, + bool is_arm_ena) + +{ + unsigned long flags; + + if (!spin_trylock_irqsave(&info->lock, flags)) + return; + + bnxt_qplib_do_pacing(info); + if (is_arm_ena) + __replay_writeq(info->shadow_key_arm_ena, info->priv_db); + else + __replay_writeq(info->shadow_key, info->db); + + spin_unlock_irqrestore(&info->lock, flags); +} + +static inline void bnxt_qplib_ring_db(struct bnxt_qplib_db_info *info, + u32 type) +{ + u64 key = 0; + u32 indx; + u8 toggle = 0; + + if (type 
== DBC_DBC_TYPE_CQ_ARMALL || + type == DBC_DBC_TYPE_CQ_ARMSE) + toggle = info->toggle; + + indx = ((info->hwq->cons & DBC_DBC_INDEX_MASK) | + ((info->flags & BNXT_QPLIB_FLAG_EPOCH_CONS_MASK) << + BNXT_QPLIB_DB_EPOCH_CONS_SHIFT)); + + key = BNXT_QPLIB_INIT_DBHDR(info->xid, type, indx, toggle); + bnxt_qplib_write_db(info, key, info->db, &info->shadow_key); +} + +static inline void bnxt_qplib_ring_prod_db(struct bnxt_qplib_db_info *info, + u32 type) +{ + u64 key = 0; + u32 indx; + + indx = (((info->hwq->prod / info->max_slot) & DBC_DBC_INDEX_MASK) | + ((info->flags & BNXT_QPLIB_FLAG_EPOCH_PROD_MASK) << + BNXT_QPLIB_DB_EPOCH_PROD_SHIFT)); + key = BNXT_QPLIB_INIT_DBHDR(info->xid, type, indx, 0); + bnxt_qplib_write_db(info, key, info->db, &info->shadow_key); +} + +static inline void bnxt_qplib_armen_db(struct bnxt_qplib_db_info *info, + u32 type) +{ + u64 key = 0; + u8 toggle = 0; + + if (type == DBC_DBC_TYPE_CQ_ARMENA || type == DBC_DBC_TYPE_SRQ_ARMENA) + toggle = info->toggle; + /* Index always at 0 */ + key = BNXT_QPLIB_INIT_DBHDR(info->xid, type, 0, toggle); + bnxt_qplib_write_db(info, key, info->priv_db, + &info->shadow_key_arm_ena); +} + +static inline void bnxt_qplib_cq_coffack_db(struct bnxt_qplib_db_info *info) +{ + u64 key = 0; + + /* Index always at 0 */ + key = BNXT_QPLIB_INIT_DBHDR(info->xid, DBC_DBC_TYPE_CQ_CUTOFF_ACK, 0, 0); + bnxt_qplib_write_db(info, key, info->priv_db, &info->shadow_key); +} + +static inline void bnxt_qplib_srq_arm_db(struct bnxt_qplib_db_info *info) +{ + u64 key = 0; + u8 toggle = 0; + + toggle = info->toggle; + /* Index always at 0 */ + key = BNXT_QPLIB_INIT_DBHDR(info->xid, DBC_DBC_TYPE_SRQ_ARM, 0, toggle); + bnxt_qplib_write_db(info, key, info->priv_db, &info->shadow_key); +} + +static inline void bnxt_qplib_ring_nq_db(struct bnxt_qplib_db_info *info, + struct bnxt_qplib_chip_ctx *cctx, + bool arm) +{ + u32 type; + + type = arm ? 
DBC_DBC_TYPE_NQ_ARM : DBC_DBC_TYPE_NQ; + if (_is_chip_gen_p5_p7(cctx)) + bnxt_qplib_ring_db(info, type); + else + bnxt_qplib_ring_db32(info, arm); +} + +struct bnxt_qplib_max_res { + u32 max_qp; + u32 max_mr; + u32 max_cq; + u32 max_srq; + u32 max_ah; + u32 max_pd; +}; + +/* + * Defines for maximum resources supported for chip revisions + * Maximum PDs supported are restricted to Max QPs + * GENP4 - Wh+ + * GENP7 - Thor2 + * DEFAULT - Thor + */ +#define BNXT_QPLIB_GENP4_PF_MAX_QP (16 * 1024) +#define BNXT_QPLIB_GENP4_PF_MAX_MRW (16 * 1024) +#define BNXT_QPLIB_GENP4_PF_MAX_CQ (16 * 1024) +#define BNXT_QPLIB_GENP4_PF_MAX_SRQ (1 * 1024) +#define BNXT_QPLIB_GENP4_PF_MAX_AH (16 * 1024) +#define BNXT_QPLIB_GENP4_PF_MAX_PD BNXT_QPLIB_GENP4_PF_MAX_QP + +#define BNXT_QPLIB_DEFAULT_PF_MAX_QP (64 * 1024) +#define BNXT_QPLIB_DEFAULT_PF_MAX_MRW (256 * 1024) +#define BNXT_QPLIB_DEFAULT_PF_MAX_CQ (64 * 1024) +#define BNXT_QPLIB_DEFAULT_PF_MAX_SRQ (4 * 1024) +#define BNXT_QPLIB_DEFAULT_PF_MAX_AH (128 * 1024) +#define BNXT_QPLIB_DEFAULT_PF_MAX_PD BNXT_QPLIB_DEFAULT_PF_MAX_QP + +#define BNXT_QPLIB_DEFAULT_VF_MAX_QP (6 * 1024) +#define BNXT_QPLIB_DEFAULT_VF_MAX_MRW (6 * 1024) +#define BNXT_QPLIB_DEFAULT_VF_MAX_CQ (6 * 1024) +#define BNXT_QPLIB_DEFAULT_VF_MAX_SRQ (4 * 1024) +#define BNXT_QPLIB_DEFAULT_VF_MAX_AH (6 * 1024) +#define BNXT_QPLIB_DEFAULT_VF_MAX_PD BNXT_QPLIB_DEFAULT_VF_MAX_QP + +#ifdef BNXT_FPGA +#define BNXT_QPLIB_GENP7_FPGA_PF_MAX_QP (256) +#define BNXT_QPLIB_GENP7_FPGA_PF_MAX_MRW (2 * 1024) +#define BNXT_QPLIB_GENP7_FPGA_PF_MAX_CQ (256) +#define BNXT_QPLIB_GENP7_FPGA_PF_MAX_SRQ (256) +#define BNXT_QPLIB_GENP7_FPGA_PF_MAX_AH (256) +#define BNXT_QPLIB_GENP7_FPGA_PF_MAX_PD BNXT_QPLIB_GENP7_FPGA_PF_MAX_QP + +#define BNXT_QPLIB_GENP7_FPGA_VF_MAX_QP (64) +#define BNXT_QPLIB_GENP7_FPGA_VF_MAX_MRW (1024) +#define BNXT_QPLIB_GENP7_FPGA_VF_MAX_CQ (64) +#define BNXT_QPLIB_GENP7_FPGA_VF_MAX_SRQ (64) +#define BNXT_QPLIB_GENP7_FPGA_VF_MAX_AH (64) +#define BNXT_QPLIB_GENP7_FPGA_VF_MAX_PD BNXT_QPLIB_GENP7_FPGA_VF_MAX_QP +#endif + +static inline void bnxt_qplib_max_res_supported(struct bnxt_qplib_chip_ctx *cctx, + struct bnxt_qplib_res *qpl_res, + struct bnxt_qplib_max_res *max_res, + bool vf_res_limit) +{ + switch (cctx->chip_num) { + case CHIP_NUM_57608: + case CHIP_NUM_58818: +#ifdef BNXT_FPGA + if (vf_res_limit) { + max_res->max_qp = BNXT_QPLIB_GENP7_FPGA_VF_MAX_QP; + max_res->max_mr = BNXT_QPLIB_GENP7_FPGA_VF_MAX_MRW; + max_res->max_cq = BNXT_QPLIB_GENP7_FPGA_VF_MAX_CQ; + max_res->max_srq = BNXT_QPLIB_GENP7_FPGA_VF_MAX_SRQ; + max_res->max_ah = BNXT_QPLIB_GENP7_FPGA_VF_MAX_AH; + max_res->max_pd = BNXT_QPLIB_GENP7_FPGA_VF_MAX_PD; + } else { + max_res->max_qp = BNXT_QPLIB_GENP7_FPGA_PF_MAX_QP; + max_res->max_mr = BNXT_QPLIB_GENP7_FPGA_PF_MAX_MRW; + max_res->max_cq = BNXT_QPLIB_GENP7_FPGA_PF_MAX_CQ; + max_res->max_srq = BNXT_QPLIB_GENP7_FPGA_PF_MAX_SRQ; + max_res->max_ah = BNXT_QPLIB_GENP7_FPGA_PF_MAX_AH; + max_res->max_pd = BNXT_QPLIB_GENP7_FPGA_PF_MAX_PD; + } + break; +#endif + case CHIP_NUM_57504: + case CHIP_NUM_57502: + case CHIP_NUM_57508: + if (vf_res_limit) { + max_res->max_qp = BNXT_QPLIB_DEFAULT_VF_MAX_QP; + max_res->max_mr = BNXT_QPLIB_DEFAULT_VF_MAX_MRW; + max_res->max_cq = BNXT_QPLIB_DEFAULT_VF_MAX_CQ; + max_res->max_srq = BNXT_QPLIB_DEFAULT_VF_MAX_SRQ; + max_res->max_ah = BNXT_QPLIB_DEFAULT_VF_MAX_AH; + max_res->max_pd = BNXT_QPLIB_DEFAULT_VF_MAX_PD; + } else { + max_res->max_qp = BNXT_QPLIB_DEFAULT_PF_MAX_QP; + max_res->max_mr = BNXT_QPLIB_DEFAULT_PF_MAX_MRW; + max_res->max_cq = 
BNXT_QPLIB_DEFAULT_PF_MAX_CQ; + max_res->max_srq = BNXT_QPLIB_DEFAULT_PF_MAX_SRQ; + max_res->max_ah = BNXT_QPLIB_DEFAULT_PF_MAX_AH; + max_res->max_pd = BNXT_QPLIB_DEFAULT_PF_MAX_PD; + } + break; + default: + /* Wh+/Stratus max resources */ + max_res->max_qp = BNXT_QPLIB_GENP4_PF_MAX_QP; + max_res->max_mr = BNXT_QPLIB_GENP4_PF_MAX_MRW; + max_res->max_cq = BNXT_QPLIB_GENP4_PF_MAX_CQ; + max_res->max_srq = BNXT_QPLIB_GENP4_PF_MAX_SRQ; + max_res->max_ah = BNXT_QPLIB_GENP4_PF_MAX_AH; + max_res->max_pd = BNXT_QPLIB_GENP4_PF_MAX_PD; + break; + } +} +#endif diff --git a/bnxt_re-1.10.3-229.0.139.0/qplib_sp.c b/bnxt_re-1.10.3-229.0.139.0/qplib_sp.c new file mode 100644 index 0000000..7e4f9f0 --- /dev/null +++ b/bnxt_re-1.10.3-229.0.139.0/qplib_sp.c @@ -0,0 +1,1304 @@ +/* + * Copyright (c) 2015-2023, Broadcom. All rights reserved. The term + * Broadcom refers to Broadcom Inc. and/or its subsidiaries. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * Author: Eddie Wai + * + * Description: Slow Path Operators + */ + +#include +#include +#include +#include +#include + +#include "roce_hsi.h" + +#include "qplib_tlv.h" +#include "qplib_res.h" +#include "qplib_rcfw.h" +#include "qplib_sp.h" +#include "compat.h" + +const struct bnxt_qplib_gid bnxt_qplib_gid_zero = {{ 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0 }}; + +/* Device */ +static u8 bnxt_qplib_is_atomic_cap(struct bnxt_qplib_rcfw *rcfw) +{ + u16 pcie_ctl2 = 0; + + if (!_is_chip_gen_p5_p7(rcfw->res->cctx)) + return false; + pcie_capability_read_word(rcfw->pdev, PCI_EXP_DEVCTL2, &pcie_ctl2); + return (pcie_ctl2 & PCI_EXP_DEVCTL2_ATOMIC_REQ); +} + +static void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw, char *fw_ver) +{ + struct creq_query_version_resp resp = {}; + struct bnxt_qplib_cmdqmsg msg = {}; + struct cmdq_query_version req = {}; + int rc = 0; + + bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_QUERY_VERSION, + sizeof(req)); + bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), + sizeof(resp), 0); + rc = bnxt_qplib_rcfw_send_message(rcfw, &msg); + if (rc) { + dev_err(&rcfw->pdev->dev, "QPLIB: Failed to query version"); + return; + } + fw_ver[0] = resp.fw_maj; + fw_ver[1] = resp.fw_minor; + fw_ver[2] = resp.fw_bld; + fw_ver[3] = resp.fw_rsvd; +} + +int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw) +{ + struct bnxt_qplib_max_res dev_res = {}; + struct creq_query_func_resp resp = {}; + struct bnxt_qplib_cmdqmsg msg = {}; + struct creq_query_func_resp_sb *sb; + struct bnxt_qplib_rcfw_sbuf sbuf; + struct bnxt_qplib_dev_attr *attr; + struct bnxt_qplib_chip_ctx *cctx; + struct cmdq_query_func req = {}; + u8 *tqm_alloc; + int i, rc = 0; + u32 temp; + + cctx = rcfw->res->cctx; + attr = rcfw->res->dattr; + + bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_QUERY_FUNC, + sizeof(req)); + + sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS); + sbuf.sb = dma_zalloc_coherent(&rcfw->pdev->dev, sbuf.size, + &sbuf.dma_addr, GFP_KERNEL); + if (!sbuf.sb) + return -ENOMEM; + + sb = sbuf.sb; + req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS; + bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req), + sizeof(resp), 0); + rc = bnxt_qplib_rcfw_send_message(rcfw, &msg); + if (rc) + goto bail; + + bnxt_qplib_max_res_supported(cctx, rcfw->res, &dev_res, false); + /* Extract the context from the side buffer */ + attr->max_qp = le32_to_cpu(sb->max_qp); + attr->max_qp = min_t(u32, attr->max_qp, dev_res.max_qp); + /* max_qp value reported by FW does not include the QP1 */ + attr->max_qp += 1; + attr->max_qp_rd_atom = + sb->max_qp_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ? + BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_rd_atom; + attr->max_qp_init_rd_atom = + sb->max_qp_init_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ? + BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_init_rd_atom; + /* Report 1 less than the max_qp_wqes reported by FW as driver adds + * one extra entry while creating the qp + */ + attr->max_qp_wqes = le16_to_cpu(sb->max_qp_wr) - 1; + /* Adjust for max_qp_wqes for variable wqe */ + if (cctx->modes.wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE) { + attr->max_qp_wqes = BNXT_VAR_MAX_WQE - 1; + } + + if (!_is_chip_gen_p5_p7(cctx)) { + /* + * 128 WQEs needs to be reserved for the HW (8916). Prevent + * reporting the max number for gen-p4 only. 
+ */ + attr->max_qp_wqes -= BNXT_QPLIB_RESERVED_QP_WRS; + } + attr->max_qp_sges = sb->max_sge; + if (_is_chip_gen_p5_p7(cctx) && + cctx->modes.wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE) + attr->max_qp_sges = min_t(u32, sb->max_sge_var_wqe, BNXT_VAR_MAX_SGE); + + attr->max_cq = le32_to_cpu(sb->max_cq); + attr->max_cq = min_t(u32, attr->max_cq, dev_res.max_cq); + + attr->max_cq_wqes = le32_to_cpu(sb->max_cqe); + attr->max_cq_wqes = min_t(u32, BNXT_QPLIB_MAX_CQ_WQES, attr->max_cq_wqes); + + attr->max_cq_sges = attr->max_qp_sges; + attr->max_mr = le32_to_cpu(sb->max_mr); + attr->max_mr = min_t(u32, attr->max_mr, dev_res.max_mr); + attr->max_mw = le32_to_cpu(sb->max_mw); + attr->max_mw = min_t(u32, attr->max_mw, dev_res.max_mr); + + attr->max_mr_size = le64_to_cpu(sb->max_mr_size); + attr->max_pd = le32_to_cpu(sb->max_pd); + attr->max_pd = min_t(u32, attr->max_pd, dev_res.max_pd); + attr->max_raw_ethy_qp = le32_to_cpu(sb->max_raw_eth_qp); + attr->max_ah = le32_to_cpu(sb->max_ah); + attr->max_ah = min_t(u32, attr->max_ah, dev_res.max_ah); + + attr->max_fmr = le32_to_cpu(sb->max_fmr); + attr->max_map_per_fmr = sb->max_map_per_fmr; + + attr->max_srq = le16_to_cpu(sb->max_srq); + attr->max_srq = min_t(u32, attr->max_srq, dev_res.max_srq); + attr->max_srq_wqes = le32_to_cpu(sb->max_srq_wr) - 1; + attr->max_srq_sges = sb->max_srq_sge; + attr->max_pkey = 1; + + attr->max_inline_data = !cctx->modes.wqe_mode ? + le32_to_cpu(sb->max_inline_data) : + le16_to_cpu(sb->max_inline_data_var_wqe); + + if (!_is_chip_p7(cctx)) { + attr->l2_db_size = (sb->l2_db_space_size + 1) * + (0x01 << RCFW_DBR_BASE_PAGE_SHIFT); + } + attr->max_sgid = le32_to_cpu(sb->max_gid); + + /* TODO: remove this hack for statically allocated gid_map */ + bnxt_re_set_max_gid(&attr->max_sgid); + + attr->dev_cap_flags = le16_to_cpu(sb->dev_cap_flags); + if (attr->dev_cap_flags & CREQ_QUERY_FUNC_RESP_SB_PINGPONG_PUSH_MODE) + cctx->modes.db_push_mode = BNXT_RE_PUSH_MODE_PPP; + if (attr->dev_cap_flags & CREQ_QUERY_FUNC_RESP_SB_EXPRESS_MODE_SUPPORTED) + cctx->modes.express_mode_supported = 1; + + attr->dev_cap_ext_flags = sb->dev_cap_ext_flags; + attr->page_size_cap = BIT_ULL(28) | BIT_ULL(21) | BIT_ULL(16) | BIT_ULL(12); + + bnxt_qplib_query_version(rcfw, attr->fw_ver); + + for (i = 0; i < MAX_TQM_ALLOC_REQ / 4; i++) { + temp = le32_to_cpu(sb->tqm_alloc_reqs[i]); + tqm_alloc = (u8 *)&temp; + attr->tqm_alloc_reqs[i * 4] = *tqm_alloc; + attr->tqm_alloc_reqs[i * 4 + 1] = *(++tqm_alloc); + attr->tqm_alloc_reqs[i * 4 + 2] = *(++tqm_alloc); + attr->tqm_alloc_reqs[i * 4 + 3] = *(++tqm_alloc); + } + + if (rcfw->res->cctx->hwrm_intf_ver >= HWRM_VERSION_DEV_ATTR_MAX_DPI) + attr->max_dpi = le32_to_cpu(sb->max_dpi); + + attr->is_atomic = bnxt_qplib_is_atomic_cap(rcfw); +bail: + dma_free_coherent(&rcfw->pdev->dev, sbuf.size, + sbuf.sb, sbuf.dma_addr); + return rc; +} + +int bnxt_qplib_set_func_resources(struct bnxt_qplib_res *res) +{ + struct creq_set_func_resources_resp resp = {}; + struct cmdq_set_func_resources req = {}; + struct bnxt_qplib_cmdqmsg msg = {}; + struct bnxt_qplib_rcfw *rcfw; + struct bnxt_qplib_ctx *hctx; + int rc = 0; + + rcfw = res->rcfw; + hctx = res->hctx; + bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_SET_FUNC_RESOURCES, + sizeof(req)); + + req.number_of_qp = cpu_to_le32(hctx->qp_ctx.max); + req.number_of_mrw = cpu_to_le32(hctx->mrw_ctx.max); + req.number_of_srq = cpu_to_le32(hctx->srq_ctx.max); + req.number_of_cq = cpu_to_le32(hctx->cq_ctx.max); + + req.max_qp_per_vf = cpu_to_le32(hctx->vf_res.max_qp); + req.max_mrw_per_vf = 
cpu_to_le32(hctx->vf_res.max_mrw); + req.max_srq_per_vf = cpu_to_le32(hctx->vf_res.max_srq); + req.max_cq_per_vf = cpu_to_le32(hctx->vf_res.max_cq); + req.max_gid_per_vf = cpu_to_le32(hctx->vf_res.max_gid); + + /* Keep the old stats context id of PF */ + req.stat_ctx_id = cpu_to_le32(hctx->stats.fw_id); + + bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), + sizeof(resp), 0); + rc = bnxt_qplib_rcfw_send_message(rcfw, &msg); + if (rc) + dev_err(&res->pdev->dev, + "QPLIB: Failed to set function resources"); + + return rc; +} + +int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, + struct bnxt_qplib_gid *gid, u16 gid_idx, const u8 *smac) +{ + struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl, + struct bnxt_qplib_res, + sgid_tbl); + struct bnxt_qplib_rcfw *rcfw = res->rcfw; + struct creq_modify_gid_resp resp = {}; + struct bnxt_qplib_cmdqmsg msg = {}; + struct cmdq_modify_gid req = {}; + int rc; + + bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_MODIFY_GID, + sizeof(req)); + + req.gid[0] = cpu_to_be32(((u32 *)gid->data)[3]); + req.gid[1] = cpu_to_be32(((u32 *)gid->data)[2]); + req.gid[2] = cpu_to_be32(((u32 *)gid->data)[1]); + req.gid[3] = cpu_to_be32(((u32 *)gid->data)[0]); + if (res->prio) { + req.vlan |= cpu_to_le16(CMDQ_ADD_GID_VLAN_TPID_TPID_8100 | + CMDQ_ADD_GID_VLAN_VLAN_EN); + } + + /* MAC in network format */ + req.src_mac[0] = cpu_to_be16(((u16 *)smac)[0]); + req.src_mac[1] = cpu_to_be16(((u16 *)smac)[1]); + req.src_mac[2] = cpu_to_be16(((u16 *)smac)[2]); + req.gid_index = cpu_to_le16(gid_idx); + + bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), + sizeof(resp), 0); + rc = bnxt_qplib_rcfw_send_message(rcfw, &msg); + if (rc) { + dev_err(&res->pdev->dev, + "QPLIB: update SGID table failed"); + return rc; + } + return 0; +} + +/* SGID */ +int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res, + struct bnxt_qplib_sgid_tbl *sgid_tbl, int index, + struct bnxt_qplib_gid *gid) +{ + if (index > sgid_tbl->max) { + dev_err(&res->pdev->dev, + "QPLIB: Index %d exceeded SGID table max (%d)", + index, sgid_tbl->max); + return -EINVAL; + } + memcpy(gid, &sgid_tbl->tbl[index].gid, sizeof(*gid)); + return 0; +} + +int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, + struct bnxt_qplib_gid *gid, + u16 vlan_id, bool update) +{ + struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl, + struct bnxt_qplib_res, + sgid_tbl); + struct bnxt_qplib_rcfw *rcfw = res->rcfw; + int index; + + /* Do we need a sgid_lock here? 
*/ + if (!sgid_tbl->active) { + dev_err(&res->pdev->dev, + "QPLIB: SGID table has no active entries"); + return -ENOMEM; + } + for (index = 0; index < sgid_tbl->max; index++) { + if (!memcmp(&sgid_tbl->tbl[index].gid, gid, sizeof(*gid)) && + vlan_id == sgid_tbl->tbl[index].vlan_id) + break; + } + if (index == sgid_tbl->max) { + dev_warn(&res->pdev->dev, "GID not found in the SGID table"); + return 0; + } + + if (update) { + struct creq_delete_gid_resp resp = {}; + struct bnxt_qplib_cmdqmsg msg = {}; + struct cmdq_delete_gid req = {}; + int rc; + + bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_DELETE_GID, + sizeof(req)); + if (sgid_tbl->hw_id[index] == 0xFFFF) { + dev_err(&res->pdev->dev, + "QPLIB: GID entry contains an invalid HW id"); + return -EINVAL; + } + req.gid_index = cpu_to_le16(sgid_tbl->hw_id[index]); + bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), + sizeof(resp), 0); + rc = bnxt_qplib_rcfw_send_message(rcfw, &msg); + if (rc) + return rc; + } + memcpy(&sgid_tbl->tbl[index].gid, &bnxt_qplib_gid_zero, + sizeof(bnxt_qplib_gid_zero)); + sgid_tbl->tbl[index].vlan_id = 0xFFFF; + sgid_tbl->vlan[index] = false; + sgid_tbl->active--; + dev_dbg(&res->pdev->dev, + "QPLIB: SGID deleted hw_id[0x%x] = 0x%x active = 0x%x", + index, sgid_tbl->hw_id[index], sgid_tbl->active); + sgid_tbl->hw_id[index] = (u16)-1; + + return 0; +} + +int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, + struct bnxt_qplib_gid *gid, const u8 *smac, u16 vlan_id, + bool update, u32 *index) +{ + struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl, + struct bnxt_qplib_res, + sgid_tbl); + struct bnxt_qplib_rcfw *rcfw = res->rcfw; + int i, free_idx; + + /* Do we need a sgid_lock here? */ + if (sgid_tbl->active == sgid_tbl->max) { + dev_err(&res->pdev->dev, "QPLIB: SGID table is full"); + return -ENOMEM; + } + free_idx = sgid_tbl->max; + for (i = 0; i < sgid_tbl->max; i++) { + if (!memcmp(&sgid_tbl->tbl[i], gid, sizeof(*gid)) && + sgid_tbl->tbl[i].vlan_id == vlan_id) { + dev_dbg(&res->pdev->dev, + "QPLIB: SGID entry already exist in entry %d!", + i); + *index = i; + return -EALREADY; + } else if (!memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero, + sizeof(bnxt_qplib_gid_zero)) && + free_idx == sgid_tbl->max) { + free_idx = i; + } + } + if (free_idx == sgid_tbl->max) { + dev_err(&res->pdev->dev, + "QPLIB: SGID table is FULL but count is not MAX??"); + return -ENOMEM; + } + if (update) { + struct creq_add_gid_resp resp = {}; + struct bnxt_qplib_cmdqmsg msg = {}; + struct cmdq_add_gid req = {}; + int rc; + + bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_ADD_GID, + sizeof(req)); + + req.gid[0] = cpu_to_be32(((u32 *)gid->data)[3]); + req.gid[1] = cpu_to_be32(((u32 *)gid->data)[2]); + req.gid[2] = cpu_to_be32(((u32 *)gid->data)[1]); + req.gid[3] = cpu_to_be32(((u32 *)gid->data)[0]); + /* driver should ensure that all RoCE traffic is always VLAN tagged + * if RoCE traffic is running on non-zero VLAN ID or + * RoCE traffic is running on non-zero Priority. 
+ */ + if ((vlan_id != 0xFFFF) || res->prio) { + if (vlan_id != 0xFFFF) + req.vlan = cpu_to_le16(vlan_id & + CMDQ_ADD_GID_VLAN_VLAN_ID_MASK); + req.vlan |= + cpu_to_le16(CMDQ_ADD_GID_VLAN_TPID_TPID_8100 | + CMDQ_ADD_GID_VLAN_VLAN_EN); + } + + /* MAC in network format */ + req.src_mac[0] = cpu_to_be16(((u16 *)smac)[0]); + req.src_mac[1] = cpu_to_be16(((u16 *)smac)[1]); + req.src_mac[2] = cpu_to_be16(((u16 *)smac)[2]); + + bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), + sizeof(resp), 0); + rc = bnxt_qplib_rcfw_send_message(rcfw, &msg); + if (rc) + return rc; + sgid_tbl->hw_id[free_idx] = le32_to_cpu(resp.xid); + } + + if (vlan_id != 0xFFFF) + sgid_tbl->vlan[free_idx] = true; + + memcpy(&sgid_tbl->tbl[free_idx], gid, sizeof(*gid)); + sgid_tbl->tbl[free_idx].vlan_id = vlan_id; + sgid_tbl->active++; + dev_dbg(&res->pdev->dev, + "QPLIB: SGID added hw_id[0x%x] = 0x%x active = 0x%x", + free_idx, sgid_tbl->hw_id[free_idx], sgid_tbl->active); + + *index = free_idx; + /* unlock */ + return 0; +} + +/* AH */ +int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah, + bool block) +{ + struct bnxt_qplib_rcfw *rcfw = res->rcfw; + struct creq_create_ah_resp resp = {}; + struct bnxt_qplib_cmdqmsg msg = {}; + struct cmdq_create_ah req = {}; + u32 temp32[4]; + u16 temp16[3]; + int rc; + + bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_CREATE_AH, + sizeof(req)); + + memcpy(temp32, ah->dgid.data, sizeof(struct bnxt_qplib_gid)); + req.dgid[0] = cpu_to_le32(temp32[0]); + req.dgid[1] = cpu_to_le32(temp32[1]); + req.dgid[2] = cpu_to_le32(temp32[2]); + req.dgid[3] = cpu_to_le32(temp32[3]); + + req.type = ah->nw_type; + req.hop_limit = ah->hop_limit; + req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id[ah->sgid_index]); + req.dest_vlan_id_flow_label = cpu_to_le32((ah->flow_label & + CMDQ_CREATE_AH_FLOW_LABEL_MASK) | + CMDQ_CREATE_AH_DEST_VLAN_ID_MASK); + req.pd_id = cpu_to_le32(ah->pd->id); + req.traffic_class = ah->traffic_class; + + /* MAC in network format */ + memcpy(temp16, ah->dmac, ETH_ALEN); + req.dest_mac[0] = cpu_to_le16(temp16[0]); + req.dest_mac[1] = cpu_to_le16(temp16[1]); + req.dest_mac[2] = cpu_to_le16(temp16[2]); + + bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), + sizeof(resp), block); + rc = bnxt_qplib_rcfw_send_message(rcfw, &msg); + if (rc) + return rc; + + ah->id = le32_to_cpu(resp.xid); + /* for Cu/Wh AHID 0 is not valid */ + if (!_is_chip_gen_p5_p7(res->cctx) && !ah->id) + rc = -EINVAL; + + return rc; +} + +int bnxt_qplib_destroy_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah, + bool block) +{ + struct bnxt_qplib_rcfw *rcfw = res->rcfw; + struct creq_destroy_ah_resp resp = {}; + struct bnxt_qplib_cmdqmsg msg = {}; + struct cmdq_destroy_ah req = {}; + int rc; + + bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_DESTROY_AH, + sizeof(req)); + + req.ah_cid = cpu_to_le32(ah->id); + + bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), + sizeof(resp), block); + rc = bnxt_qplib_rcfw_send_message(rcfw, &msg); + return rc; +} + +/* MRW */ +int bnxt_qplib_free_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw) +{ + struct creq_deallocate_key_resp resp = {}; + struct bnxt_qplib_rcfw *rcfw = res->rcfw; + struct cmdq_deallocate_key req = {}; + struct bnxt_qplib_cmdqmsg msg = {}; + int rc; + + if (mrw->lkey == 0xFFFFFFFF) { + dev_info(&res->pdev->dev, + "QPLIB: SP: Free a reserved lkey MRW"); + return 0; + } + + bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_DEALLOCATE_KEY, + sizeof(req)); + + req.mrw_flags = mrw->type; + + if 
((mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1) || + (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A) || + (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B)) + req.key = cpu_to_le32(mrw->rkey); + else + req.key = cpu_to_le32(mrw->lkey); + + bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), + sizeof(resp), 0); + rc = bnxt_qplib_rcfw_send_message(rcfw, &msg); + if (rc) + return rc; + + if (mrw->hwq.max_elements) + bnxt_qplib_free_hwq(res, &mrw->hwq); + + return 0; +} + +int bnxt_qplib_alloc_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw) +{ + struct bnxt_qplib_rcfw *rcfw = res->rcfw; + struct creq_allocate_mrw_resp resp = {}; + struct bnxt_qplib_cmdqmsg msg = {}; + struct cmdq_allocate_mrw req = {}; + int rc; + + bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_ALLOCATE_MRW, + sizeof(req)); + + req.pd_id = cpu_to_le32(mrw->pd->id); + req.mrw_flags = mrw->type; + if ((mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR && + mrw->flags & BNXT_QPLIB_FR_PMR) || + mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A || + mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B) + req.access = CMDQ_ALLOCATE_MRW_ACCESS_CONSUMER_OWNED_KEY; + req.mrw_handle = cpu_to_le64(mrw); + + bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), + sizeof(resp), 0); + rc = bnxt_qplib_rcfw_send_message(rcfw, &msg); + if (rc) + return rc; + if ((mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1) || + (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A) || + (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B)) + mrw->rkey = le32_to_cpu(resp.xid); + else + mrw->lkey = le32_to_cpu(resp.xid); + + return 0; +} + +int bnxt_qplib_dereg_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw, + bool block) +{ + struct bnxt_qplib_rcfw *rcfw = res->rcfw; + struct creq_deregister_mr_resp resp = {}; + struct bnxt_qplib_cmdqmsg msg = {}; + struct cmdq_deregister_mr req = {}; + int rc; + + bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_DEREGISTER_MR, + sizeof(req)); + + req.lkey = cpu_to_le32(mrw->lkey); + bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), + sizeof(resp), block); + rc = bnxt_qplib_rcfw_send_message(rcfw, &msg); + if (rc) + return rc; + + if (mrw->hwq.max_elements) { + mrw->va = 0; + mrw->total_size = 0; + bnxt_qplib_free_hwq(res, &mrw->hwq); + } + + return 0; +} + +int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, + struct bnxt_qplib_mrinfo *mrinfo, + bool block) +{ + struct bnxt_qplib_hwq_attr hwq_attr = {}; + struct bnxt_qplib_rcfw *rcfw = res->rcfw; + struct creq_register_mr_resp resp = {}; + struct bnxt_qplib_cmdqmsg msg = {}; + struct cmdq_register_mr req = {}; + struct bnxt_qplib_mrw *mr; + u32 buf_pg_size; + u16 flags = 0; + u32 pg_size; + u8 cmd_size; + u16 level; + int rc; + + mr = mrinfo->mrw; + buf_pg_size = 0x01ULL << mrinfo->sg.pgshft; + if (mrinfo->sg.npages) { + /* Free the hwq if it already exist, must be a rereg */ + if (mr->hwq.max_elements) + bnxt_qplib_free_hwq(res, &mr->hwq); + /* Use system PAGE_SIZE */ + hwq_attr.res = res; + hwq_attr.depth = mrinfo->sg.npages; + hwq_attr.stride = PAGE_SIZE; + hwq_attr.type = HWQ_TYPE_MR; + hwq_attr.sginfo = &mrinfo->sg; + rc = bnxt_qplib_alloc_init_hwq(&mr->hwq, &hwq_attr); + if (rc) { + dev_err(&res->pdev->dev, + "SP: Reg MR memory allocation failed"); + return -ENOMEM; + } + } + + /* Configure the request */ + if (mrinfo->is_dma) { + /* No PBL provided, just use system PAGE_SIZE */ + level = 0; + req.pbl = 0; + pg_size = PAGE_SIZE; + } else { + level = mr->hwq.level; + req.pbl = 
cpu_to_le64(mr->hwq.pbl[PBL_LVL_0].pg_map_arr[0]); + } + + pg_size = buf_pg_size ? buf_pg_size : PAGE_SIZE; + req.log2_pg_size_lvl = (level << CMDQ_REGISTER_MR_LVL_SFT) | + ((ilog2(pg_size) << + CMDQ_REGISTER_MR_LOG2_PG_SIZE_SFT) & + CMDQ_REGISTER_MR_LOG2_PG_SIZE_MASK); + req.log2_pbl_pg_size = cpu_to_le16(((ilog2(PAGE_SIZE) << + CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_SFT) & + CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_MASK)); + req.access = (mr->flags & 0xFFFF); + req.va = cpu_to_le64(mr->va); + req.key = cpu_to_le32(mr->lkey); + if (_is_alloc_mr_unified(res->dattr)) { + flags = 0; + req.key = cpu_to_le32(mr->pd->id); + flags |= CMDQ_REGISTER_MR_FLAGS_ALLOC_MR; + req.flags = cpu_to_le16(flags); + } + req.mr_size = cpu_to_le64(mr->total_size); + cmd_size = sizeof(req); + if (!_is_steering_tag_supported(res)) + cmd_size -= BNXT_RE_STEERING_TAG_SUPPORTED_CMD_SIZE; + + bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_REGISTER_MR, + cmd_size); + bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, cmd_size, + sizeof(resp), block); + rc = bnxt_qplib_rcfw_send_message(rcfw, &msg); + if (rc) + goto fail; + + if (_is_alloc_mr_unified(res->dattr)) { + mr->lkey = le32_to_cpu(resp.xid); + mr->rkey = mr->lkey; + } + + return 0; +fail: + if (mr->hwq.max_elements) + bnxt_qplib_free_hwq(res, &mr->hwq); + return rc; +} + +int bnxt_qplib_alloc_fast_reg_page_list(struct bnxt_qplib_res *res, + struct bnxt_qplib_frpl *frpl, + int max_pg_ptrs) +{ + struct bnxt_qplib_hwq_attr hwq_attr = {}; + struct bnxt_qplib_sg_info sginfo = {}; + int pg_ptrs, rc; + + /* Re-calculate the max to fit the HWQ allocation model */ + pg_ptrs = roundup_pow_of_two(max_pg_ptrs); + + sginfo.pgsize = PAGE_SIZE; + sginfo.nopte = true; + + hwq_attr.res = res; + hwq_attr.depth = pg_ptrs; + hwq_attr.stride = PAGE_SIZE; + hwq_attr.sginfo = &sginfo; + hwq_attr.type = HWQ_TYPE_CTX; + rc = bnxt_qplib_alloc_init_hwq(&frpl->hwq, &hwq_attr); + if (!rc) + frpl->max_pg_ptrs = pg_ptrs; + + return rc; +} + +void bnxt_qplib_free_fast_reg_page_list(struct bnxt_qplib_res *res, + struct bnxt_qplib_frpl *frpl) +{ + bnxt_qplib_free_hwq(res, &frpl->hwq); +} + +int bnxt_qplib_map_tc2cos(struct bnxt_qplib_res *res, u16 *cids) +{ + struct bnxt_qplib_rcfw *rcfw = res->rcfw; + struct creq_map_tc_to_cos_resp resp = {}; + struct bnxt_qplib_cmdqmsg msg = {}; + struct cmdq_map_tc_to_cos req = {}; + int rc; + + bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_MAP_TC_TO_COS, + sizeof(req)); + req.cos0 = cpu_to_le16(cids[0]); + req.cos1 = cpu_to_le16(cids[1]); + + bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), + sizeof(resp), 0); + rc = bnxt_qplib_rcfw_send_message(rcfw, &msg); + return rc; +} + +void bnxt_qplib_fill_cc_gen1(struct cmdq_modify_roce_cc_gen1_tlv *ext_req, + struct bnxt_qplib_cc_param_ext *cc_ext) +{ + ext_req->modify_mask = cpu_to_le64(cc_ext->ext_mask); + cc_ext->ext_mask = 0; + ext_req->inactivity_th_hi = cpu_to_le16(cc_ext->inact_th_hi); + ext_req->min_time_between_cnps = cpu_to_le16(cc_ext->min_delta_cnp); + ext_req->init_cp = cpu_to_le16(cc_ext->init_cp); + ext_req->tr_update_mode = cc_ext->tr_update_mode; + ext_req->tr_update_cycles = cc_ext->tr_update_cyls; + ext_req->fr_num_rtts = cc_ext->fr_rtt; + ext_req->ai_rate_increase = cc_ext->ai_rate_incr; + ext_req->reduction_relax_rtts_th = cpu_to_le16(cc_ext->rr_rtt_th); + ext_req->additional_relax_cr_th = cpu_to_le16(cc_ext->ar_cr_th); + ext_req->cr_min_th = cpu_to_le16(cc_ext->cr_min_th); + ext_req->bw_avg_weight = cc_ext->bw_avg_weight; + ext_req->actual_cr_factor = cc_ext->cr_factor; + 
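	/* The remaining gen1 parameters are copied field by field; multi-byte values are converted to little-endian for the firmware command. */ +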
ext_req->max_cp_cr_th = cpu_to_le16(cc_ext->cr_th_max_cp); + ext_req->cp_bias_en = cc_ext->cp_bias_en; + ext_req->cp_bias = cc_ext->cp_bias; + ext_req->cnp_ecn = cc_ext->cnp_ecn; + ext_req->rtt_jitter_en = cc_ext->rtt_jitter_en; + ext_req->link_bytes_per_usec = cpu_to_le16(cc_ext->bytes_per_usec); + ext_req->reset_cc_cr_th = cpu_to_le16(cc_ext->cc_cr_reset_th); + ext_req->cr_width = cc_ext->cr_width; + ext_req->quota_period_min = cc_ext->min_quota; + ext_req->quota_period_max = cc_ext->max_quota; + ext_req->quota_period_abs_max = cc_ext->abs_max_quota; + ext_req->tr_lower_bound = cpu_to_le16(cc_ext->tr_lb); + ext_req->cr_prob_factor = cc_ext->cr_prob_fac; + ext_req->tr_prob_factor = cc_ext->tr_prob_fac; + ext_req->fairness_cr_th = cpu_to_le16(cc_ext->fair_cr_th); + ext_req->red_div = cc_ext->red_div; + ext_req->cnp_ratio_th = cc_ext->cnp_ratio_th; + ext_req->exp_ai_rtts = cpu_to_le16(cc_ext->ai_ext_rtt); + ext_req->exp_ai_cr_cp_ratio = cc_ext->exp_crcp_ratio; + ext_req->use_rate_table = cc_ext->low_rate_en; + ext_req->cp_exp_update_th = cpu_to_le16(cc_ext->cpcr_update_th); + ext_req->high_exp_ai_rtts_th1 = cpu_to_le16(cc_ext->ai_rtt_th1); + ext_req->high_exp_ai_rtts_th2 = cpu_to_le16(cc_ext->ai_rtt_th2); + ext_req->actual_cr_cong_free_rtts_th = cpu_to_le16(cc_ext->cf_rtt_th); + ext_req->severe_cong_cr_th1 = cpu_to_le16(cc_ext->sc_cr_th1); + ext_req->severe_cong_cr_th2 = cpu_to_le16(cc_ext->sc_cr_th2); + ext_req->link64B_per_rtt = cpu_to_le32(cc_ext->l64B_per_rtt); + ext_req->cc_ack_bytes = cc_ext->cc_ack_bytes; + ext_req->reduce_init_cong_free_rtts_th = cpu_to_le16(cc_ext->reduce_cf_rtt_th); +} + +void bnxt_qplib_fill_cc_gen2(struct cmdq_modify_roce_cc_gen2_tlv *ext2_req, + struct bnxt_qplib_cc_param_ext2 *cc_ext2) +{ + u32 act; + + ext2_req->modify_mask = cpu_to_le64(cc_ext2->ext2_mask); + if (!cc_ext2->ext2_mask) + return; + ext2_req->dcn_qlevel_tbl_idx = cc_ext2->idx; + if (cc_ext2->ext2_mask & MODIFY_MASK_DCN_QLEVEL_TBL_THR) + ext2_req->dcn_qlevel_tbl_thr = cpu_to_le16(cc_ext2->thr); + act = cc_ext2->dcn_qlevel_tbl_act[cc_ext2->idx]; + if (cc_ext2->ext2_mask & MODIFY_MASK_DCN_QLEVEL_TBL_CR) + act = DCN_SET_CR(&act, cc_ext2->cr); + if (cc_ext2->ext2_mask & MODIFY_MASK_DCN_QLEVEL_TBL_TR) + act = DCN_SET_TR(&act, cc_ext2->tr); + if (cc_ext2->ext2_mask & MODIFY_MASK_DCN_QLEVEL_TBL_INC_CNP) + act = DCN_SET_INC_CNP(&act, cc_ext2->cnp_inc); + if (cc_ext2->ext2_mask & MODIFY_MASK_DCN_QLEVEL_TBL_UPD_IMM) + act = DCN_SET_UPD_IMM(&act, cc_ext2->upd_imm); + ext2_req->dcn_qlevel_tbl_act = cpu_to_le32(act); + cc_ext2->ext2_mask = 0; + cc_ext2->dcn_qlevel_tbl_thr[cc_ext2->idx] = cc_ext2->thr; + cc_ext2->dcn_qlevel_tbl_act[cc_ext2->idx] = act; +} + +int bnxt_qplib_modify_cc(struct bnxt_qplib_res *res, + struct bnxt_qplib_cc_param *cc_param) +{ + struct bnxt_qplib_tlv_modify_cc_req tlv_req = {}; + struct creq_modify_roce_cc_resp resp = {}; + struct bnxt_qplib_cmdqmsg msg = {}; + struct cmdq_modify_roce_cc *req; + int req_size; + void *cmd; + int rc; + + /* Prepare the older base command */ + req = &tlv_req.base_req; + cmd = req; + req_size = sizeof(*req); + bnxt_qplib_rcfw_cmd_prep(req, CMDQ_BASE_OPCODE_MODIFY_ROCE_CC, + sizeof(*req)); + req->modify_mask = cpu_to_le32(cc_param->mask); + req->enable_cc = cc_param->enable; + req->g = cc_param->g; + req->num_phases_per_state = cc_param->nph_per_state; + req->time_per_phase = cc_param->time_pph; + req->pkts_per_phase = cc_param->pkts_pph; + req->init_cr = cpu_to_le16(cc_param->init_cr); + req->init_tr = cpu_to_le16(cc_param->init_tr); + 
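	/* DSCP and ECN are packed into a single field: DSCP is shifted to its field offset and ECN is masked into the low-order bits. */ +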
req->tos_dscp_tos_ecn = (cc_param->tos_dscp << + CMDQ_MODIFY_ROCE_CC_TOS_DSCP_SFT) | + (cc_param->tos_ecn & + CMDQ_MODIFY_ROCE_CC_TOS_ECN_MASK); + req->alt_vlan_pcp = cc_param->alt_vlan_pcp; + req->alt_tos_dscp = cpu_to_le16(cc_param->alt_tos_dscp); + req->rtt = cpu_to_le16(cc_param->rtt); + req->tcp_cp = cpu_to_le16(cc_param->tcp_cp); + req->cc_mode = cc_param->cc_mode; + req->inactivity_th = cpu_to_le16(cc_param->inact_th); + + /* For chip gen P5 onwards fill extended cmd and header */ + if (_is_chip_gen_p5_p7(res->cctx)) { + struct roce_tlv *hdr; + u32 payload; + u32 chunks; + bool dcn_enabled = BNXT_RE_DCN_ENABLED(res); + + cmd = &tlv_req; + req_size = sizeof(tlv_req); + if (!dcn_enabled) + req_size -= sizeof(struct cmdq_modify_roce_cc_gen2_tlv); + /* Prepare primary tlv header */ + hdr = &tlv_req.tlv_hdr; + chunks = CHUNKS(req_size); + payload = sizeof(struct cmdq_modify_roce_cc); + ROCE_1ST_TLV_PREP(hdr, chunks, payload, true); + /* Prepare secondary tlv header */ + hdr = (struct roce_tlv *)&tlv_req.ext_req; + payload = sizeof(struct cmdq_modify_roce_cc_gen1_tlv) - + sizeof(struct roce_tlv); + ROCE_EXT_TLV_PREP(hdr, TLV_TYPE_MODIFY_ROCE_CC_GEN1, payload, + dcn_enabled, true); + bnxt_qplib_fill_cc_gen1(&tlv_req.ext_req, &cc_param->cc_ext); + if (dcn_enabled) { + /* Prepare third tlv header */ + hdr = (struct roce_tlv *)&tlv_req.ext2_req; + payload = sizeof(struct cmdq_modify_roce_cc_gen2_tlv) - + sizeof(struct roce_tlv); + ROCE_EXT_TLV_PREP(hdr, TLV_TYPE_MODIFY_ROCE_CC_GEN2, payload, + false, true); + bnxt_qplib_fill_cc_gen2(&tlv_req.ext2_req, &cc_param->cc_ext2); + } + } + + bnxt_qplib_fill_cmdqmsg(&msg, cmd, &resp, NULL, req_size, + sizeof(resp), 0); + rc = bnxt_qplib_rcfw_send_message(res->rcfw, &msg); + return rc; +} + +void bnxt_qplib_read_cc_gen1(struct bnxt_qplib_cc_param_ext *cc_ext, + struct creq_query_roce_cc_gen1_resp_sb_tlv *sb) +{ + cc_ext->inact_th_hi = le16_to_cpu(sb->inactivity_th_hi); + cc_ext->min_delta_cnp = le16_to_cpu(sb->min_time_between_cnps); + cc_ext->init_cp = le16_to_cpu(sb->init_cp); + cc_ext->tr_update_mode = sb->tr_update_mode; + cc_ext->tr_update_cyls = sb->tr_update_cycles; + cc_ext->fr_rtt = sb->fr_num_rtts; + cc_ext->ai_rate_incr = sb->ai_rate_increase; + cc_ext->rr_rtt_th = le16_to_cpu(sb->reduction_relax_rtts_th); + cc_ext->ar_cr_th = le16_to_cpu(sb->additional_relax_cr_th); + cc_ext->cr_min_th = le16_to_cpu(sb->cr_min_th); + cc_ext->bw_avg_weight = sb->bw_avg_weight; + cc_ext->cr_factor = sb->actual_cr_factor; + cc_ext->cr_th_max_cp = le16_to_cpu(sb->max_cp_cr_th); + cc_ext->cp_bias_en = sb->cp_bias_en; + cc_ext->cp_bias = sb->cp_bias; + cc_ext->cnp_ecn = sb->cnp_ecn; + cc_ext->rtt_jitter_en = sb->rtt_jitter_en; + cc_ext->bytes_per_usec = le16_to_cpu(sb->link_bytes_per_usec); + cc_ext->cc_cr_reset_th = le16_to_cpu(sb->reset_cc_cr_th); + cc_ext->cr_width = sb->cr_width; + cc_ext->min_quota = sb->quota_period_min; + cc_ext->max_quota = sb->quota_period_max; + cc_ext->abs_max_quota = sb->quota_period_abs_max; + cc_ext->tr_lb = le16_to_cpu(sb->tr_lower_bound); + cc_ext->cr_prob_fac = sb->cr_prob_factor; + cc_ext->tr_prob_fac = sb->tr_prob_factor; + cc_ext->fair_cr_th = le16_to_cpu(sb->fairness_cr_th); + cc_ext->red_div = sb->red_div; + cc_ext->cnp_ratio_th = sb->cnp_ratio_th; + cc_ext->ai_ext_rtt = le16_to_cpu(sb->exp_ai_rtts); + cc_ext->exp_crcp_ratio = sb->exp_ai_cr_cp_ratio; + cc_ext->low_rate_en = sb->use_rate_table; + cc_ext->cpcr_update_th = le16_to_cpu(sb->cp_exp_update_th); + cc_ext->ai_rtt_th1 = le16_to_cpu(sb->high_exp_ai_rtts_th1); + 
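	/* The remaining RTT, congestion-free and severe-congestion thresholds plus link-rate and ACK-byte settings are read back the same way. */ +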
cc_ext->ai_rtt_th2 = le16_to_cpu(sb->high_exp_ai_rtts_th2); + cc_ext->cf_rtt_th = le16_to_cpu(sb->actual_cr_cong_free_rtts_th); + cc_ext->sc_cr_th1 = le16_to_cpu(sb->severe_cong_cr_th1); + cc_ext->sc_cr_th2 = le16_to_cpu(sb->severe_cong_cr_th2); + cc_ext->l64B_per_rtt = le32_to_cpu(sb->link64B_per_rtt); + cc_ext->cc_ack_bytes = sb->cc_ack_bytes; + cc_ext->reduce_cf_rtt_th = le16_to_cpu(sb->reduce_init_cong_free_rtts_th); +} + +void bnxt_qplib_read_cc_gen2(struct bnxt_qplib_cc_param_ext2 *cc_ext2, + struct creq_query_roce_cc_gen2_resp_sb_tlv *sb) +{ + int i; + + /* Clear mask to reflect the up-to-date data from HW */ + cc_ext2->ext2_mask = 0; + for (i = 7; i >= 0; i--) { + cc_ext2->dcn_qlevel_tbl_thr[i] = le16_to_cpu(sb->dcn_qlevel_tbl_thr[i]); + cc_ext2->dcn_qlevel_tbl_act[i] = le32_to_cpu(sb->dcn_qlevel_tbl_act[i]); + } +} + +int bnxt_qplib_query_cc_param(struct bnxt_qplib_res *res, + struct bnxt_qplib_cc_param *cc_param) +{ + struct bnxt_qplib_tlv_query_rcc_sb *ext_sb; + struct bnxt_qplib_rcfw *rcfw = res->rcfw; + struct creq_query_roce_cc_resp resp = {}; + struct creq_query_roce_cc_resp_sb *sb; + struct bnxt_qplib_cmdqmsg msg = {}; + struct cmdq_query_roce_cc req = {}; + struct bnxt_qplib_rcfw_sbuf sbuf; + size_t resp_size; + int rc; + + /* Query the parameters from chip */ + bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_QUERY_ROCE_CC, + sizeof(req)); + if (_is_chip_gen_p5_p7(res->cctx)) { + resp_size = sizeof(*ext_sb); + if (!BNXT_RE_DCN_ENABLED(res)) + resp_size -= sizeof(ext_sb->gen2_sb); + } else { + resp_size = sizeof(*sb); + } + sbuf.size = ALIGN(resp_size, BNXT_QPLIB_CMDQE_UNITS); + sbuf.sb = dma_zalloc_coherent(&rcfw->pdev->dev, sbuf.size, + &sbuf.dma_addr, GFP_KERNEL); + if (!sbuf.sb) + return -ENOMEM; + + req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS; + bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req), + sizeof(resp), 0); + rc = bnxt_qplib_rcfw_send_message(res->rcfw, &msg); + if (rc) { + dev_dbg(&res->pdev->dev, "%s:Query CC param failed:0x%x\n", + __func__, rc); + goto out; + } + + ext_sb = sbuf.sb; + sb = _is_chip_gen_p5_p7(res->cctx) ? 
&ext_sb->base_sb : + (struct creq_query_roce_cc_resp_sb *)ext_sb; + + cc_param->enable = sb->enable_cc & CREQ_QUERY_ROCE_CC_RESP_SB_ENABLE_CC; + cc_param->tos_ecn = (sb->tos_dscp_tos_ecn & + CREQ_QUERY_ROCE_CC_RESP_SB_TOS_ECN_MASK) >> + CREQ_QUERY_ROCE_CC_RESP_SB_TOS_ECN_SFT; + cc_param->tos_dscp = (sb->tos_dscp_tos_ecn & + CREQ_QUERY_ROCE_CC_RESP_SB_TOS_DSCP_MASK) >> + CREQ_QUERY_ROCE_CC_RESP_SB_TOS_DSCP_SFT; + cc_param->alt_tos_dscp = sb->alt_tos_dscp; + cc_param->alt_vlan_pcp = sb->alt_vlan_pcp; + + cc_param->g = sb->g; + cc_param->nph_per_state = sb->num_phases_per_state; + cc_param->init_cr = le16_to_cpu(sb->init_cr); + cc_param->init_tr = le16_to_cpu(sb->init_tr); + cc_param->cc_mode = sb->cc_mode; + cc_param->inact_th = le16_to_cpu(sb->inactivity_th); + cc_param->rtt = le16_to_cpu(sb->rtt); + cc_param->tcp_cp = le16_to_cpu(sb->tcp_cp); + cc_param->time_pph = sb->time_per_phase; + cc_param->pkts_pph = sb->pkts_per_phase; + if (_is_chip_gen_p5_p7(res->cctx)) { + bnxt_qplib_read_cc_gen1(&cc_param->cc_ext, &ext_sb->gen1_sb); + if (BNXT_RE_DCN_ENABLED(res)) + bnxt_qplib_read_cc_gen2(&cc_param->cc_ext2, + &ext_sb->gen2_sb); + } +out: + dma_free_coherent(&rcfw->pdev->dev, sbuf.size, + sbuf.sb, sbuf.dma_addr); + return rc; +} + +int bnxt_qplib_get_roce_error_stats(struct bnxt_qplib_rcfw *rcfw, + struct bnxt_qplib_roce_stats *stats, + struct bnxt_qplib_query_stats_info *sinfo) +{ + struct creq_query_roce_stats_resp resp = {}; + struct creq_query_roce_stats_resp_sb *sb; + struct cmdq_query_roce_stats req = {}; + struct bnxt_qplib_cmdqmsg msg = {}; + struct bnxt_qplib_rcfw_sbuf sbuf; + u16 cmd_flags = 0; + u32 fn_id = 0; + int rc = 0; + + bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_QUERY_ROCE_STATS, + sizeof(req)); + + sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS); + sbuf.sb = dma_zalloc_coherent(&rcfw->pdev->dev, sbuf.size, + &sbuf.dma_addr, GFP_KERNEL); + if (!sbuf.sb) + return -ENOMEM; + sb = sbuf.sb; + + if (rcfw->res->cctx->hwrm_intf_ver >= HWRM_VERSION_ROCE_STATS_FN_ID) { + if (sinfo->function_id != 0xFFFFFFFF) { + cmd_flags = CMDQ_QUERY_ROCE_STATS_FLAGS_FUNCTION_ID; + if (sinfo->vf_valid) { + fn_id = CMDQ_QUERY_ROCE_STATS_VF_VALID; + fn_id |= (sinfo->function_id << + CMDQ_QUERY_ROCE_STATS_VF_NUM_SFT) & + CMDQ_QUERY_ROCE_STATS_VF_NUM_MASK; + } else { + fn_id = sinfo->function_id & + CMDQ_QUERY_ROCE_STATS_PF_NUM_MASK; + } + } + + req.flags = cpu_to_le16(cmd_flags); + req.function_id = cpu_to_le32(fn_id); + + if (sinfo->collection_id != 0xFF) { + cmd_flags |= CMDQ_QUERY_ROCE_STATS_FLAGS_COLLECTION_ID; + req.collection_id = sinfo->collection_id; + } + } else { + /* For older HWRM version, the command length has to be + * adjusted. 8 bytes are more in the newer command. + * So subtract these 8 bytes for older HWRM version. + * command units are adjusted inside + * bnxt_qplib_rcfw_send_message. 
+ */ + req.cmd_size -= 8; + } + + req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS; + bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req), + sizeof(resp), 0); + rc = bnxt_qplib_rcfw_send_message(rcfw, &msg); + if (rc) + goto bail; + /* Extract the context from the side buffer */ + stats->to_retransmits = le64_to_cpu(sb->to_retransmits); + stats->seq_err_naks_rcvd = le64_to_cpu(sb->seq_err_naks_rcvd); + stats->max_retry_exceeded = le64_to_cpu(sb->max_retry_exceeded); + stats->rnr_naks_rcvd = le64_to_cpu(sb->rnr_naks_rcvd); + stats->missing_resp = le64_to_cpu(sb->missing_resp); + stats->unrecoverable_err = le64_to_cpu(sb->unrecoverable_err); + stats->bad_resp_err = le64_to_cpu(sb->bad_resp_err); + stats->local_qp_op_err = le64_to_cpu(sb->local_qp_op_err); + stats->local_protection_err = le64_to_cpu(sb->local_protection_err); + stats->mem_mgmt_op_err = le64_to_cpu(sb->mem_mgmt_op_err); + stats->remote_invalid_req_err = le64_to_cpu(sb->remote_invalid_req_err); + stats->remote_access_err = le64_to_cpu(sb->remote_access_err); + stats->remote_op_err = le64_to_cpu(sb->remote_op_err); + stats->dup_req = le64_to_cpu(sb->dup_req); + stats->res_exceed_max = le64_to_cpu(sb->res_exceed_max); + stats->res_length_mismatch = le64_to_cpu(sb->res_length_mismatch); + stats->res_exceeds_wqe = le64_to_cpu(sb->res_exceeds_wqe); + stats->res_opcode_err = le64_to_cpu(sb->res_opcode_err); + stats->res_rx_invalid_rkey = le64_to_cpu(sb->res_rx_invalid_rkey); + stats->res_rx_domain_err = le64_to_cpu(sb->res_rx_domain_err); + stats->res_rx_no_perm = le64_to_cpu(sb->res_rx_no_perm); + stats->res_rx_range_err = le64_to_cpu(sb->res_rx_range_err); + stats->res_tx_invalid_rkey = le64_to_cpu(sb->res_tx_invalid_rkey); + stats->res_tx_domain_err = le64_to_cpu(sb->res_tx_domain_err); + stats->res_tx_no_perm = le64_to_cpu(sb->res_tx_no_perm); + stats->res_tx_range_err = le64_to_cpu(sb->res_tx_range_err); + stats->res_irrq_oflow = le64_to_cpu(sb->res_irrq_oflow); + stats->res_unsup_opcode = le64_to_cpu(sb->res_unsup_opcode); + stats->res_unaligned_atomic = le64_to_cpu(sb->res_unaligned_atomic); + stats->res_rem_inv_err = le64_to_cpu(sb->res_rem_inv_err); + stats->res_mem_error = le64_to_cpu(sb->res_mem_error); + stats->res_srq_err = le64_to_cpu(sb->res_srq_err); + stats->res_cmp_err = le64_to_cpu(sb->res_cmp_err); + stats->res_invalid_dup_rkey = le64_to_cpu(sb->res_invalid_dup_rkey); + stats->res_wqe_format_err = le64_to_cpu(sb->res_wqe_format_err); + stats->res_cq_load_err = le64_to_cpu(sb->res_cq_load_err); + stats->res_srq_load_err = le64_to_cpu(sb->res_srq_load_err); + stats->res_tx_pci_err = le64_to_cpu(sb->res_tx_pci_err); + stats->res_rx_pci_err = le64_to_cpu(sb->res_rx_pci_err); + + if (!rcfw->init_oos_stats) { + rcfw->oos_prev = le64_to_cpu(sb->res_oos_drop_count); + rcfw->init_oos_stats = true; + } else { + stats->res_oos_drop_count += (le64_to_cpu(sb->res_oos_drop_count) - + rcfw->oos_prev) & + BNXT_QPLIB_OOS_COUNT_MASK; + rcfw->oos_prev = le64_to_cpu(sb->res_oos_drop_count); + } + + stats->active_qp_count_p0 = le64_to_cpu(sb->active_qp_count_p0); + stats->active_qp_count_p1 = le64_to_cpu(sb->active_qp_count_p1); + stats->active_qp_count_p2 = le64_to_cpu(sb->active_qp_count_p2); + stats->active_qp_count_p3 = le64_to_cpu(sb->active_qp_count_p3); +bail: + dma_free_coherent(&rcfw->pdev->dev, sbuf.size, + sbuf.sb, sbuf.dma_addr); + return rc; +} + +int bnxt_qplib_set_link_aggr_mode(struct bnxt_qplib_res *res, + u8 aggr_mode, u8 member_port_map, + u8 active_port_map, bool aggr_en, + u32 stats_fw_id) +{ + struct 
creq_set_link_aggr_mode_resources_resp resp = {}; + struct cmdq_set_link_aggr_mode_cc req = {}; + struct bnxt_qplib_rcfw *rcfw = res->rcfw; + struct bnxt_qplib_cmdqmsg msg = {}; + int rc = 0; + + bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_SET_LINK_AGGR_MODE, + sizeof(req)); + + req.aggr_enable = aggr_en; + req.active_port_map = active_port_map; + req.member_port_map = member_port_map; + req.link_aggr_mode = aggr_mode; + + /* need to specify only second port stats ctx id for now */ + req.stat_ctx_id[1] = cpu_to_le16((u16)(stats_fw_id)); + + req.modify_mask = + cpu_to_le32(CMDQ_SET_LINK_AGGR_MODE_MODIFY_MASK_AGGR_EN | + CMDQ_SET_LINK_AGGR_MODE_MODIFY_MASK_ACTIVE_PORT_MAP | + CMDQ_SET_LINK_AGGR_MODE_MODIFY_MASK_MEMBER_PORT_MAP | + CMDQ_SET_LINK_AGGR_MODE_MODIFY_MASK_AGGR_MODE | + CMDQ_SET_LINK_AGGR_MODE_MODIFY_MASK_STAT_CTX_ID); + + bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), + sizeof(resp), 0); + rc = bnxt_qplib_rcfw_send_message(rcfw, &msg); + if (rc) + dev_err(&res->pdev->dev, + "QPLIB: Failed to set link aggr mode, %#x", rc); + + return rc; +} + +int bnxt_qplib_qext_stat(struct bnxt_qplib_rcfw *rcfw, u32 fid, + struct bnxt_qplib_ext_stat *estat, + struct bnxt_qplib_query_stats_info *sinfo) +{ + struct creq_query_roce_stats_ext_resp resp = {}; + struct creq_query_roce_stats_ext_resp_sb *sb; + struct cmdq_query_roce_stats_ext req = {}; + struct bnxt_qplib_cmdqmsg msg = {}; + struct bnxt_qplib_rcfw_sbuf sbuf; + int rc; + + sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS); + sbuf.sb = dma_zalloc_coherent(&rcfw->pdev->dev, sbuf.size, + &sbuf.dma_addr, GFP_KERNEL); + if (!sbuf.sb) { + dev_err(&rcfw->pdev->dev, + "QPLIB: SP: QUERY_ROCE_STATS_EXT alloc sb failed"); + return -ENOMEM; + } + sb = sbuf.sb; + + bnxt_qplib_rcfw_cmd_prep(&req, + CMDQ_QUERY_ROCE_STATS_EXT_OPCODE_QUERY_ROCE_STATS, + sizeof(req)); + req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS; + req.resp_addr = cpu_to_le64(sbuf.dma_addr); + req.flags = cpu_to_le16(CMDQ_QUERY_ROCE_STATS_EXT_FLAGS_FUNCTION_ID); + if (_is_chip_p7(rcfw->res->cctx) && rcfw->res->is_vf) { + if (sinfo->vf_valid) + req.function_id = + cpu_to_le32(CMDQ_QUERY_ROCE_STATS_EXT_VF_VALID | + (fid << CMDQ_QUERY_ROCE_STATS_EXT_VF_NUM_SFT)); + else + req.flags = cpu_to_le16(0); + } else { + req.function_id = cpu_to_le32(fid); + } + + bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req), + sizeof(resp), 0); + rc = bnxt_qplib_rcfw_send_message(rcfw, &msg); + if (rc) + goto bail; + + /* dump when dyndbg is enabled */ + print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, sb, sizeof(*sb)); + estat->tx_atomic_req = le64_to_cpu(sb->tx_atomic_req_pkts); + estat->tx_read_req = le64_to_cpu(sb->tx_read_req_pkts); + estat->tx_read_res = le64_to_cpu(sb->tx_read_res_pkts); + estat->tx_write_req = le64_to_cpu(sb->tx_write_req_pkts); + estat->tx_send_req = le64_to_cpu(sb->tx_send_req_pkts); + estat->tx_roce_pkts = le64_to_cpu(sb->tx_roce_pkts); + estat->tx_roce_bytes = le64_to_cpu(sb->tx_roce_bytes); + estat->rx_atomic_req = le64_to_cpu(sb->rx_atomic_req_pkts); + estat->rx_read_req = le64_to_cpu(sb->rx_read_req_pkts); + estat->rx_read_res = le64_to_cpu(sb->rx_read_res_pkts); + estat->rx_write_req = le64_to_cpu(sb->rx_write_req_pkts); + estat->rx_send_req = le64_to_cpu(sb->rx_send_req_pkts); + estat->rx_roce_pkts = le64_to_cpu(sb->rx_roce_pkts); + estat->rx_roce_bytes = le64_to_cpu(sb->rx_roce_bytes); + estat->rx_roce_good_pkts = le64_to_cpu(sb->rx_roce_good_pkts); + estat->rx_roce_good_bytes = le64_to_cpu(sb->rx_roce_good_bytes); + 
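	/* Drop, congestion and retransmit counters follow: out-of-buffer and out-of-sequence drops, CNP tx/rx, ECN-marked packets, NAKs, missing responses and duplicate requests. */ +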
estat->rx_out_of_buffer = le64_to_cpu(sb->rx_out_of_buffer_pkts); + estat->rx_out_of_sequence = le64_to_cpu(sb->rx_out_of_sequence_pkts); + estat->tx_cnp = le64_to_cpu(sb->tx_cnp_pkts); + estat->rx_cnp = le64_to_cpu(sb->rx_cnp_pkts); + estat->rx_ecn_marked = le64_to_cpu(sb->rx_ecn_marked_pkts); + estat->seq_err_naks_rcvd = le64_to_cpu(sb->seq_err_naks_rcvd); + estat->rnr_naks_rcvd = le64_to_cpu(sb->rnr_naks_rcvd); + estat->missing_resp = le64_to_cpu(sb->missing_resp); + estat->to_retransmits = le64_to_cpu(sb->to_retransmit); + estat->dup_req = le64_to_cpu(sb->dup_req); + estat->rx_dcn_payload_cut = le64_to_cpu(sb->rx_dcn_payload_cut); + estat->te_bypassed = le64_to_cpu(sb->te_bypassed); +bail: + dma_free_coherent(&rcfw->pdev->dev, sbuf.size, + sbuf.sb, sbuf.dma_addr); + return rc; +} diff --git a/bnxt_re-1.10.3-229.0.139.0/qplib_sp.h b/bnxt_re-1.10.3-229.0.139.0/qplib_sp.h new file mode 100644 index 0000000..ade3634 --- /dev/null +++ b/bnxt_re-1.10.3-229.0.139.0/qplib_sp.h @@ -0,0 +1,453 @@ +/* + * Copyright (c) 2015-2023, Broadcom. All rights reserved. The term + * Broadcom refers to Broadcom Inc. and/or its subsidiaries. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
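In bnxt_qplib_get_roce_error_stats() above, res_oos_drop_count is not copied verbatim: the driver keeps the previous raw firmware sample in rcfw->oos_prev and only adds the masked difference to the running total, so the software counter keeps increasing even when the hardware value wraps within BNXT_QPLIB_OOS_COUNT_MASK. A minimal sketch of that idiom in plain C (hypothetical names and mask width, illustrative only):

	#include <stdint.h>

	#define OOS_COUNT_MASK 0xffffffffffffULL	/* stand-in for BNXT_QPLIB_OOS_COUNT_MASK */

	struct oos_ctr {
		uint64_t prev;		/* last raw sample read from firmware */
		uint64_t total;		/* monotonically increasing software total */
		int primed;		/* first sample only records the baseline */
	};

	static void oos_ctr_sample(struct oos_ctr *c, uint64_t raw)
	{
		if (!c->primed) {
			c->prev = raw;
			c->primed = 1;
			return;
		}
		/* Unsigned subtraction plus the mask keeps the delta correct
		 * across a wrap of the masked counter width.
		 */
		c->total += (raw - c->prev) & OOS_COUNT_MASK;
		c->prev = raw;
	}
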
+ * + * Author: Eddie Wai + * + * Description: Slow Path Operators (header) + */ + +#ifndef __BNXT_QPLIB_SP_H__ +#define __BNXT_QPLIB_SP_H__ + +#define BNXT_QPLIB_RESERVED_QP_WRS 128 + +/* DCN query */ +#define QUERY_DCN_QT_ACT_CR_MASK CREQ_QUERY_ROCE_CC_GEN2_RESP_SB_TLV_DCN_QLEVEL_TBL_ACT_CR_MASK +#define QUERY_DCN_QT_ACT_CR_SFT CREQ_QUERY_ROCE_CC_GEN2_RESP_SB_TLV_DCN_QLEVEL_TBL_ACT_CR_SFT +#define QUERY_DCN_QT_ACT_INC_CNP CREQ_QUERY_ROCE_CC_GEN2_RESP_SB_TLV_DCN_QLEVEL_TBL_ACT_INC_CNP +#define QUERY_DCN_QT_ACT_UPD_IMM CREQ_QUERY_ROCE_CC_GEN2_RESP_SB_TLV_DCN_QLEVEL_TBL_ACT_UPD_IMM +#define QUERY_DCN_QT_ACT_TR_MASK CREQ_QUERY_ROCE_CC_GEN2_RESP_SB_TLV_DCN_QLEVEL_TBL_ACT_TR_MASK +#define QUERY_DCN_QT_ACT_TR_SFT CREQ_QUERY_ROCE_CC_GEN2_RESP_SB_TLV_DCN_QLEVEL_TBL_ACT_TR_SFT +#define DCN_GET_CR(act) (((act) & QUERY_DCN_QT_ACT_CR_MASK) >> QUERY_DCN_QT_ACT_CR_SFT) +#define DCN_GET_TR(act) (((act) & QUERY_DCN_QT_ACT_TR_MASK) >> QUERY_DCN_QT_ACT_TR_SFT) +#define DCN_GET_INC_CNP(act) (((act) & QUERY_DCN_QT_ACT_INC_CNP) ? 1 : 0) +#define DCN_GET_UPD_IMM(act) (((act) & QUERY_DCN_QT_ACT_UPD_IMM) ? 1 : 0) + +/* DCN modify */ +#define MODIFY_MASK_DCN_QLEVEL_TBL_IDX CMDQ_MODIFY_ROCE_CC_GEN2_TLV_MODIFY_MASK_DCN_QLEVEL_TBL_IDX +#define MODIFY_MASK_DCN_QLEVEL_TBL_THR CMDQ_MODIFY_ROCE_CC_GEN2_TLV_MODIFY_MASK_DCN_QLEVEL_TBL_THR +#define MODIFY_MASK_DCN_QLEVEL_TBL_CR CMDQ_MODIFY_ROCE_CC_GEN2_TLV_MODIFY_MASK_DCN_QLEVEL_TBL_CR +#define MODIFY_MASK_DCN_QLEVEL_TBL_INC_CNP CMDQ_MODIFY_ROCE_CC_GEN2_TLV_MODIFY_MASK_DCN_QLEVEL_TBL_INC_CNP +#define MODIFY_MASK_DCN_QLEVEL_TBL_UPD_IMM CMDQ_MODIFY_ROCE_CC_GEN2_TLV_MODIFY_MASK_DCN_QLEVEL_TBL_UPD_IMM +#define MODIFY_MASK_DCN_QLEVEL_TBL_TR CMDQ_MODIFY_ROCE_CC_GEN2_TLV_MODIFY_MASK_DCN_QLEVEL_TBL_TR +#define MODIFY_DCN_QT_ACT_CR_MASK CMDQ_MODIFY_ROCE_CC_GEN2_TLV_DCN_QLEVEL_TBL_ACT_CR_MASK +#define MODIFY_DCN_QT_ACT_CR_SFT CMDQ_MODIFY_ROCE_CC_GEN2_TLV_DCN_QLEVEL_TBL_ACT_CR_SFT +#define MODIFY_DCN_QT_ACT_INC_CNP CMDQ_MODIFY_ROCE_CC_GEN2_TLV_DCN_QLEVEL_TBL_ACT_INC_CNP +#define MODIFY_DCN_QT_ACT_UPD_IMM CMDQ_MODIFY_ROCE_CC_GEN2_TLV_DCN_QLEVEL_TBL_ACT_UPD_IMM +#define MODIFY_DCN_QT_ACT_TR_MASK CMDQ_MODIFY_ROCE_CC_GEN2_TLV_DCN_QLEVEL_TBL_ACT_TR_MASK +#define MODIFY_DCN_QT_ACT_TR_SFT CMDQ_MODIFY_ROCE_CC_GEN2_TLV_DCN_QLEVEL_TBL_ACT_TR_SFT +#define DCN_SET_CR(act, cr) (((*(act)) & ~MODIFY_DCN_QT_ACT_CR_MASK) | ((cr) << MODIFY_DCN_QT_ACT_CR_SFT)) +#define DCN_SET_TR(act, tr) (((*(act)) & ~MODIFY_DCN_QT_ACT_TR_MASK) | ((tr) << MODIFY_DCN_QT_ACT_TR_SFT)) +#define DCN_SET_INC_CNP(act, ic) ((ic) ? ((*(act)) | MODIFY_DCN_QT_ACT_INC_CNP) : ((*(act)) & ~MODIFY_DCN_QT_ACT_INC_CNP)) +#define DCN_SET_UPD_IMM(act, ui) ((ui) ? ((*(act)) | MODIFY_DCN_QT_ACT_UPD_IMM) : ((*(act)) & ~MODIFY_DCN_QT_ACT_UPD_IMM)) + +#define BNXT_RE_DCN_ENABLED(res) \ + (_is_chip_p7((res)->cctx) && \ + (((res)->dattr->dev_cap_flags & CREQ_QUERY_FUNC_RESP_SB_CC_GENERATION_MASK) ==\ + CREQ_QUERY_FUNC_RESP_SB_CC_GENERATION_CC_GEN2)) + +/* Resource maximums reported by the firmware */ +struct bnxt_qplib_dev_attr { +#define FW_VER_ARR_LEN 4 + u8 fw_ver[FW_VER_ARR_LEN]; + u16 max_sgid; + u16 max_mrw; + u32 max_qp; +#define BNXT_QPLIB_MAX_OUT_RD_ATOM 126 + u32 max_qp_rd_atom; + u32 max_qp_init_rd_atom; + u32 max_qp_wqes; + u32 max_qp_sges; + u32 max_cq; + /* HW supports only 8K entries in PBL. + * So max CQEs that can be supported per CQ is 1M. 
+ */ +#define BNXT_QPLIB_MAX_CQ_WQES 0xfffff + u32 max_cq_wqes; + u32 max_cq_sges; + u32 max_mr; + u64 max_mr_size; + u32 max_pd; + u32 max_mw; + u32 max_raw_ethy_qp; + u32 max_ah; + u32 max_fmr; + u32 max_map_per_fmr; + u32 max_srq; + u32 max_srq_wqes; + u32 max_srq_sges; + u32 max_pkey; + u32 max_inline_data; + u32 l2_db_size; + u8 tqm_alloc_reqs[MAX_TQM_ALLOC_REQ]; + u8 is_atomic; + u8 dev_cap_ext_flags; + u16 dev_cap_flags; + u64 page_size_cap; + u32 max_dpi; +}; + +struct bnxt_qplib_pd { + u32 id; +}; + +struct bnxt_qplib_gid { + u8 data[16]; +}; + +struct bnxt_qplib_gid_info { + struct bnxt_qplib_gid gid; + u16 vlan_id; +}; + +struct bnxt_qplib_ah { + struct bnxt_qplib_gid dgid; + struct bnxt_qplib_pd *pd; + u32 id; + u8 sgid_index; + u8 host_sgid_index; /* For Query AH if the hw table and SW table are differnt */ + u8 traffic_class; + u32 flow_label; + u8 hop_limit; + u8 sl; +// u8 static_rate; /* 0 */ + u8 dmac[6]; + u16 vlan_id; + u8 nw_type; + u8 enable_cc; +}; + +struct bnxt_qplib_mrw { + struct bnxt_qplib_pd *pd; + int type; + u32 flags; +#define BNXT_QPLIB_FR_PMR 0x80000000 + u32 lkey; + u32 rkey; +#define BNXT_QPLIB_RSVD_LKEY 0xFFFFFFFF + u64 va; + u64 total_size; + u32 npages; + u64 mr_handle; + struct bnxt_qplib_hwq hwq; +}; + +struct bnxt_qplib_mrinfo { + struct bnxt_qplib_mrw *mrw; + struct bnxt_qplib_sg_info sg; + u64 *ptes; + bool is_dma; +}; + +struct bnxt_qplib_frpl { + int max_pg_ptrs; + struct bnxt_qplib_hwq hwq; +}; + +struct bnxt_qplib_cc_param_ext { + u64 ext_mask; + u16 inact_th_hi; + u16 min_delta_cnp; + u16 init_cp; + u8 tr_update_mode; + u8 tr_update_cyls; + u8 fr_rtt; + u8 ai_rate_incr; + u16 rr_rtt_th; + u16 ar_cr_th; + u16 cr_min_th; + u8 bw_avg_weight; + u8 cr_factor; + u16 cr_th_max_cp; + u8 cp_bias_en; + u8 cp_bias; + u8 cnp_ecn; + u8 rtt_jitter_en; + u16 bytes_per_usec; + u16 cc_cr_reset_th; + u8 cr_width; + u8 min_quota; + u8 max_quota; + u8 abs_max_quota; + u16 tr_lb; + u8 cr_prob_fac; + u8 tr_prob_fac; + u16 fair_cr_th; + u8 red_div; + u8 cnp_ratio_th; + u16 ai_ext_rtt; + u8 exp_crcp_ratio; + u8 low_rate_en; + u16 cpcr_update_th; + u16 ai_rtt_th1; + u16 ai_rtt_th2; + u16 cf_rtt_th; + u16 sc_cr_th1; /* severe congestion cr threshold 1 */ + u16 sc_cr_th2; /* severe congestion cr threshold 2 */ + u32 l64B_per_rtt; + u8 cc_ack_bytes; + u16 reduce_cf_rtt_th; +}; + +struct bnxt_qplib_cc_param_ext2 { + u64 ext2_mask; + u8 idx; + u16 thr; + u32 cr; + u32 tr; + u32 cnp_inc; + u32 upd_imm; + u16 dcn_qlevel_tbl_thr[8]; + u32 dcn_qlevel_tbl_act[8]; +}; + +struct bnxt_qplib_cc_param { + u8 alt_vlan_pcp; + u16 alt_tos_dscp; +#define BNXT_QPLIB_USER_DSCP_VALID 0x80 + u8 cnp_dscp_user; + u8 roce_dscp_user; + u8 cc_mode; + u8 enable; + u8 disable_prio_vlan_tx; + u16 inact_th; + u16 init_cr; + u16 init_tr; + u16 rtt; + u8 g; + u8 nph_per_state; + u8 time_pph; + u8 pkts_pph; + u8 tos_ecn; + u8 tos_dscp; + u8 qp1_tos_dscp; + /* To track if admin has enabled ECN explicitly */ + u8 admin_enable; + /* Mask used while programming the configfs values */ + u32 mask; + /* Mask used while displaying the configfs values */ + u32 cur_mask; + u8 roce_pri; +#define BNXT_QPLIB_CC_PARAM_MASK_VLAN_TX_DISABLE 0x40000 +#define BNXT_QPLIB_CC_PARAM_MASK_ROCE_PRI 0x80000 + /* prev value to clear dscp table */ + u8 prev_roce_pri; + u8 prev_alt_vlan_pcp; + u8 prev_tos_dscp; + u16 prev_alt_tos_dscp; + u16 tcp_cp; + struct bnxt_qplib_cc_param_ext cc_ext; + struct bnxt_qplib_cc_param_ext2 cc_ext2; +}; + +struct bnxt_qplib_roce_stats { + u64 to_retransmits; + u64 seq_err_naks_rcvd; + /* 
seq_err_naks_rcvd is 64 b */ + u64 max_retry_exceeded; + /* max_retry_exceeded is 64 b */ + u64 rnr_naks_rcvd; + /* rnr_naks_rcvd is 64 b */ + u64 missing_resp; + u64 unrecoverable_err; + /* unrecoverable_err is 64 b */ + u64 bad_resp_err; + /* bad_resp_err is 64 b */ + u64 local_qp_op_err; + /* local_qp_op_err is 64 b */ + u64 local_protection_err; + /* local_protection_err is 64 b */ + u64 mem_mgmt_op_err; + /* mem_mgmt_op_err is 64 b */ + u64 remote_invalid_req_err; + /* remote_invalid_req_err is 64 b */ + u64 remote_access_err; + /* remote_access_err is 64 b */ + u64 remote_op_err; + /* remote_op_err is 64 b */ + u64 dup_req; + /* dup_req is 64 b */ + u64 res_exceed_max; + /* res_exceed_max is 64 b */ + u64 res_length_mismatch; + /* res_length_mismatch is 64 b */ + u64 res_exceeds_wqe; + /* res_exceeds_wqe is 64 b */ + u64 res_opcode_err; + /* res_opcode_err is 64 b */ + u64 res_rx_invalid_rkey; + /* res_rx_invalid_rkey is 64 b */ + u64 res_rx_domain_err; + /* res_rx_domain_err is 64 b */ + u64 res_rx_no_perm; + /* res_rx_no_perm is 64 b */ + u64 res_rx_range_err; + /* res_rx_range_err is 64 b */ + u64 res_tx_invalid_rkey; + /* res_tx_invalid_rkey is 64 b */ + u64 res_tx_domain_err; + /* res_tx_domain_err is 64 b */ + u64 res_tx_no_perm; + /* res_tx_no_perm is 64 b */ + u64 res_tx_range_err; + /* res_tx_range_err is 64 b */ + u64 res_irrq_oflow; + /* res_irrq_oflow is 64 b */ + u64 res_unsup_opcode; + /* res_unsup_opcode is 64 b */ + u64 res_unaligned_atomic; + /* res_unaligned_atomic is 64 b */ + u64 res_rem_inv_err; + /* res_rem_inv_err is 64 b */ + u64 res_mem_error; + /* res_mem_error is 64 b */ + u64 res_srq_err; + /* res_srq_err is 64 b */ + u64 res_cmp_err; + /* res_cmp_err is 64 b */ + u64 res_invalid_dup_rkey; + /* res_invalid_dup_rkey is 64 b */ + u64 res_wqe_format_err; + /* res_wqe_format_err is 64 b */ + u64 res_cq_load_err; + /* res_cq_load_err is 64 b */ + u64 res_srq_load_err; + /* res_srq_load_err is 64 b */ + u64 res_tx_pci_err; + /* res_tx_pci_err is 64 b */ + u64 res_rx_pci_err; + /* res_rx_pci_err is 64 b */ + u64 res_oos_drop_count; + /* res_oos_drop_count */ + u64 active_qp_count_p0; + /* port 0 active qps */ + u64 active_qp_count_p1; + /* port 1 active qps */ + u64 active_qp_count_p2; + /* port 2 active qps */ + u64 active_qp_count_p3; + /* port 3 active qps */ +}; + +struct bnxt_qplib_ext_stat { + u64 tx_atomic_req; + u64 tx_read_req; + u64 tx_read_res; + u64 tx_write_req; + u64 tx_send_req; + u64 tx_roce_pkts; + u64 tx_roce_bytes; + u64 rx_atomic_req; + u64 rx_read_req; + u64 rx_read_res; + u64 rx_write_req; + u64 rx_send_req; + u64 rx_roce_pkts; + u64 rx_roce_bytes; + u64 rx_roce_good_pkts; + u64 rx_roce_good_bytes; + u64 rx_out_of_buffer; + u64 rx_out_of_sequence; + u64 tx_cnp; + u64 rx_cnp; + u64 rx_ecn_marked; + u64 seq_err_naks_rcvd; + u64 rnr_naks_rcvd; + u64 missing_resp; + u64 to_retransmits; + u64 dup_req; + u64 rx_dcn_payload_cut; + u64 te_bypassed; +}; + +#define BNXT_QPLIB_ACCESS_LOCAL_WRITE (1 << 0) +#define BNXT_QPLIB_ACCESS_REMOTE_READ (1 << 1) +#define BNXT_QPLIB_ACCESS_REMOTE_WRITE (1 << 2) +#define BNXT_QPLIB_ACCESS_REMOTE_ATOMIC (1 << 3) +#define BNXT_QPLIB_ACCESS_MW_BIND (1 << 4) +#define BNXT_QPLIB_ACCESS_ZERO_BASED (1 << 5) +#define BNXT_QPLIB_ACCESS_ON_DEMAND (1 << 6) + +int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res, + struct bnxt_qplib_sgid_tbl *sgid_tbl, int index, + struct bnxt_qplib_gid *gid); +int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, + struct bnxt_qplib_gid *gid, u16 vlan_id, bool update); +int 
bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, + struct bnxt_qplib_gid *gid, const u8 *mac, u16 vlan_id, + bool update, u32 *index); +int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, + struct bnxt_qplib_gid *gid, u16 gid_idx, const u8 *smac); +int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw); +int bnxt_qplib_set_func_resources(struct bnxt_qplib_res *res); +int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah, + bool block); +int bnxt_qplib_destroy_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah, + bool block); +int bnxt_qplib_alloc_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw); +int bnxt_qplib_dereg_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw, + bool block); +int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, + struct bnxt_qplib_mrinfo *mrinfo, bool block); +int bnxt_qplib_free_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr); +int bnxt_qplib_alloc_fast_reg_mr(struct bnxt_qplib_res *res, + struct bnxt_qplib_mrw *mr, int max); +int bnxt_qplib_alloc_fast_reg_page_list(struct bnxt_qplib_res *res, + struct bnxt_qplib_frpl *frpl, int max); +void bnxt_qplib_free_fast_reg_page_list(struct bnxt_qplib_res *res, + struct bnxt_qplib_frpl *frpl); +int bnxt_qplib_map_tc2cos(struct bnxt_qplib_res *res, u16 *cids); +int bnxt_qplib_modify_cc(struct bnxt_qplib_res *res, + struct bnxt_qplib_cc_param *cc_param); +int bnxt_qplib_query_cc_param(struct bnxt_qplib_res *res, + struct bnxt_qplib_cc_param *cc_param); +int bnxt_qplib_set_link_aggr_mode(struct bnxt_qplib_res *res, + u8 aggr_mode, u8 member_port_map, + u8 active_port_map, bool aggr_en, + u32 stats_fw_id); +int bnxt_qplib_get_roce_error_stats(struct bnxt_qplib_rcfw *rcfw, + struct bnxt_qplib_roce_stats *stats, + struct bnxt_qplib_query_stats_info *sinfo); +int bnxt_qplib_qext_stat(struct bnxt_qplib_rcfw *rcfw, u32 fid, + struct bnxt_qplib_ext_stat *estat, + struct bnxt_qplib_query_stats_info *sinfo); + +/* In variable wqe mode, sq_size is hwq.depth. FW is capping sq_size at 65535. + * In order to ensure hwq.depth <= 65535 after align up with 256, we need to + * define maximum wqe slightly smaller. + */ +#define BNXT_VAR_MAX_WQE 4352 +#define BNXT_VAR_MAX_SLOT_ALIGN 256 +#define BNXT_VAR_MAX_SGE 13 + +#endif diff --git a/bnxt_re-1.10.3-229.0.139.0/qplib_tlv.h b/bnxt_re-1.10.3-229.0.139.0/qplib_tlv.h new file mode 100644 index 0000000..14386bd --- /dev/null +++ b/bnxt_re-1.10.3-229.0.139.0/qplib_tlv.h @@ -0,0 +1,193 @@ +/* + * Copyright (c) 2017 - 2020, Broadcom. All rights reserved. The term + * Broadcom refers to Broadcom Inc. and/or its subsidiaries. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
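The BNXT_VAR_MAX_WQE value defined above is chosen so that, after multiplying by the worst-case number of slots per variable-size WQE and rounding the queue depth up to BNXT_VAR_MAX_SLOT_ALIGN, the result still fits under the 65535 firmware cap mentioned in the comment. A small check of that arithmetic in plain C (the 15-slots-per-WQE figure is an assumption for illustration, not taken from the patch):

	#include <assert.h>
	#include <stdint.h>

	#define VAR_MAX_WQE	4352	/* BNXT_VAR_MAX_WQE */
	#define VAR_SLOT_ALIGN	256	/* BNXT_VAR_MAX_SLOT_ALIGN */
	#define FW_SQ_SIZE_CAP	65535	/* firmware cap cited in the comment */
	#define SLOTS_PER_WQE	15	/* assumed worst case for BNXT_VAR_MAX_SGE sges */

	#define ALIGN_UP(x, a)	(((x) + (a) - 1) / (a) * (a))

	int main(void)
	{
		uint32_t depth = ALIGN_UP(VAR_MAX_WQE * SLOTS_PER_WQE, VAR_SLOT_ALIGN);

		/* 4352 * 15 = 65280, already a multiple of 256 and below 65535 */
		assert(depth <= FW_SQ_SIZE_CAP);
		return 0;
	}
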
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef __QPLIB_TLV_H__ +#define __QPLIB_TLV_H__ + +struct roce_tlv { + struct tlv tlv; + u8 total_size; // in units of 16 byte chunks + u8 unused[7]; // for 16 byte alignment +}; + +#define CHUNK_SIZE 16 +#define CHUNKS(x) (((x) + CHUNK_SIZE - 1) / CHUNK_SIZE) + +#define ROCE_1ST_TLV_PREP(rtlv, tot_chunks, content_bytes, more) \ + do { \ + (rtlv)->tlv.cmd_discr = CMD_DISCR_TLV_ENCAP; \ + (rtlv)->tlv.tlv_type = TLV_TYPE_ROCE_SP_COMMAND; \ + (rtlv)->tlv.length = (content_bytes); \ + (rtlv)->tlv.flags = TLV_FLAGS_REQUIRED; \ + (rtlv)->tlv.flags |= (more) ? TLV_FLAGS_MORE : 0; \ + (rtlv)->total_size = (tot_chunks); \ + } while (0) + +#define ROCE_EXT_TLV_PREP(rtlv, ext_type, content_bytes, more, reqd) \ + do { \ + (rtlv)->tlv.cmd_discr = CMD_DISCR_TLV_ENCAP; \ + (rtlv)->tlv.tlv_type = (ext_type); \ + (rtlv)->tlv.length = (content_bytes); \ + (rtlv)->tlv.flags |= (more) ? TLV_FLAGS_MORE : 0; \ + (rtlv)->tlv.flags |= (reqd) ? TLV_FLAGS_REQUIRED : 0; \ + } while (0) + +/* + * TLV size in units of 16 byte chunks + */ +#define TLV_SIZE ((sizeof(struct roce_tlv) + 15) / 16) +/* + * TLV length in bytes + */ +#define TLV_BYTES (TLV_SIZE * 16) + +#define HAS_TLV_HEADER(msg) (((struct tlv *)(msg))->cmd_discr == CMD_DISCR_TLV_ENCAP) +#define GET_TLV_DATA(tlv) ((void *)&((uint8_t *)(tlv))[TLV_BYTES]) + +static inline u8 __get_cmdq_base_opcode(struct cmdq_base *req, u32 size) +{ + if (HAS_TLV_HEADER(req) && size > TLV_BYTES) + return ((struct cmdq_base *)GET_TLV_DATA(req))->opcode; + else + return req->opcode; +} + +static inline void __set_cmdq_base_opcode(struct cmdq_base *req, + u32 size, u8 val) +{ + if (HAS_TLV_HEADER(req) && size > TLV_BYTES) + ((struct cmdq_base *)GET_TLV_DATA(req))->opcode = val; + else + req->opcode = val; +} + +static inline __le16 __get_cmdq_base_cookie(struct cmdq_base *req, u32 size) +{ + if (HAS_TLV_HEADER(req) && size > TLV_BYTES) + return ((struct cmdq_base *)GET_TLV_DATA(req))->cookie; + else + return req->cookie; +} + +static inline void __set_cmdq_base_cookie(struct cmdq_base *req, + u32 size, __le16 val) +{ + if (HAS_TLV_HEADER(req) && size > TLV_BYTES) + ((struct cmdq_base *)GET_TLV_DATA(req))->cookie = val; + else + req->cookie = val; +} + +static inline __le64 __get_cmdq_base_resp_addr(struct cmdq_base *req, u32 size) +{ + if (HAS_TLV_HEADER(req) && size > TLV_BYTES) + return ((struct cmdq_base *)GET_TLV_DATA(req))->resp_addr; + else + return req->resp_addr; +} + +static inline void __set_cmdq_base_resp_addr(struct cmdq_base *req, + u32 size, __le64 val) +{ + if (HAS_TLV_HEADER(req) && size > TLV_BYTES) + ((struct cmdq_base *)GET_TLV_DATA(req))->resp_addr = val; + else + req->resp_addr = val; +} + +static inline u8 __get_cmdq_base_resp_size(struct cmdq_base *req, u32 size) +{ + if 
(HAS_TLV_HEADER(req) && size > TLV_BYTES) + return ((struct cmdq_base *)GET_TLV_DATA(req))->resp_size; + else + return req->resp_size; +} + +static inline void __set_cmdq_base_resp_size(struct cmdq_base *req, + u32 size, u8 val) +{ + if (HAS_TLV_HEADER(req) && size > TLV_BYTES) + ((struct cmdq_base *)GET_TLV_DATA(req))->resp_size = val; + else + req->resp_size = val; +} + +static inline u8 __get_cmdq_base_cmd_size(struct cmdq_base *req, u32 size) +{ + if (HAS_TLV_HEADER(req) && size > TLV_BYTES) + return ((struct roce_tlv *)(req))->total_size; + else + return req->cmd_size; +} + +static inline void __set_cmdq_base_cmd_size(struct cmdq_base *req, + u32 size, u8 val) +{ + if (HAS_TLV_HEADER(req) && size > TLV_BYTES) + ((struct cmdq_base *)GET_TLV_DATA(req))->cmd_size = val; + else + req->cmd_size = val; +} + +static inline __le16 __get_cmdq_base_flags(struct cmdq_base *req, u32 size) +{ + if (HAS_TLV_HEADER(req) && size > TLV_BYTES) + return ((struct cmdq_base *)GET_TLV_DATA(req))->flags; + else + return req->flags; +} + +static inline void __set_cmdq_base_flags(struct cmdq_base *req, + u32 size, __le16 val) +{ + if (HAS_TLV_HEADER(req) && size > TLV_BYTES) + ((struct cmdq_base *)GET_TLV_DATA(req))->flags = val; + else + req->flags = val; +} + +struct bnxt_qplib_tlv_modify_cc_req { + struct roce_tlv tlv_hdr; + struct cmdq_modify_roce_cc base_req; + __le64 tlvpad; + struct cmdq_modify_roce_cc_gen1_tlv ext_req; + struct cmdq_modify_roce_cc_gen2_tlv ext2_req; +}; + +struct bnxt_qplib_tlv_query_rcc_sb { + struct roce_tlv tlv_hdr; + struct creq_query_roce_cc_resp_sb base_sb; + struct creq_query_roce_cc_gen1_resp_sb_tlv gen1_sb; + struct creq_query_roce_cc_gen2_resp_sb_tlv gen2_sb; +}; +#endif /* __QPLIB_TLV_H__ */ diff --git a/bnxt_re-1.10.3-229.0.139.0/roce_hsi.h b/bnxt_re-1.10.3-229.0.139.0/roce_hsi.h new file mode 100644 index 0000000..f36e4f5 --- /dev/null +++ b/bnxt_re-1.10.3-229.0.139.0/roce_hsi.h @@ -0,0 +1,6615 @@ +/* + * Copyright (c) 2016 - 2023, Broadcom. All rights reserved. The term + * Broadcom refers to Broadcom Inc. and/or its subsidiaries. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS 'AS IS' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * DO NOT MODIFY!!! This file is automatically generated.
+ */
+#ifndef _ROCE_HSI_H_
+#define _ROCE_HSI_H_
+
+#include <linux/types.h>
+
+/* tx_doorbell (size:32b/4B) */
+struct tx_doorbell {
+	__le32 key_idx;
+	#define TX_DOORBELL_IDX_MASK 0xffffffUL
+	#define TX_DOORBELL_IDX_SFT 0
+	#define TX_DOORBELL_KEY_MASK 0xf0000000UL
+	#define TX_DOORBELL_KEY_SFT 28
+	#define TX_DOORBELL_KEY_TX (0x0UL << 28)
+	#define TX_DOORBELL_KEY_LAST TX_DOORBELL_KEY_TX
+};
+
+/* rx_doorbell (size:32b/4B) */
+struct rx_doorbell {
+	__le32 key_idx;
+	#define RX_DOORBELL_IDX_MASK 0xffffffUL
+	#define RX_DOORBELL_IDX_SFT 0
+	#define RX_DOORBELL_KEY_MASK 0xf0000000UL
+	#define RX_DOORBELL_KEY_SFT 28
+	#define RX_DOORBELL_KEY_RX (0x1UL << 28)
+	#define RX_DOORBELL_KEY_LAST RX_DOORBELL_KEY_RX
+};
+
+/* cmpl_doorbell (size:32b/4B) */
+struct cmpl_doorbell {
+	__le32 key_mask_valid_idx;
+	#define CMPL_DOORBELL_IDX_MASK 0xffffffUL
+	#define CMPL_DOORBELL_IDX_SFT 0
+	#define CMPL_DOORBELL_IDX_VALID 0x4000000UL
+	#define CMPL_DOORBELL_MASK 0x8000000UL
+	#define CMPL_DOORBELL_KEY_MASK 0xf0000000UL
+	#define CMPL_DOORBELL_KEY_SFT 28
+	#define CMPL_DOORBELL_KEY_CMPL (0x2UL << 28)
+	#define CMPL_DOORBELL_KEY_LAST CMPL_DOORBELL_KEY_CMPL
+};
+
+/* status_doorbell (size:32b/4B) */
+struct status_doorbell {
+	__le32 key_idx;
+	#define STATUS_DOORBELL_IDX_MASK 0xffffffUL
+	#define STATUS_DOORBELL_IDX_SFT 0
+	#define STATUS_DOORBELL_KEY_MASK 0xf0000000UL
+	#define STATUS_DOORBELL_KEY_SFT 28
+	#define STATUS_DOORBELL_KEY_STAT (0x3UL << 28)
+	#define STATUS_DOORBELL_KEY_LAST STATUS_DOORBELL_KEY_STAT
+};
+
+/* push32_doorbell (size:1024b/128B) */
+struct push32_doorbell {
+	__le32 key_sz_idx;
+	#define PUSH32_DOORBELL_IDX_MASK 0xffffffUL
+	#define PUSH32_DOORBELL_IDX_SFT 0
+	#define PUSH32_DOORBELL_SZ_MASK 0xf000000UL
+	#define PUSH32_DOORBELL_SZ_SFT 24
+	#define PUSH32_DOORBELL_KEY_MASK 0xf0000000UL
+	#define PUSH32_DOORBELL_KEY_SFT 28
+	#define PUSH32_DOORBELL_KEY_PUSH (0x4UL << 28)
+	#define PUSH32_DOORBELL_KEY_LAST PUSH32_DOORBELL_KEY_PUSH
+	__le16 flags_type;
+	#define PUSH32_DOORBELL_TYPE_MASK 0x3fUL
+	#define PUSH32_DOORBELL_TYPE_SFT 0
+	#define PUSH32_DOORBELL_TYPE_TX_BD_LONG 0x10UL
+	#define PUSH32_DOORBELL_TYPE_LAST PUSH32_DOORBELL_TYPE_TX_BD_LONG
+	#define PUSH32_DOORBELL_FLAGS_MASK 0xffc0UL
+	#define PUSH32_DOORBELL_FLAGS_SFT 6
+	#define PUSH32_DOORBELL_FLAGS_PACKET_END 0x40UL
+	#define PUSH32_DOORBELL_FLAGS_NO_CMPL 0x80UL
+	#define PUSH32_DOORBELL_FLAGS_BD_CNT_MASK 0x1f00UL
+	#define PUSH32_DOORBELL_FLAGS_BD_CNT_SFT 8
+	#define PUSH32_DOORBELL_FLAGS_LHINT_MASK 0x6000UL
+	#define PUSH32_DOORBELL_FLAGS_LHINT_SFT 13
+	#define PUSH32_DOORBELL_FLAGS_LHINT_LT512 (0x0UL << 13)
+	#define PUSH32_DOORBELL_FLAGS_LHINT_LT1K (0x1UL << 13)
+	#define PUSH32_DOORBELL_FLAGS_LHINT_LT2K (0x2UL << 13)
+	#define PUSH32_DOORBELL_FLAGS_LHINT_GTE2K (0x3UL << 13)
+	#define PUSH32_DOORBELL_FLAGS_LHINT_LAST PUSH32_DOORBELL_FLAGS_LHINT_GTE2K
+	#define PUSH32_DOORBELL_FLAGS_COAL_NOW 0x8000UL
+	__le16 len;
+	__le32 opaque;
+	__le16 
lflags; + #define PUSH32_DOORBELL_LFLAGS_TCP_UDP_CHKSUM 0x1UL + #define PUSH32_DOORBELL_LFLAGS_IP_CHKSUM 0x2UL + #define PUSH32_DOORBELL_LFLAGS_NOCRC 0x4UL + #define PUSH32_DOORBELL_LFLAGS_STAMP 0x8UL + #define PUSH32_DOORBELL_LFLAGS_T_IP_CHKSUM 0x10UL + #define PUSH32_DOORBELL_LFLAGS_LSO 0x20UL + #define PUSH32_DOORBELL_LFLAGS_IPID_FMT 0x40UL + #define PUSH32_DOORBELL_LFLAGS_T_IPID 0x80UL + #define PUSH32_DOORBELL_LFLAGS_ROCE_CRC 0x100UL + #define PUSH32_DOORBELL_LFLAGS_FCOE_CRC 0x200UL + __le16 hdr_size; + #define PUSH32_DOORBELL_HDR_SIZE_MASK 0x1ffUL + #define PUSH32_DOORBELL_HDR_SIZE_SFT 0 + __le32 mss; + #define PUSH32_DOORBELL_MSS_MASK 0x7fffUL + #define PUSH32_DOORBELL_MSS_SFT 0 + __le16 unused_2; + __le16 cfa_action; + __le32 cfa_meta; + #define PUSH32_DOORBELL_CFA_META_VLAN_VID_MASK 0xfffUL + #define PUSH32_DOORBELL_CFA_META_VLAN_VID_SFT 0 + #define PUSH32_DOORBELL_CFA_META_VLAN_DE 0x1000UL + #define PUSH32_DOORBELL_CFA_META_VLAN_PRI_MASK 0xe000UL + #define PUSH32_DOORBELL_CFA_META_VLAN_PRI_SFT 13 + #define PUSH32_DOORBELL_CFA_META_VLAN_TPID_MASK 0x70000UL + #define PUSH32_DOORBELL_CFA_META_VLAN_TPID_SFT 16 + #define PUSH32_DOORBELL_CFA_META_VLAN_TPID_TPID88A8 (0x0UL << 16) + #define PUSH32_DOORBELL_CFA_META_VLAN_TPID_TPID8100 (0x1UL << 16) + #define PUSH32_DOORBELL_CFA_META_VLAN_TPID_TPID9100 (0x2UL << 16) + #define PUSH32_DOORBELL_CFA_META_VLAN_TPID_TPID9200 (0x3UL << 16) + #define PUSH32_DOORBELL_CFA_META_VLAN_TPID_TPID9300 (0x4UL << 16) + #define PUSH32_DOORBELL_CFA_META_VLAN_TPID_TPIDCFG (0x5UL << 16) + #define PUSH32_DOORBELL_CFA_META_VLAN_TPID_LAST PUSH32_DOORBELL_CFA_META_VLAN_TPID_TPIDCFG + #define PUSH32_DOORBELL_CFA_META_VLAN_RESERVED_MASK 0xff80000UL + #define PUSH32_DOORBELL_CFA_META_VLAN_RESERVED_SFT 19 + #define PUSH32_DOORBELL_CFA_META_KEY_MASK 0xf0000000UL + #define PUSH32_DOORBELL_CFA_META_KEY_SFT 28 + #define PUSH32_DOORBELL_CFA_META_KEY_NONE (0x0UL << 28) + #define PUSH32_DOORBELL_CFA_META_KEY_VLAN_TAG (0x1UL << 28) + #define PUSH32_DOORBELL_CFA_META_KEY_LAST PUSH32_DOORBELL_CFA_META_KEY_VLAN_TAG + __le32 data[25]; +}; +#define ROCE_SP_HSI_VERSION_MAJOR 1 +#define ROCE_SP_HSI_VERSION_MINOR 8 +#define ROCE_SP_HSI_VERSION_UPDATE 4 +#define ROCE_SP_HSI_VERSION_STR "1.8.4" +#define ROCE_SP_HSI_NA_SIGNATURE ((__le32)(-1)) + +/* cmdq_init (size:128b/16B) */ +struct cmdq_init { + __le64 cmdq_pbl; + __le16 cmdq_size_cmdq_lvl; + #define CMDQ_INIT_CMDQ_LVL_MASK 0x3UL + #define CMDQ_INIT_CMDQ_LVL_SFT 0 + #define CMDQ_INIT_CMDQ_SIZE_MASK 0xfffcUL + #define CMDQ_INIT_CMDQ_SIZE_SFT 2 + __le16 creq_ring_id; + __le32 prod_idx; +}; + +/* cmdq_update (size:128b/16B) */ +struct cmdq_update { + __le64 reserved64; + __le32 reserved32; + __le32 prod_idx; +}; + +/* cmdq_base (size:128b/16B) */ +struct cmdq_base { + u8 opcode; + #define CMDQ_BASE_OPCODE_CREATE_QP 0x1UL + #define CMDQ_BASE_OPCODE_DESTROY_QP 0x2UL + #define CMDQ_BASE_OPCODE_MODIFY_QP 0x3UL + #define CMDQ_BASE_OPCODE_QUERY_QP 0x4UL + #define CMDQ_BASE_OPCODE_CREATE_SRQ 0x5UL + #define CMDQ_BASE_OPCODE_DESTROY_SRQ 0x6UL + #define CMDQ_BASE_OPCODE_QUERY_SRQ 0x8UL + #define CMDQ_BASE_OPCODE_CREATE_CQ 0x9UL + #define CMDQ_BASE_OPCODE_DESTROY_CQ 0xaUL + #define CMDQ_BASE_OPCODE_RESIZE_CQ 0xcUL + #define CMDQ_BASE_OPCODE_ALLOCATE_MRW 0xdUL + #define CMDQ_BASE_OPCODE_DEALLOCATE_KEY 0xeUL + #define CMDQ_BASE_OPCODE_REGISTER_MR 0xfUL + #define CMDQ_BASE_OPCODE_DEREGISTER_MR 0x10UL + #define CMDQ_BASE_OPCODE_ADD_GID 0x11UL + #define CMDQ_BASE_OPCODE_DELETE_GID 0x12UL + #define CMDQ_BASE_OPCODE_MODIFY_GID 0x17UL + #define 
CMDQ_BASE_OPCODE_QUERY_GID 0x18UL + #define CMDQ_BASE_OPCODE_CREATE_QP1 0x13UL + #define CMDQ_BASE_OPCODE_DESTROY_QP1 0x14UL + #define CMDQ_BASE_OPCODE_CREATE_AH 0x15UL + #define CMDQ_BASE_OPCODE_DESTROY_AH 0x16UL + #define CMDQ_BASE_OPCODE_INITIALIZE_FW 0x80UL + #define CMDQ_BASE_OPCODE_DEINITIALIZE_FW 0x81UL + #define CMDQ_BASE_OPCODE_STOP_FUNC 0x82UL + #define CMDQ_BASE_OPCODE_QUERY_FUNC 0x83UL + #define CMDQ_BASE_OPCODE_SET_FUNC_RESOURCES 0x84UL + #define CMDQ_BASE_OPCODE_READ_CONTEXT 0x85UL + #define CMDQ_BASE_OPCODE_VF_BACKCHANNEL_REQUEST 0x86UL + #define CMDQ_BASE_OPCODE_READ_VF_MEMORY 0x87UL + #define CMDQ_BASE_OPCODE_COMPLETE_VF_REQUEST 0x88UL + #define CMDQ_BASE_OPCODE_EXTEND_CONTEXT_ARRRAY 0x89UL + #define CMDQ_BASE_OPCODE_MAP_TC_TO_COS 0x8aUL + #define CMDQ_BASE_OPCODE_QUERY_VERSION 0x8bUL + #define CMDQ_BASE_OPCODE_MODIFY_ROCE_CC 0x8cUL + #define CMDQ_BASE_OPCODE_QUERY_ROCE_CC 0x8dUL + #define CMDQ_BASE_OPCODE_QUERY_ROCE_STATS 0x8eUL + #define CMDQ_BASE_OPCODE_SET_LINK_AGGR_MODE 0x8fUL + #define CMDQ_BASE_OPCODE_MODIFY_CQ 0x90UL + #define CMDQ_BASE_OPCODE_QUERY_QP_EXTEND 0x91UL + #define CMDQ_BASE_OPCODE_QUERY_ROCE_STATS_EXT 0x92UL + #define CMDQ_BASE_OPCODE_ORCHESTRATE_QID_MIGRATION 0x93UL + #define CMDQ_BASE_OPCODE_CREATE_QP_BATCH 0x94UL + #define CMDQ_BASE_OPCODE_DESTROY_QP_BATCH 0x95UL + #define CMDQ_BASE_OPCODE_ALLOCATE_ROCE_STATS_EXT_CTX 0x96UL + #define CMDQ_BASE_OPCODE_DEALLOCATE_ROCE_STATS_EXT_CTX 0x97UL + #define CMDQ_BASE_OPCODE_QUERY_ROCE_STATS_EXT_V2 0x98UL + #define CMDQ_BASE_OPCODE_LAST CMDQ_BASE_OPCODE_QUERY_ROCE_STATS_EXT_V2 + u8 cmd_size; + __le16 flags; + __le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 resp_addr; +}; + +/* creq_base (size:128b/16B) */ +struct creq_base { + u8 type; + #define CREQ_BASE_TYPE_MASK 0x3fUL + #define CREQ_BASE_TYPE_SFT 0 + #define CREQ_BASE_TYPE_QP_EVENT 0x38UL + #define CREQ_BASE_TYPE_FUNC_EVENT 0x3aUL + #define CREQ_BASE_TYPE_LAST CREQ_BASE_TYPE_FUNC_EVENT + u8 reserved56[7]; + u8 v; + #define CREQ_BASE_V 0x1UL + u8 event; + u8 reserved48[6]; +}; + +/* create_qp_batch_data (size:768b/96B) */ +struct create_qp_batch_data { + __le64 qp_handle; + __le32 qp_flags; + #define CREATE_QP_BATCH_DATA_QP_FLAGS_SRQ_USED 0x1UL + #define CREATE_QP_BATCH_DATA_QP_FLAGS_FORCE_COMPLETION 0x2UL + #define CREATE_QP_BATCH_DATA_QP_FLAGS_RESERVED_LKEY_ENABLE 0x4UL + #define CREATE_QP_BATCH_DATA_QP_FLAGS_FR_PMR_ENABLED 0x8UL + #define CREATE_QP_BATCH_DATA_QP_FLAGS_VARIABLE_SIZED_WQE_ENABLED 0x10UL + #define CREATE_QP_BATCH_DATA_QP_FLAGS_OPTIMIZED_TRANSMIT_ENABLED 0x20UL + #define CREATE_QP_BATCH_DATA_QP_FLAGS_RESPONDER_UD_CQE_WITH_CFA 0x40UL + #define CREATE_QP_BATCH_DATA_QP_FLAGS_EXT_STATS_ENABLED 0x80UL + #define CREATE_QP_BATCH_DATA_QP_FLAGS_EXPRESS_MODE_ENABLED 0x100UL + #define CREATE_QP_BATCH_DATA_QP_FLAGS_STEERING_TAG_VALID 0x200UL + #define CREATE_QP_BATCH_DATA_QP_FLAGS_RDMA_READ_OR_ATOMICS_USED 0x400UL + #define CREATE_QP_BATCH_DATA_QP_FLAGS_EXT_STATS_CTX_VALID 0x800UL + #define CREATE_QP_BATCH_DATA_QP_FLAGS_SCHQ_ID_VALID 0x1000UL + #define CREATE_QP_BATCH_DATA_QP_FLAGS_LAST CREATE_QP_BATCH_DATA_QP_FLAGS_SCHQ_ID_VALID + u8 type; + #define CREATE_QP_BATCH_DATA_TYPE_RC 0x2UL + #define CREATE_QP_BATCH_DATA_TYPE_UD 0x4UL + #define CREATE_QP_BATCH_DATA_TYPE_RAW_ETHERTYPE 0x6UL + #define CREATE_QP_BATCH_DATA_TYPE_GSI 0x7UL + #define CREATE_QP_BATCH_DATA_TYPE_LAST CREATE_QP_BATCH_DATA_TYPE_GSI + u8 sq_pg_size_sq_lvl; + #define CREATE_QP_BATCH_DATA_SQ_LVL_MASK 0xfUL + #define CREATE_QP_BATCH_DATA_SQ_LVL_SFT 0 + #define 
CREATE_QP_BATCH_DATA_SQ_LVL_LVL_0 0x0UL + #define CREATE_QP_BATCH_DATA_SQ_LVL_LVL_1 0x1UL + #define CREATE_QP_BATCH_DATA_SQ_LVL_LVL_2 0x2UL + #define CREATE_QP_BATCH_DATA_SQ_LVL_LAST CREATE_QP_BATCH_DATA_SQ_LVL_LVL_2 + #define CREATE_QP_BATCH_DATA_SQ_PG_SIZE_MASK 0xf0UL + #define CREATE_QP_BATCH_DATA_SQ_PG_SIZE_SFT 4 + #define CREATE_QP_BATCH_DATA_SQ_PG_SIZE_PG_4K (0x0UL << 4) + #define CREATE_QP_BATCH_DATA_SQ_PG_SIZE_PG_8K (0x1UL << 4) + #define CREATE_QP_BATCH_DATA_SQ_PG_SIZE_PG_64K (0x2UL << 4) + #define CREATE_QP_BATCH_DATA_SQ_PG_SIZE_PG_2M (0x3UL << 4) + #define CREATE_QP_BATCH_DATA_SQ_PG_SIZE_PG_8M (0x4UL << 4) + #define CREATE_QP_BATCH_DATA_SQ_PG_SIZE_PG_1G (0x5UL << 4) + #define CREATE_QP_BATCH_DATA_SQ_PG_SIZE_LAST CREATE_QP_BATCH_DATA_SQ_PG_SIZE_PG_1G + u8 rq_pg_size_rq_lvl; + #define CREATE_QP_BATCH_DATA_RQ_LVL_MASK 0xfUL + #define CREATE_QP_BATCH_DATA_RQ_LVL_SFT 0 + #define CREATE_QP_BATCH_DATA_RQ_LVL_LVL_0 0x0UL + #define CREATE_QP_BATCH_DATA_RQ_LVL_LVL_1 0x1UL + #define CREATE_QP_BATCH_DATA_RQ_LVL_LVL_2 0x2UL + #define CREATE_QP_BATCH_DATA_RQ_LVL_LAST CREATE_QP_BATCH_DATA_RQ_LVL_LVL_2 + #define CREATE_QP_BATCH_DATA_RQ_PG_SIZE_MASK 0xf0UL + #define CREATE_QP_BATCH_DATA_RQ_PG_SIZE_SFT 4 + #define CREATE_QP_BATCH_DATA_RQ_PG_SIZE_PG_4K (0x0UL << 4) + #define CREATE_QP_BATCH_DATA_RQ_PG_SIZE_PG_8K (0x1UL << 4) + #define CREATE_QP_BATCH_DATA_RQ_PG_SIZE_PG_64K (0x2UL << 4) + #define CREATE_QP_BATCH_DATA_RQ_PG_SIZE_PG_2M (0x3UL << 4) + #define CREATE_QP_BATCH_DATA_RQ_PG_SIZE_PG_8M (0x4UL << 4) + #define CREATE_QP_BATCH_DATA_RQ_PG_SIZE_PG_1G (0x5UL << 4) + #define CREATE_QP_BATCH_DATA_RQ_PG_SIZE_LAST CREATE_QP_BATCH_DATA_RQ_PG_SIZE_PG_1G + u8 unused_0; + __le32 dpi; + __le32 sq_size; + __le32 rq_size; + __le16 sq_fwo_sq_sge; + #define CREATE_QP_BATCH_DATA_SQ_SGE_MASK 0xfUL + #define CREATE_QP_BATCH_DATA_SQ_SGE_SFT 0 + #define CREATE_QP_BATCH_DATA_SQ_FWO_MASK 0xfff0UL + #define CREATE_QP_BATCH_DATA_SQ_FWO_SFT 4 + __le16 rq_fwo_rq_sge; + #define CREATE_QP_BATCH_DATA_RQ_SGE_MASK 0xfUL + #define CREATE_QP_BATCH_DATA_RQ_SGE_SFT 0 + #define CREATE_QP_BATCH_DATA_RQ_FWO_MASK 0xfff0UL + #define CREATE_QP_BATCH_DATA_RQ_FWO_SFT 4 + __le32 scq_cid; + __le32 rcq_cid; + __le32 srq_cid; + __le32 pd_id; + __le64 sq_pbl; + __le64 rq_pbl; + __le64 irrq_addr; + __le64 orrq_addr; + __le32 request_xid; + __le16 steering_tag; + __le16 sq_max_num_wqes; + __le32 ext_stats_ctx_id; + __le16 schq_id; + __le16 reserved16; +}; + +/* roce_stats_ext_ctx (size:1856b/232B) */ +struct roce_stats_ext_ctx { + __le64 tx_atomic_req_pkts; + __le64 tx_read_req_pkts; + __le64 tx_read_res_pkts; + __le64 tx_write_req_pkts; + __le64 tx_rc_send_req_pkts; + __le64 tx_ud_send_req_pkts; + __le64 tx_cnp_pkts; + __le64 tx_roce_pkts; + __le64 tx_roce_bytes; + __le64 rx_out_of_buffer_pkts; + __le64 rx_out_of_sequence_pkts; + __le64 dup_req; + __le64 missing_resp; + __le64 seq_err_naks_rcvd; + __le64 rnr_naks_rcvd; + __le64 to_retransmits; + __le64 rx_atomic_req_pkts; + __le64 rx_read_req_pkts; + __le64 rx_read_res_pkts; + __le64 rx_write_req_pkts; + __le64 rx_rc_send_pkts; + __le64 rx_ud_send_pkts; + __le64 rx_dcn_payload_cut; + __le64 rx_ecn_marked_pkts; + __le64 rx_cnp_pkts; + __le64 rx_roce_pkts; + __le64 rx_roce_bytes; + __le64 rx_roce_good_pkts; + __le64 rx_roce_good_bytes; +}; + +/* cmdq_query_version (size:128b/16B) */ +struct cmdq_query_version { + u8 opcode; + #define CMDQ_QUERY_VERSION_OPCODE_QUERY_VERSION 0x8bUL + #define CMDQ_QUERY_VERSION_OPCODE_LAST CMDQ_QUERY_VERSION_OPCODE_QUERY_VERSION + u8 cmd_size; + __le16 flags; + 
__le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 resp_addr; +}; + +/* creq_query_version_resp (size:128b/16B) */ +struct creq_query_version_resp { + u8 type; + #define CREQ_QUERY_VERSION_RESP_TYPE_MASK 0x3fUL + #define CREQ_QUERY_VERSION_RESP_TYPE_SFT 0 + #define CREQ_QUERY_VERSION_RESP_TYPE_QP_EVENT 0x38UL + #define CREQ_QUERY_VERSION_RESP_TYPE_LAST CREQ_QUERY_VERSION_RESP_TYPE_QP_EVENT + u8 status; + __le16 cookie; + u8 fw_maj; + u8 fw_minor; + u8 fw_bld; + u8 fw_rsvd; + u8 v; + #define CREQ_QUERY_VERSION_RESP_V 0x1UL + u8 event; + #define CREQ_QUERY_VERSION_RESP_EVENT_QUERY_VERSION 0x8bUL + #define CREQ_QUERY_VERSION_RESP_EVENT_LAST CREQ_QUERY_VERSION_RESP_EVENT_QUERY_VERSION + __le16 reserved16; + u8 intf_maj; + u8 intf_minor; + u8 intf_bld; + u8 intf_rsvd; +}; + +/* cmdq_initialize_fw (size:1024b/128B) */ +struct cmdq_initialize_fw { + u8 opcode; + #define CMDQ_INITIALIZE_FW_OPCODE_INITIALIZE_FW 0x80UL + #define CMDQ_INITIALIZE_FW_OPCODE_LAST CMDQ_INITIALIZE_FW_OPCODE_INITIALIZE_FW + u8 cmd_size; + __le16 flags; + #define CMDQ_INITIALIZE_FW_FLAGS_MRAV_RESERVATION_SPLIT 0x1UL + #define CMDQ_INITIALIZE_FW_FLAGS_HW_REQUESTER_RETX_SUPPORTED 0x2UL + #define CMDQ_INITIALIZE_FW_FLAGS_DRV_VERSION 0x4UL + #define CMDQ_INITIALIZE_FW_FLAGS_OPTIMIZE_MODIFY_QP_SUPPORTED 0x8UL + #define CMDQ_INITIALIZE_FW_FLAGS_L2_VF_RESOURCE_MGMT 0x10UL + __le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 resp_addr; + u8 qpc_pg_size_qpc_lvl; + #define CMDQ_INITIALIZE_FW_QPC_LVL_MASK 0xfUL + #define CMDQ_INITIALIZE_FW_QPC_LVL_SFT 0 + #define CMDQ_INITIALIZE_FW_QPC_LVL_LVL_0 0x0UL + #define CMDQ_INITIALIZE_FW_QPC_LVL_LVL_1 0x1UL + #define CMDQ_INITIALIZE_FW_QPC_LVL_LVL_2 0x2UL + #define CMDQ_INITIALIZE_FW_QPC_LVL_LAST CMDQ_INITIALIZE_FW_QPC_LVL_LVL_2 + #define CMDQ_INITIALIZE_FW_QPC_PG_SIZE_MASK 0xf0UL + #define CMDQ_INITIALIZE_FW_QPC_PG_SIZE_SFT 4 + #define CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_4K (0x0UL << 4) + #define CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_8K (0x1UL << 4) + #define CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_64K (0x2UL << 4) + #define CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_2M (0x3UL << 4) + #define CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_8M (0x4UL << 4) + #define CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_1G (0x5UL << 4) + #define CMDQ_INITIALIZE_FW_QPC_PG_SIZE_LAST CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_1G + u8 mrw_pg_size_mrw_lvl; + #define CMDQ_INITIALIZE_FW_MRW_LVL_MASK 0xfUL + #define CMDQ_INITIALIZE_FW_MRW_LVL_SFT 0 + #define CMDQ_INITIALIZE_FW_MRW_LVL_LVL_0 0x0UL + #define CMDQ_INITIALIZE_FW_MRW_LVL_LVL_1 0x1UL + #define CMDQ_INITIALIZE_FW_MRW_LVL_LVL_2 0x2UL + #define CMDQ_INITIALIZE_FW_MRW_LVL_LAST CMDQ_INITIALIZE_FW_MRW_LVL_LVL_2 + #define CMDQ_INITIALIZE_FW_MRW_PG_SIZE_MASK 0xf0UL + #define CMDQ_INITIALIZE_FW_MRW_PG_SIZE_SFT 4 + #define CMDQ_INITIALIZE_FW_MRW_PG_SIZE_PG_4K (0x0UL << 4) + #define CMDQ_INITIALIZE_FW_MRW_PG_SIZE_PG_8K (0x1UL << 4) + #define CMDQ_INITIALIZE_FW_MRW_PG_SIZE_PG_64K (0x2UL << 4) + #define CMDQ_INITIALIZE_FW_MRW_PG_SIZE_PG_2M (0x3UL << 4) + #define CMDQ_INITIALIZE_FW_MRW_PG_SIZE_PG_8M (0x4UL << 4) + #define CMDQ_INITIALIZE_FW_MRW_PG_SIZE_PG_1G (0x5UL << 4) + #define CMDQ_INITIALIZE_FW_MRW_PG_SIZE_LAST CMDQ_INITIALIZE_FW_MRW_PG_SIZE_PG_1G + u8 srq_pg_size_srq_lvl; + #define CMDQ_INITIALIZE_FW_SRQ_LVL_MASK 0xfUL + #define CMDQ_INITIALIZE_FW_SRQ_LVL_SFT 0 + #define CMDQ_INITIALIZE_FW_SRQ_LVL_LVL_0 0x0UL + #define CMDQ_INITIALIZE_FW_SRQ_LVL_LVL_1 0x1UL + #define CMDQ_INITIALIZE_FW_SRQ_LVL_LVL_2 0x2UL + #define CMDQ_INITIALIZE_FW_SRQ_LVL_LAST CMDQ_INITIALIZE_FW_SRQ_LVL_LVL_2 + #define 
CMDQ_INITIALIZE_FW_SRQ_PG_SIZE_MASK 0xf0UL + #define CMDQ_INITIALIZE_FW_SRQ_PG_SIZE_SFT 4 + #define CMDQ_INITIALIZE_FW_SRQ_PG_SIZE_PG_4K (0x0UL << 4) + #define CMDQ_INITIALIZE_FW_SRQ_PG_SIZE_PG_8K (0x1UL << 4) + #define CMDQ_INITIALIZE_FW_SRQ_PG_SIZE_PG_64K (0x2UL << 4) + #define CMDQ_INITIALIZE_FW_SRQ_PG_SIZE_PG_2M (0x3UL << 4) + #define CMDQ_INITIALIZE_FW_SRQ_PG_SIZE_PG_8M (0x4UL << 4) + #define CMDQ_INITIALIZE_FW_SRQ_PG_SIZE_PG_1G (0x5UL << 4) + #define CMDQ_INITIALIZE_FW_SRQ_PG_SIZE_LAST CMDQ_INITIALIZE_FW_SRQ_PG_SIZE_PG_1G + u8 cq_pg_size_cq_lvl; + #define CMDQ_INITIALIZE_FW_CQ_LVL_MASK 0xfUL + #define CMDQ_INITIALIZE_FW_CQ_LVL_SFT 0 + #define CMDQ_INITIALIZE_FW_CQ_LVL_LVL_0 0x0UL + #define CMDQ_INITIALIZE_FW_CQ_LVL_LVL_1 0x1UL + #define CMDQ_INITIALIZE_FW_CQ_LVL_LVL_2 0x2UL + #define CMDQ_INITIALIZE_FW_CQ_LVL_LAST CMDQ_INITIALIZE_FW_CQ_LVL_LVL_2 + #define CMDQ_INITIALIZE_FW_CQ_PG_SIZE_MASK 0xf0UL + #define CMDQ_INITIALIZE_FW_CQ_PG_SIZE_SFT 4 + #define CMDQ_INITIALIZE_FW_CQ_PG_SIZE_PG_4K (0x0UL << 4) + #define CMDQ_INITIALIZE_FW_CQ_PG_SIZE_PG_8K (0x1UL << 4) + #define CMDQ_INITIALIZE_FW_CQ_PG_SIZE_PG_64K (0x2UL << 4) + #define CMDQ_INITIALIZE_FW_CQ_PG_SIZE_PG_2M (0x3UL << 4) + #define CMDQ_INITIALIZE_FW_CQ_PG_SIZE_PG_8M (0x4UL << 4) + #define CMDQ_INITIALIZE_FW_CQ_PG_SIZE_PG_1G (0x5UL << 4) + #define CMDQ_INITIALIZE_FW_CQ_PG_SIZE_LAST CMDQ_INITIALIZE_FW_CQ_PG_SIZE_PG_1G + u8 tqm_pg_size_tqm_lvl; + #define CMDQ_INITIALIZE_FW_TQM_LVL_MASK 0xfUL + #define CMDQ_INITIALIZE_FW_TQM_LVL_SFT 0 + #define CMDQ_INITIALIZE_FW_TQM_LVL_LVL_0 0x0UL + #define CMDQ_INITIALIZE_FW_TQM_LVL_LVL_1 0x1UL + #define CMDQ_INITIALIZE_FW_TQM_LVL_LVL_2 0x2UL + #define CMDQ_INITIALIZE_FW_TQM_LVL_LAST CMDQ_INITIALIZE_FW_TQM_LVL_LVL_2 + #define CMDQ_INITIALIZE_FW_TQM_PG_SIZE_MASK 0xf0UL + #define CMDQ_INITIALIZE_FW_TQM_PG_SIZE_SFT 4 + #define CMDQ_INITIALIZE_FW_TQM_PG_SIZE_PG_4K (0x0UL << 4) + #define CMDQ_INITIALIZE_FW_TQM_PG_SIZE_PG_8K (0x1UL << 4) + #define CMDQ_INITIALIZE_FW_TQM_PG_SIZE_PG_64K (0x2UL << 4) + #define CMDQ_INITIALIZE_FW_TQM_PG_SIZE_PG_2M (0x3UL << 4) + #define CMDQ_INITIALIZE_FW_TQM_PG_SIZE_PG_8M (0x4UL << 4) + #define CMDQ_INITIALIZE_FW_TQM_PG_SIZE_PG_1G (0x5UL << 4) + #define CMDQ_INITIALIZE_FW_TQM_PG_SIZE_LAST CMDQ_INITIALIZE_FW_TQM_PG_SIZE_PG_1G + u8 tim_pg_size_tim_lvl; + #define CMDQ_INITIALIZE_FW_TIM_LVL_MASK 0xfUL + #define CMDQ_INITIALIZE_FW_TIM_LVL_SFT 0 + #define CMDQ_INITIALIZE_FW_TIM_LVL_LVL_0 0x0UL + #define CMDQ_INITIALIZE_FW_TIM_LVL_LVL_1 0x1UL + #define CMDQ_INITIALIZE_FW_TIM_LVL_LVL_2 0x2UL + #define CMDQ_INITIALIZE_FW_TIM_LVL_LAST CMDQ_INITIALIZE_FW_TIM_LVL_LVL_2 + #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_MASK 0xf0UL + #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_SFT 4 + #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_4K (0x0UL << 4) + #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_8K (0x1UL << 4) + #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_64K (0x2UL << 4) + #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_2M (0x3UL << 4) + #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_8M (0x4UL << 4) + #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_1G (0x5UL << 4) + #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_LAST CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_1G + __le16 log2_dbr_pg_size; + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_MASK 0xfUL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_SFT 0 + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_4K 0x0UL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_8K 0x1UL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_16K 0x2UL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_32K 0x3UL + #define 
CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_64K 0x4UL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128K 0x5UL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_256K 0x6UL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_512K 0x7UL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_1M 0x8UL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_2M 0x9UL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_4M 0xaUL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_8M 0xbUL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_16M 0xcUL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_32M 0xdUL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_64M 0xeUL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128M 0xfUL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_LAST CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128M + #define CMDQ_INITIALIZE_FW_RSVD_MASK 0xfff0UL + #define CMDQ_INITIALIZE_FW_RSVD_SFT 4 + __le64 qpc_page_dir; + __le64 mrw_page_dir; + __le64 srq_page_dir; + __le64 cq_page_dir; + __le64 tqm_page_dir; + __le64 tim_page_dir; + __le32 number_of_qp; + __le32 number_of_mrw; + __le32 number_of_srq; + __le32 number_of_cq; + __le32 max_qp_per_vf; + __le32 max_mrw_per_vf; + __le32 max_srq_per_vf; + __le32 max_cq_per_vf; + __le32 max_gid_per_vf; + __le32 stat_ctx_id; + u8 drv_hsi_ver_maj; + u8 drv_hsi_ver_min; + u8 drv_hsi_ver_upd; + u8 unused40[5]; + __le16 drv_build_ver_maj; + __le16 drv_build_ver_min; + __le16 drv_build_ver_upd; + __le16 drv_build_ver_patch; +}; + +/* creq_initialize_fw_resp (size:128b/16B) */ +struct creq_initialize_fw_resp { + u8 type; + #define CREQ_INITIALIZE_FW_RESP_TYPE_MASK 0x3fUL + #define CREQ_INITIALIZE_FW_RESP_TYPE_SFT 0 + #define CREQ_INITIALIZE_FW_RESP_TYPE_QP_EVENT 0x38UL + #define CREQ_INITIALIZE_FW_RESP_TYPE_LAST CREQ_INITIALIZE_FW_RESP_TYPE_QP_EVENT + u8 status; + __le16 cookie; + __le32 reserved32; + u8 v; + #define CREQ_INITIALIZE_FW_RESP_V 0x1UL + u8 event; + #define CREQ_INITIALIZE_FW_RESP_EVENT_INITIALIZE_FW 0x80UL + #define CREQ_INITIALIZE_FW_RESP_EVENT_LAST CREQ_INITIALIZE_FW_RESP_EVENT_INITIALIZE_FW + u8 reserved48[6]; +}; + +/* cmdq_deinitialize_fw (size:128b/16B) */ +struct cmdq_deinitialize_fw { + u8 opcode; + #define CMDQ_DEINITIALIZE_FW_OPCODE_DEINITIALIZE_FW 0x81UL + #define CMDQ_DEINITIALIZE_FW_OPCODE_LAST CMDQ_DEINITIALIZE_FW_OPCODE_DEINITIALIZE_FW + u8 cmd_size; + __le16 flags; + __le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 resp_addr; +}; + +/* creq_deinitialize_fw_resp (size:128b/16B) */ +struct creq_deinitialize_fw_resp { + u8 type; + #define CREQ_DEINITIALIZE_FW_RESP_TYPE_MASK 0x3fUL + #define CREQ_DEINITIALIZE_FW_RESP_TYPE_SFT 0 + #define CREQ_DEINITIALIZE_FW_RESP_TYPE_QP_EVENT 0x38UL + #define CREQ_DEINITIALIZE_FW_RESP_TYPE_LAST CREQ_DEINITIALIZE_FW_RESP_TYPE_QP_EVENT + u8 status; + __le16 cookie; + __le32 reserved32; + u8 v; + #define CREQ_DEINITIALIZE_FW_RESP_V 0x1UL + u8 event; + #define CREQ_DEINITIALIZE_FW_RESP_EVENT_DEINITIALIZE_FW 0x81UL + #define CREQ_DEINITIALIZE_FW_RESP_EVENT_LAST CREQ_DEINITIALIZE_FW_RESP_EVENT_DEINITIALIZE_FW + u8 reserved48[6]; +}; + +/* cmdq_create_qp (size:896b/112B) */ +struct cmdq_create_qp { + u8 opcode; + #define CMDQ_CREATE_QP_OPCODE_CREATE_QP 0x1UL + #define CMDQ_CREATE_QP_OPCODE_LAST CMDQ_CREATE_QP_OPCODE_CREATE_QP + u8 cmd_size; + __le16 flags; + __le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 resp_addr; + __le64 qp_handle; + __le32 qp_flags; + #define CMDQ_CREATE_QP_QP_FLAGS_SRQ_USED 0x1UL + #define CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION 0x2UL + #define CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE 
0x4UL + #define CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED 0x8UL + #define CMDQ_CREATE_QP_QP_FLAGS_VARIABLE_SIZED_WQE_ENABLED 0x10UL + #define CMDQ_CREATE_QP_QP_FLAGS_OPTIMIZED_TRANSMIT_ENABLED 0x20UL + #define CMDQ_CREATE_QP_QP_FLAGS_RESPONDER_UD_CQE_WITH_CFA 0x40UL + #define CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_ENABLED 0x80UL + #define CMDQ_CREATE_QP_QP_FLAGS_EXPRESS_MODE_ENABLED 0x100UL + #define CMDQ_CREATE_QP_QP_FLAGS_STEERING_TAG_VALID 0x200UL + #define CMDQ_CREATE_QP_QP_FLAGS_RDMA_READ_OR_ATOMICS_USED 0x400UL + #define CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_CTX_VALID 0x800UL + #define CMDQ_CREATE_QP_QP_FLAGS_SCHQ_ID_VALID 0x1000UL + #define CMDQ_CREATE_QP_QP_FLAGS_LAST CMDQ_CREATE_QP_QP_FLAGS_SCHQ_ID_VALID + u8 type; + #define CMDQ_CREATE_QP_TYPE_RC 0x2UL + #define CMDQ_CREATE_QP_TYPE_UD 0x4UL + #define CMDQ_CREATE_QP_TYPE_RAW_ETHERTYPE 0x6UL + #define CMDQ_CREATE_QP_TYPE_GSI 0x7UL + #define CMDQ_CREATE_QP_TYPE_LAST CMDQ_CREATE_QP_TYPE_GSI + u8 sq_pg_size_sq_lvl; + #define CMDQ_CREATE_QP_SQ_LVL_MASK 0xfUL + #define CMDQ_CREATE_QP_SQ_LVL_SFT 0 + #define CMDQ_CREATE_QP_SQ_LVL_LVL_0 0x0UL + #define CMDQ_CREATE_QP_SQ_LVL_LVL_1 0x1UL + #define CMDQ_CREATE_QP_SQ_LVL_LVL_2 0x2UL + #define CMDQ_CREATE_QP_SQ_LVL_LAST CMDQ_CREATE_QP_SQ_LVL_LVL_2 + #define CMDQ_CREATE_QP_SQ_PG_SIZE_MASK 0xf0UL + #define CMDQ_CREATE_QP_SQ_PG_SIZE_SFT 4 + #define CMDQ_CREATE_QP_SQ_PG_SIZE_PG_4K (0x0UL << 4) + #define CMDQ_CREATE_QP_SQ_PG_SIZE_PG_8K (0x1UL << 4) + #define CMDQ_CREATE_QP_SQ_PG_SIZE_PG_64K (0x2UL << 4) + #define CMDQ_CREATE_QP_SQ_PG_SIZE_PG_2M (0x3UL << 4) + #define CMDQ_CREATE_QP_SQ_PG_SIZE_PG_8M (0x4UL << 4) + #define CMDQ_CREATE_QP_SQ_PG_SIZE_PG_1G (0x5UL << 4) + #define CMDQ_CREATE_QP_SQ_PG_SIZE_LAST CMDQ_CREATE_QP_SQ_PG_SIZE_PG_1G + u8 rq_pg_size_rq_lvl; + #define CMDQ_CREATE_QP_RQ_LVL_MASK 0xfUL + #define CMDQ_CREATE_QP_RQ_LVL_SFT 0 + #define CMDQ_CREATE_QP_RQ_LVL_LVL_0 0x0UL + #define CMDQ_CREATE_QP_RQ_LVL_LVL_1 0x1UL + #define CMDQ_CREATE_QP_RQ_LVL_LVL_2 0x2UL + #define CMDQ_CREATE_QP_RQ_LVL_LAST CMDQ_CREATE_QP_RQ_LVL_LVL_2 + #define CMDQ_CREATE_QP_RQ_PG_SIZE_MASK 0xf0UL + #define CMDQ_CREATE_QP_RQ_PG_SIZE_SFT 4 + #define CMDQ_CREATE_QP_RQ_PG_SIZE_PG_4K (0x0UL << 4) + #define CMDQ_CREATE_QP_RQ_PG_SIZE_PG_8K (0x1UL << 4) + #define CMDQ_CREATE_QP_RQ_PG_SIZE_PG_64K (0x2UL << 4) + #define CMDQ_CREATE_QP_RQ_PG_SIZE_PG_2M (0x3UL << 4) + #define CMDQ_CREATE_QP_RQ_PG_SIZE_PG_8M (0x4UL << 4) + #define CMDQ_CREATE_QP_RQ_PG_SIZE_PG_1G (0x5UL << 4) + #define CMDQ_CREATE_QP_RQ_PG_SIZE_LAST CMDQ_CREATE_QP_RQ_PG_SIZE_PG_1G + u8 unused_0; + __le32 dpi; + __le32 sq_size; + __le32 rq_size; + __le16 sq_fwo_sq_sge; + #define CMDQ_CREATE_QP_SQ_SGE_MASK 0xfUL + #define CMDQ_CREATE_QP_SQ_SGE_SFT 0 + #define CMDQ_CREATE_QP_SQ_FWO_MASK 0xfff0UL + #define CMDQ_CREATE_QP_SQ_FWO_SFT 4 + __le16 rq_fwo_rq_sge; + #define CMDQ_CREATE_QP_RQ_SGE_MASK 0xfUL + #define CMDQ_CREATE_QP_RQ_SGE_SFT 0 + #define CMDQ_CREATE_QP_RQ_FWO_MASK 0xfff0UL + #define CMDQ_CREATE_QP_RQ_FWO_SFT 4 + __le32 scq_cid; + __le32 rcq_cid; + __le32 srq_cid; + __le32 pd_id; + __le64 sq_pbl; + __le64 rq_pbl; + __le64 irrq_addr; + __le64 orrq_addr; + __le32 request_xid; + __le16 steering_tag; + __le16 sq_max_num_wqes; + __le32 ext_stats_ctx_id; + __le16 schq_id; + __le16 reserved16; +}; + +/* creq_create_qp_resp (size:128b/16B) */ +struct creq_create_qp_resp { + u8 type; + #define CREQ_CREATE_QP_RESP_TYPE_MASK 0x3fUL + #define CREQ_CREATE_QP_RESP_TYPE_SFT 0 + #define CREQ_CREATE_QP_RESP_TYPE_QP_EVENT 0x38UL + #define CREQ_CREATE_QP_RESP_TYPE_LAST 
CREQ_CREATE_QP_RESP_TYPE_QP_EVENT + u8 status; + __le16 cookie; + __le32 xid; + u8 v; + #define CREQ_CREATE_QP_RESP_V 0x1UL + u8 event; + #define CREQ_CREATE_QP_RESP_EVENT_CREATE_QP 0x1UL + #define CREQ_CREATE_QP_RESP_EVENT_LAST CREQ_CREATE_QP_RESP_EVENT_CREATE_QP + u8 optimized_transmit_enabled; + u8 reserved48[5]; +}; + +/* cmdq_destroy_qp (size:192b/24B) */ +struct cmdq_destroy_qp { + u8 opcode; + #define CMDQ_DESTROY_QP_OPCODE_DESTROY_QP 0x2UL + #define CMDQ_DESTROY_QP_OPCODE_LAST CMDQ_DESTROY_QP_OPCODE_DESTROY_QP + u8 cmd_size; + __le16 flags; + __le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 resp_addr; + __le32 qp_cid; + __le32 unused_0; +}; + +/* creq_destroy_qp_resp (size:128b/16B) */ +struct creq_destroy_qp_resp { + u8 type; + #define CREQ_DESTROY_QP_RESP_TYPE_MASK 0x3fUL + #define CREQ_DESTROY_QP_RESP_TYPE_SFT 0 + #define CREQ_DESTROY_QP_RESP_TYPE_QP_EVENT 0x38UL + #define CREQ_DESTROY_QP_RESP_TYPE_LAST CREQ_DESTROY_QP_RESP_TYPE_QP_EVENT + u8 status; + __le16 cookie; + __le32 xid; + u8 v; + #define CREQ_DESTROY_QP_RESP_V 0x1UL + u8 event; + #define CREQ_DESTROY_QP_RESP_EVENT_DESTROY_QP 0x2UL + #define CREQ_DESTROY_QP_RESP_EVENT_LAST CREQ_DESTROY_QP_RESP_EVENT_DESTROY_QP + u8 reserved48[6]; +}; + +/* cmdq_modify_qp (size:1152b/144B) */ +struct cmdq_modify_qp { + u8 opcode; + #define CMDQ_MODIFY_QP_OPCODE_MODIFY_QP 0x3UL + #define CMDQ_MODIFY_QP_OPCODE_LAST CMDQ_MODIFY_QP_OPCODE_MODIFY_QP + u8 cmd_size; + __le16 flags; + #define CMDQ_MODIFY_QP_FLAGS_SRQ_USED 0x1UL + __le16 cookie; + u8 resp_size; + u8 qp_type; + #define CMDQ_MODIFY_QP_QP_TYPE_RC 0x2UL + #define CMDQ_MODIFY_QP_QP_TYPE_UD 0x4UL + #define CMDQ_MODIFY_QP_QP_TYPE_RAW_ETHERTYPE 0x6UL + #define CMDQ_MODIFY_QP_QP_TYPE_GSI 0x7UL + #define CMDQ_MODIFY_QP_QP_TYPE_LAST CMDQ_MODIFY_QP_QP_TYPE_GSI + __le64 resp_addr; + __le32 modify_mask; + #define CMDQ_MODIFY_QP_MODIFY_MASK_STATE 0x1UL + #define CMDQ_MODIFY_QP_MODIFY_MASK_EN_SQD_ASYNC_NOTIFY 0x2UL + #define CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS 0x4UL + #define CMDQ_MODIFY_QP_MODIFY_MASK_PKEY 0x8UL + #define CMDQ_MODIFY_QP_MODIFY_MASK_QKEY 0x10UL + #define CMDQ_MODIFY_QP_MODIFY_MASK_DGID 0x20UL + #define CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL 0x40UL + #define CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX 0x80UL + #define CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT 0x100UL + #define CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS 0x200UL + #define CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC 0x400UL + #define CMDQ_MODIFY_QP_MODIFY_MASK_PINGPONG_PUSH_MODE 0x800UL + #define CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU 0x1000UL + #define CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT 0x2000UL + #define CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT 0x4000UL + #define CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY 0x8000UL + #define CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN 0x10000UL + #define CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC 0x20000UL + #define CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER 0x40000UL + #define CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN 0x80000UL + #define CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC 0x100000UL + #define CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SIZE 0x200000UL + #define CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SIZE 0x400000UL + #define CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SGE 0x800000UL + #define CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SGE 0x1000000UL + #define CMDQ_MODIFY_QP_MODIFY_MASK_MAX_INLINE_DATA 0x2000000UL + #define CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID 0x4000000UL + #define CMDQ_MODIFY_QP_MODIFY_MASK_SRC_MAC 0x8000000UL + #define CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID 0x10000000UL + #define CMDQ_MODIFY_QP_MODIFY_MASK_ENABLE_CC 0x20000000UL + #define 
CMDQ_MODIFY_QP_MODIFY_MASK_TOS_ECN 0x40000000UL + #define CMDQ_MODIFY_QP_MODIFY_MASK_TOS_DSCP 0x80000000UL + __le32 qp_cid; + u8 network_type_en_sqd_async_notify_new_state; + #define CMDQ_MODIFY_QP_NEW_STATE_MASK 0xfUL + #define CMDQ_MODIFY_QP_NEW_STATE_SFT 0 + #define CMDQ_MODIFY_QP_NEW_STATE_RESET 0x0UL + #define CMDQ_MODIFY_QP_NEW_STATE_INIT 0x1UL + #define CMDQ_MODIFY_QP_NEW_STATE_RTR 0x2UL + #define CMDQ_MODIFY_QP_NEW_STATE_RTS 0x3UL + #define CMDQ_MODIFY_QP_NEW_STATE_SQD 0x4UL + #define CMDQ_MODIFY_QP_NEW_STATE_SQE 0x5UL + #define CMDQ_MODIFY_QP_NEW_STATE_ERR 0x6UL + #define CMDQ_MODIFY_QP_NEW_STATE_LAST CMDQ_MODIFY_QP_NEW_STATE_ERR + #define CMDQ_MODIFY_QP_EN_SQD_ASYNC_NOTIFY 0x10UL + #define CMDQ_MODIFY_QP_UNUSED1 0x20UL + #define CMDQ_MODIFY_QP_NETWORK_TYPE_MASK 0xc0UL + #define CMDQ_MODIFY_QP_NETWORK_TYPE_SFT 6 + #define CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV1 (0x0UL << 6) + #define CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4 (0x2UL << 6) + #define CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6 (0x3UL << 6) + #define CMDQ_MODIFY_QP_NETWORK_TYPE_LAST CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6 + u8 access; + #define CMDQ_MODIFY_QP_ACCESS_REMOTE_ATOMIC_REMOTE_READ_REMOTE_WRITE_LOCAL_WRITE_MASK 0xffUL + #define CMDQ_MODIFY_QP_ACCESS_REMOTE_ATOMIC_REMOTE_READ_REMOTE_WRITE_LOCAL_WRITE_SFT 0 + #define CMDQ_MODIFY_QP_ACCESS_LOCAL_WRITE 0x1UL + #define CMDQ_MODIFY_QP_ACCESS_REMOTE_WRITE 0x2UL + #define CMDQ_MODIFY_QP_ACCESS_REMOTE_READ 0x4UL + #define CMDQ_MODIFY_QP_ACCESS_REMOTE_ATOMIC 0x8UL + __le16 pkey; + __le32 qkey; + __le32 dgid[4]; + __le32 flow_label; + __le16 sgid_index; + u8 hop_limit; + u8 traffic_class; + __le16 dest_mac[3]; + u8 tos_dscp_tos_ecn; + #define CMDQ_MODIFY_QP_TOS_ECN_MASK 0x3UL + #define CMDQ_MODIFY_QP_TOS_ECN_SFT 0 + #define CMDQ_MODIFY_QP_TOS_DSCP_MASK 0xfcUL + #define CMDQ_MODIFY_QP_TOS_DSCP_SFT 2 + u8 path_mtu_pingpong_push_enable; + #define CMDQ_MODIFY_QP_PINGPONG_PUSH_ENABLE 0x1UL + #define CMDQ_MODIFY_QP_UNUSED3_MASK 0xeUL + #define CMDQ_MODIFY_QP_UNUSED3_SFT 1 + #define CMDQ_MODIFY_QP_PATH_MTU_MASK 0xf0UL + #define CMDQ_MODIFY_QP_PATH_MTU_SFT 4 + #define CMDQ_MODIFY_QP_PATH_MTU_MTU_256 (0x0UL << 4) + #define CMDQ_MODIFY_QP_PATH_MTU_MTU_512 (0x1UL << 4) + #define CMDQ_MODIFY_QP_PATH_MTU_MTU_1024 (0x2UL << 4) + #define CMDQ_MODIFY_QP_PATH_MTU_MTU_2048 (0x3UL << 4) + #define CMDQ_MODIFY_QP_PATH_MTU_MTU_4096 (0x4UL << 4) + #define CMDQ_MODIFY_QP_PATH_MTU_MTU_8192 (0x5UL << 4) + #define CMDQ_MODIFY_QP_PATH_MTU_LAST CMDQ_MODIFY_QP_PATH_MTU_MTU_8192 + u8 timeout; + u8 retry_cnt; + u8 rnr_retry; + u8 min_rnr_timer; + __le32 rq_psn; + __le32 sq_psn; + u8 max_rd_atomic; + u8 max_dest_rd_atomic; + __le16 enable_cc; + #define CMDQ_MODIFY_QP_ENABLE_CC 0x1UL + #define CMDQ_MODIFY_QP_UNUSED15_MASK 0xfffeUL + #define CMDQ_MODIFY_QP_UNUSED15_SFT 1 + __le32 sq_size; + __le32 rq_size; + __le16 sq_sge; + __le16 rq_sge; + __le32 max_inline_data; + __le32 dest_qp_id; + __le32 pingpong_push_dpi; + __le16 src_mac[3]; + __le16 vlan_pcp_vlan_dei_vlan_id; + #define CMDQ_MODIFY_QP_VLAN_ID_MASK 0xfffUL + #define CMDQ_MODIFY_QP_VLAN_ID_SFT 0 + #define CMDQ_MODIFY_QP_VLAN_DEI 0x1000UL + #define CMDQ_MODIFY_QP_VLAN_PCP_MASK 0xe000UL + #define CMDQ_MODIFY_QP_VLAN_PCP_SFT 13 + __le64 irrq_addr; + __le64 orrq_addr; + __le32 ext_modify_mask; + #define CMDQ_MODIFY_QP_EXT_MODIFY_MASK_EXT_STATS_CTX 0x1UL + #define CMDQ_MODIFY_QP_EXT_MODIFY_MASK_SCHQ_ID_VALID 0x2UL + __le32 ext_stats_ctx_id; + __le16 schq_id; + __le16 unused_0; + __le32 reserved32; +}; + +/* creq_modify_qp_resp (size:128b/16B) */ +struct 
creq_modify_qp_resp { + u8 type; + #define CREQ_MODIFY_QP_RESP_TYPE_MASK 0x3fUL + #define CREQ_MODIFY_QP_RESP_TYPE_SFT 0 + #define CREQ_MODIFY_QP_RESP_TYPE_QP_EVENT 0x38UL + #define CREQ_MODIFY_QP_RESP_TYPE_LAST CREQ_MODIFY_QP_RESP_TYPE_QP_EVENT + u8 status; + __le16 cookie; + __le32 xid; + u8 v; + #define CREQ_MODIFY_QP_RESP_V 0x1UL + u8 event; + #define CREQ_MODIFY_QP_RESP_EVENT_MODIFY_QP 0x3UL + #define CREQ_MODIFY_QP_RESP_EVENT_LAST CREQ_MODIFY_QP_RESP_EVENT_MODIFY_QP + u8 pingpong_push_state_index_enabled; + #define CREQ_MODIFY_QP_RESP_PINGPONG_PUSH_ENABLED 0x1UL + #define CREQ_MODIFY_QP_RESP_PINGPONG_PUSH_INDEX_MASK 0xeUL + #define CREQ_MODIFY_QP_RESP_PINGPONG_PUSH_INDEX_SFT 1 + #define CREQ_MODIFY_QP_RESP_PINGPONG_PUSH_STATE 0x10UL + u8 reserved8; + __le32 lag_src_mac; +}; + +/* cmdq_query_qp (size:192b/24B) */ +struct cmdq_query_qp { + u8 opcode; + #define CMDQ_QUERY_QP_OPCODE_QUERY_QP 0x4UL + #define CMDQ_QUERY_QP_OPCODE_LAST CMDQ_QUERY_QP_OPCODE_QUERY_QP + u8 cmd_size; + __le16 flags; + __le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 resp_addr; + __le32 qp_cid; + __le32 unused_0; +}; + +/* creq_query_qp_resp (size:128b/16B) */ +struct creq_query_qp_resp { + u8 type; + #define CREQ_QUERY_QP_RESP_TYPE_MASK 0x3fUL + #define CREQ_QUERY_QP_RESP_TYPE_SFT 0 + #define CREQ_QUERY_QP_RESP_TYPE_QP_EVENT 0x38UL + #define CREQ_QUERY_QP_RESP_TYPE_LAST CREQ_QUERY_QP_RESP_TYPE_QP_EVENT + u8 status; + __le16 cookie; + __le32 size; + u8 v; + #define CREQ_QUERY_QP_RESP_V 0x1UL + u8 event; + #define CREQ_QUERY_QP_RESP_EVENT_QUERY_QP 0x4UL + #define CREQ_QUERY_QP_RESP_EVENT_LAST CREQ_QUERY_QP_RESP_EVENT_QUERY_QP + u8 reserved48[6]; +}; + +/* creq_query_qp_resp_sb (size:832b/104B) */ +struct creq_query_qp_resp_sb { + u8 opcode; + #define CREQ_QUERY_QP_RESP_SB_OPCODE_QUERY_QP 0x4UL + #define CREQ_QUERY_QP_RESP_SB_OPCODE_LAST CREQ_QUERY_QP_RESP_SB_OPCODE_QUERY_QP + u8 status; + __le16 cookie; + __le16 flags; + u8 resp_size; + u8 reserved8; + __le32 xid; + u8 en_sqd_async_notify_state; + #define CREQ_QUERY_QP_RESP_SB_STATE_MASK 0xfUL + #define CREQ_QUERY_QP_RESP_SB_STATE_SFT 0 + #define CREQ_QUERY_QP_RESP_SB_STATE_RESET 0x0UL + #define CREQ_QUERY_QP_RESP_SB_STATE_INIT 0x1UL + #define CREQ_QUERY_QP_RESP_SB_STATE_RTR 0x2UL + #define CREQ_QUERY_QP_RESP_SB_STATE_RTS 0x3UL + #define CREQ_QUERY_QP_RESP_SB_STATE_SQD 0x4UL + #define CREQ_QUERY_QP_RESP_SB_STATE_SQE 0x5UL + #define CREQ_QUERY_QP_RESP_SB_STATE_ERR 0x6UL + #define CREQ_QUERY_QP_RESP_SB_STATE_LAST CREQ_QUERY_QP_RESP_SB_STATE_ERR + #define CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY 0x10UL + #define CREQ_QUERY_QP_RESP_SB_UNUSED3_MASK 0xe0UL + #define CREQ_QUERY_QP_RESP_SB_UNUSED3_SFT 5 + u8 access; + #define CREQ_QUERY_QP_RESP_SB_ACCESS_REMOTE_ATOMIC_REMOTE_READ_REMOTE_WRITE_LOCAL_WRITE_MASK 0xffUL + #define CREQ_QUERY_QP_RESP_SB_ACCESS_REMOTE_ATOMIC_REMOTE_READ_REMOTE_WRITE_LOCAL_WRITE_SFT 0 + #define CREQ_QUERY_QP_RESP_SB_ACCESS_LOCAL_WRITE 0x1UL + #define CREQ_QUERY_QP_RESP_SB_ACCESS_REMOTE_WRITE 0x2UL + #define CREQ_QUERY_QP_RESP_SB_ACCESS_REMOTE_READ 0x4UL + #define CREQ_QUERY_QP_RESP_SB_ACCESS_REMOTE_ATOMIC 0x8UL + __le16 pkey; + __le32 qkey; + __le32 reserved32; + __le32 dgid[4]; + __le32 flow_label; + __le16 sgid_index; + u8 hop_limit; + u8 traffic_class; + __le16 dest_mac[3]; + __le16 path_mtu_dest_vlan_id; + #define CREQ_QUERY_QP_RESP_SB_DEST_VLAN_ID_MASK 0xfffUL + #define CREQ_QUERY_QP_RESP_SB_DEST_VLAN_ID_SFT 0 + #define CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK 0xf000UL + #define CREQ_QUERY_QP_RESP_SB_PATH_MTU_SFT 12 + #define 
CREQ_QUERY_QP_RESP_SB_PATH_MTU_MTU_256 (0x0UL << 12) + #define CREQ_QUERY_QP_RESP_SB_PATH_MTU_MTU_512 (0x1UL << 12) + #define CREQ_QUERY_QP_RESP_SB_PATH_MTU_MTU_1024 (0x2UL << 12) + #define CREQ_QUERY_QP_RESP_SB_PATH_MTU_MTU_2048 (0x3UL << 12) + #define CREQ_QUERY_QP_RESP_SB_PATH_MTU_MTU_4096 (0x4UL << 12) + #define CREQ_QUERY_QP_RESP_SB_PATH_MTU_MTU_8192 (0x5UL << 12) + #define CREQ_QUERY_QP_RESP_SB_PATH_MTU_LAST CREQ_QUERY_QP_RESP_SB_PATH_MTU_MTU_8192 + u8 timeout; + u8 retry_cnt; + u8 rnr_retry; + u8 min_rnr_timer; + __le32 rq_psn; + __le32 sq_psn; + u8 max_rd_atomic; + u8 max_dest_rd_atomic; + u8 tos_dscp_tos_ecn; + #define CREQ_QUERY_QP_RESP_SB_TOS_ECN_MASK 0x3UL + #define CREQ_QUERY_QP_RESP_SB_TOS_ECN_SFT 0 + #define CREQ_QUERY_QP_RESP_SB_TOS_DSCP_MASK 0xfcUL + #define CREQ_QUERY_QP_RESP_SB_TOS_DSCP_SFT 2 + u8 enable_cc; + #define CREQ_QUERY_QP_RESP_SB_ENABLE_CC 0x1UL + __le32 sq_size; + __le32 rq_size; + __le16 sq_sge; + __le16 rq_sge; + __le32 max_inline_data; + __le32 dest_qp_id; + __le16 port_id; + u8 unused_0; + u8 stat_collection_id; + __le16 src_mac[3]; + __le16 vlan_pcp_vlan_dei_vlan_id; + #define CREQ_QUERY_QP_RESP_SB_VLAN_ID_MASK 0xfffUL + #define CREQ_QUERY_QP_RESP_SB_VLAN_ID_SFT 0 + #define CREQ_QUERY_QP_RESP_SB_VLAN_DEI 0x1000UL + #define CREQ_QUERY_QP_RESP_SB_VLAN_PCP_MASK 0xe000UL + #define CREQ_QUERY_QP_RESP_SB_VLAN_PCP_SFT 13 +}; + +/* cmdq_query_qp_extend (size:192b/24B) */ +struct cmdq_query_qp_extend { + u8 opcode; + #define CMDQ_QUERY_QP_EXTEND_OPCODE_QUERY_QP_EXTEND 0x91UL + #define CMDQ_QUERY_QP_EXTEND_OPCODE_LAST CMDQ_QUERY_QP_EXTEND_OPCODE_QUERY_QP_EXTEND + u8 cmd_size; + __le16 flags; + __le16 cookie; + u8 resp_size; + u8 num_qps; + __le64 resp_addr; + __le32 function_id; + #define CMDQ_QUERY_QP_EXTEND_PF_NUM_MASK 0xffUL + #define CMDQ_QUERY_QP_EXTEND_PF_NUM_SFT 0 + #define CMDQ_QUERY_QP_EXTEND_VF_NUM_MASK 0xffff00UL + #define CMDQ_QUERY_QP_EXTEND_VF_NUM_SFT 8 + #define CMDQ_QUERY_QP_EXTEND_VF_VALID 0x1000000UL + __le32 current_index; +}; + +/* creq_query_qp_extend_resp (size:128b/16B) */ +struct creq_query_qp_extend_resp { + u8 type; + #define CREQ_QUERY_QP_EXTEND_RESP_TYPE_MASK 0x3fUL + #define CREQ_QUERY_QP_EXTEND_RESP_TYPE_SFT 0 + #define CREQ_QUERY_QP_EXTEND_RESP_TYPE_QP_EVENT 0x38UL + #define CREQ_QUERY_QP_EXTEND_RESP_TYPE_LAST CREQ_QUERY_QP_EXTEND_RESP_TYPE_QP_EVENT + u8 status; + __le16 cookie; + __le32 size; + u8 v; + #define CREQ_QUERY_QP_EXTEND_RESP_V 0x1UL + u8 event; + #define CREQ_QUERY_QP_EXTEND_RESP_EVENT_QUERY_QP_EXTEND 0x91UL + #define CREQ_QUERY_QP_EXTEND_RESP_EVENT_LAST CREQ_QUERY_QP_EXTEND_RESP_EVENT_QUERY_QP_EXTEND + __le16 reserved16; + __le32 current_index; +}; + +/* creq_query_qp_extend_resp_sb (size:384b/48B) */ +struct creq_query_qp_extend_resp_sb { + u8 opcode; + #define CREQ_QUERY_QP_EXTEND_RESP_SB_OPCODE_QUERY_QP_EXTEND 0x91UL + #define CREQ_QUERY_QP_EXTEND_RESP_SB_OPCODE_LAST CREQ_QUERY_QP_EXTEND_RESP_SB_OPCODE_QUERY_QP_EXTEND + u8 status; + __le16 cookie; + __le16 flags; + u8 resp_size; + u8 reserved8; + __le32 xid; + u8 state; + #define CREQ_QUERY_QP_EXTEND_RESP_SB_STATE_MASK 0xfUL + #define CREQ_QUERY_QP_EXTEND_RESP_SB_STATE_SFT 0 + #define CREQ_QUERY_QP_EXTEND_RESP_SB_STATE_RESET 0x0UL + #define CREQ_QUERY_QP_EXTEND_RESP_SB_STATE_INIT 0x1UL + #define CREQ_QUERY_QP_EXTEND_RESP_SB_STATE_RTR 0x2UL + #define CREQ_QUERY_QP_EXTEND_RESP_SB_STATE_RTS 0x3UL + #define CREQ_QUERY_QP_EXTEND_RESP_SB_STATE_SQD 0x4UL + #define CREQ_QUERY_QP_EXTEND_RESP_SB_STATE_SQE 0x5UL + #define CREQ_QUERY_QP_EXTEND_RESP_SB_STATE_ERR 0x6UL + #define 
CREQ_QUERY_QP_EXTEND_RESP_SB_STATE_LAST CREQ_QUERY_QP_EXTEND_RESP_SB_STATE_ERR + #define CREQ_QUERY_QP_EXTEND_RESP_SB_UNUSED4_MASK 0xf0UL + #define CREQ_QUERY_QP_EXTEND_RESP_SB_UNUSED4_SFT 4 + u8 reserved_8; + __le16 port_id; + __le32 qkey; + __le16 sgid_index; + u8 network_type; + #define CREQ_QUERY_QP_EXTEND_RESP_SB_NETWORK_TYPE_ROCEV1 0x0UL + #define CREQ_QUERY_QP_EXTEND_RESP_SB_NETWORK_TYPE_ROCEV2_IPV4 0x2UL + #define CREQ_QUERY_QP_EXTEND_RESP_SB_NETWORK_TYPE_ROCEV2_IPV6 0x3UL + #define CREQ_QUERY_QP_EXTEND_RESP_SB_NETWORK_TYPE_LAST CREQ_QUERY_QP_EXTEND_RESP_SB_NETWORK_TYPE_ROCEV2_IPV6 + u8 unused_0; + __le32 dgid[4]; + __le32 dest_qp_id; + u8 stat_collection_id; + u8 reservred_8; + __le16 reserved_16; +}; + +/* creq_query_qp_extend_resp_sb_tlv (size:512b/64B) */ +struct creq_query_qp_extend_resp_sb_tlv { + __le16 cmd_discr; + u8 reserved_8b; + u8 tlv_flags; + #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_TLV_FLAGS_MORE 0x1UL + #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_TLV_FLAGS_MORE_LAST 0x0UL + #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_TLV_FLAGS_MORE_NOT_LAST 0x1UL + #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_TLV_FLAGS_REQUIRED 0x2UL + #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_TLV_FLAGS_REQUIRED_NO (0x0UL << 1) + #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_TLV_FLAGS_REQUIRED_YES (0x1UL << 1) + #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_TLV_FLAGS_REQUIRED_LAST CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_TLV_FLAGS_REQUIRED_YES + __le16 tlv_type; + __le16 length; + u8 total_size; + u8 reserved56[7]; + u8 opcode; + #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_OPCODE_QUERY_QP_EXTEND 0x91UL + #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_OPCODE_LAST CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_OPCODE_QUERY_QP_EXTEND + u8 status; + __le16 cookie; + __le16 flags; + u8 resp_size; + u8 reserved8; + __le32 xid; + u8 state; + #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_STATE_MASK 0xfUL + #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_STATE_SFT 0 + #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_STATE_RESET 0x0UL + #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_STATE_INIT 0x1UL + #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_STATE_RTR 0x2UL + #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_STATE_RTS 0x3UL + #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_STATE_SQD 0x4UL + #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_STATE_SQE 0x5UL + #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_STATE_ERR 0x6UL + #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_STATE_LAST CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_STATE_ERR + #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_UNUSED4_MASK 0xf0UL + #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_UNUSED4_SFT 4 + u8 reserved_8; + __le16 port_id; + __le32 qkey; + __le16 sgid_index; + u8 network_type; + #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_NETWORK_TYPE_ROCEV1 0x0UL + #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_NETWORK_TYPE_ROCEV2_IPV4 0x2UL + #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_NETWORK_TYPE_ROCEV2_IPV6 0x3UL + #define CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_NETWORK_TYPE_LAST CREQ_QUERY_QP_EXTEND_RESP_SB_TLV_NETWORK_TYPE_ROCEV2_IPV6 + u8 unused_0; + __le32 dgid[4]; + __le32 dest_qp_id; + u8 stat_collection_id; + u8 reservred_8; + __le16 reserved_16; +}; + +/* cmdq_create_srq (size:512b/64B) */ +struct cmdq_create_srq { + u8 opcode; + #define CMDQ_CREATE_SRQ_OPCODE_CREATE_SRQ 0x5UL + #define CMDQ_CREATE_SRQ_OPCODE_LAST CMDQ_CREATE_SRQ_OPCODE_CREATE_SRQ + u8 cmd_size; + __le16 flags; + #define CMDQ_CREATE_SRQ_FLAGS_STEERING_TAG_VALID 0x1UL + __le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 resp_addr; + __le64 srq_handle; + __le16 pg_size_lvl; + #define CMDQ_CREATE_SRQ_LVL_MASK 0x3UL + #define 
CMDQ_CREATE_SRQ_LVL_SFT 0 + #define CMDQ_CREATE_SRQ_LVL_LVL_0 0x0UL + #define CMDQ_CREATE_SRQ_LVL_LVL_1 0x1UL + #define CMDQ_CREATE_SRQ_LVL_LVL_2 0x2UL + #define CMDQ_CREATE_SRQ_LVL_LAST CMDQ_CREATE_SRQ_LVL_LVL_2 + #define CMDQ_CREATE_SRQ_PG_SIZE_MASK 0x1cUL + #define CMDQ_CREATE_SRQ_PG_SIZE_SFT 2 + #define CMDQ_CREATE_SRQ_PG_SIZE_PG_4K (0x0UL << 2) + #define CMDQ_CREATE_SRQ_PG_SIZE_PG_8K (0x1UL << 2) + #define CMDQ_CREATE_SRQ_PG_SIZE_PG_64K (0x2UL << 2) + #define CMDQ_CREATE_SRQ_PG_SIZE_PG_2M (0x3UL << 2) + #define CMDQ_CREATE_SRQ_PG_SIZE_PG_8M (0x4UL << 2) + #define CMDQ_CREATE_SRQ_PG_SIZE_PG_1G (0x5UL << 2) + #define CMDQ_CREATE_SRQ_PG_SIZE_LAST CMDQ_CREATE_SRQ_PG_SIZE_PG_1G + #define CMDQ_CREATE_SRQ_UNUSED11_MASK 0xffe0UL + #define CMDQ_CREATE_SRQ_UNUSED11_SFT 5 + __le16 eventq_id; + #define CMDQ_CREATE_SRQ_EVENTQ_ID_MASK 0xfffUL + #define CMDQ_CREATE_SRQ_EVENTQ_ID_SFT 0 + #define CMDQ_CREATE_SRQ_UNUSED4_MASK 0xf000UL + #define CMDQ_CREATE_SRQ_UNUSED4_SFT 12 + __le16 srq_size; + __le16 srq_fwo; + #define CMDQ_CREATE_SRQ_SRQ_FWO_MASK 0xfffUL + #define CMDQ_CREATE_SRQ_SRQ_FWO_SFT 0 + #define CMDQ_CREATE_SRQ_SRQ_SGE_MASK 0xf000UL + #define CMDQ_CREATE_SRQ_SRQ_SGE_SFT 12 + __le32 dpi; + __le32 pd_id; + __le64 pbl; + __le16 steering_tag; + u8 reserved48[6]; + __le64 reserved64; +}; + +/* creq_create_srq_resp (size:128b/16B) */ +struct creq_create_srq_resp { + u8 type; + #define CREQ_CREATE_SRQ_RESP_TYPE_MASK 0x3fUL + #define CREQ_CREATE_SRQ_RESP_TYPE_SFT 0 + #define CREQ_CREATE_SRQ_RESP_TYPE_QP_EVENT 0x38UL + #define CREQ_CREATE_SRQ_RESP_TYPE_LAST CREQ_CREATE_SRQ_RESP_TYPE_QP_EVENT + u8 status; + __le16 cookie; + __le32 xid; + u8 v; + #define CREQ_CREATE_SRQ_RESP_V 0x1UL + u8 event; + #define CREQ_CREATE_SRQ_RESP_EVENT_CREATE_SRQ 0x5UL + #define CREQ_CREATE_SRQ_RESP_EVENT_LAST CREQ_CREATE_SRQ_RESP_EVENT_CREATE_SRQ + u8 reserved48[6]; +}; + +/* cmdq_destroy_srq (size:192b/24B) */ +struct cmdq_destroy_srq { + u8 opcode; + #define CMDQ_DESTROY_SRQ_OPCODE_DESTROY_SRQ 0x6UL + #define CMDQ_DESTROY_SRQ_OPCODE_LAST CMDQ_DESTROY_SRQ_OPCODE_DESTROY_SRQ + u8 cmd_size; + __le16 flags; + __le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 resp_addr; + __le32 srq_cid; + __le32 unused_0; +}; + +/* creq_destroy_srq_resp (size:128b/16B) */ +struct creq_destroy_srq_resp { + u8 type; + #define CREQ_DESTROY_SRQ_RESP_TYPE_MASK 0x3fUL + #define CREQ_DESTROY_SRQ_RESP_TYPE_SFT 0 + #define CREQ_DESTROY_SRQ_RESP_TYPE_QP_EVENT 0x38UL + #define CREQ_DESTROY_SRQ_RESP_TYPE_LAST CREQ_DESTROY_SRQ_RESP_TYPE_QP_EVENT + u8 status; + __le16 cookie; + __le32 xid; + u8 v; + #define CREQ_DESTROY_SRQ_RESP_V 0x1UL + u8 event; + #define CREQ_DESTROY_SRQ_RESP_EVENT_DESTROY_SRQ 0x6UL + #define CREQ_DESTROY_SRQ_RESP_EVENT_LAST CREQ_DESTROY_SRQ_RESP_EVENT_DESTROY_SRQ + __le16 enable_for_arm[3]; + #define CREQ_DESTROY_SRQ_RESP_UNUSED0_MASK 0xffffUL + #define CREQ_DESTROY_SRQ_RESP_UNUSED0_SFT 0 + #define CREQ_DESTROY_SRQ_RESP_ENABLE_FOR_ARM_MASK 0x30000UL + #define CREQ_DESTROY_SRQ_RESP_ENABLE_FOR_ARM_SFT 16 +}; + +/* cmdq_query_srq (size:192b/24B) */ +struct cmdq_query_srq { + u8 opcode; + #define CMDQ_QUERY_SRQ_OPCODE_QUERY_SRQ 0x8UL + #define CMDQ_QUERY_SRQ_OPCODE_LAST CMDQ_QUERY_SRQ_OPCODE_QUERY_SRQ + u8 cmd_size; + __le16 flags; + __le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 resp_addr; + __le32 srq_cid; + __le32 unused_0; +}; + +/* creq_query_srq_resp (size:128b/16B) */ +struct creq_query_srq_resp { + u8 type; + #define CREQ_QUERY_SRQ_RESP_TYPE_MASK 0x3fUL + #define CREQ_QUERY_SRQ_RESP_TYPE_SFT 0 + #define 
CREQ_QUERY_SRQ_RESP_TYPE_QP_EVENT 0x38UL + #define CREQ_QUERY_SRQ_RESP_TYPE_LAST CREQ_QUERY_SRQ_RESP_TYPE_QP_EVENT + u8 status; + __le16 cookie; + __le32 size; + u8 v; + #define CREQ_QUERY_SRQ_RESP_V 0x1UL + u8 event; + #define CREQ_QUERY_SRQ_RESP_EVENT_QUERY_SRQ 0x8UL + #define CREQ_QUERY_SRQ_RESP_EVENT_LAST CREQ_QUERY_SRQ_RESP_EVENT_QUERY_SRQ + u8 reserved48[6]; +}; + +/* creq_query_srq_resp_sb (size:256b/32B) */ +struct creq_query_srq_resp_sb { + u8 opcode; + #define CREQ_QUERY_SRQ_RESP_SB_OPCODE_QUERY_SRQ 0x8UL + #define CREQ_QUERY_SRQ_RESP_SB_OPCODE_LAST CREQ_QUERY_SRQ_RESP_SB_OPCODE_QUERY_SRQ + u8 status; + __le16 cookie; + __le16 flags; + u8 resp_size; + u8 reserved8; + __le32 xid; + __le16 srq_limit; + __le16 reserved16; + __le32 data[4]; +}; + +/* cmdq_create_cq (size:512b/64B) */ +struct cmdq_create_cq { + u8 opcode; + #define CMDQ_CREATE_CQ_OPCODE_CREATE_CQ 0x9UL + #define CMDQ_CREATE_CQ_OPCODE_LAST CMDQ_CREATE_CQ_OPCODE_CREATE_CQ + u8 cmd_size; + __le16 flags; + #define CMDQ_CREATE_CQ_FLAGS_DISABLE_CQ_OVERFLOW_DETECTION 0x1UL + #define CMDQ_CREATE_CQ_FLAGS_STEERING_TAG_VALID 0x2UL + #define CMDQ_CREATE_CQ_FLAGS_INFINITE_CQ_MODE 0x4UL + __le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 resp_addr; + __le64 cq_handle; + __le32 pg_size_lvl; + #define CMDQ_CREATE_CQ_LVL_MASK 0x3UL + #define CMDQ_CREATE_CQ_LVL_SFT 0 + #define CMDQ_CREATE_CQ_LVL_LVL_0 0x0UL + #define CMDQ_CREATE_CQ_LVL_LVL_1 0x1UL + #define CMDQ_CREATE_CQ_LVL_LVL_2 0x2UL + #define CMDQ_CREATE_CQ_LVL_LAST CMDQ_CREATE_CQ_LVL_LVL_2 + #define CMDQ_CREATE_CQ_PG_SIZE_MASK 0x1cUL + #define CMDQ_CREATE_CQ_PG_SIZE_SFT 2 + #define CMDQ_CREATE_CQ_PG_SIZE_PG_4K (0x0UL << 2) + #define CMDQ_CREATE_CQ_PG_SIZE_PG_8K (0x1UL << 2) + #define CMDQ_CREATE_CQ_PG_SIZE_PG_64K (0x2UL << 2) + #define CMDQ_CREATE_CQ_PG_SIZE_PG_2M (0x3UL << 2) + #define CMDQ_CREATE_CQ_PG_SIZE_PG_8M (0x4UL << 2) + #define CMDQ_CREATE_CQ_PG_SIZE_PG_1G (0x5UL << 2) + #define CMDQ_CREATE_CQ_PG_SIZE_LAST CMDQ_CREATE_CQ_PG_SIZE_PG_1G + #define CMDQ_CREATE_CQ_UNUSED27_MASK 0xffffffe0UL + #define CMDQ_CREATE_CQ_UNUSED27_SFT 5 + __le32 cq_fco_cnq_id; + #define CMDQ_CREATE_CQ_CNQ_ID_MASK 0xfffUL + #define CMDQ_CREATE_CQ_CNQ_ID_SFT 0 + #define CMDQ_CREATE_CQ_CQ_FCO_MASK 0xfffff000UL + #define CMDQ_CREATE_CQ_CQ_FCO_SFT 12 + __le32 dpi; + __le32 cq_size; + __le64 pbl; + __le16 steering_tag; + u8 reserved48[6]; + __le64 reserved64; +}; + +/* creq_create_cq_resp (size:128b/16B) */ +struct creq_create_cq_resp { + u8 type; + #define CREQ_CREATE_CQ_RESP_TYPE_MASK 0x3fUL + #define CREQ_CREATE_CQ_RESP_TYPE_SFT 0 + #define CREQ_CREATE_CQ_RESP_TYPE_QP_EVENT 0x38UL + #define CREQ_CREATE_CQ_RESP_TYPE_LAST CREQ_CREATE_CQ_RESP_TYPE_QP_EVENT + u8 status; + __le16 cookie; + __le32 xid; + u8 v; + #define CREQ_CREATE_CQ_RESP_V 0x1UL + u8 event; + #define CREQ_CREATE_CQ_RESP_EVENT_CREATE_CQ 0x9UL + #define CREQ_CREATE_CQ_RESP_EVENT_LAST CREQ_CREATE_CQ_RESP_EVENT_CREATE_CQ + u8 reserved48[6]; +}; + +/* cmdq_destroy_cq (size:192b/24B) */ +struct cmdq_destroy_cq { + u8 opcode; + #define CMDQ_DESTROY_CQ_OPCODE_DESTROY_CQ 0xaUL + #define CMDQ_DESTROY_CQ_OPCODE_LAST CMDQ_DESTROY_CQ_OPCODE_DESTROY_CQ + u8 cmd_size; + __le16 flags; + __le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 resp_addr; + __le32 cq_cid; + __le32 unused_0; +}; + +/* creq_destroy_cq_resp (size:128b/16B) */ +struct creq_destroy_cq_resp { + u8 type; + #define CREQ_DESTROY_CQ_RESP_TYPE_MASK 0x3fUL + #define CREQ_DESTROY_CQ_RESP_TYPE_SFT 0 + #define CREQ_DESTROY_CQ_RESP_TYPE_QP_EVENT 0x38UL + #define 
CREQ_DESTROY_CQ_RESP_TYPE_LAST CREQ_DESTROY_CQ_RESP_TYPE_QP_EVENT + u8 status; + __le16 cookie; + __le32 xid; + u8 v; + #define CREQ_DESTROY_CQ_RESP_V 0x1UL + u8 event; + #define CREQ_DESTROY_CQ_RESP_EVENT_DESTROY_CQ 0xaUL + #define CREQ_DESTROY_CQ_RESP_EVENT_LAST CREQ_DESTROY_CQ_RESP_EVENT_DESTROY_CQ + __le16 cq_arm_lvl; + #define CREQ_DESTROY_CQ_RESP_CQ_ARM_LVL_MASK 0x3UL + #define CREQ_DESTROY_CQ_RESP_CQ_ARM_LVL_SFT 0 + __le16 total_cnq_events; + __le16 reserved16; +}; + +/* cmdq_resize_cq (size:320b/40B) */ +struct cmdq_resize_cq { + u8 opcode; + #define CMDQ_RESIZE_CQ_OPCODE_RESIZE_CQ 0xcUL + #define CMDQ_RESIZE_CQ_OPCODE_LAST CMDQ_RESIZE_CQ_OPCODE_RESIZE_CQ + u8 cmd_size; + __le16 flags; + __le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 resp_addr; + __le32 cq_cid; + __le32 new_cq_size_pg_size_lvl; + #define CMDQ_RESIZE_CQ_LVL_MASK 0x3UL + #define CMDQ_RESIZE_CQ_LVL_SFT 0 + #define CMDQ_RESIZE_CQ_LVL_LVL_0 0x0UL + #define CMDQ_RESIZE_CQ_LVL_LVL_1 0x1UL + #define CMDQ_RESIZE_CQ_LVL_LVL_2 0x2UL + #define CMDQ_RESIZE_CQ_LVL_LAST CMDQ_RESIZE_CQ_LVL_LVL_2 + #define CMDQ_RESIZE_CQ_PG_SIZE_MASK 0x1cUL + #define CMDQ_RESIZE_CQ_PG_SIZE_SFT 2 + #define CMDQ_RESIZE_CQ_PG_SIZE_PG_4K (0x0UL << 2) + #define CMDQ_RESIZE_CQ_PG_SIZE_PG_8K (0x1UL << 2) + #define CMDQ_RESIZE_CQ_PG_SIZE_PG_64K (0x2UL << 2) + #define CMDQ_RESIZE_CQ_PG_SIZE_PG_2M (0x3UL << 2) + #define CMDQ_RESIZE_CQ_PG_SIZE_PG_8M (0x4UL << 2) + #define CMDQ_RESIZE_CQ_PG_SIZE_PG_1G (0x5UL << 2) + #define CMDQ_RESIZE_CQ_PG_SIZE_LAST CMDQ_RESIZE_CQ_PG_SIZE_PG_1G + #define CMDQ_RESIZE_CQ_NEW_CQ_SIZE_MASK 0x1fffffe0UL + #define CMDQ_RESIZE_CQ_NEW_CQ_SIZE_SFT 5 + __le64 new_pbl; + __le32 new_cq_fco; + __le32 unused_0; +}; + +/* creq_resize_cq_resp (size:128b/16B) */ +struct creq_resize_cq_resp { + u8 type; + #define CREQ_RESIZE_CQ_RESP_TYPE_MASK 0x3fUL + #define CREQ_RESIZE_CQ_RESP_TYPE_SFT 0 + #define CREQ_RESIZE_CQ_RESP_TYPE_QP_EVENT 0x38UL + #define CREQ_RESIZE_CQ_RESP_TYPE_LAST CREQ_RESIZE_CQ_RESP_TYPE_QP_EVENT + u8 status; + __le16 cookie; + __le32 xid; + u8 v; + #define CREQ_RESIZE_CQ_RESP_V 0x1UL + u8 event; + #define CREQ_RESIZE_CQ_RESP_EVENT_RESIZE_CQ 0xcUL + #define CREQ_RESIZE_CQ_RESP_EVENT_LAST CREQ_RESIZE_CQ_RESP_EVENT_RESIZE_CQ + u8 reserved48[6]; +}; + +/* cmdq_modify_cq (size:512b/64B) */ +struct cmdq_modify_cq { + u8 opcode; + #define CMDQ_MODIFY_CQ_OPCODE_MODIFY_CQ 0x90UL + #define CMDQ_MODIFY_CQ_OPCODE_LAST CMDQ_MODIFY_CQ_OPCODE_MODIFY_CQ + u8 cmd_size; + __le16 flags; + #define CMDQ_MODIFY_CQ_FLAGS_DISABLE_CQ_OVERFLOW_DETECTION 0x1UL + __le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 resp_addr; + __le32 modify_mask; + #define CMDQ_MODIFY_CQ_MODIFY_MASK_CQ_HANDLE 0x1UL + #define CMDQ_MODIFY_CQ_MODIFY_MASK_CNQ_ID 0x2UL + #define CMDQ_MODIFY_CQ_MODIFY_MASK_FCO 0x4UL + #define CMDQ_MODIFY_CQ_MODIFY_MASK_DPI 0x8UL + #define CMDQ_MODIFY_CQ_MODIFY_MASK_CQ_SIZE 0x10UL + #define CMDQ_MODIFY_CQ_MODIFY_MASK_PBL 0x20UL + __le32 reserved32; + __le64 cq_handle; + __le32 cq_fco_cnq_id; + #define CMDQ_MODIFY_CQ_CNQ_ID_MASK 0xfffUL + #define CMDQ_MODIFY_CQ_CNQ_ID_SFT 0 + #define CMDQ_MODIFY_CQ_CQ_FCO_MASK 0xfffff000UL + #define CMDQ_MODIFY_CQ_CQ_FCO_SFT 12 + __le32 dpi; + __le32 cq_size; + __le32 reserved32_1; + __le64 pbl; + __le64 reserved64; +}; + +/* creq_modify_cq_resp (size:128b/16B) */ +struct creq_modify_cq_resp { + u8 type; + #define CREQ_MODIFY_CQ_RESP_TYPE_MASK 0x3fUL + #define CREQ_MODIFY_CQ_RESP_TYPE_SFT 0 + #define CREQ_MODIFY_CQ_RESP_TYPE_QP_EVENT 0x38UL + #define CREQ_MODIFY_CQ_RESP_TYPE_LAST 
CREQ_MODIFY_CQ_RESP_TYPE_QP_EVENT + u8 status; + __le16 cookie; + __le32 xid; + u8 v; + #define CREQ_MODIFY_CQ_RESP_V 0x1UL + u8 event; + #define CREQ_MODIFY_CQ_RESP_EVENT_MODIFY_CQ 0x9UL + #define CREQ_MODIFY_CQ_RESP_EVENT_LAST CREQ_MODIFY_CQ_RESP_EVENT_MODIFY_CQ + u8 reserved48[6]; +}; + +/* cmdq_allocate_mrw (size:256b/32B) */ +struct cmdq_allocate_mrw { + u8 opcode; + #define CMDQ_ALLOCATE_MRW_OPCODE_ALLOCATE_MRW 0xdUL + #define CMDQ_ALLOCATE_MRW_OPCODE_LAST CMDQ_ALLOCATE_MRW_OPCODE_ALLOCATE_MRW + u8 cmd_size; + __le16 flags; + __le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 resp_addr; + __le64 mrw_handle; + u8 mrw_flags; + #define CMDQ_ALLOCATE_MRW_MRW_FLAGS_MASK 0xfUL + #define CMDQ_ALLOCATE_MRW_MRW_FLAGS_SFT 0 + #define CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR 0x0UL + #define CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR 0x1UL + #define CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 0x2UL + #define CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A 0x3UL + #define CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B 0x4UL + #define CMDQ_ALLOCATE_MRW_MRW_FLAGS_LAST CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B + #define CMDQ_ALLOCATE_MRW_STEERING_TAG_VALID 0x10UL + #define CMDQ_ALLOCATE_MRW_UNUSED3_MASK 0xe0UL + #define CMDQ_ALLOCATE_MRW_UNUSED3_SFT 5 + u8 access; + #define CMDQ_ALLOCATE_MRW_ACCESS_CONSUMER_OWNED_KEY 0x20UL + __le16 steering_tag; + __le32 pd_id; +}; + +/* creq_allocate_mrw_resp (size:128b/16B) */ +struct creq_allocate_mrw_resp { + u8 type; + #define CREQ_ALLOCATE_MRW_RESP_TYPE_MASK 0x3fUL + #define CREQ_ALLOCATE_MRW_RESP_TYPE_SFT 0 + #define CREQ_ALLOCATE_MRW_RESP_TYPE_QP_EVENT 0x38UL + #define CREQ_ALLOCATE_MRW_RESP_TYPE_LAST CREQ_ALLOCATE_MRW_RESP_TYPE_QP_EVENT + u8 status; + __le16 cookie; + __le32 xid; + u8 v; + #define CREQ_ALLOCATE_MRW_RESP_V 0x1UL + u8 event; + #define CREQ_ALLOCATE_MRW_RESP_EVENT_ALLOCATE_MRW 0xdUL + #define CREQ_ALLOCATE_MRW_RESP_EVENT_LAST CREQ_ALLOCATE_MRW_RESP_EVENT_ALLOCATE_MRW + u8 reserved48[6]; +}; + +/* cmdq_deallocate_key (size:192b/24B) */ +struct cmdq_deallocate_key { + u8 opcode; + #define CMDQ_DEALLOCATE_KEY_OPCODE_DEALLOCATE_KEY 0xeUL + #define CMDQ_DEALLOCATE_KEY_OPCODE_LAST CMDQ_DEALLOCATE_KEY_OPCODE_DEALLOCATE_KEY + u8 cmd_size; + __le16 flags; + __le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 resp_addr; + u8 mrw_flags; + #define CMDQ_DEALLOCATE_KEY_MRW_FLAGS_MASK 0xfUL + #define CMDQ_DEALLOCATE_KEY_MRW_FLAGS_SFT 0 + #define CMDQ_DEALLOCATE_KEY_MRW_FLAGS_MR 0x0UL + #define CMDQ_DEALLOCATE_KEY_MRW_FLAGS_PMR 0x1UL + #define CMDQ_DEALLOCATE_KEY_MRW_FLAGS_MW_TYPE1 0x2UL + #define CMDQ_DEALLOCATE_KEY_MRW_FLAGS_MW_TYPE2A 0x3UL + #define CMDQ_DEALLOCATE_KEY_MRW_FLAGS_MW_TYPE2B 0x4UL + #define CMDQ_DEALLOCATE_KEY_MRW_FLAGS_LAST CMDQ_DEALLOCATE_KEY_MRW_FLAGS_MW_TYPE2B + #define CMDQ_DEALLOCATE_KEY_UNUSED4_MASK 0xf0UL + #define CMDQ_DEALLOCATE_KEY_UNUSED4_SFT 4 + u8 unused24[3]; + __le32 key; +}; + +/* creq_deallocate_key_resp (size:128b/16B) */ +struct creq_deallocate_key_resp { + u8 type; + #define CREQ_DEALLOCATE_KEY_RESP_TYPE_MASK 0x3fUL + #define CREQ_DEALLOCATE_KEY_RESP_TYPE_SFT 0 + #define CREQ_DEALLOCATE_KEY_RESP_TYPE_QP_EVENT 0x38UL + #define CREQ_DEALLOCATE_KEY_RESP_TYPE_LAST CREQ_DEALLOCATE_KEY_RESP_TYPE_QP_EVENT + u8 status; + __le16 cookie; + __le32 xid; + u8 v; + #define CREQ_DEALLOCATE_KEY_RESP_V 0x1UL + u8 event; + #define CREQ_DEALLOCATE_KEY_RESP_EVENT_DEALLOCATE_KEY 0xeUL + #define CREQ_DEALLOCATE_KEY_RESP_EVENT_LAST CREQ_DEALLOCATE_KEY_RESP_EVENT_DEALLOCATE_KEY + __le16 reserved16; + __le32 bound_window_info; +}; + +/* cmdq_register_mr (size:512b/64B) */ +struct 
cmdq_register_mr { + u8 opcode; + #define CMDQ_REGISTER_MR_OPCODE_REGISTER_MR 0xfUL + #define CMDQ_REGISTER_MR_OPCODE_LAST CMDQ_REGISTER_MR_OPCODE_REGISTER_MR + u8 cmd_size; + __le16 flags; + #define CMDQ_REGISTER_MR_FLAGS_ALLOC_MR 0x1UL + #define CMDQ_REGISTER_MR_FLAGS_STEERING_TAG_VALID 0x2UL + __le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 resp_addr; + u8 log2_pg_size_lvl; + #define CMDQ_REGISTER_MR_LVL_MASK 0x3UL + #define CMDQ_REGISTER_MR_LVL_SFT 0 + #define CMDQ_REGISTER_MR_LVL_LVL_0 0x0UL + #define CMDQ_REGISTER_MR_LVL_LVL_1 0x1UL + #define CMDQ_REGISTER_MR_LVL_LVL_2 0x2UL + #define CMDQ_REGISTER_MR_LVL_LAST CMDQ_REGISTER_MR_LVL_LVL_2 + #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_MASK 0x7cUL + #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_SFT 2 + #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_4K (0xcUL << 2) + #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_8K (0xdUL << 2) + #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_64K (0x10UL << 2) + #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_256K (0x12UL << 2) + #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_1M (0x14UL << 2) + #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_2M (0x15UL << 2) + #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_4M (0x16UL << 2) + #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_1G (0x1eUL << 2) + #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_LAST CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_1G + #define CMDQ_REGISTER_MR_UNUSED1 0x80UL + u8 access; + #define CMDQ_REGISTER_MR_ACCESS_LOCAL_WRITE 0x1UL + #define CMDQ_REGISTER_MR_ACCESS_REMOTE_READ 0x2UL + #define CMDQ_REGISTER_MR_ACCESS_REMOTE_WRITE 0x4UL + #define CMDQ_REGISTER_MR_ACCESS_REMOTE_ATOMIC 0x8UL + #define CMDQ_REGISTER_MR_ACCESS_MW_BIND 0x10UL + #define CMDQ_REGISTER_MR_ACCESS_ZERO_BASED 0x20UL + __le16 log2_pbl_pg_size; + #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_MASK 0x1fUL + #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_SFT 0 + #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4K 0xcUL + #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_8K 0xdUL + #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_64K 0x10UL + #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_256K 0x12UL + #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1M 0x14UL + #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_2M 0x15UL + #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4M 0x16UL + #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1G 0x1eUL + #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_LAST CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1G + #define CMDQ_REGISTER_MR_UNUSED11_MASK 0xffe0UL + #define CMDQ_REGISTER_MR_UNUSED11_SFT 5 + __le32 key; + __le64 pbl; + __le64 va; + __le64 mr_size; + __le16 steering_tag; + u8 reserved48[6]; + __le64 reserved64; +}; + +/* creq_register_mr_resp (size:128b/16B) */ +struct creq_register_mr_resp { + u8 type; + #define CREQ_REGISTER_MR_RESP_TYPE_MASK 0x3fUL + #define CREQ_REGISTER_MR_RESP_TYPE_SFT 0 + #define CREQ_REGISTER_MR_RESP_TYPE_QP_EVENT 0x38UL + #define CREQ_REGISTER_MR_RESP_TYPE_LAST CREQ_REGISTER_MR_RESP_TYPE_QP_EVENT + u8 status; + __le16 cookie; + __le32 xid; + u8 v; + #define CREQ_REGISTER_MR_RESP_V 0x1UL + u8 event; + #define CREQ_REGISTER_MR_RESP_EVENT_REGISTER_MR 0xfUL + #define CREQ_REGISTER_MR_RESP_EVENT_LAST CREQ_REGISTER_MR_RESP_EVENT_REGISTER_MR + u8 reserved48[6]; +}; + +/* cmdq_deregister_mr (size:192b/24B) */ +struct cmdq_deregister_mr { + u8 opcode; + #define CMDQ_DEREGISTER_MR_OPCODE_DEREGISTER_MR 0x10UL + #define CMDQ_DEREGISTER_MR_OPCODE_LAST CMDQ_DEREGISTER_MR_OPCODE_DEREGISTER_MR + u8 cmd_size; + __le16 flags; + __le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 resp_addr; + __le32 lkey; + __le32 unused_0; +}; + +/* 
creq_deregister_mr_resp (size:128b/16B) */ +struct creq_deregister_mr_resp { + u8 type; + #define CREQ_DEREGISTER_MR_RESP_TYPE_MASK 0x3fUL + #define CREQ_DEREGISTER_MR_RESP_TYPE_SFT 0 + #define CREQ_DEREGISTER_MR_RESP_TYPE_QP_EVENT 0x38UL + #define CREQ_DEREGISTER_MR_RESP_TYPE_LAST CREQ_DEREGISTER_MR_RESP_TYPE_QP_EVENT + u8 status; + __le16 cookie; + __le32 xid; + u8 v; + #define CREQ_DEREGISTER_MR_RESP_V 0x1UL + u8 event; + #define CREQ_DEREGISTER_MR_RESP_EVENT_DEREGISTER_MR 0x10UL + #define CREQ_DEREGISTER_MR_RESP_EVENT_LAST CREQ_DEREGISTER_MR_RESP_EVENT_DEREGISTER_MR + __le16 reserved16; + __le32 bound_windows; +}; + +/* cmdq_add_gid (size:384b/48B) */ +struct cmdq_add_gid { + u8 opcode; + #define CMDQ_ADD_GID_OPCODE_ADD_GID 0x11UL + #define CMDQ_ADD_GID_OPCODE_LAST CMDQ_ADD_GID_OPCODE_ADD_GID + u8 cmd_size; + __le16 flags; + __le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 resp_addr; + __le32 gid[4]; + __le16 src_mac[3]; + __le16 vlan; + #define CMDQ_ADD_GID_VLAN_VLAN_EN_TPID_VLAN_ID_MASK 0xffffUL + #define CMDQ_ADD_GID_VLAN_VLAN_EN_TPID_VLAN_ID_SFT 0 + #define CMDQ_ADD_GID_VLAN_VLAN_ID_MASK 0xfffUL + #define CMDQ_ADD_GID_VLAN_VLAN_ID_SFT 0 + #define CMDQ_ADD_GID_VLAN_TPID_MASK 0x7000UL + #define CMDQ_ADD_GID_VLAN_TPID_SFT 12 + #define CMDQ_ADD_GID_VLAN_TPID_TPID_88A8 (0x0UL << 12) + #define CMDQ_ADD_GID_VLAN_TPID_TPID_8100 (0x1UL << 12) + #define CMDQ_ADD_GID_VLAN_TPID_TPID_9100 (0x2UL << 12) + #define CMDQ_ADD_GID_VLAN_TPID_TPID_9200 (0x3UL << 12) + #define CMDQ_ADD_GID_VLAN_TPID_TPID_9300 (0x4UL << 12) + #define CMDQ_ADD_GID_VLAN_TPID_TPID_CFG1 (0x5UL << 12) + #define CMDQ_ADD_GID_VLAN_TPID_TPID_CFG2 (0x6UL << 12) + #define CMDQ_ADD_GID_VLAN_TPID_TPID_CFG3 (0x7UL << 12) + #define CMDQ_ADD_GID_VLAN_TPID_LAST CMDQ_ADD_GID_VLAN_TPID_TPID_CFG3 + #define CMDQ_ADD_GID_VLAN_VLAN_EN 0x8000UL + __le16 ipid; + __le16 stats_ctx; + #define CMDQ_ADD_GID_STATS_CTX_STATS_CTX_VALID_STATS_CTX_ID_MASK 0xffffUL + #define CMDQ_ADD_GID_STATS_CTX_STATS_CTX_VALID_STATS_CTX_ID_SFT 0 + #define CMDQ_ADD_GID_STATS_CTX_STATS_CTX_ID_MASK 0x7fffUL + #define CMDQ_ADD_GID_STATS_CTX_STATS_CTX_ID_SFT 0 + #define CMDQ_ADD_GID_STATS_CTX_STATS_CTX_VALID 0x8000UL + __le32 unused_0; +}; + +/* creq_add_gid_resp (size:128b/16B) */ +struct creq_add_gid_resp { + u8 type; + #define CREQ_ADD_GID_RESP_TYPE_MASK 0x3fUL + #define CREQ_ADD_GID_RESP_TYPE_SFT 0 + #define CREQ_ADD_GID_RESP_TYPE_QP_EVENT 0x38UL + #define CREQ_ADD_GID_RESP_TYPE_LAST CREQ_ADD_GID_RESP_TYPE_QP_EVENT + u8 status; + __le16 cookie; + __le32 xid; + u8 v; + #define CREQ_ADD_GID_RESP_V 0x1UL + u8 event; + #define CREQ_ADD_GID_RESP_EVENT_ADD_GID 0x11UL + #define CREQ_ADD_GID_RESP_EVENT_LAST CREQ_ADD_GID_RESP_EVENT_ADD_GID + u8 reserved48[6]; +}; + +/* cmdq_delete_gid (size:192b/24B) */ +struct cmdq_delete_gid { + u8 opcode; + #define CMDQ_DELETE_GID_OPCODE_DELETE_GID 0x12UL + #define CMDQ_DELETE_GID_OPCODE_LAST CMDQ_DELETE_GID_OPCODE_DELETE_GID + u8 cmd_size; + __le16 flags; + __le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 resp_addr; + __le16 gid_index; + u8 unused_0[6]; +}; + +/* creq_delete_gid_resp (size:128b/16B) */ +struct creq_delete_gid_resp { + u8 type; + #define CREQ_DELETE_GID_RESP_TYPE_MASK 0x3fUL + #define CREQ_DELETE_GID_RESP_TYPE_SFT 0 + #define CREQ_DELETE_GID_RESP_TYPE_QP_EVENT 0x38UL + #define CREQ_DELETE_GID_RESP_TYPE_LAST CREQ_DELETE_GID_RESP_TYPE_QP_EVENT + u8 status; + __le16 cookie; + __le32 xid; + u8 v; + #define CREQ_DELETE_GID_RESP_V 0x1UL + u8 event; + #define CREQ_DELETE_GID_RESP_EVENT_DELETE_GID 0x12UL + #define 
CREQ_DELETE_GID_RESP_EVENT_LAST CREQ_DELETE_GID_RESP_EVENT_DELETE_GID + u8 reserved48[6]; +}; + +/* cmdq_modify_gid (size:384b/48B) */ +struct cmdq_modify_gid { + u8 opcode; + #define CMDQ_MODIFY_GID_OPCODE_MODIFY_GID 0x17UL + #define CMDQ_MODIFY_GID_OPCODE_LAST CMDQ_MODIFY_GID_OPCODE_MODIFY_GID + u8 cmd_size; + __le16 flags; + __le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 resp_addr; + __le32 gid[4]; + __le16 src_mac[3]; + __le16 vlan; + #define CMDQ_MODIFY_GID_VLAN_VLAN_ID_MASK 0xfffUL + #define CMDQ_MODIFY_GID_VLAN_VLAN_ID_SFT 0 + #define CMDQ_MODIFY_GID_VLAN_TPID_MASK 0x7000UL + #define CMDQ_MODIFY_GID_VLAN_TPID_SFT 12 + #define CMDQ_MODIFY_GID_VLAN_TPID_TPID_88A8 (0x0UL << 12) + #define CMDQ_MODIFY_GID_VLAN_TPID_TPID_8100 (0x1UL << 12) + #define CMDQ_MODIFY_GID_VLAN_TPID_TPID_9100 (0x2UL << 12) + #define CMDQ_MODIFY_GID_VLAN_TPID_TPID_9200 (0x3UL << 12) + #define CMDQ_MODIFY_GID_VLAN_TPID_TPID_9300 (0x4UL << 12) + #define CMDQ_MODIFY_GID_VLAN_TPID_TPID_CFG1 (0x5UL << 12) + #define CMDQ_MODIFY_GID_VLAN_TPID_TPID_CFG2 (0x6UL << 12) + #define CMDQ_MODIFY_GID_VLAN_TPID_TPID_CFG3 (0x7UL << 12) + #define CMDQ_MODIFY_GID_VLAN_TPID_LAST CMDQ_MODIFY_GID_VLAN_TPID_TPID_CFG3 + #define CMDQ_MODIFY_GID_VLAN_VLAN_EN 0x8000UL + __le16 ipid; + __le16 gid_index; + __le16 stats_ctx; + #define CMDQ_MODIFY_GID_STATS_CTX_STATS_CTX_ID_MASK 0x7fffUL + #define CMDQ_MODIFY_GID_STATS_CTX_STATS_CTX_ID_SFT 0 + #define CMDQ_MODIFY_GID_STATS_CTX_STATS_CTX_VALID 0x8000UL + __le16 unused_0; +}; + +/* creq_modify_gid_resp (size:128b/16B) */ +struct creq_modify_gid_resp { + u8 type; + #define CREQ_MODIFY_GID_RESP_TYPE_MASK 0x3fUL + #define CREQ_MODIFY_GID_RESP_TYPE_SFT 0 + #define CREQ_MODIFY_GID_RESP_TYPE_QP_EVENT 0x38UL + #define CREQ_MODIFY_GID_RESP_TYPE_LAST CREQ_MODIFY_GID_RESP_TYPE_QP_EVENT + u8 status; + __le16 cookie; + __le32 xid; + u8 v; + #define CREQ_MODIFY_GID_RESP_V 0x1UL + u8 event; + #define CREQ_MODIFY_GID_RESP_EVENT_ADD_GID 0x11UL + #define CREQ_MODIFY_GID_RESP_EVENT_LAST CREQ_MODIFY_GID_RESP_EVENT_ADD_GID + u8 reserved48[6]; +}; + +/* cmdq_query_gid (size:192b/24B) */ +struct cmdq_query_gid { + u8 opcode; + #define CMDQ_QUERY_GID_OPCODE_QUERY_GID 0x18UL + #define CMDQ_QUERY_GID_OPCODE_LAST CMDQ_QUERY_GID_OPCODE_QUERY_GID + u8 cmd_size; + __le16 flags; + __le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 resp_addr; + __le16 gid_index; + u8 unused16[6]; +}; + +/* creq_query_gid_resp (size:128b/16B) */ +struct creq_query_gid_resp { + u8 type; + #define CREQ_QUERY_GID_RESP_TYPE_MASK 0x3fUL + #define CREQ_QUERY_GID_RESP_TYPE_SFT 0 + #define CREQ_QUERY_GID_RESP_TYPE_QP_EVENT 0x38UL + #define CREQ_QUERY_GID_RESP_TYPE_LAST CREQ_QUERY_GID_RESP_TYPE_QP_EVENT + u8 status; + __le16 cookie; + __le32 size; + u8 v; + #define CREQ_QUERY_GID_RESP_V 0x1UL + u8 event; + #define CREQ_QUERY_GID_RESP_EVENT_QUERY_GID 0x18UL + #define CREQ_QUERY_GID_RESP_EVENT_LAST CREQ_QUERY_GID_RESP_EVENT_QUERY_GID + u8 reserved48[6]; +}; + +/* creq_query_gid_resp_sb (size:320b/40B) */ +struct creq_query_gid_resp_sb { + u8 opcode; + #define CREQ_QUERY_GID_RESP_SB_OPCODE_QUERY_GID 0x18UL + #define CREQ_QUERY_GID_RESP_SB_OPCODE_LAST CREQ_QUERY_GID_RESP_SB_OPCODE_QUERY_GID + u8 status; + __le16 cookie; + __le16 flags; + u8 resp_size; + u8 reserved8; + __le32 gid[4]; + __le16 src_mac[3]; + __le16 vlan; + #define CREQ_QUERY_GID_RESP_SB_VLAN_VLAN_EN_TPID_VLAN_ID_MASK 0xffffUL + #define CREQ_QUERY_GID_RESP_SB_VLAN_VLAN_EN_TPID_VLAN_ID_SFT 0 + #define CREQ_QUERY_GID_RESP_SB_VLAN_VLAN_ID_MASK 0xfffUL + #define 
CREQ_QUERY_GID_RESP_SB_VLAN_VLAN_ID_SFT 0 + #define CREQ_QUERY_GID_RESP_SB_VLAN_TPID_MASK 0x7000UL + #define CREQ_QUERY_GID_RESP_SB_VLAN_TPID_SFT 12 + #define CREQ_QUERY_GID_RESP_SB_VLAN_TPID_TPID_88A8 (0x0UL << 12) + #define CREQ_QUERY_GID_RESP_SB_VLAN_TPID_TPID_8100 (0x1UL << 12) + #define CREQ_QUERY_GID_RESP_SB_VLAN_TPID_TPID_9100 (0x2UL << 12) + #define CREQ_QUERY_GID_RESP_SB_VLAN_TPID_TPID_9200 (0x3UL << 12) + #define CREQ_QUERY_GID_RESP_SB_VLAN_TPID_TPID_9300 (0x4UL << 12) + #define CREQ_QUERY_GID_RESP_SB_VLAN_TPID_TPID_CFG1 (0x5UL << 12) + #define CREQ_QUERY_GID_RESP_SB_VLAN_TPID_TPID_CFG2 (0x6UL << 12) + #define CREQ_QUERY_GID_RESP_SB_VLAN_TPID_TPID_CFG3 (0x7UL << 12) + #define CREQ_QUERY_GID_RESP_SB_VLAN_TPID_LAST CREQ_QUERY_GID_RESP_SB_VLAN_TPID_TPID_CFG3 + #define CREQ_QUERY_GID_RESP_SB_VLAN_VLAN_EN 0x8000UL + __le16 ipid; + __le16 gid_index; + __le32 unused_0; +}; + +/* cmdq_create_qp1 (size:640b/80B) */ +struct cmdq_create_qp1 { + u8 opcode; + #define CMDQ_CREATE_QP1_OPCODE_CREATE_QP1 0x13UL + #define CMDQ_CREATE_QP1_OPCODE_LAST CMDQ_CREATE_QP1_OPCODE_CREATE_QP1 + u8 cmd_size; + __le16 flags; + __le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 resp_addr; + __le64 qp_handle; + __le32 qp_flags; + #define CMDQ_CREATE_QP1_QP_FLAGS_SRQ_USED 0x1UL + #define CMDQ_CREATE_QP1_QP_FLAGS_FORCE_COMPLETION 0x2UL + #define CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE 0x4UL + #define CMDQ_CREATE_QP1_QP_FLAGS_LAST CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE + u8 type; + #define CMDQ_CREATE_QP1_TYPE_GSI 0x1UL + #define CMDQ_CREATE_QP1_TYPE_LAST CMDQ_CREATE_QP1_TYPE_GSI + u8 sq_pg_size_sq_lvl; + #define CMDQ_CREATE_QP1_SQ_LVL_MASK 0xfUL + #define CMDQ_CREATE_QP1_SQ_LVL_SFT 0 + #define CMDQ_CREATE_QP1_SQ_LVL_LVL_0 0x0UL + #define CMDQ_CREATE_QP1_SQ_LVL_LVL_1 0x1UL + #define CMDQ_CREATE_QP1_SQ_LVL_LVL_2 0x2UL + #define CMDQ_CREATE_QP1_SQ_LVL_LAST CMDQ_CREATE_QP1_SQ_LVL_LVL_2 + #define CMDQ_CREATE_QP1_SQ_PG_SIZE_MASK 0xf0UL + #define CMDQ_CREATE_QP1_SQ_PG_SIZE_SFT 4 + #define CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_4K (0x0UL << 4) + #define CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_8K (0x1UL << 4) + #define CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_64K (0x2UL << 4) + #define CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_2M (0x3UL << 4) + #define CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_8M (0x4UL << 4) + #define CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_1G (0x5UL << 4) + #define CMDQ_CREATE_QP1_SQ_PG_SIZE_LAST CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_1G + u8 rq_pg_size_rq_lvl; + #define CMDQ_CREATE_QP1_RQ_LVL_MASK 0xfUL + #define CMDQ_CREATE_QP1_RQ_LVL_SFT 0 + #define CMDQ_CREATE_QP1_RQ_LVL_LVL_0 0x0UL + #define CMDQ_CREATE_QP1_RQ_LVL_LVL_1 0x1UL + #define CMDQ_CREATE_QP1_RQ_LVL_LVL_2 0x2UL + #define CMDQ_CREATE_QP1_RQ_LVL_LAST CMDQ_CREATE_QP1_RQ_LVL_LVL_2 + #define CMDQ_CREATE_QP1_RQ_PG_SIZE_MASK 0xf0UL + #define CMDQ_CREATE_QP1_RQ_PG_SIZE_SFT 4 + #define CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_4K (0x0UL << 4) + #define CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_8K (0x1UL << 4) + #define CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_64K (0x2UL << 4) + #define CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_2M (0x3UL << 4) + #define CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_8M (0x4UL << 4) + #define CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_1G (0x5UL << 4) + #define CMDQ_CREATE_QP1_RQ_PG_SIZE_LAST CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_1G + u8 unused_0; + __le32 dpi; + __le32 sq_size; + __le32 rq_size; + __le16 sq_fwo_sq_sge; + #define CMDQ_CREATE_QP1_SQ_SGE_MASK 0xfUL + #define CMDQ_CREATE_QP1_SQ_SGE_SFT 0 + #define CMDQ_CREATE_QP1_SQ_FWO_MASK 0xfff0UL + #define CMDQ_CREATE_QP1_SQ_FWO_SFT 4 + __le16 rq_fwo_rq_sge; + #define CMDQ_CREATE_QP1_RQ_SGE_MASK 0xfUL + 
#define CMDQ_CREATE_QP1_RQ_SGE_SFT 0 + #define CMDQ_CREATE_QP1_RQ_FWO_MASK 0xfff0UL + #define CMDQ_CREATE_QP1_RQ_FWO_SFT 4 + __le32 scq_cid; + __le32 rcq_cid; + __le32 srq_cid; + __le32 pd_id; + __le64 sq_pbl; + __le64 rq_pbl; +}; + +/* creq_create_qp1_resp (size:128b/16B) */ +struct creq_create_qp1_resp { + u8 type; + #define CREQ_CREATE_QP1_RESP_TYPE_MASK 0x3fUL + #define CREQ_CREATE_QP1_RESP_TYPE_SFT 0 + #define CREQ_CREATE_QP1_RESP_TYPE_QP_EVENT 0x38UL + #define CREQ_CREATE_QP1_RESP_TYPE_LAST CREQ_CREATE_QP1_RESP_TYPE_QP_EVENT + u8 status; + __le16 cookie; + __le32 xid; + u8 v; + #define CREQ_CREATE_QP1_RESP_V 0x1UL + u8 event; + #define CREQ_CREATE_QP1_RESP_EVENT_CREATE_QP1 0x13UL + #define CREQ_CREATE_QP1_RESP_EVENT_LAST CREQ_CREATE_QP1_RESP_EVENT_CREATE_QP1 + u8 reserved48[6]; +}; + +/* cmdq_destroy_qp1 (size:192b/24B) */ +struct cmdq_destroy_qp1 { + u8 opcode; + #define CMDQ_DESTROY_QP1_OPCODE_DESTROY_QP1 0x14UL + #define CMDQ_DESTROY_QP1_OPCODE_LAST CMDQ_DESTROY_QP1_OPCODE_DESTROY_QP1 + u8 cmd_size; + __le16 flags; + __le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 resp_addr; + __le32 qp1_cid; + __le32 unused_0; +}; + +/* creq_destroy_qp1_resp (size:128b/16B) */ +struct creq_destroy_qp1_resp { + u8 type; + #define CREQ_DESTROY_QP1_RESP_TYPE_MASK 0x3fUL + #define CREQ_DESTROY_QP1_RESP_TYPE_SFT 0 + #define CREQ_DESTROY_QP1_RESP_TYPE_QP_EVENT 0x38UL + #define CREQ_DESTROY_QP1_RESP_TYPE_LAST CREQ_DESTROY_QP1_RESP_TYPE_QP_EVENT + u8 status; + __le16 cookie; + __le32 xid; + u8 v; + #define CREQ_DESTROY_QP1_RESP_V 0x1UL + u8 event; + #define CREQ_DESTROY_QP1_RESP_EVENT_DESTROY_QP1 0x14UL + #define CREQ_DESTROY_QP1_RESP_EVENT_LAST CREQ_DESTROY_QP1_RESP_EVENT_DESTROY_QP1 + u8 reserved48[6]; +}; + +/* cmdq_create_ah (size:512b/64B) */ +struct cmdq_create_ah { + u8 opcode; + #define CMDQ_CREATE_AH_OPCODE_CREATE_AH 0x15UL + #define CMDQ_CREATE_AH_OPCODE_LAST CMDQ_CREATE_AH_OPCODE_CREATE_AH + u8 cmd_size; + __le16 flags; + __le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 resp_addr; + __le64 ah_handle; + __le32 dgid[4]; + u8 type; + #define CMDQ_CREATE_AH_TYPE_V1 0x0UL + #define CMDQ_CREATE_AH_TYPE_V2IPV4 0x2UL + #define CMDQ_CREATE_AH_TYPE_V2IPV6 0x3UL + #define CMDQ_CREATE_AH_TYPE_LAST CMDQ_CREATE_AH_TYPE_V2IPV6 + u8 hop_limit; + __le16 sgid_index; + __le32 dest_vlan_id_flow_label; + #define CMDQ_CREATE_AH_FLOW_LABEL_MASK 0xfffffUL + #define CMDQ_CREATE_AH_FLOW_LABEL_SFT 0 + #define CMDQ_CREATE_AH_DEST_VLAN_ID_MASK 0xfff00000UL + #define CMDQ_CREATE_AH_DEST_VLAN_ID_SFT 20 + __le32 pd_id; + __le32 unused_0; + __le16 dest_mac[3]; + u8 traffic_class; + u8 enable_cc; + #define CMDQ_CREATE_AH_ENABLE_CC 0x1UL +}; + +/* creq_create_ah_resp (size:128b/16B) */ +struct creq_create_ah_resp { + u8 type; + #define CREQ_CREATE_AH_RESP_TYPE_MASK 0x3fUL + #define CREQ_CREATE_AH_RESP_TYPE_SFT 0 + #define CREQ_CREATE_AH_RESP_TYPE_QP_EVENT 0x38UL + #define CREQ_CREATE_AH_RESP_TYPE_LAST CREQ_CREATE_AH_RESP_TYPE_QP_EVENT + u8 status; + __le16 cookie; + __le32 xid; + u8 v; + #define CREQ_CREATE_AH_RESP_V 0x1UL + u8 event; + #define CREQ_CREATE_AH_RESP_EVENT_CREATE_AH 0x15UL + #define CREQ_CREATE_AH_RESP_EVENT_LAST CREQ_CREATE_AH_RESP_EVENT_CREATE_AH + u8 reserved48[6]; +}; + +/* cmdq_destroy_ah (size:192b/24B) */ +struct cmdq_destroy_ah { + u8 opcode; + #define CMDQ_DESTROY_AH_OPCODE_DESTROY_AH 0x16UL + #define CMDQ_DESTROY_AH_OPCODE_LAST CMDQ_DESTROY_AH_OPCODE_DESTROY_AH + u8 cmd_size; + __le16 flags; + __le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 resp_addr; + __le32 ah_cid; + __le32 
unused_0; +}; + +/* creq_destroy_ah_resp (size:128b/16B) */ +struct creq_destroy_ah_resp { + u8 type; + #define CREQ_DESTROY_AH_RESP_TYPE_MASK 0x3fUL + #define CREQ_DESTROY_AH_RESP_TYPE_SFT 0 + #define CREQ_DESTROY_AH_RESP_TYPE_QP_EVENT 0x38UL + #define CREQ_DESTROY_AH_RESP_TYPE_LAST CREQ_DESTROY_AH_RESP_TYPE_QP_EVENT + u8 status; + __le16 cookie; + __le32 xid; + u8 v; + #define CREQ_DESTROY_AH_RESP_V 0x1UL + u8 event; + #define CREQ_DESTROY_AH_RESP_EVENT_DESTROY_AH 0x16UL + #define CREQ_DESTROY_AH_RESP_EVENT_LAST CREQ_DESTROY_AH_RESP_EVENT_DESTROY_AH + u8 reserved48[6]; +}; + +/* cmdq_query_roce_stats (size:192b/24B) */ +struct cmdq_query_roce_stats { + u8 opcode; + #define CMDQ_QUERY_ROCE_STATS_OPCODE_QUERY_ROCE_STATS 0x8eUL + #define CMDQ_QUERY_ROCE_STATS_OPCODE_LAST CMDQ_QUERY_ROCE_STATS_OPCODE_QUERY_ROCE_STATS + u8 cmd_size; + __le16 flags; + #define CMDQ_QUERY_ROCE_STATS_FLAGS_COLLECTION_ID 0x1UL + #define CMDQ_QUERY_ROCE_STATS_FLAGS_FUNCTION_ID 0x2UL + __le16 cookie; + u8 resp_size; + u8 collection_id; + __le64 resp_addr; + __le32 function_id; + #define CMDQ_QUERY_ROCE_STATS_PF_NUM_MASK 0xffUL + #define CMDQ_QUERY_ROCE_STATS_PF_NUM_SFT 0 + #define CMDQ_QUERY_ROCE_STATS_VF_NUM_MASK 0xffff00UL + #define CMDQ_QUERY_ROCE_STATS_VF_NUM_SFT 8 + #define CMDQ_QUERY_ROCE_STATS_VF_VALID 0x1000000UL + __le32 reserved32; +}; + +/* creq_query_roce_stats_resp (size:128b/16B) */ +struct creq_query_roce_stats_resp { + u8 type; + #define CREQ_QUERY_ROCE_STATS_RESP_TYPE_MASK 0x3fUL + #define CREQ_QUERY_ROCE_STATS_RESP_TYPE_SFT 0 + #define CREQ_QUERY_ROCE_STATS_RESP_TYPE_QP_EVENT 0x38UL + #define CREQ_QUERY_ROCE_STATS_RESP_TYPE_LAST CREQ_QUERY_ROCE_STATS_RESP_TYPE_QP_EVENT + u8 status; + __le16 cookie; + __le32 size; + u8 v; + #define CREQ_QUERY_ROCE_STATS_RESP_V 0x1UL + u8 event; + #define CREQ_QUERY_ROCE_STATS_RESP_EVENT_QUERY_ROCE_STATS 0x8eUL + #define CREQ_QUERY_ROCE_STATS_RESP_EVENT_LAST CREQ_QUERY_ROCE_STATS_RESP_EVENT_QUERY_ROCE_STATS + u8 reserved48[6]; +}; + +/* creq_query_roce_stats_resp_sb (size:2944b/368B) */ +struct creq_query_roce_stats_resp_sb { + u8 opcode; + #define CREQ_QUERY_ROCE_STATS_RESP_SB_OPCODE_QUERY_ROCE_STATS 0x8eUL + #define CREQ_QUERY_ROCE_STATS_RESP_SB_OPCODE_LAST CREQ_QUERY_ROCE_STATS_RESP_SB_OPCODE_QUERY_ROCE_STATS + u8 status; + __le16 cookie; + __le16 flags; + u8 resp_size; + u8 rsvd; + __le32 num_counters; + __le32 rsvd1; + __le64 to_retransmits; + __le64 seq_err_naks_rcvd; + __le64 max_retry_exceeded; + __le64 rnr_naks_rcvd; + __le64 missing_resp; + __le64 unrecoverable_err; + __le64 bad_resp_err; + __le64 local_qp_op_err; + __le64 local_protection_err; + __le64 mem_mgmt_op_err; + __le64 remote_invalid_req_err; + __le64 remote_access_err; + __le64 remote_op_err; + __le64 dup_req; + __le64 res_exceed_max; + __le64 res_length_mismatch; + __le64 res_exceeds_wqe; + __le64 res_opcode_err; + __le64 res_rx_invalid_rkey; + __le64 res_rx_domain_err; + __le64 res_rx_no_perm; + __le64 res_rx_range_err; + __le64 res_tx_invalid_rkey; + __le64 res_tx_domain_err; + __le64 res_tx_no_perm; + __le64 res_tx_range_err; + __le64 res_irrq_oflow; + __le64 res_unsup_opcode; + __le64 res_unaligned_atomic; + __le64 res_rem_inv_err; + __le64 res_mem_error; + __le64 res_srq_err; + __le64 res_cmp_err; + __le64 res_invalid_dup_rkey; + __le64 res_wqe_format_err; + __le64 res_cq_load_err; + __le64 res_srq_load_err; + __le64 res_tx_pci_err; + __le64 res_rx_pci_err; + __le64 res_oos_drop_count; + __le64 active_qp_count_p0; + __le64 active_qp_count_p1; + __le64 active_qp_count_p2; + __le64 
active_qp_count_p3; +}; + +/* cmdq_query_roce_stats_ext (size:192b/24B) */ +struct cmdq_query_roce_stats_ext { + u8 opcode; + #define CMDQ_QUERY_ROCE_STATS_EXT_OPCODE_QUERY_ROCE_STATS 0x92UL + #define CMDQ_QUERY_ROCE_STATS_EXT_OPCODE_LAST CMDQ_QUERY_ROCE_STATS_EXT_OPCODE_QUERY_ROCE_STATS + u8 cmd_size; + __le16 flags; + #define CMDQ_QUERY_ROCE_STATS_EXT_FLAGS_COLLECTION_ID 0x1UL + #define CMDQ_QUERY_ROCE_STATS_EXT_FLAGS_FUNCTION_ID 0x2UL + __le16 cookie; + u8 resp_size; + u8 collection_id; + __le64 resp_addr; + __le32 function_id; + #define CMDQ_QUERY_ROCE_STATS_EXT_PF_NUM_MASK 0xffUL + #define CMDQ_QUERY_ROCE_STATS_EXT_PF_NUM_SFT 0 + #define CMDQ_QUERY_ROCE_STATS_EXT_VF_NUM_MASK 0xffff00UL + #define CMDQ_QUERY_ROCE_STATS_EXT_VF_NUM_SFT 8 + #define CMDQ_QUERY_ROCE_STATS_EXT_VF_VALID 0x1000000UL + __le32 reserved32; +}; + +/* creq_query_roce_stats_ext_resp (size:128b/16B) */ +struct creq_query_roce_stats_ext_resp { + u8 type; + #define CREQ_QUERY_ROCE_STATS_EXT_RESP_TYPE_MASK 0x3fUL + #define CREQ_QUERY_ROCE_STATS_EXT_RESP_TYPE_SFT 0 + #define CREQ_QUERY_ROCE_STATS_EXT_RESP_TYPE_QP_EVENT 0x38UL + #define CREQ_QUERY_ROCE_STATS_EXT_RESP_TYPE_LAST CREQ_QUERY_ROCE_STATS_EXT_RESP_TYPE_QP_EVENT + u8 status; + __le16 cookie; + __le32 size; + u8 v; + #define CREQ_QUERY_ROCE_STATS_EXT_RESP_V 0x1UL + u8 event; + #define CREQ_QUERY_ROCE_STATS_EXT_RESP_EVENT_QUERY_ROCE_STATS_EXT 0x92UL + #define CREQ_QUERY_ROCE_STATS_EXT_RESP_EVENT_LAST CREQ_QUERY_ROCE_STATS_EXT_RESP_EVENT_QUERY_ROCE_STATS_EXT + u8 reserved48[6]; +}; + +/* creq_query_roce_stats_ext_resp_sb (size:1984b/248B) */ +struct creq_query_roce_stats_ext_resp_sb { + u8 opcode; + #define CREQ_QUERY_ROCE_STATS_EXT_RESP_SB_OPCODE_QUERY_ROCE_STATS_EXT 0x92UL + #define CREQ_QUERY_ROCE_STATS_EXT_RESP_SB_OPCODE_LAST CREQ_QUERY_ROCE_STATS_EXT_RESP_SB_OPCODE_QUERY_ROCE_STATS_EXT + u8 status; + __le16 cookie; + __le16 flags; + u8 resp_size; + u8 rsvd; + __le64 tx_atomic_req_pkts; + __le64 tx_read_req_pkts; + __le64 tx_read_res_pkts; + __le64 tx_write_req_pkts; + __le64 tx_send_req_pkts; + __le64 tx_roce_pkts; + __le64 tx_roce_bytes; + __le64 rx_atomic_req_pkts; + __le64 rx_read_req_pkts; + __le64 rx_read_res_pkts; + __le64 rx_write_req_pkts; + __le64 rx_send_req_pkts; + __le64 rx_roce_pkts; + __le64 rx_roce_bytes; + __le64 rx_roce_good_pkts; + __le64 rx_roce_good_bytes; + __le64 rx_out_of_buffer_pkts; + __le64 rx_out_of_sequence_pkts; + __le64 tx_cnp_pkts; + __le64 rx_cnp_pkts; + __le64 rx_ecn_marked_pkts; + __le64 tx_cnp_bytes; + __le64 rx_cnp_bytes; + __le64 seq_err_naks_rcvd; + __le64 rnr_naks_rcvd; + __le64 missing_resp; + __le64 to_retransmit; + __le64 dup_req; + __le64 rx_dcn_payload_cut; + __le64 te_bypassed; +}; + +/* cmdq_query_func (size:128b/16B) */ +struct cmdq_query_func { + u8 opcode; + #define CMDQ_QUERY_FUNC_OPCODE_QUERY_FUNC 0x83UL + #define CMDQ_QUERY_FUNC_OPCODE_LAST CMDQ_QUERY_FUNC_OPCODE_QUERY_FUNC + u8 cmd_size; + __le16 flags; + __le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 resp_addr; +}; + +/* creq_query_func_resp (size:128b/16B) */ +struct creq_query_func_resp { + u8 type; + #define CREQ_QUERY_FUNC_RESP_TYPE_MASK 0x3fUL + #define CREQ_QUERY_FUNC_RESP_TYPE_SFT 0 + #define CREQ_QUERY_FUNC_RESP_TYPE_QP_EVENT 0x38UL + #define CREQ_QUERY_FUNC_RESP_TYPE_LAST CREQ_QUERY_FUNC_RESP_TYPE_QP_EVENT + u8 status; + __le16 cookie; + __le32 size; + u8 v; + #define CREQ_QUERY_FUNC_RESP_V 0x1UL + u8 event; + #define CREQ_QUERY_FUNC_RESP_EVENT_QUERY_FUNC 0x83UL + #define CREQ_QUERY_FUNC_RESP_EVENT_LAST CREQ_QUERY_FUNC_RESP_EVENT_QUERY_FUNC 
+ u8 reserved48[6]; +}; + +/* creq_query_func_resp_sb (size:1152b/144B) */ +struct creq_query_func_resp_sb { + u8 opcode; + #define CREQ_QUERY_FUNC_RESP_SB_OPCODE_QUERY_FUNC 0x83UL + #define CREQ_QUERY_FUNC_RESP_SB_OPCODE_LAST CREQ_QUERY_FUNC_RESP_SB_OPCODE_QUERY_FUNC + u8 status; + __le16 cookie; + __le16 flags; + u8 resp_size; + u8 reserved8; + __le64 max_mr_size; + __le32 max_qp; + __le16 max_qp_wr; + __le16 dev_cap_flags; + #define CREQ_QUERY_FUNC_RESP_SB_RESIZE_QP 0x1UL + #define CREQ_QUERY_FUNC_RESP_SB_CC_GENERATION_MASK 0xeUL + #define CREQ_QUERY_FUNC_RESP_SB_CC_GENERATION_SFT 1 + #define CREQ_QUERY_FUNC_RESP_SB_CC_GENERATION_CC_GEN0 (0x0UL << 1) + #define CREQ_QUERY_FUNC_RESP_SB_CC_GENERATION_CC_GEN1 (0x1UL << 1) + #define CREQ_QUERY_FUNC_RESP_SB_CC_GENERATION_CC_GEN1_EXT (0x2UL << 1) + #define CREQ_QUERY_FUNC_RESP_SB_CC_GENERATION_CC_GEN2 (0x3UL << 1) + #define CREQ_QUERY_FUNC_RESP_SB_CC_GENERATION_LAST CREQ_QUERY_FUNC_RESP_SB_CC_GENERATION_CC_GEN2 + #define CREQ_QUERY_FUNC_RESP_SB_EXT_STATS 0x10UL + #define CREQ_QUERY_FUNC_RESP_SB_MR_REGISTER_ALLOC 0x20UL + #define CREQ_QUERY_FUNC_RESP_SB_OPTIMIZED_TRANSMIT_ENABLED 0x40UL + #define CREQ_QUERY_FUNC_RESP_SB_CQE_V2 0x80UL + #define CREQ_QUERY_FUNC_RESP_SB_PINGPONG_PUSH_MODE 0x100UL + #define CREQ_QUERY_FUNC_RESP_SB_HW_REQUESTER_RETX_ENABLED 0x200UL + #define CREQ_QUERY_FUNC_RESP_SB_HW_RESPONDER_RETX_ENABLED 0x400UL + #define CREQ_QUERY_FUNC_RESP_SB_LINK_AGGR_SUPPORTED 0x800UL + #define CREQ_QUERY_FUNC_RESP_SB_LINK_AGGR_SUPPORTED_VALID 0x1000UL + #define CREQ_QUERY_FUNC_RESP_SB_PSEUDO_STATIC_QP_ALLOC_SUPPORTED 0x2000UL + #define CREQ_QUERY_FUNC_RESP_SB_EXPRESS_MODE_SUPPORTED 0x4000UL + #define CREQ_QUERY_FUNC_RESP_SB_INTERNAL_QUEUE_MEMORY 0x8000UL + __le32 max_cq; + __le32 max_cqe; + __le32 max_pd; + u8 max_sge; + u8 max_srq_sge; + u8 max_qp_rd_atom; + u8 max_qp_init_rd_atom; + __le32 max_mr; + __le32 max_mw; + __le32 max_raw_eth_qp; + __le32 max_ah; + __le32 max_fmr; + __le32 max_srq_wr; + __le32 max_pkeys; + __le32 max_inline_data; + u8 max_map_per_fmr; + u8 l2_db_space_size; + __le16 max_srq; + __le32 max_gid; + __le32 tqm_alloc_reqs[12]; + __le32 max_dpi; + u8 max_sge_var_wqe; + u8 dev_cap_ext_flags; + #define CREQ_QUERY_FUNC_RESP_SB_ATOMIC_OPS_NOT_SUPPORTED 0x1UL + #define CREQ_QUERY_FUNC_RESP_SB_DRV_VERSION_RGTR_SUPPORTED 0x2UL + #define CREQ_QUERY_FUNC_RESP_SB_CREATE_QP_BATCH_SUPPORTED 0x4UL + #define CREQ_QUERY_FUNC_RESP_SB_DESTROY_QP_BATCH_SUPPORTED 0x8UL + #define CREQ_QUERY_FUNC_RESP_SB_ROCE_STATS_EXT_CTX_SUPPORTED 0x10UL + #define CREQ_QUERY_FUNC_RESP_SB_CREATE_SRQ_SGE_SUPPORTED 0x20UL + #define CREQ_QUERY_FUNC_RESP_SB_FIXED_SIZE_WQE_DISABLED 0x40UL + #define CREQ_QUERY_FUNC_RESP_SB_DCN_SUPPORTED 0x80UL + __le16 max_inline_data_var_wqe; + __le32 start_qid; + u8 max_msn_table_size; + u8 reserved8_1; + __le16 dev_cap_ext_flags_2; + #define CREQ_QUERY_FUNC_RESP_SB_OPTIMIZE_MODIFY_QP_SUPPORTED 0x1UL +}; + +/* cmdq_set_func_resources (size:448b/56B) */ +struct cmdq_set_func_resources { + u8 opcode; + #define CMDQ_SET_FUNC_RESOURCES_OPCODE_SET_FUNC_RESOURCES 0x84UL + #define CMDQ_SET_FUNC_RESOURCES_OPCODE_LAST CMDQ_SET_FUNC_RESOURCES_OPCODE_SET_FUNC_RESOURCES + u8 cmd_size; + __le16 flags; + #define CMDQ_SET_FUNC_RESOURCES_FLAGS_MRAV_RESERVATION_SPLIT 0x1UL + __le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 resp_addr; + __le32 number_of_qp; + __le32 number_of_mrw; + __le32 number_of_srq; + __le32 number_of_cq; + __le32 max_qp_per_vf; + __le32 max_mrw_per_vf; + __le32 max_srq_per_vf; + __le32 max_cq_per_vf; + __le32 
max_gid_per_vf; + __le32 stat_ctx_id; +}; + +/* creq_set_func_resources_resp (size:128b/16B) */ +struct creq_set_func_resources_resp { + u8 type; + #define CREQ_SET_FUNC_RESOURCES_RESP_TYPE_MASK 0x3fUL + #define CREQ_SET_FUNC_RESOURCES_RESP_TYPE_SFT 0 + #define CREQ_SET_FUNC_RESOURCES_RESP_TYPE_QP_EVENT 0x38UL + #define CREQ_SET_FUNC_RESOURCES_RESP_TYPE_LAST CREQ_SET_FUNC_RESOURCES_RESP_TYPE_QP_EVENT + u8 status; + __le16 cookie; + __le32 reserved32; + u8 v; + #define CREQ_SET_FUNC_RESOURCES_RESP_V 0x1UL + u8 event; + #define CREQ_SET_FUNC_RESOURCES_RESP_EVENT_SET_FUNC_RESOURCES 0x84UL + #define CREQ_SET_FUNC_RESOURCES_RESP_EVENT_LAST CREQ_SET_FUNC_RESOURCES_RESP_EVENT_SET_FUNC_RESOURCES + u8 reserved48[6]; +}; + +/* cmdq_stop_func (size:128b/16B) */ +struct cmdq_stop_func { + u8 opcode; + #define CMDQ_STOP_FUNC_OPCODE_STOP_FUNC 0x82UL + #define CMDQ_STOP_FUNC_OPCODE_LAST CMDQ_STOP_FUNC_OPCODE_STOP_FUNC + u8 cmd_size; + __le16 flags; + __le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 resp_addr; +}; + +/* creq_stop_func_resp (size:128b/16B) */ +struct creq_stop_func_resp { + u8 type; + #define CREQ_STOP_FUNC_RESP_TYPE_MASK 0x3fUL + #define CREQ_STOP_FUNC_RESP_TYPE_SFT 0 + #define CREQ_STOP_FUNC_RESP_TYPE_QP_EVENT 0x38UL + #define CREQ_STOP_FUNC_RESP_TYPE_LAST CREQ_STOP_FUNC_RESP_TYPE_QP_EVENT + u8 status; + __le16 cookie; + __le32 reserved32; + u8 v; + #define CREQ_STOP_FUNC_RESP_V 0x1UL + u8 event; + #define CREQ_STOP_FUNC_RESP_EVENT_STOP_FUNC 0x82UL + #define CREQ_STOP_FUNC_RESP_EVENT_LAST CREQ_STOP_FUNC_RESP_EVENT_STOP_FUNC + u8 reserved48[6]; +}; + +/* cmdq_read_context (size:192b/24B) */ +struct cmdq_read_context { + u8 opcode; + #define CMDQ_READ_CONTEXT_OPCODE_READ_CONTEXT 0x85UL + #define CMDQ_READ_CONTEXT_OPCODE_LAST CMDQ_READ_CONTEXT_OPCODE_READ_CONTEXT + u8 cmd_size; + __le16 flags; + __le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 resp_addr; + __le32 type_xid; + #define CMDQ_READ_CONTEXT_XID_MASK 0xffffffUL + #define CMDQ_READ_CONTEXT_XID_SFT 0 + #define CMDQ_READ_CONTEXT_TYPE_MASK 0xff000000UL + #define CMDQ_READ_CONTEXT_TYPE_SFT 24 + #define CMDQ_READ_CONTEXT_TYPE_QPC (0x0UL << 24) + #define CMDQ_READ_CONTEXT_TYPE_CQ (0x1UL << 24) + #define CMDQ_READ_CONTEXT_TYPE_MRW (0x2UL << 24) + #define CMDQ_READ_CONTEXT_TYPE_SRQ (0x3UL << 24) + #define CMDQ_READ_CONTEXT_TYPE_LAST CMDQ_READ_CONTEXT_TYPE_SRQ + __le32 unused_0; +}; + +/* creq_read_context (size:128b/16B) */ +struct creq_read_context { + u8 type; + #define CREQ_READ_CONTEXT_TYPE_MASK 0x3fUL + #define CREQ_READ_CONTEXT_TYPE_SFT 0 + #define CREQ_READ_CONTEXT_TYPE_QP_EVENT 0x38UL + #define CREQ_READ_CONTEXT_TYPE_LAST CREQ_READ_CONTEXT_TYPE_QP_EVENT + u8 status; + __le16 cookie; + __le32 reserved32; + u8 v; + #define CREQ_READ_CONTEXT_V 0x1UL + u8 event; + #define CREQ_READ_CONTEXT_EVENT_READ_CONTEXT 0x85UL + #define CREQ_READ_CONTEXT_EVENT_LAST CREQ_READ_CONTEXT_EVENT_READ_CONTEXT + __le16 reserved16; + __le32 reserved_32; +}; + +/* cmdq_map_tc_to_cos (size:192b/24B) */ +struct cmdq_map_tc_to_cos { + u8 opcode; + #define CMDQ_MAP_TC_TO_COS_OPCODE_MAP_TC_TO_COS 0x8aUL + #define CMDQ_MAP_TC_TO_COS_OPCODE_LAST CMDQ_MAP_TC_TO_COS_OPCODE_MAP_TC_TO_COS + u8 cmd_size; + __le16 flags; + __le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 resp_addr; + __le16 cos0; + #define CMDQ_MAP_TC_TO_COS_COS0_NO_CHANGE 0xffffUL + #define CMDQ_MAP_TC_TO_COS_COS0_LAST CMDQ_MAP_TC_TO_COS_COS0_NO_CHANGE + __le16 cos1; + #define CMDQ_MAP_TC_TO_COS_COS1_DISABLE 0x8000UL + #define CMDQ_MAP_TC_TO_COS_COS1_NO_CHANGE 0xffffUL + #define 
CMDQ_MAP_TC_TO_COS_COS1_LAST CMDQ_MAP_TC_TO_COS_COS1_NO_CHANGE
+ __le32 unused_0;
+};
+
+/* creq_map_tc_to_cos_resp (size:128b/16B) */
+struct creq_map_tc_to_cos_resp {
+ u8 type;
+ #define CREQ_MAP_TC_TO_COS_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_MAP_TC_TO_COS_RESP_TYPE_SFT 0
+ #define CREQ_MAP_TC_TO_COS_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_MAP_TC_TO_COS_RESP_TYPE_LAST CREQ_MAP_TC_TO_COS_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 reserved32;
+ u8 v;
+ #define CREQ_MAP_TC_TO_COS_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_MAP_TC_TO_COS_RESP_EVENT_MAP_TC_TO_COS 0x8aUL
+ #define CREQ_MAP_TC_TO_COS_RESP_EVENT_LAST CREQ_MAP_TC_TO_COS_RESP_EVENT_MAP_TC_TO_COS
+ u8 reserved48[6];
+};
+
+/* cmdq_query_roce_cc (size:128b/16B) */
+struct cmdq_query_roce_cc {
+ u8 opcode;
+ #define CMDQ_QUERY_ROCE_CC_OPCODE_QUERY_ROCE_CC 0x8dUL
+ #define CMDQ_QUERY_ROCE_CC_OPCODE_LAST CMDQ_QUERY_ROCE_CC_OPCODE_QUERY_ROCE_CC
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+};
+
+/* creq_query_roce_cc_resp (size:128b/16B) */
+struct creq_query_roce_cc_resp {
+ u8 type;
+ #define CREQ_QUERY_ROCE_CC_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_QUERY_ROCE_CC_RESP_TYPE_SFT 0
+ #define CREQ_QUERY_ROCE_CC_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_QUERY_ROCE_CC_RESP_TYPE_LAST CREQ_QUERY_ROCE_CC_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 size;
+ u8 v;
+ #define CREQ_QUERY_ROCE_CC_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_QUERY_ROCE_CC_RESP_EVENT_QUERY_ROCE_CC 0x8dUL
+ #define CREQ_QUERY_ROCE_CC_RESP_EVENT_LAST CREQ_QUERY_ROCE_CC_RESP_EVENT_QUERY_ROCE_CC
+ u8 reserved48[6];
+};
+
+/* creq_query_roce_cc_resp_sb (size:256b/32B) */
+struct creq_query_roce_cc_resp_sb {
+ u8 opcode;
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_OPCODE_QUERY_ROCE_CC 0x8dUL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_OPCODE_LAST CREQ_QUERY_ROCE_CC_RESP_SB_OPCODE_QUERY_ROCE_CC
+ u8 status;
+ __le16 cookie;
+ __le16 flags;
+ u8 resp_size;
+ u8 reserved8;
+ u8 enable_cc;
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_ENABLE_CC 0x1UL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_UNUSED7_MASK 0xfeUL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_UNUSED7_SFT 1
+ u8 tos_dscp_tos_ecn;
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TOS_ECN_MASK 0x3UL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TOS_ECN_SFT 0
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TOS_DSCP_MASK 0xfcUL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TOS_DSCP_SFT 2
+ u8 g;
+ u8 num_phases_per_state;
+ __le16 init_cr;
+ __le16 init_tr;
+ u8 alt_vlan_pcp;
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_ALT_VLAN_PCP_MASK 0x7UL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_ALT_VLAN_PCP_SFT 0
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_RSVD1_MASK 0xf8UL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_RSVD1_SFT 3
+ u8 alt_tos_dscp;
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_ALT_TOS_DSCP_MASK 0x3fUL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_ALT_TOS_DSCP_SFT 0
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_RSVD4_MASK 0xc0UL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_RSVD4_SFT 6
+ u8 cc_mode;
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_CC_MODE_DCTCP 0x0UL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_CC_MODE_PROBABILISTIC 0x1UL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_CC_MODE_LAST CREQ_QUERY_ROCE_CC_RESP_SB_CC_MODE_PROBABILISTIC
+ u8 tx_queue;
+ __le16 rtt;
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_RTT_MASK 0x3fffUL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_RTT_SFT 0
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_RSVD5_MASK 0xc000UL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_RSVD5_SFT 14
+ __le16 tcp_cp;
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TCP_CP_MASK 0x3ffUL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TCP_CP_SFT 0
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_RSVD6_MASK 0xfc00UL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_RSVD6_SFT 10
+ __le16 inactivity_th;
+ u8 pkts_per_phase;
+ u8 time_per_phase;
+ __le32 reserved32;
+};
+
+/* creq_query_roce_cc_resp_sb_tlv (size:384b/48B) */
+struct creq_query_roce_cc_resp_sb_tlv {
+ __le16 cmd_discr;
+ u8 reserved_8b;
+ u8 tlv_flags;
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_TLV_FLAGS_MORE 0x1UL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_TLV_FLAGS_MORE_LAST 0x0UL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_TLV_FLAGS_MORE_NOT_LAST 0x1UL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_TLV_FLAGS_REQUIRED 0x2UL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_TLV_FLAGS_REQUIRED_NO (0x0UL << 1)
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_TLV_FLAGS_REQUIRED_YES (0x1UL << 1)
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_TLV_FLAGS_REQUIRED_LAST CREQ_QUERY_ROCE_CC_RESP_SB_TLV_TLV_FLAGS_REQUIRED_YES
+ __le16 tlv_type;
+ __le16 length;
+ u8 total_size;
+ u8 reserved56[7];
+ u8 opcode;
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_OPCODE_QUERY_ROCE_CC 0x8dUL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_OPCODE_LAST CREQ_QUERY_ROCE_CC_RESP_SB_TLV_OPCODE_QUERY_ROCE_CC
+ u8 status;
+ __le16 cookie;
+ __le16 flags;
+ u8 resp_size;
+ u8 reserved8;
+ u8 enable_cc;
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_ENABLE_CC 0x1UL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_UNUSED7_MASK 0xfeUL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_UNUSED7_SFT 1
+ u8 tos_dscp_tos_ecn;
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_TOS_ECN_MASK 0x3UL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_TOS_ECN_SFT 0
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_TOS_DSCP_MASK 0xfcUL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_TOS_DSCP_SFT 2
+ u8 g;
+ u8 num_phases_per_state;
+ __le16 init_cr;
+ __le16 init_tr;
+ u8 alt_vlan_pcp;
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_ALT_VLAN_PCP_MASK 0x7UL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_ALT_VLAN_PCP_SFT 0
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_RSVD1_MASK 0xf8UL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_RSVD1_SFT 3
+ u8 alt_tos_dscp;
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_ALT_TOS_DSCP_MASK 0x3fUL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_ALT_TOS_DSCP_SFT 0
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_RSVD4_MASK 0xc0UL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_RSVD4_SFT 6
+ u8 cc_mode;
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_CC_MODE_DCTCP 0x0UL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_CC_MODE_PROBABILISTIC 0x1UL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_CC_MODE_LAST CREQ_QUERY_ROCE_CC_RESP_SB_TLV_CC_MODE_PROBABILISTIC
+ u8 tx_queue;
+ __le16 rtt;
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_RTT_MASK 0x3fffUL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_RTT_SFT 0
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_RSVD5_MASK 0xc000UL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_RSVD5_SFT 14
+ __le16 tcp_cp;
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_TCP_CP_MASK 0x3ffUL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_TCP_CP_SFT 0
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_RSVD6_MASK 0xfc00UL
+ #define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_RSVD6_SFT 10
+ __le16 inactivity_th;
+ u8 pkts_per_phase;
+ u8 time_per_phase;
+ __le32 reserved32;
+};
+
+/* creq_query_roce_cc_gen1_resp_sb_tlv (size:704b/88B) */
+struct creq_query_roce_cc_gen1_resp_sb_tlv {
+ __le16 cmd_discr;
+ u8 reserved_8b;
+ u8 tlv_flags;
+ #define CREQ_QUERY_ROCE_CC_GEN1_RESP_SB_TLV_TLV_FLAGS_MORE 0x1UL
+ #define CREQ_QUERY_ROCE_CC_GEN1_RESP_SB_TLV_TLV_FLAGS_MORE_LAST 0x0UL
+ #define CREQ_QUERY_ROCE_CC_GEN1_RESP_SB_TLV_TLV_FLAGS_MORE_NOT_LAST 0x1UL
+ #define CREQ_QUERY_ROCE_CC_GEN1_RESP_SB_TLV_TLV_FLAGS_REQUIRED 0x2UL
+ #define
CREQ_QUERY_ROCE_CC_GEN1_RESP_SB_TLV_TLV_FLAGS_REQUIRED_NO (0x0UL << 1) + #define CREQ_QUERY_ROCE_CC_GEN1_RESP_SB_TLV_TLV_FLAGS_REQUIRED_YES (0x1UL << 1) + #define CREQ_QUERY_ROCE_CC_GEN1_RESP_SB_TLV_TLV_FLAGS_REQUIRED_LAST CREQ_QUERY_ROCE_CC_GEN1_RESP_SB_TLV_TLV_FLAGS_REQUIRED_YES + __le16 tlv_type; + __le16 length; + __le64 reserved64; + __le16 inactivity_th_hi; + __le16 min_time_between_cnps; + __le16 init_cp; + u8 tr_update_mode; + u8 tr_update_cycles; + u8 fr_num_rtts; + u8 ai_rate_increase; + __le16 reduction_relax_rtts_th; + __le16 additional_relax_cr_th; + __le16 cr_min_th; + u8 bw_avg_weight; + u8 actual_cr_factor; + __le16 max_cp_cr_th; + u8 cp_bias_en; + u8 cp_bias; + u8 cnp_ecn; + #define CREQ_QUERY_ROCE_CC_GEN1_RESP_SB_TLV_CNP_ECN_NOT_ECT 0x0UL + #define CREQ_QUERY_ROCE_CC_GEN1_RESP_SB_TLV_CNP_ECN_ECT_1 0x1UL + #define CREQ_QUERY_ROCE_CC_GEN1_RESP_SB_TLV_CNP_ECN_ECT_0 0x2UL + #define CREQ_QUERY_ROCE_CC_GEN1_RESP_SB_TLV_CNP_ECN_LAST CREQ_QUERY_ROCE_CC_GEN1_RESP_SB_TLV_CNP_ECN_ECT_0 + u8 rtt_jitter_en; + __le16 link_bytes_per_usec; + __le16 reset_cc_cr_th; + u8 cr_width; + u8 quota_period_min; + u8 quota_period_max; + u8 quota_period_abs_max; + __le16 tr_lower_bound; + u8 cr_prob_factor; + u8 tr_prob_factor; + __le16 fairness_cr_th; + u8 red_div; + u8 cnp_ratio_th; + __le16 exp_ai_rtts; + u8 exp_ai_cr_cp_ratio; + u8 use_rate_table; + __le16 cp_exp_update_th; + __le16 high_exp_ai_rtts_th1; + __le16 high_exp_ai_rtts_th2; + __le16 actual_cr_cong_free_rtts_th; + __le16 severe_cong_cr_th1; + __le16 severe_cong_cr_th2; + __le32 link64B_per_rtt; + u8 cc_ack_bytes; + u8 reduce_init_en; + __le16 reduce_init_cong_free_rtts_th; + u8 random_no_red_en; + u8 actual_cr_shift_correction_en; + u8 quota_period_adjust_en; + u8 reserved[5]; +}; + +/* creq_query_roce_cc_gen2_resp_sb_tlv (size:512b/64B) */ +struct creq_query_roce_cc_gen2_resp_sb_tlv { + __le16 cmd_discr; + u8 reserved_8b; + u8 tlv_flags; + #define CREQ_QUERY_ROCE_CC_GEN2_RESP_SB_TLV_TLV_FLAGS_MORE 0x1UL + #define CREQ_QUERY_ROCE_CC_GEN2_RESP_SB_TLV_TLV_FLAGS_MORE_LAST 0x0UL + #define CREQ_QUERY_ROCE_CC_GEN2_RESP_SB_TLV_TLV_FLAGS_MORE_NOT_LAST 0x1UL + #define CREQ_QUERY_ROCE_CC_GEN2_RESP_SB_TLV_TLV_FLAGS_REQUIRED 0x2UL + #define CREQ_QUERY_ROCE_CC_GEN2_RESP_SB_TLV_TLV_FLAGS_REQUIRED_NO (0x0UL << 1) + #define CREQ_QUERY_ROCE_CC_GEN2_RESP_SB_TLV_TLV_FLAGS_REQUIRED_YES (0x1UL << 1) + #define CREQ_QUERY_ROCE_CC_GEN2_RESP_SB_TLV_TLV_FLAGS_REQUIRED_LAST CREQ_QUERY_ROCE_CC_GEN2_RESP_SB_TLV_TLV_FLAGS_REQUIRED_YES + __le16 tlv_type; + __le16 length; + __le64 reserved64; + __le16 dcn_qlevel_tbl_thr[8]; + __le32 dcn_qlevel_tbl_act[8]; + #define CREQ_QUERY_ROCE_CC_GEN2_RESP_SB_TLV_DCN_QLEVEL_TBL_ACT_CR_MASK 0x3fffUL + #define CREQ_QUERY_ROCE_CC_GEN2_RESP_SB_TLV_DCN_QLEVEL_TBL_ACT_CR_SFT 0 + #define CREQ_QUERY_ROCE_CC_GEN2_RESP_SB_TLV_DCN_QLEVEL_TBL_ACT_INC_CNP 0x4000UL + #define CREQ_QUERY_ROCE_CC_GEN2_RESP_SB_TLV_DCN_QLEVEL_TBL_ACT_UPD_IMM 0x8000UL + #define CREQ_QUERY_ROCE_CC_GEN2_RESP_SB_TLV_DCN_QLEVEL_TBL_ACT_TR_MASK 0x3fff0000UL + #define CREQ_QUERY_ROCE_CC_GEN2_RESP_SB_TLV_DCN_QLEVEL_TBL_ACT_TR_SFT 16 +}; + +/* cmdq_modify_roce_cc (size:448b/56B) */ +struct cmdq_modify_roce_cc { + u8 opcode; + #define CMDQ_MODIFY_ROCE_CC_OPCODE_MODIFY_ROCE_CC 0x8cUL + #define CMDQ_MODIFY_ROCE_CC_OPCODE_LAST CMDQ_MODIFY_ROCE_CC_OPCODE_MODIFY_ROCE_CC + u8 cmd_size; + __le16 flags; + __le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 resp_addr; + __le32 modify_mask; + #define CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ENABLE_CC 0x1UL + #define 
CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_G 0x2UL + #define CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_NUMPHASEPERSTATE 0x4UL + #define CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_INIT_CR 0x8UL + #define CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_INIT_TR 0x10UL + #define CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TOS_ECN 0x20UL + #define CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TOS_DSCP 0x40UL + #define CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ALT_VLAN_PCP 0x80UL + #define CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ALT_TOS_DSCP 0x100UL + #define CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_RTT 0x200UL + #define CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_CC_MODE 0x400UL + #define CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TCP_CP 0x800UL + #define CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TX_QUEUE 0x1000UL + #define CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_INACTIVITY_CP 0x2000UL + #define CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TIME_PER_PHASE 0x4000UL + #define CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_PKTS_PER_PHASE 0x8000UL + u8 enable_cc; + #define CMDQ_MODIFY_ROCE_CC_ENABLE_CC 0x1UL + #define CMDQ_MODIFY_ROCE_CC_RSVD1_MASK 0xfeUL + #define CMDQ_MODIFY_ROCE_CC_RSVD1_SFT 1 + u8 g; + u8 num_phases_per_state; + u8 pkts_per_phase; + __le16 init_cr; + __le16 init_tr; + u8 tos_dscp_tos_ecn; + #define CMDQ_MODIFY_ROCE_CC_TOS_ECN_MASK 0x3UL + #define CMDQ_MODIFY_ROCE_CC_TOS_ECN_SFT 0 + #define CMDQ_MODIFY_ROCE_CC_TOS_DSCP_MASK 0xfcUL + #define CMDQ_MODIFY_ROCE_CC_TOS_DSCP_SFT 2 + u8 alt_vlan_pcp; + #define CMDQ_MODIFY_ROCE_CC_ALT_VLAN_PCP_MASK 0x7UL + #define CMDQ_MODIFY_ROCE_CC_ALT_VLAN_PCP_SFT 0 + #define CMDQ_MODIFY_ROCE_CC_RSVD3_MASK 0xf8UL + #define CMDQ_MODIFY_ROCE_CC_RSVD3_SFT 3 + __le16 alt_tos_dscp; + #define CMDQ_MODIFY_ROCE_CC_ALT_TOS_DSCP_MASK 0x3fUL + #define CMDQ_MODIFY_ROCE_CC_ALT_TOS_DSCP_SFT 0 + #define CMDQ_MODIFY_ROCE_CC_RSVD4_MASK 0xffc0UL + #define CMDQ_MODIFY_ROCE_CC_RSVD4_SFT 6 + __le16 rtt; + #define CMDQ_MODIFY_ROCE_CC_RTT_MASK 0x3fffUL + #define CMDQ_MODIFY_ROCE_CC_RTT_SFT 0 + #define CMDQ_MODIFY_ROCE_CC_RSVD5_MASK 0xc000UL + #define CMDQ_MODIFY_ROCE_CC_RSVD5_SFT 14 + __le16 tcp_cp; + #define CMDQ_MODIFY_ROCE_CC_TCP_CP_MASK 0x3ffUL + #define CMDQ_MODIFY_ROCE_CC_TCP_CP_SFT 0 + #define CMDQ_MODIFY_ROCE_CC_RSVD6_MASK 0xfc00UL + #define CMDQ_MODIFY_ROCE_CC_RSVD6_SFT 10 + u8 cc_mode; + #define CMDQ_MODIFY_ROCE_CC_CC_MODE_DCTCP_CC_MODE 0x0UL + #define CMDQ_MODIFY_ROCE_CC_CC_MODE_PROBABILISTIC_CC_MODE 0x1UL + #define CMDQ_MODIFY_ROCE_CC_CC_MODE_LAST CMDQ_MODIFY_ROCE_CC_CC_MODE_PROBABILISTIC_CC_MODE + u8 tx_queue; + __le16 inactivity_th; + u8 time_per_phase; + u8 reserved8_1; + __le16 reserved16; + __le32 reserved32; + __le64 reserved64; +}; + +/* cmdq_modify_roce_cc_tlv (size:640b/80B) */ +struct cmdq_modify_roce_cc_tlv { + __le16 cmd_discr; + u8 reserved_8b; + u8 tlv_flags; + #define CMDQ_MODIFY_ROCE_CC_TLV_TLV_FLAGS_MORE 0x1UL + #define CMDQ_MODIFY_ROCE_CC_TLV_TLV_FLAGS_MORE_LAST 0x0UL + #define CMDQ_MODIFY_ROCE_CC_TLV_TLV_FLAGS_MORE_NOT_LAST 0x1UL + #define CMDQ_MODIFY_ROCE_CC_TLV_TLV_FLAGS_REQUIRED 0x2UL + #define CMDQ_MODIFY_ROCE_CC_TLV_TLV_FLAGS_REQUIRED_NO (0x0UL << 1) + #define CMDQ_MODIFY_ROCE_CC_TLV_TLV_FLAGS_REQUIRED_YES (0x1UL << 1) + #define CMDQ_MODIFY_ROCE_CC_TLV_TLV_FLAGS_REQUIRED_LAST CMDQ_MODIFY_ROCE_CC_TLV_TLV_FLAGS_REQUIRED_YES + __le16 tlv_type; + __le16 length; + u8 total_size; + u8 reserved56[7]; + u8 opcode; + #define CMDQ_MODIFY_ROCE_CC_TLV_OPCODE_MODIFY_ROCE_CC 0x8cUL + #define CMDQ_MODIFY_ROCE_CC_TLV_OPCODE_LAST CMDQ_MODIFY_ROCE_CC_TLV_OPCODE_MODIFY_ROCE_CC + u8 cmd_size; + __le16 flags; + __le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 resp_addr; + __le32 modify_mask; + #define 
CMDQ_MODIFY_ROCE_CC_TLV_MODIFY_MASK_ENABLE_CC 0x1UL + #define CMDQ_MODIFY_ROCE_CC_TLV_MODIFY_MASK_G 0x2UL + #define CMDQ_MODIFY_ROCE_CC_TLV_MODIFY_MASK_NUMPHASEPERSTATE 0x4UL + #define CMDQ_MODIFY_ROCE_CC_TLV_MODIFY_MASK_INIT_CR 0x8UL + #define CMDQ_MODIFY_ROCE_CC_TLV_MODIFY_MASK_INIT_TR 0x10UL + #define CMDQ_MODIFY_ROCE_CC_TLV_MODIFY_MASK_TOS_ECN 0x20UL + #define CMDQ_MODIFY_ROCE_CC_TLV_MODIFY_MASK_TOS_DSCP 0x40UL + #define CMDQ_MODIFY_ROCE_CC_TLV_MODIFY_MASK_ALT_VLAN_PCP 0x80UL + #define CMDQ_MODIFY_ROCE_CC_TLV_MODIFY_MASK_ALT_TOS_DSCP 0x100UL + #define CMDQ_MODIFY_ROCE_CC_TLV_MODIFY_MASK_RTT 0x200UL + #define CMDQ_MODIFY_ROCE_CC_TLV_MODIFY_MASK_CC_MODE 0x400UL + #define CMDQ_MODIFY_ROCE_CC_TLV_MODIFY_MASK_TCP_CP 0x800UL + #define CMDQ_MODIFY_ROCE_CC_TLV_MODIFY_MASK_TX_QUEUE 0x1000UL + #define CMDQ_MODIFY_ROCE_CC_TLV_MODIFY_MASK_INACTIVITY_CP 0x2000UL + #define CMDQ_MODIFY_ROCE_CC_TLV_MODIFY_MASK_TIME_PER_PHASE 0x4000UL + #define CMDQ_MODIFY_ROCE_CC_TLV_MODIFY_MASK_PKTS_PER_PHASE 0x8000UL + u8 enable_cc; + #define CMDQ_MODIFY_ROCE_CC_TLV_ENABLE_CC 0x1UL + #define CMDQ_MODIFY_ROCE_CC_TLV_RSVD1_MASK 0xfeUL + #define CMDQ_MODIFY_ROCE_CC_TLV_RSVD1_SFT 1 + u8 g; + u8 num_phases_per_state; + u8 pkts_per_phase; + __le16 init_cr; + __le16 init_tr; + u8 tos_dscp_tos_ecn; + #define CMDQ_MODIFY_ROCE_CC_TLV_TOS_ECN_MASK 0x3UL + #define CMDQ_MODIFY_ROCE_CC_TLV_TOS_ECN_SFT 0 + #define CMDQ_MODIFY_ROCE_CC_TLV_TOS_DSCP_MASK 0xfcUL + #define CMDQ_MODIFY_ROCE_CC_TLV_TOS_DSCP_SFT 2 + u8 alt_vlan_pcp; + #define CMDQ_MODIFY_ROCE_CC_TLV_ALT_VLAN_PCP_MASK 0x7UL + #define CMDQ_MODIFY_ROCE_CC_TLV_ALT_VLAN_PCP_SFT 0 + #define CMDQ_MODIFY_ROCE_CC_TLV_RSVD3_MASK 0xf8UL + #define CMDQ_MODIFY_ROCE_CC_TLV_RSVD3_SFT 3 + __le16 alt_tos_dscp; + #define CMDQ_MODIFY_ROCE_CC_TLV_ALT_TOS_DSCP_MASK 0x3fUL + #define CMDQ_MODIFY_ROCE_CC_TLV_ALT_TOS_DSCP_SFT 0 + #define CMDQ_MODIFY_ROCE_CC_TLV_RSVD4_MASK 0xffc0UL + #define CMDQ_MODIFY_ROCE_CC_TLV_RSVD4_SFT 6 + __le16 rtt; + #define CMDQ_MODIFY_ROCE_CC_TLV_RTT_MASK 0x3fffUL + #define CMDQ_MODIFY_ROCE_CC_TLV_RTT_SFT 0 + #define CMDQ_MODIFY_ROCE_CC_TLV_RSVD5_MASK 0xc000UL + #define CMDQ_MODIFY_ROCE_CC_TLV_RSVD5_SFT 14 + __le16 tcp_cp; + #define CMDQ_MODIFY_ROCE_CC_TLV_TCP_CP_MASK 0x3ffUL + #define CMDQ_MODIFY_ROCE_CC_TLV_TCP_CP_SFT 0 + #define CMDQ_MODIFY_ROCE_CC_TLV_RSVD6_MASK 0xfc00UL + #define CMDQ_MODIFY_ROCE_CC_TLV_RSVD6_SFT 10 + u8 cc_mode; + #define CMDQ_MODIFY_ROCE_CC_TLV_CC_MODE_DCTCP_CC_MODE 0x0UL + #define CMDQ_MODIFY_ROCE_CC_TLV_CC_MODE_PROBABILISTIC_CC_MODE 0x1UL + #define CMDQ_MODIFY_ROCE_CC_TLV_CC_MODE_LAST CMDQ_MODIFY_ROCE_CC_TLV_CC_MODE_PROBABILISTIC_CC_MODE + u8 tx_queue; + __le16 inactivity_th; + u8 time_per_phase; + u8 reserved8_1; + __le16 reserved16; + __le32 reserved32; + __le64 reserved64; + __le64 reservedtlvpad; +}; + +/* cmdq_modify_roce_cc_gen1_tlv (size:768b/96B) */ +struct cmdq_modify_roce_cc_gen1_tlv { + __le16 cmd_discr; + u8 reserved_8b; + u8 tlv_flags; + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_TLV_FLAGS_MORE 0x1UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_TLV_FLAGS_MORE_LAST 0x0UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_TLV_FLAGS_MORE_NOT_LAST 0x1UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_TLV_FLAGS_REQUIRED 0x2UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_TLV_FLAGS_REQUIRED_NO (0x0UL << 1) + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_TLV_FLAGS_REQUIRED_YES (0x1UL << 1) + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_TLV_FLAGS_REQUIRED_LAST CMDQ_MODIFY_ROCE_CC_GEN1_TLV_TLV_FLAGS_REQUIRED_YES + __le16 tlv_type; + __le16 length; + __le64 reserved64; + __le64 modify_mask; + 
#define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_MIN_TIME_BETWEEN_CNPS 0x1UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_INIT_CP 0x2UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_TR_UPDATE_MODE 0x4UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_TR_UPDATE_CYCLES 0x8UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_FR_NUM_RTTS 0x10UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_AI_RATE_INCREASE 0x20UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_REDUCTION_RELAX_RTTS_TH 0x40UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_ADDITIONAL_RELAX_CR_TH 0x80UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_CR_MIN_TH 0x100UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_BW_AVG_WEIGHT 0x200UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_ACTUAL_CR_FACTOR 0x400UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_MAX_CP_CR_TH 0x800UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_CP_BIAS_EN 0x1000UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_CP_BIAS 0x2000UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_CNP_ECN 0x4000UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_RTT_JITTER_EN 0x8000UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_LINK_BYTES_PER_USEC 0x10000UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_RESET_CC_CR_TH 0x20000UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_CR_WIDTH 0x40000UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_QUOTA_PERIOD_MIN 0x80000UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_QUOTA_PERIOD_MAX 0x100000UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_QUOTA_PERIOD_ABS_MAX 0x200000UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_TR_LOWER_BOUND 0x400000UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_CR_PROB_FACTOR 0x800000UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_TR_PROB_FACTOR 0x1000000UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_FAIRNESS_CR_TH 0x2000000UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_RED_DIV 0x4000000UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_CNP_RATIO_TH 0x8000000UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_EXP_AI_RTTS 0x10000000UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_EXP_AI_CR_CP_RATIO 0x20000000UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_CP_EXP_UPDATE_TH 0x40000000UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_HIGH_EXP_AI_RTTS_TH1 0x80000000UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_HIGH_EXP_AI_RTTS_TH2 0x100000000ULL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_USE_RATE_TABLE 0x200000000ULL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_LINK64B_PER_RTT 0x400000000ULL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_ACTUAL_CR_CONG_FREE_RTTS_TH 0x800000000ULL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_SEVERE_CONG_CR_TH1 0x1000000000ULL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_SEVERE_CONG_CR_TH2 0x2000000000ULL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_CC_ACK_BYTES 0x4000000000ULL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_REDUCE_INIT_EN 0x8000000000ULL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_REDUCE_INIT_CONG_FREE_RTTS_TH 0x10000000000ULL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_RANDOM_NO_RED_EN 0x20000000000ULL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_ACTUAL_CR_SHIFT_CORRECTION_EN 0x40000000000ULL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_QUOTA_PERIOD_ADJUST_EN 0x80000000000ULL + __le16 inactivity_th_hi; + __le16 min_time_between_cnps; + __le16 init_cp; + u8 
tr_update_mode; + u8 tr_update_cycles; + u8 fr_num_rtts; + u8 ai_rate_increase; + __le16 reduction_relax_rtts_th; + __le16 additional_relax_cr_th; + __le16 cr_min_th; + u8 bw_avg_weight; + u8 actual_cr_factor; + __le16 max_cp_cr_th; + u8 cp_bias_en; + u8 cp_bias; + u8 cnp_ecn; + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_CNP_ECN_NOT_ECT 0x0UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_CNP_ECN_ECT_1 0x1UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_CNP_ECN_ECT_0 0x2UL + #define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_CNP_ECN_LAST CMDQ_MODIFY_ROCE_CC_GEN1_TLV_CNP_ECN_ECT_0 + u8 rtt_jitter_en; + __le16 link_bytes_per_usec; + __le16 reset_cc_cr_th; + u8 cr_width; + u8 quota_period_min; + u8 quota_period_max; + u8 quota_period_abs_max; + __le16 tr_lower_bound; + u8 cr_prob_factor; + u8 tr_prob_factor; + __le16 fairness_cr_th; + u8 red_div; + u8 cnp_ratio_th; + __le16 exp_ai_rtts; + u8 exp_ai_cr_cp_ratio; + u8 use_rate_table; + __le16 cp_exp_update_th; + __le16 high_exp_ai_rtts_th1; + __le16 high_exp_ai_rtts_th2; + __le16 actual_cr_cong_free_rtts_th; + __le16 severe_cong_cr_th1; + __le16 severe_cong_cr_th2; + __le32 link64B_per_rtt; + u8 cc_ack_bytes; + u8 reduce_init_en; + __le16 reduce_init_cong_free_rtts_th; + u8 random_no_red_en; + u8 actual_cr_shift_correction_en; + u8 quota_period_adjust_en; + u8 reserved[5]; +}; + +/* cmdq_modify_roce_cc_gen2_tlv (size:256b/32B) */ +struct cmdq_modify_roce_cc_gen2_tlv { + __le16 cmd_discr; + u8 reserved_8b; + u8 tlv_flags; + #define CMDQ_MODIFY_ROCE_CC_GEN2_TLV_TLV_FLAGS_MORE 0x1UL + #define CMDQ_MODIFY_ROCE_CC_GEN2_TLV_TLV_FLAGS_MORE_LAST 0x0UL + #define CMDQ_MODIFY_ROCE_CC_GEN2_TLV_TLV_FLAGS_MORE_NOT_LAST 0x1UL + #define CMDQ_MODIFY_ROCE_CC_GEN2_TLV_TLV_FLAGS_REQUIRED 0x2UL + #define CMDQ_MODIFY_ROCE_CC_GEN2_TLV_TLV_FLAGS_REQUIRED_NO (0x0UL << 1) + #define CMDQ_MODIFY_ROCE_CC_GEN2_TLV_TLV_FLAGS_REQUIRED_YES (0x1UL << 1) + #define CMDQ_MODIFY_ROCE_CC_GEN2_TLV_TLV_FLAGS_REQUIRED_LAST CMDQ_MODIFY_ROCE_CC_GEN2_TLV_TLV_FLAGS_REQUIRED_YES + __le16 tlv_type; + __le16 length; + __le64 reserved64; + __le64 modify_mask; + #define CMDQ_MODIFY_ROCE_CC_GEN2_TLV_MODIFY_MASK_DCN_QLEVEL_TBL_IDX 0x1UL + #define CMDQ_MODIFY_ROCE_CC_GEN2_TLV_MODIFY_MASK_DCN_QLEVEL_TBL_THR 0x2UL + #define CMDQ_MODIFY_ROCE_CC_GEN2_TLV_MODIFY_MASK_DCN_QLEVEL_TBL_CR 0x4UL + #define CMDQ_MODIFY_ROCE_CC_GEN2_TLV_MODIFY_MASK_DCN_QLEVEL_TBL_INC_CNP 0x8UL + #define CMDQ_MODIFY_ROCE_CC_GEN2_TLV_MODIFY_MASK_DCN_QLEVEL_TBL_UPD_IMM 0x10UL + #define CMDQ_MODIFY_ROCE_CC_GEN2_TLV_MODIFY_MASK_DCN_QLEVEL_TBL_TR 0x20UL + u8 dcn_qlevel_tbl_idx; + u8 reserved8; + __le16 dcn_qlevel_tbl_thr; + __le32 dcn_qlevel_tbl_act; + #define CMDQ_MODIFY_ROCE_CC_GEN2_TLV_DCN_QLEVEL_TBL_ACT_CR_MASK 0x3fffUL + #define CMDQ_MODIFY_ROCE_CC_GEN2_TLV_DCN_QLEVEL_TBL_ACT_CR_SFT 0 + #define CMDQ_MODIFY_ROCE_CC_GEN2_TLV_DCN_QLEVEL_TBL_ACT_INC_CNP 0x4000UL + #define CMDQ_MODIFY_ROCE_CC_GEN2_TLV_DCN_QLEVEL_TBL_ACT_UPD_IMM 0x8000UL + #define CMDQ_MODIFY_ROCE_CC_GEN2_TLV_DCN_QLEVEL_TBL_ACT_TR_MASK 0x3fff0000UL + #define CMDQ_MODIFY_ROCE_CC_GEN2_TLV_DCN_QLEVEL_TBL_ACT_TR_SFT 16 +}; + +/* creq_modify_roce_cc_resp (size:128b/16B) */ +struct creq_modify_roce_cc_resp { + u8 type; + #define CREQ_MODIFY_ROCE_CC_RESP_TYPE_MASK 0x3fUL + #define CREQ_MODIFY_ROCE_CC_RESP_TYPE_SFT 0 + #define CREQ_MODIFY_ROCE_CC_RESP_TYPE_QP_EVENT 0x38UL + #define CREQ_MODIFY_ROCE_CC_RESP_TYPE_LAST CREQ_MODIFY_ROCE_CC_RESP_TYPE_QP_EVENT + u8 status; + __le16 cookie; + __le32 reserved32; + u8 v; + #define CREQ_MODIFY_ROCE_CC_RESP_V 0x1UL + u8 event; + #define 
CREQ_MODIFY_ROCE_CC_RESP_EVENT_MODIFY_ROCE_CC 0x8cUL + #define CREQ_MODIFY_ROCE_CC_RESP_EVENT_LAST CREQ_MODIFY_ROCE_CC_RESP_EVENT_MODIFY_ROCE_CC + u8 reserved48[6]; +}; + +/* cmdq_set_link_aggr_mode_cc (size:320b/40B) */ +struct cmdq_set_link_aggr_mode_cc { + u8 opcode; + #define CMDQ_SET_LINK_AGGR_MODE_OPCODE_SET_LINK_AGGR_MODE 0x8fUL + #define CMDQ_SET_LINK_AGGR_MODE_OPCODE_LAST CMDQ_SET_LINK_AGGR_MODE_OPCODE_SET_LINK_AGGR_MODE + u8 cmd_size; + __le16 flags; + __le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 resp_addr; + __le32 modify_mask; + #define CMDQ_SET_LINK_AGGR_MODE_MODIFY_MASK_AGGR_EN 0x1UL + #define CMDQ_SET_LINK_AGGR_MODE_MODIFY_MASK_ACTIVE_PORT_MAP 0x2UL + #define CMDQ_SET_LINK_AGGR_MODE_MODIFY_MASK_MEMBER_PORT_MAP 0x4UL + #define CMDQ_SET_LINK_AGGR_MODE_MODIFY_MASK_AGGR_MODE 0x8UL + #define CMDQ_SET_LINK_AGGR_MODE_MODIFY_MASK_STAT_CTX_ID 0x10UL + u8 aggr_enable; + #define CMDQ_SET_LINK_AGGR_MODE_AGGR_ENABLE 0x1UL + #define CMDQ_SET_LINK_AGGR_MODE_RSVD1_MASK 0xfeUL + #define CMDQ_SET_LINK_AGGR_MODE_RSVD1_SFT 1 + u8 active_port_map; + #define CMDQ_SET_LINK_AGGR_MODE_ACTIVE_PORT_MAP_MASK 0xfUL + #define CMDQ_SET_LINK_AGGR_MODE_ACTIVE_PORT_MAP_SFT 0 + #define CMDQ_SET_LINK_AGGR_MODE_RSVD2_MASK 0xf0UL + #define CMDQ_SET_LINK_AGGR_MODE_RSVD2_SFT 4 + u8 member_port_map; + u8 link_aggr_mode; + #define CMDQ_SET_LINK_AGGR_MODE_AGGR_MODE_ACTIVE_ACTIVE 0x1UL + #define CMDQ_SET_LINK_AGGR_MODE_AGGR_MODE_ACTIVE_BACKUP 0x2UL + #define CMDQ_SET_LINK_AGGR_MODE_AGGR_MODE_BALANCE_XOR 0x3UL + #define CMDQ_SET_LINK_AGGR_MODE_AGGR_MODE_802_3_AD 0x4UL + #define CMDQ_SET_LINK_AGGR_MODE_AGGR_MODE_LAST CMDQ_SET_LINK_AGGR_MODE_AGGR_MODE_802_3_AD + __le16 stat_ctx_id[4]; + __le64 rsvd1; +}; + +/* creq_set_link_aggr_mode_resources_resp (size:128b/16B) */ +struct creq_set_link_aggr_mode_resources_resp { + u8 type; + #define CREQ_SET_LINK_AGGR_MODE_RESP_TYPE_MASK 0x3fUL + #define CREQ_SET_LINK_AGGR_MODE_RESP_TYPE_SFT 0 + #define CREQ_SET_LINK_AGGR_MODE_RESP_TYPE_QP_EVENT 0x38UL + #define CREQ_SET_LINK_AGGR_MODE_RESP_TYPE_LAST CREQ_SET_LINK_AGGR_MODE_RESP_TYPE_QP_EVENT + u8 status; + __le16 cookie; + __le32 reserved32; + u8 v; + #define CREQ_SET_LINK_AGGR_MODE_RESP_V 0x1UL + u8 event; + #define CREQ_SET_LINK_AGGR_MODE_RESP_EVENT_SET_LINK_AGGR_MODE 0x8fUL + #define CREQ_SET_LINK_AGGR_MODE_RESP_EVENT_LAST CREQ_SET_LINK_AGGR_MODE_RESP_EVENT_SET_LINK_AGGR_MODE + u8 reserved48[6]; +}; + +/* cmdq_vf_backchannel_request (size:256b/32B) */ +struct cmdq_vf_backchannel_request { + u8 opcode; + #define CMDQ_VF_BACKCHANNEL_REQUEST_OPCODE_VF_BACKCHANNEL_REQUEST 0x86UL + #define CMDQ_VF_BACKCHANNEL_REQUEST_OPCODE_LAST CMDQ_VF_BACKCHANNEL_REQUEST_OPCODE_VF_BACKCHANNEL_REQUEST + u8 cmd_size; + __le16 flags; + __le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 resp_addr; + __le64 command_addr; + __le16 command_length; + u8 unused_0[6]; +}; + +/* cmdq_read_vf_memory (size:256b/32B) */ +struct cmdq_read_vf_memory { + u8 opcode; + #define CMDQ_READ_VF_MEMORY_OPCODE_READ_VF_MEMORY 0x87UL + #define CMDQ_READ_VF_MEMORY_OPCODE_LAST CMDQ_READ_VF_MEMORY_OPCODE_READ_VF_MEMORY + u8 cmd_size; + __le16 flags; + __le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 resp_addr; + __le64 addr; + __le16 vf_id; + __le16 length; + __le32 unused_0; +}; + +/* cmdq_complete_vf_request (size:320b/40B) */ +struct cmdq_complete_vf_request { + u8 opcode; + #define CMDQ_COMPLETE_VF_REQUEST_OPCODE_COMPLETE_VF_REQUEST 0x88UL + #define CMDQ_COMPLETE_VF_REQUEST_OPCODE_LAST CMDQ_COMPLETE_VF_REQUEST_OPCODE_COMPLETE_VF_REQUEST + u8 cmd_size; + 
__le16 flags; + __le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 resp_addr; + __le64 addr; + __le32 vf_misc; + __le16 vf_id; + __le16 vf_cookie; + u8 vf_status; + u8 unused_0[3]; + __le32 unused_1; +}; + +/* orchestrate_qid_migration (size:256b/32B) */ +struct orchestrate_qid_migration { + u8 opcode; + #define ORCHESTRATE_QID_MIGRATION_OPCODE_ORCHESTRATE_QID_MIGRATION 0x93UL + #define ORCHESTRATE_QID_MIGRATION_OPCODE_LAST ORCHESTRATE_QID_MIGRATION_OPCODE_ORCHESTRATE_QID_MIGRATION + u8 cmd_size; + __le16 flags; + __le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 resp_addr; + u8 qid_migration_flags; + #define ORCHESTRATE_QID_MIGRATION_QID_MIGRATION_FLAGS_MASK 0xfUL + #define ORCHESTRATE_QID_MIGRATION_QID_MIGRATION_FLAGS_SFT 0 + #define ORCHESTRATE_QID_MIGRATION_QID_MIGRATION_FLAGS_ENABLE_NATIVE_QID_RANGE 0x0UL + #define ORCHESTRATE_QID_MIGRATION_QID_MIGRATION_FLAGS_ENABLE_EXTENDED_QID_RANGE 0x1UL + #define ORCHESTRATE_QID_MIGRATION_QID_MIGRATION_FLAGS_DISABLE_NATIVE_QID_RANGE 0x2UL + #define ORCHESTRATE_QID_MIGRATION_QID_MIGRATION_FLAGS_DISABLE_EXTENDED_QID_RANGE 0x3UL + #define ORCHESTRATE_QID_MIGRATION_QID_MIGRATION_FLAGS_LAST ORCHESTRATE_QID_MIGRATION_QID_MIGRATION_FLAGS_DISABLE_EXTENDED_QID_RANGE + #define ORCHESTRATE_QID_MIGRATION_UNUSED4_MASK 0xf0UL + #define ORCHESTRATE_QID_MIGRATION_UNUSED4_SFT 4 + u8 reserved56[7]; + __le64 reserved64; +}; + +/* creq_orchestrate_qid_migration (size:128b/16B) */ +struct creq_orchestrate_qid_migration { + u8 type; + #define CREQ_ORCHESTRATE_QID_MIGRATION_TYPE_MASK 0x3fUL + #define CREQ_ORCHESTRATE_QID_MIGRATION_TYPE_SFT 0 + #define CREQ_ORCHESTRATE_QID_MIGRATION_TYPE_QP_EVENT 0x38UL + #define CREQ_ORCHESTRATE_QID_MIGRATION_TYPE_LAST CREQ_ORCHESTRATE_QID_MIGRATION_TYPE_QP_EVENT + u8 status; + __le16 cookie; + __le32 reserved32; + u8 v; + #define CREQ_ORCHESTRATE_QID_MIGRATION_V 0x1UL + u8 event; + #define CREQ_ORCHESTRATE_QID_MIGRATION_EVENT_ORCHESTRATE_QID_MIGRATION 0x93UL + #define CREQ_ORCHESTRATE_QID_MIGRATION_EVENT_LAST CREQ_ORCHESTRATE_QID_MIGRATION_EVENT_ORCHESTRATE_QID_MIGRATION + u8 reserved48[6]; +}; + +/* cmdq_create_qp_batch (size:384b/48B) */ +struct cmdq_create_qp_batch { + u8 opcode; + #define CMDQ_CREATE_QP_BATCH_OPCODE_CREATE_QP_BATCH 0x93UL + #define CMDQ_CREATE_QP_BATCH_OPCODE_LAST CMDQ_CREATE_QP_BATCH_OPCODE_CREATE_QP_BATCH + u8 cmd_size; + __le16 flags; + __le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 resp_addr; + __le32 start_xid; + __le32 count; + __le32 per_qp_param_size; + __le32 reserved32; + __le64 qp_params_array; + __le64 reserved64; +}; + +/* creq_create_qp_batch_resp (size:128b/16B) */ +struct creq_create_qp_batch_resp { + u8 type; + #define CREQ_CREATE_QP_BATCH_RESP_TYPE_MASK 0x3fUL + #define CREQ_CREATE_QP_BATCH_RESP_TYPE_SFT 0 + #define CREQ_CREATE_QP_BATCH_RESP_TYPE_QP_EVENT 0x38UL + #define CREQ_CREATE_QP_BATCH_RESP_TYPE_LAST CREQ_CREATE_QP_BATCH_RESP_TYPE_QP_EVENT + u8 status; + __le16 cookie; + __le32 reserved32; + u8 v; + #define CREQ_CREATE_QP_BATCH_RESP_V 0x1UL + u8 event; + #define CREQ_CREATE_QP_BATCH_RESP_EVENT_CREATE_QP_BATCH 0x94UL + #define CREQ_CREATE_QP_BATCH_RESP_EVENT_LAST CREQ_CREATE_QP_BATCH_RESP_EVENT_CREATE_QP_BATCH + u8 reserved48[6]; +}; + +/* cmdq_destroy_qp_batch (size:256b/32B) */ +struct cmdq_destroy_qp_batch { + u8 opcode; + #define CMDQ_DESTROY_QP_BATCH_OPCODE_DESTROY_BATCH_QP 0x94UL + #define CMDQ_DESTROY_QP_BATCH_OPCODE_LAST CMDQ_DESTROY_QP_BATCH_OPCODE_DESTROY_BATCH_QP + u8 cmd_size; + __le16 flags; + __le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 
resp_addr; + __le32 start_xid; + __le32 count; + __le64 reserved64; +}; + +/* creq_destroy_batch_qp_resp (size:128b/16B) */ +struct creq_destroy_batch_qp_resp { + u8 type; + #define CREQ_DESTROY_BATCH_QP_RESP_TYPE_MASK 0x3fUL + #define CREQ_DESTROY_BATCH_QP_RESP_TYPE_SFT 0 + #define CREQ_DESTROY_BATCH_QP_RESP_TYPE_QP_EVENT 0x38UL + #define CREQ_DESTROY_BATCH_QP_RESP_TYPE_LAST CREQ_DESTROY_BATCH_QP_RESP_TYPE_QP_EVENT + u8 status; + __le16 cookie; + __le32 reserved32; + u8 v; + #define CREQ_DESTROY_BATCH_QP_RESP_V 0x1UL + u8 event; + #define CREQ_DESTROY_BATCH_QP_RESP_EVENT_DESTROY_BATCH_QPS 0x95UL + #define CREQ_DESTROY_BATCH_QP_RESP_EVENT_LAST CREQ_DESTROY_BATCH_QP_RESP_EVENT_DESTROY_BATCH_QPS + u8 reserved48[6]; +}; + +/* cmdq_allocate_roce_stats_ext_ctx (size:256b/32B) */ +struct cmdq_allocate_roce_stats_ext_ctx { + u8 opcode; + #define CMDQ_ALLOCATE_ROCE_STATS_EXT_CTX_OPCODE_ALLOCATE_ROCE_STATS_EXT_CTX 0x96UL + #define CMDQ_ALLOCATE_ROCE_STATS_EXT_CTX_OPCODE_LAST CMDQ_ALLOCATE_ROCE_STATS_EXT_CTX_OPCODE_ALLOCATE_ROCE_STATS_EXT_CTX + u8 cmd_size; + __le16 flags; + __le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 resp_addr; + __le64 stats_dma_addr; + __le32 update_period_ms; + __le16 steering_tag; + __le16 reserved16; +}; + +/* creq_allocate_roce_stats_ext_ctx_resp (size:128b/16B) */ +struct creq_allocate_roce_stats_ext_ctx_resp { + u8 type; + #define CREQ_ALLOCATE_ROCE_STATS_EXT_CTX_RESP_TYPE_MASK 0x3fUL + #define CREQ_ALLOCATE_ROCE_STATS_EXT_CTX_RESP_TYPE_SFT 0 + #define CREQ_ALLOCATE_ROCE_STATS_EXT_CTX_RESP_TYPE_QP_EVENT 0x38UL + #define CREQ_ALLOCATE_ROCE_STATS_EXT_CTX_RESP_TYPE_LAST CREQ_ALLOCATE_ROCE_STATS_EXT_CTX_RESP_TYPE_QP_EVENT + u8 status; + __le16 cookie; + __le32 roce_stats_ext_xid; + u8 v; + #define CREQ_ALLOCATE_ROCE_STATS_EXT_CTX_RESP_V 0x1UL + u8 event; + #define CREQ_ALLOCATE_ROCE_STATS_EXT_CTX_RESP_EVENT_ALLOCATE_ROCE_STATS_EXT_CTX 0x96UL + #define CREQ_ALLOCATE_ROCE_STATS_EXT_CTX_RESP_EVENT_LAST CREQ_ALLOCATE_ROCE_STATS_EXT_CTX_RESP_EVENT_ALLOCATE_ROCE_STATS_EXT_CTX + u8 reserved48[6]; +}; + +/* cmdq_deallocate_roce_stats_ext_ctx (size:256b/32B) */ +struct cmdq_deallocate_roce_stats_ext_ctx { + u8 opcode; + #define CMDQ_DEALLOCATE_ROCE_STATS_EXT_CTX_OPCODE_DEALLOCATE_ROCE_STATS_EXT_CTX 0x97UL + #define CMDQ_DEALLOCATE_ROCE_STATS_EXT_CTX_OPCODE_LAST CMDQ_DEALLOCATE_ROCE_STATS_EXT_CTX_OPCODE_DEALLOCATE_ROCE_STATS_EXT_CTX + u8 cmd_size; + __le16 flags; + __le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 resp_addr; + __le32 roce_stats_ext_xid; + __le32 reserved32; + __le64 reserved64; +}; + +/* creq_deallocate_roce_stats_ext_ctx_resp (size:128b/16B) */ +struct creq_deallocate_roce_stats_ext_ctx_resp { + u8 type; + #define CREQ_DEALLOCATE_ROCE_STATS_EXT_CTX_RESP_TYPE_MASK 0x3fUL + #define CREQ_DEALLOCATE_ROCE_STATS_EXT_CTX_RESP_TYPE_SFT 0 + #define CREQ_DEALLOCATE_ROCE_STATS_EXT_CTX_RESP_TYPE_QP_EVENT 0x38UL + #define CREQ_DEALLOCATE_ROCE_STATS_EXT_CTX_RESP_TYPE_LAST CREQ_DEALLOCATE_ROCE_STATS_EXT_CTX_RESP_TYPE_QP_EVENT + u8 status; + __le16 cookie; + __le32 roce_stats_ext_xid; + u8 v; + #define CREQ_DEALLOCATE_ROCE_STATS_EXT_CTX_RESP_V 0x1UL + u8 event; + #define CREQ_DEALLOCATE_ROCE_STATS_EXT_CTX_RESP_EVENT_DEALLOCATE_ROCE_STATS_EXT_CTX 0x97UL + #define CREQ_DEALLOCATE_ROCE_STATS_EXT_CTX_RESP_EVENT_LAST CREQ_DEALLOCATE_ROCE_STATS_EXT_CTX_RESP_EVENT_DEALLOCATE_ROCE_STATS_EXT_CTX + u8 reserved48[6]; +}; + +/* cmdq_query_roce_stats_ext_v2 (size:256b/32B) */ +struct cmdq_query_roce_stats_ext_v2 { + u8 opcode; + #define 
CMDQ_QUERY_ROCE_STATS_EXT_V2_OPCODE_QUERY_ROCE_STATS_EXT_V2 0x98UL + #define CMDQ_QUERY_ROCE_STATS_EXT_V2_OPCODE_LAST CMDQ_QUERY_ROCE_STATS_EXT_V2_OPCODE_QUERY_ROCE_STATS_EXT_V2 + u8 cmd_size; + __le16 flags; + __le16 cookie; + u8 resp_size; + u8 reserved8; + __le64 resp_addr; + __le32 roce_stats_ext_xid; + __le32 reserved32; + __le64 reserved64; +}; + +/* creq_query_roce_stats_ext_v2_resp (size:128b/16B) */ +struct creq_query_roce_stats_ext_v2_resp { + u8 type; + #define CREQ_QUERY_ROCE_STATS_EXT_V2_RESP_TYPE_MASK 0x3fUL + #define CREQ_QUERY_ROCE_STATS_EXT_V2_RESP_TYPE_SFT 0 + #define CREQ_QUERY_ROCE_STATS_EXT_V2_RESP_TYPE_QP_EVENT 0x38UL + #define CREQ_QUERY_ROCE_STATS_EXT_V2_RESP_TYPE_LAST CREQ_QUERY_ROCE_STATS_EXT_V2_RESP_TYPE_QP_EVENT + u8 status; + __le16 cookie; + __le32 size; + u8 v; + #define CREQ_QUERY_ROCE_STATS_EXT_V2_RESP_V 0x1UL + u8 event; + #define CREQ_QUERY_ROCE_STATS_EXT_V2_RESP_EVENT_QUERY_ROCE_STATS_EXT_V2 0x98UL + #define CREQ_QUERY_ROCE_STATS_EXT_V2_RESP_EVENT_LAST CREQ_QUERY_ROCE_STATS_EXT_V2_RESP_EVENT_QUERY_ROCE_STATS_EXT_V2 + u8 reserved48[6]; +}; + +/* creq_query_roce_stats_ext_v2_resp_sb (size:1920b/240B) */ +struct creq_query_roce_stats_ext_v2_resp_sb { + u8 opcode; + #define CREQ_QUERY_ROCE_STATS_EXT_V2_RESP_SB_OPCODE_QUERY_ROCE_STATS_EXT_V2 0x98UL + #define CREQ_QUERY_ROCE_STATS_EXT_V2_RESP_SB_OPCODE_LAST CREQ_QUERY_ROCE_STATS_EXT_V2_RESP_SB_OPCODE_QUERY_ROCE_STATS_EXT_V2 + u8 status; + __le16 cookie; + __le16 flags; + u8 resp_size; + u8 rsvd; + __le64 tx_atomic_req_pkts; + __le64 tx_read_req_pkts; + __le64 tx_read_res_pkts; + __le64 tx_write_req_pkts; + __le64 tx_rc_send_req_pkts; + __le64 tx_ud_send_req_pkts; + __le64 tx_cnp_pkts; + __le64 tx_roce_pkts; + __le64 tx_roce_bytes; + __le64 rx_out_of_buffer_pkts; + __le64 rx_out_of_sequence_pkts; + __le64 dup_req; + __le64 missing_resp; + __le64 seq_err_naks_rcvd; + __le64 rnr_naks_rcvd; + __le64 to_retransmits; + __le64 rx_atomic_req_pkts; + __le64 rx_read_req_pkts; + __le64 rx_read_res_pkts; + __le64 rx_write_req_pkts; + __le64 rx_rc_send_pkts; + __le64 rx_ud_send_pkts; + __le64 rx_dcn_payload_cut; + __le64 rx_ecn_marked_pkts; + __le64 rx_cnp_pkts; + __le64 rx_roce_pkts; + __le64 rx_roce_bytes; + __le64 rx_roce_good_pkts; + __le64 rx_roce_good_bytes; +}; + +/* creq_func_event (size:128b/16B) */ +struct creq_func_event { + u8 type; + #define CREQ_FUNC_EVENT_TYPE_MASK 0x3fUL + #define CREQ_FUNC_EVENT_TYPE_SFT 0 + #define CREQ_FUNC_EVENT_TYPE_FUNC_EVENT 0x3aUL + #define CREQ_FUNC_EVENT_TYPE_LAST CREQ_FUNC_EVENT_TYPE_FUNC_EVENT + u8 reserved56[7]; + u8 v; + #define CREQ_FUNC_EVENT_V 0x1UL + u8 event; + #define CREQ_FUNC_EVENT_EVENT_TX_WQE_ERROR 0x1UL + #define CREQ_FUNC_EVENT_EVENT_TX_DATA_ERROR 0x2UL + #define CREQ_FUNC_EVENT_EVENT_RX_WQE_ERROR 0x3UL + #define CREQ_FUNC_EVENT_EVENT_RX_DATA_ERROR 0x4UL + #define CREQ_FUNC_EVENT_EVENT_CQ_ERROR 0x5UL + #define CREQ_FUNC_EVENT_EVENT_TQM_ERROR 0x6UL + #define CREQ_FUNC_EVENT_EVENT_CFCQ_ERROR 0x7UL + #define CREQ_FUNC_EVENT_EVENT_CFCS_ERROR 0x8UL + #define CREQ_FUNC_EVENT_EVENT_CFCC_ERROR 0x9UL + #define CREQ_FUNC_EVENT_EVENT_CFCM_ERROR 0xaUL + #define CREQ_FUNC_EVENT_EVENT_TIM_ERROR 0xbUL + #define CREQ_FUNC_EVENT_EVENT_VF_COMM_REQUEST 0x80UL + #define CREQ_FUNC_EVENT_EVENT_RESOURCE_EXHAUSTED 0x81UL + #define CREQ_FUNC_EVENT_EVENT_LAST CREQ_FUNC_EVENT_EVENT_RESOURCE_EXHAUSTED + u8 reserved48[6]; +}; + +/* creq_qp_event (size:128b/16B) */ +struct creq_qp_event { + u8 type; + #define CREQ_QP_EVENT_TYPE_MASK 0x3fUL + #define CREQ_QP_EVENT_TYPE_SFT 0 + #define 
CREQ_QP_EVENT_TYPE_QP_EVENT 0x38UL + #define CREQ_QP_EVENT_TYPE_LAST CREQ_QP_EVENT_TYPE_QP_EVENT + u8 status; + #define CREQ_QP_EVENT_STATUS_SUCCESS 0x0UL + #define CREQ_QP_EVENT_STATUS_FAIL 0x1UL + #define CREQ_QP_EVENT_STATUS_RESOURCES 0x2UL + #define CREQ_QP_EVENT_STATUS_INVALID_CMD 0x3UL + #define CREQ_QP_EVENT_STATUS_NOT_IMPLEMENTED 0x4UL + #define CREQ_QP_EVENT_STATUS_INVALID_PARAMETER 0x5UL + #define CREQ_QP_EVENT_STATUS_HARDWARE_ERROR 0x6UL + #define CREQ_QP_EVENT_STATUS_INTERNAL_ERROR 0x7UL + #define CREQ_QP_EVENT_STATUS_LAST CREQ_QP_EVENT_STATUS_INTERNAL_ERROR + __le16 cookie; + __le32 reserved32; + u8 v; + #define CREQ_QP_EVENT_V 0x1UL + u8 event; + #define CREQ_QP_EVENT_EVENT_CREATE_QP 0x1UL + #define CREQ_QP_EVENT_EVENT_DESTROY_QP 0x2UL + #define CREQ_QP_EVENT_EVENT_MODIFY_QP 0x3UL + #define CREQ_QP_EVENT_EVENT_QUERY_QP 0x4UL + #define CREQ_QP_EVENT_EVENT_CREATE_SRQ 0x5UL + #define CREQ_QP_EVENT_EVENT_DESTROY_SRQ 0x6UL + #define CREQ_QP_EVENT_EVENT_QUERY_SRQ 0x8UL + #define CREQ_QP_EVENT_EVENT_CREATE_CQ 0x9UL + #define CREQ_QP_EVENT_EVENT_DESTROY_CQ 0xaUL + #define CREQ_QP_EVENT_EVENT_RESIZE_CQ 0xcUL + #define CREQ_QP_EVENT_EVENT_ALLOCATE_MRW 0xdUL + #define CREQ_QP_EVENT_EVENT_DEALLOCATE_KEY 0xeUL + #define CREQ_QP_EVENT_EVENT_REGISTER_MR 0xfUL + #define CREQ_QP_EVENT_EVENT_DEREGISTER_MR 0x10UL + #define CREQ_QP_EVENT_EVENT_ADD_GID 0x11UL + #define CREQ_QP_EVENT_EVENT_DELETE_GID 0x12UL + #define CREQ_QP_EVENT_EVENT_MODIFY_GID 0x17UL + #define CREQ_QP_EVENT_EVENT_QUERY_GID 0x18UL + #define CREQ_QP_EVENT_EVENT_CREATE_QP1 0x13UL + #define CREQ_QP_EVENT_EVENT_DESTROY_QP1 0x14UL + #define CREQ_QP_EVENT_EVENT_CREATE_AH 0x15UL + #define CREQ_QP_EVENT_EVENT_DESTROY_AH 0x16UL + #define CREQ_QP_EVENT_EVENT_INITIALIZE_FW 0x80UL + #define CREQ_QP_EVENT_EVENT_DEINITIALIZE_FW 0x81UL + #define CREQ_QP_EVENT_EVENT_STOP_FUNC 0x82UL + #define CREQ_QP_EVENT_EVENT_QUERY_FUNC 0x83UL + #define CREQ_QP_EVENT_EVENT_SET_FUNC_RESOURCES 0x84UL + #define CREQ_QP_EVENT_EVENT_READ_CONTEXT 0x85UL + #define CREQ_QP_EVENT_EVENT_MAP_TC_TO_COS 0x8aUL + #define CREQ_QP_EVENT_EVENT_QUERY_VERSION 0x8bUL + #define CREQ_QP_EVENT_EVENT_MODIFY_CC 0x8cUL + #define CREQ_QP_EVENT_EVENT_QUERY_CC 0x8dUL + #define CREQ_QP_EVENT_EVENT_QUERY_ROCE_STATS 0x8eUL + #define CREQ_QP_EVENT_EVENT_SET_LINK_AGGR_MODE 0x8fUL + #define CREQ_QP_EVENT_EVENT_QUERY_QP_EXTEND 0x91UL + #define CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION 0xc0UL + #define CREQ_QP_EVENT_EVENT_CQ_ERROR_NOTIFICATION 0xc1UL + #define CREQ_QP_EVENT_EVENT_LAST CREQ_QP_EVENT_EVENT_CQ_ERROR_NOTIFICATION + u8 reserved48[6]; +}; + +/* creq_qp_error_notification (size:128b/16B) */ +struct creq_qp_error_notification { + u8 type; + #define CREQ_QP_ERROR_NOTIFICATION_TYPE_MASK 0x3fUL + #define CREQ_QP_ERROR_NOTIFICATION_TYPE_SFT 0 + #define CREQ_QP_ERROR_NOTIFICATION_TYPE_QP_EVENT 0x38UL + #define CREQ_QP_ERROR_NOTIFICATION_TYPE_LAST CREQ_QP_ERROR_NOTIFICATION_TYPE_QP_EVENT + u8 status; + u8 req_slow_path_state; + u8 req_err_state_reason; + #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_NO_ERROR 0x0UL + #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_OPCODE_ERROR 0x1UL + #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_TIMEOUT_RETRY_LIMIT 0x2UL + #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RNR_TIMEOUT_RETRY_LIMIT 0x3UL + #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_NAK_ARRIVAL_1 0x4UL + #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_NAK_ARRIVAL_2 0x5UL + #define 
CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_NAK_ARRIVAL_3 0x6UL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_NAK_ARRIVAL_4 0x7UL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RX_MEMORY_ERROR 0x8UL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_TX_MEMORY_ERROR 0x9UL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_READ_RESP_LENGTH 0xaUL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_INVALID_READ_RESP 0xbUL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_ILLEGAL_BIND 0xcUL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_ILLEGAL_FAST_REG 0xdUL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_ILLEGAL_INVALIDATE 0xeUL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_CMP_ERROR 0xfUL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RETRAN_LOCAL_ERROR 0x10UL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_WQE_FORMAT_ERROR 0x11UL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_ORRQ_FORMAT_ERROR 0x12UL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_INVALID_AVID_ERROR 0x13UL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_AV_DOMAIN_ERROR 0x14UL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_CQ_LOAD_ERROR 0x15UL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_SERV_TYPE_ERROR 0x16UL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_INVALID_OP_ERROR 0x17UL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_TX_PCI_ERROR 0x18UL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RX_PCI_ERROR 0x19UL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_PROD_WQE_MSMTCH_ERROR 0x1aUL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_PSN_RANGE_CHECK_ERROR 0x1bUL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RETX_SETUP_ERROR 0x1cUL
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_LAST CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RETX_SETUP_ERROR
+ __le32 xid;
+ u8 v;
+ #define CREQ_QP_ERROR_NOTIFICATION_V 0x1UL
+ u8 event;
+ #define CREQ_QP_ERROR_NOTIFICATION_EVENT_QP_ERROR_NOTIFICATION 0xc0UL
+ #define CREQ_QP_ERROR_NOTIFICATION_EVENT_LAST CREQ_QP_ERROR_NOTIFICATION_EVENT_QP_ERROR_NOTIFICATION
+ u8 res_slow_path_state;
+ u8 res_err_state_reason;
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_NO_ERROR 0x0UL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_EXCEED_MAX 0x1UL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_PAYLOAD_LENGTH_MISMATCH 0x2UL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_EXCEEDS_WQE 0x3UL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_OPCODE_ERROR 0x4UL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_PSN_SEQ_ERROR_RETRY_LIMIT 0x5UL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_INVALID_R_KEY 0x6UL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_DOMAIN_ERROR 0x7UL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_NO_PERMISSION 0x8UL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_RANGE_ERROR 0x9UL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_INVALID_R_KEY 0xaUL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_DOMAIN_ERROR 0xbUL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_NO_PERMISSION 0xcUL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_RANGE_ERROR 0xdUL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_IRRQ_OFLOW 0xeUL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_UNSUPPORTED_OPCODE 0xfUL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_UNALIGN_ATOMIC 0x10UL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_REM_INVALIDATE 0x11UL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_MEMORY_ERROR 0x12UL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_SRQ_ERROR 0x13UL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_CMP_ERROR 0x14UL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_INVALID_DUP_RKEY 0x15UL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_WQE_FORMAT_ERROR 0x16UL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_IRRQ_FORMAT_ERROR 0x17UL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_CQ_LOAD_ERROR 0x18UL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_SRQ_LOAD_ERROR 0x19UL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_PCI_ERROR 0x1bUL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_PCI_ERROR 0x1cUL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_PSN_NOT_FOUND 0x1dUL
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_LAST CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_PSN_NOT_FOUND
+ __le16 sq_cons_idx;
+ __le16 rq_cons_idx;
+};
+
+/* creq_cq_error_notification (size:128b/16B) */
+struct creq_cq_error_notification {
+ u8 type;
+ #define CREQ_CQ_ERROR_NOTIFICATION_TYPE_MASK 0x3fUL
+ #define CREQ_CQ_ERROR_NOTIFICATION_TYPE_SFT 0
+ #define CREQ_CQ_ERROR_NOTIFICATION_TYPE_CQ_EVENT 0x38UL
+ #define CREQ_CQ_ERROR_NOTIFICATION_TYPE_LAST CREQ_CQ_ERROR_NOTIFICATION_TYPE_CQ_EVENT
+ u8 status;
+ u8 cq_err_reason;
+ #define CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_REQ_CQ_INVALID_ERROR 0x1UL
+ #define CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_REQ_CQ_OVERFLOW_ERROR 0x2UL
+ #define CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_REQ_CQ_LOAD_ERROR 0x3UL
+ #define CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_RES_CQ_INVALID_ERROR 0x4UL
+ #define CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_RES_CQ_OVERFLOW_ERROR 0x5UL
+ #define CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_RES_CQ_LOAD_ERROR 0x6UL
+ #define CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_LAST CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_RES_CQ_LOAD_ERROR
+ u8 reserved8;
+ __le32 xid;
+ u8 v;
+ #define CREQ_CQ_ERROR_NOTIFICATION_V 0x1UL
+ u8 event;
+ #define CREQ_CQ_ERROR_NOTIFICATION_EVENT_CQ_ERROR_NOTIFICATION 0xc1UL
+ #define CREQ_CQ_ERROR_NOTIFICATION_EVENT_LAST CREQ_CQ_ERROR_NOTIFICATION_EVENT_CQ_ERROR_NOTIFICATION
+ u8 reserved48[6];
+};
+
+/* sq_base (size:64b/8B) */
+struct sq_base {
+ u8 wqe_type;
+ #define SQ_BASE_WQE_TYPE_SEND 0x0UL
+ #define SQ_BASE_WQE_TYPE_SEND_W_IMMEAD 0x1UL
+ #define SQ_BASE_WQE_TYPE_SEND_W_INVALID 0x2UL
+ #define SQ_BASE_WQE_TYPE_WRITE_WQE 0x4UL
+ #define SQ_BASE_WQE_TYPE_WRITE_W_IMMEAD 0x5UL
+ #define SQ_BASE_WQE_TYPE_READ_WQE 0x6UL
+ #define SQ_BASE_WQE_TYPE_ATOMIC_CS 0x8UL
+ #define SQ_BASE_WQE_TYPE_ATOMIC_FA 0xbUL
+ #define SQ_BASE_WQE_TYPE_LOCAL_INVALID 0xcUL
+ #define SQ_BASE_WQE_TYPE_FR_PMR 0xdUL
+ #define SQ_BASE_WQE_TYPE_BIND 0xeUL
+ #define SQ_BASE_WQE_TYPE_FR_PPMR 0xfUL
+ #define SQ_BASE_WQE_TYPE_SEND_V3 0x10UL
+ #define SQ_BASE_WQE_TYPE_SEND_W_IMMED_V3 0x11UL
+ #define SQ_BASE_WQE_TYPE_SEND_W_INVALID_V3 0x12UL
+ #define SQ_BASE_WQE_TYPE_UDSEND_V3 0x13UL
+ #define SQ_BASE_WQE_TYPE_UDSEND_W_IMMED_V3 0x14UL
+ #define SQ_BASE_WQE_TYPE_WRITE_WQE_V3 0x15UL
+
#define SQ_BASE_WQE_TYPE_WRITE_W_IMMED_V3 0x16UL + #define SQ_BASE_WQE_TYPE_READ_WQE_V3 0x17UL + #define SQ_BASE_WQE_TYPE_ATOMIC_CS_V3 0x18UL + #define SQ_BASE_WQE_TYPE_ATOMIC_FA_V3 0x19UL + #define SQ_BASE_WQE_TYPE_LOCAL_INVALID_V3 0x1aUL + #define SQ_BASE_WQE_TYPE_FR_PMR_V3 0x1bUL + #define SQ_BASE_WQE_TYPE_BIND_V3 0x1cUL + #define SQ_BASE_WQE_TYPE_RAWQP1SEND_V3 0x1dUL + #define SQ_BASE_WQE_TYPE_CHANGE_UDPSRCPORT_V3 0x1eUL + #define SQ_BASE_WQE_TYPE_LAST SQ_BASE_WQE_TYPE_CHANGE_UDPSRCPORT_V3 + u8 unused_0[7]; +}; + +/* sq_sge (size:128b/16B) */ +struct sq_sge { + __le64 va_or_pa; + __le32 l_key; + __le32 size; +}; + +/* sq_psn_search (size:64b/8B) */ +struct sq_psn_search { + __le32 opcode_start_psn; + #define SQ_PSN_SEARCH_START_PSN_MASK 0xffffffUL + #define SQ_PSN_SEARCH_START_PSN_SFT 0 + #define SQ_PSN_SEARCH_OPCODE_MASK 0xff000000UL + #define SQ_PSN_SEARCH_OPCODE_SFT 24 + __le32 flags_next_psn; + #define SQ_PSN_SEARCH_NEXT_PSN_MASK 0xffffffUL + #define SQ_PSN_SEARCH_NEXT_PSN_SFT 0 + #define SQ_PSN_SEARCH_FLAGS_MASK 0xff000000UL + #define SQ_PSN_SEARCH_FLAGS_SFT 24 +}; + +/* sq_psn_search_ext (size:128b/16B) */ +struct sq_psn_search_ext { + __le32 opcode_start_psn; + #define SQ_PSN_SEARCH_EXT_START_PSN_MASK 0xffffffUL + #define SQ_PSN_SEARCH_EXT_START_PSN_SFT 0 + #define SQ_PSN_SEARCH_EXT_OPCODE_MASK 0xff000000UL + #define SQ_PSN_SEARCH_EXT_OPCODE_SFT 24 + __le32 flags_next_psn; + #define SQ_PSN_SEARCH_EXT_NEXT_PSN_MASK 0xffffffUL + #define SQ_PSN_SEARCH_EXT_NEXT_PSN_SFT 0 + #define SQ_PSN_SEARCH_EXT_FLAGS_MASK 0xff000000UL + #define SQ_PSN_SEARCH_EXT_FLAGS_SFT 24 + __le16 start_slot_idx; + __le16 reserved16; + __le32 reserved32; +}; + +/* sq_msn_search (size:64b/8B) */ +struct sq_msn_search { + __le64 start_idx_next_psn_start_psn; + #define SQ_MSN_SEARCH_START_PSN_MASK 0xffffffUL + #define SQ_MSN_SEARCH_START_PSN_SFT 0 + #define SQ_MSN_SEARCH_NEXT_PSN_MASK 0xffffff000000ULL + #define SQ_MSN_SEARCH_NEXT_PSN_SFT 24 + #define SQ_MSN_SEARCH_START_IDX_MASK 0xffff000000000000ULL + #define SQ_MSN_SEARCH_START_IDX_SFT 48 +}; + +/* sq_send (size:1024b/128B) */ +struct sq_send { + u8 wqe_type; + #define SQ_SEND_WQE_TYPE_SEND 0x0UL + #define SQ_SEND_WQE_TYPE_SEND_W_IMMEAD 0x1UL + #define SQ_SEND_WQE_TYPE_SEND_W_INVALID 0x2UL + #define SQ_SEND_WQE_TYPE_LAST SQ_SEND_WQE_TYPE_SEND_W_INVALID + u8 flags; + #define SQ_SEND_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_MASK 0xffUL + #define SQ_SEND_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_SFT 0 + #define SQ_SEND_FLAGS_SIGNAL_COMP 0x1UL + #define SQ_SEND_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL + #define SQ_SEND_FLAGS_UC_FENCE 0x4UL + #define SQ_SEND_FLAGS_SE 0x8UL + #define SQ_SEND_FLAGS_INLINE 0x10UL + #define SQ_SEND_FLAGS_WQE_TS_EN 0x20UL + #define SQ_SEND_FLAGS_DEBUG_TRACE 0x40UL + u8 wqe_size; + u8 reserved8_1; + __le32 inv_key_or_imm_data; + __le32 length; + __le32 q_key; + __le32 dst_qp; + #define SQ_SEND_DST_QP_MASK 0xffffffUL + #define SQ_SEND_DST_QP_SFT 0 + __le32 avid; + #define SQ_SEND_AVID_MASK 0xfffffUL + #define SQ_SEND_AVID_SFT 0 + __le32 reserved32; + __le32 timestamp; + #define SQ_SEND_TIMESTAMP_MASK 0xffffffUL + #define SQ_SEND_TIMESTAMP_SFT 0 + __le32 data[24]; +}; + +/* sq_send_hdr (size:256b/32B) */ +struct sq_send_hdr { + u8 wqe_type; + #define SQ_SEND_HDR_WQE_TYPE_SEND 0x0UL + #define SQ_SEND_HDR_WQE_TYPE_SEND_W_IMMEAD 0x1UL + #define SQ_SEND_HDR_WQE_TYPE_SEND_W_INVALID 0x2UL + #define SQ_SEND_HDR_WQE_TYPE_LAST SQ_SEND_HDR_WQE_TYPE_SEND_W_INVALID + u8 flags; + #define 
SQ_SEND_HDR_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_MASK 0xffUL + #define SQ_SEND_HDR_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_SFT 0 + #define SQ_SEND_HDR_FLAGS_SIGNAL_COMP 0x1UL + #define SQ_SEND_HDR_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL + #define SQ_SEND_HDR_FLAGS_UC_FENCE 0x4UL + #define SQ_SEND_HDR_FLAGS_SE 0x8UL + #define SQ_SEND_HDR_FLAGS_INLINE 0x10UL + #define SQ_SEND_HDR_FLAGS_WQE_TS_EN 0x20UL + #define SQ_SEND_HDR_FLAGS_DEBUG_TRACE 0x40UL + u8 wqe_size; + u8 reserved8_1; + __le32 inv_key_or_imm_data; + __le32 length; + __le32 q_key; + __le32 dst_qp; + #define SQ_SEND_HDR_DST_QP_MASK 0xffffffUL + #define SQ_SEND_HDR_DST_QP_SFT 0 + __le32 avid; + #define SQ_SEND_HDR_AVID_MASK 0xfffffUL + #define SQ_SEND_HDR_AVID_SFT 0 + __le32 reserved32; + __le32 timestamp; + #define SQ_SEND_HDR_TIMESTAMP_MASK 0xffffffUL + #define SQ_SEND_HDR_TIMESTAMP_SFT 0 +}; + +/* sq_send_raweth_qp1 (size:1024b/128B) */ +struct sq_send_raweth_qp1 { + u8 wqe_type; + #define SQ_SEND_RAWETH_QP1_WQE_TYPE_SEND 0x0UL + #define SQ_SEND_RAWETH_QP1_WQE_TYPE_LAST SQ_SEND_RAWETH_QP1_WQE_TYPE_SEND + u8 flags; + #define SQ_SEND_RAWETH_QP1_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_MASK 0xffUL + #define SQ_SEND_RAWETH_QP1_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_SFT 0 + #define SQ_SEND_RAWETH_QP1_FLAGS_SIGNAL_COMP 0x1UL + #define SQ_SEND_RAWETH_QP1_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL + #define SQ_SEND_RAWETH_QP1_FLAGS_UC_FENCE 0x4UL + #define SQ_SEND_RAWETH_QP1_FLAGS_SE 0x8UL + #define SQ_SEND_RAWETH_QP1_FLAGS_INLINE 0x10UL + #define SQ_SEND_RAWETH_QP1_FLAGS_WQE_TS_EN 0x20UL + #define SQ_SEND_RAWETH_QP1_FLAGS_DEBUG_TRACE 0x40UL + u8 wqe_size; + u8 reserved8; + __le16 lflags; + #define SQ_SEND_RAWETH_QP1_LFLAGS_TCP_UDP_CHKSUM 0x1UL + #define SQ_SEND_RAWETH_QP1_LFLAGS_IP_CHKSUM 0x2UL + #define SQ_SEND_RAWETH_QP1_LFLAGS_NOCRC 0x4UL + #define SQ_SEND_RAWETH_QP1_LFLAGS_STAMP 0x8UL + #define SQ_SEND_RAWETH_QP1_LFLAGS_T_IP_CHKSUM 0x10UL + #define SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC 0x100UL + #define SQ_SEND_RAWETH_QP1_LFLAGS_FCOE_CRC 0x200UL + __le16 cfa_action; + __le32 length; + __le32 reserved32_1; + __le32 cfa_meta; + #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_MASK 0xfffUL + #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_SFT 0 + #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_DE 0x1000UL + #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_PRI_MASK 0xe000UL + #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_PRI_SFT 13 + #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_TPID_MASK 0x70000UL + #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_TPID_SFT 16 + #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_TPID_TPID88A8 (0x0UL << 16) + #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_TPID_TPID8100 (0x1UL << 16) + #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_TPID_TPID9100 (0x2UL << 16) + #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_TPID_TPID9200 (0x3UL << 16) + #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_TPID_TPID9300 (0x4UL << 16) + #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_TPID_TPIDCFG (0x5UL << 16) + #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_TPID_LAST SQ_SEND_RAWETH_QP1_CFA_META_VLAN_TPID_TPIDCFG + #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_RESERVED_MASK 0xff80000UL + #define SQ_SEND_RAWETH_QP1_CFA_META_VLAN_RESERVED_SFT 19 + #define SQ_SEND_RAWETH_QP1_CFA_META_KEY_MASK 0xf0000000UL + #define SQ_SEND_RAWETH_QP1_CFA_META_KEY_SFT 28 + #define SQ_SEND_RAWETH_QP1_CFA_META_KEY_NONE (0x0UL << 28) + #define SQ_SEND_RAWETH_QP1_CFA_META_KEY_VLAN_TAG (0x1UL << 28) + #define SQ_SEND_RAWETH_QP1_CFA_META_KEY_LAST SQ_SEND_RAWETH_QP1_CFA_META_KEY_VLAN_TAG + __le32 
reserved32_2; + __le32 reserved32_3; + __le32 timestamp; + #define SQ_SEND_RAWETH_QP1_TIMESTAMP_MASK 0xffffffUL + #define SQ_SEND_RAWETH_QP1_TIMESTAMP_SFT 0 + __le32 data[24]; +}; + +/* sq_send_raweth_qp1_hdr (size:256b/32B) */ +struct sq_send_raweth_qp1_hdr { + u8 wqe_type; + #define SQ_SEND_RAWETH_QP1_HDR_WQE_TYPE_SEND 0x0UL + #define SQ_SEND_RAWETH_QP1_HDR_WQE_TYPE_LAST SQ_SEND_RAWETH_QP1_HDR_WQE_TYPE_SEND + u8 flags; + #define SQ_SEND_RAWETH_QP1_HDR_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_MASK 0xffUL + #define SQ_SEND_RAWETH_QP1_HDR_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_SFT 0 + #define SQ_SEND_RAWETH_QP1_HDR_FLAGS_SIGNAL_COMP 0x1UL + #define SQ_SEND_RAWETH_QP1_HDR_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL + #define SQ_SEND_RAWETH_QP1_HDR_FLAGS_UC_FENCE 0x4UL + #define SQ_SEND_RAWETH_QP1_HDR_FLAGS_SE 0x8UL + #define SQ_SEND_RAWETH_QP1_HDR_FLAGS_INLINE 0x10UL + #define SQ_SEND_RAWETH_QP1_HDR_FLAGS_WQE_TS_EN 0x20UL + #define SQ_SEND_RAWETH_QP1_HDR_FLAGS_DEBUG_TRACE 0x40UL + u8 wqe_size; + u8 reserved8; + __le16 lflags; + #define SQ_SEND_RAWETH_QP1_HDR_LFLAGS_TCP_UDP_CHKSUM 0x1UL + #define SQ_SEND_RAWETH_QP1_HDR_LFLAGS_IP_CHKSUM 0x2UL + #define SQ_SEND_RAWETH_QP1_HDR_LFLAGS_NOCRC 0x4UL + #define SQ_SEND_RAWETH_QP1_HDR_LFLAGS_STAMP 0x8UL + #define SQ_SEND_RAWETH_QP1_HDR_LFLAGS_T_IP_CHKSUM 0x10UL + #define SQ_SEND_RAWETH_QP1_HDR_LFLAGS_ROCE_CRC 0x100UL + #define SQ_SEND_RAWETH_QP1_HDR_LFLAGS_FCOE_CRC 0x200UL + __le16 cfa_action; + __le32 length; + __le32 reserved32_1; + __le32 cfa_meta; + #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_VID_MASK 0xfffUL + #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_VID_SFT 0 + #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_DE 0x1000UL + #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_PRI_MASK 0xe000UL + #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_PRI_SFT 13 + #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_TPID_MASK 0x70000UL + #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_TPID_SFT 16 + #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_TPID_TPID88A8 (0x0UL << 16) + #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_TPID_TPID8100 (0x1UL << 16) + #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_TPID_TPID9100 (0x2UL << 16) + #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_TPID_TPID9200 (0x3UL << 16) + #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_TPID_TPID9300 (0x4UL << 16) + #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_TPID_TPIDCFG (0x5UL << 16) + #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_TPID_LAST SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_TPID_TPIDCFG + #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_RESERVED_MASK 0xff80000UL + #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_VLAN_RESERVED_SFT 19 + #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_KEY_MASK 0xf0000000UL + #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_KEY_SFT 28 + #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_KEY_NONE (0x0UL << 28) + #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_KEY_VLAN_TAG (0x1UL << 28) + #define SQ_SEND_RAWETH_QP1_HDR_CFA_META_KEY_LAST SQ_SEND_RAWETH_QP1_HDR_CFA_META_KEY_VLAN_TAG + __le32 reserved32_2; + __le32 reserved32_3; + __le32 timestamp; + #define SQ_SEND_RAWETH_QP1_HDR_TIMESTAMP_MASK 0xffffffUL + #define SQ_SEND_RAWETH_QP1_HDR_TIMESTAMP_SFT 0 +}; + +/* sq_rdma (size:1024b/128B) */ +struct sq_rdma { + u8 wqe_type; + #define SQ_RDMA_WQE_TYPE_WRITE_WQE 0x4UL + #define SQ_RDMA_WQE_TYPE_WRITE_W_IMMEAD 0x5UL + #define SQ_RDMA_WQE_TYPE_READ_WQE 0x6UL + #define SQ_RDMA_WQE_TYPE_LAST SQ_RDMA_WQE_TYPE_READ_WQE + u8 flags; + #define SQ_RDMA_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_MASK 0xffUL + #define 
SQ_RDMA_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_SFT 0 + #define SQ_RDMA_FLAGS_SIGNAL_COMP 0x1UL + #define SQ_RDMA_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL + #define SQ_RDMA_FLAGS_UC_FENCE 0x4UL + #define SQ_RDMA_FLAGS_SE 0x8UL + #define SQ_RDMA_FLAGS_INLINE 0x10UL + #define SQ_RDMA_FLAGS_WQE_TS_EN 0x20UL + #define SQ_RDMA_FLAGS_DEBUG_TRACE 0x40UL + u8 wqe_size; + u8 reserved8; + __le32 imm_data; + __le32 length; + __le32 reserved32_1; + __le64 remote_va; + __le32 remote_key; + __le32 timestamp; + #define SQ_RDMA_TIMESTAMP_MASK 0xffffffUL + #define SQ_RDMA_TIMESTAMP_SFT 0 + __le32 data[24]; +}; + +/* sq_rdma_hdr (size:256b/32B) */ +struct sq_rdma_hdr { + u8 wqe_type; + #define SQ_RDMA_HDR_WQE_TYPE_WRITE_WQE 0x4UL + #define SQ_RDMA_HDR_WQE_TYPE_WRITE_W_IMMEAD 0x5UL + #define SQ_RDMA_HDR_WQE_TYPE_READ_WQE 0x6UL + #define SQ_RDMA_HDR_WQE_TYPE_LAST SQ_RDMA_HDR_WQE_TYPE_READ_WQE + u8 flags; + #define SQ_RDMA_HDR_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_MASK 0xffUL + #define SQ_RDMA_HDR_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_SFT 0 + #define SQ_RDMA_HDR_FLAGS_SIGNAL_COMP 0x1UL + #define SQ_RDMA_HDR_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL + #define SQ_RDMA_HDR_FLAGS_UC_FENCE 0x4UL + #define SQ_RDMA_HDR_FLAGS_SE 0x8UL + #define SQ_RDMA_HDR_FLAGS_INLINE 0x10UL + #define SQ_RDMA_HDR_FLAGS_WQE_TS_EN 0x20UL + #define SQ_RDMA_HDR_FLAGS_DEBUG_TRACE 0x40UL + u8 wqe_size; + u8 reserved8; + __le32 imm_data; + __le32 length; + __le32 reserved32_1; + __le64 remote_va; + __le32 remote_key; + __le32 timestamp; + #define SQ_RDMA_HDR_TIMESTAMP_MASK 0xffffffUL + #define SQ_RDMA_HDR_TIMESTAMP_SFT 0 +}; + +/* sq_atomic (size:1024b/128B) */ +struct sq_atomic { + u8 wqe_type; + #define SQ_ATOMIC_WQE_TYPE_ATOMIC_CS 0x8UL + #define SQ_ATOMIC_WQE_TYPE_ATOMIC_FA 0xbUL + #define SQ_ATOMIC_WQE_TYPE_LAST SQ_ATOMIC_WQE_TYPE_ATOMIC_FA + u8 flags; + #define SQ_ATOMIC_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_MASK 0xffUL + #define SQ_ATOMIC_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_SFT 0 + #define SQ_ATOMIC_FLAGS_SIGNAL_COMP 0x1UL + #define SQ_ATOMIC_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL + #define SQ_ATOMIC_FLAGS_UC_FENCE 0x4UL + #define SQ_ATOMIC_FLAGS_SE 0x8UL + #define SQ_ATOMIC_FLAGS_INLINE 0x10UL + #define SQ_ATOMIC_FLAGS_WQE_TS_EN 0x20UL + #define SQ_ATOMIC_FLAGS_DEBUG_TRACE 0x40UL + __le16 reserved16; + __le32 remote_key; + __le64 remote_va; + __le64 swap_data; + __le64 cmp_data; + __le32 data[24]; +}; + +/* sq_atomic_hdr (size:256b/32B) */ +struct sq_atomic_hdr { + u8 wqe_type; + #define SQ_ATOMIC_HDR_WQE_TYPE_ATOMIC_CS 0x8UL + #define SQ_ATOMIC_HDR_WQE_TYPE_ATOMIC_FA 0xbUL + #define SQ_ATOMIC_HDR_WQE_TYPE_LAST SQ_ATOMIC_HDR_WQE_TYPE_ATOMIC_FA + u8 flags; + #define SQ_ATOMIC_HDR_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_MASK 0xffUL + #define SQ_ATOMIC_HDR_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_SFT 0 + #define SQ_ATOMIC_HDR_FLAGS_SIGNAL_COMP 0x1UL + #define SQ_ATOMIC_HDR_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL + #define SQ_ATOMIC_HDR_FLAGS_UC_FENCE 0x4UL + #define SQ_ATOMIC_HDR_FLAGS_SE 0x8UL + #define SQ_ATOMIC_HDR_FLAGS_INLINE 0x10UL + #define SQ_ATOMIC_HDR_FLAGS_WQE_TS_EN 0x20UL + #define SQ_ATOMIC_HDR_FLAGS_DEBUG_TRACE 0x40UL + __le16 reserved16; + __le32 remote_key; + __le64 remote_va; + __le64 swap_data; + __le64 cmp_data; +}; + +/* sq_localinvalidate (size:1024b/128B) */ +struct sq_localinvalidate { + u8 wqe_type; + #define SQ_LOCALINVALIDATE_WQE_TYPE_LOCAL_INVALID 0xcUL + #define SQ_LOCALINVALIDATE_WQE_TYPE_LAST 
SQ_LOCALINVALIDATE_WQE_TYPE_LOCAL_INVALID + u8 flags; + #define SQ_LOCALINVALIDATE_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_MASK 0xffUL + #define SQ_LOCALINVALIDATE_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_SFT 0 + #define SQ_LOCALINVALIDATE_FLAGS_SIGNAL_COMP 0x1UL + #define SQ_LOCALINVALIDATE_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL + #define SQ_LOCALINVALIDATE_FLAGS_UC_FENCE 0x4UL + #define SQ_LOCALINVALIDATE_FLAGS_SE 0x8UL + #define SQ_LOCALINVALIDATE_FLAGS_INLINE 0x10UL + #define SQ_LOCALINVALIDATE_FLAGS_WQE_TS_EN 0x20UL + #define SQ_LOCALINVALIDATE_FLAGS_DEBUG_TRACE 0x40UL + __le16 reserved16; + __le32 inv_l_key; + __le64 reserved64; + u8 reserved128[16]; + __le32 data[24]; +}; + +/* sq_localinvalidate_hdr (size:256b/32B) */ +struct sq_localinvalidate_hdr { + u8 wqe_type; + #define SQ_LOCALINVALIDATE_HDR_WQE_TYPE_LOCAL_INVALID 0xcUL + #define SQ_LOCALINVALIDATE_HDR_WQE_TYPE_LAST SQ_LOCALINVALIDATE_HDR_WQE_TYPE_LOCAL_INVALID + u8 flags; + #define SQ_LOCALINVALIDATE_HDR_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_MASK 0xffUL + #define SQ_LOCALINVALIDATE_HDR_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_SFT 0 + #define SQ_LOCALINVALIDATE_HDR_FLAGS_SIGNAL_COMP 0x1UL + #define SQ_LOCALINVALIDATE_HDR_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL + #define SQ_LOCALINVALIDATE_HDR_FLAGS_UC_FENCE 0x4UL + #define SQ_LOCALINVALIDATE_HDR_FLAGS_SE 0x8UL + #define SQ_LOCALINVALIDATE_HDR_FLAGS_INLINE 0x10UL + #define SQ_LOCALINVALIDATE_HDR_FLAGS_WQE_TS_EN 0x20UL + #define SQ_LOCALINVALIDATE_HDR_FLAGS_DEBUG_TRACE 0x40UL + __le16 reserved16; + __le32 inv_l_key; + __le64 reserved64; + u8 reserved128[16]; +}; + +/* sq_fr_pmr (size:1024b/128B) */ +struct sq_fr_pmr { + u8 wqe_type; + #define SQ_FR_PMR_WQE_TYPE_FR_PMR 0xdUL + #define SQ_FR_PMR_WQE_TYPE_LAST SQ_FR_PMR_WQE_TYPE_FR_PMR + u8 flags; + #define SQ_FR_PMR_FLAGS_SIGNAL_COMP 0x1UL + #define SQ_FR_PMR_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL + #define SQ_FR_PMR_FLAGS_UC_FENCE 0x4UL + #define SQ_FR_PMR_FLAGS_SE 0x8UL + #define SQ_FR_PMR_FLAGS_INLINE 0x10UL + #define SQ_FR_PMR_FLAGS_WQE_TS_EN 0x20UL + #define SQ_FR_PMR_FLAGS_DEBUG_TRACE 0x40UL + u8 access_cntl; + #define SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE 0x1UL + #define SQ_FR_PMR_ACCESS_CNTL_REMOTE_READ 0x2UL + #define SQ_FR_PMR_ACCESS_CNTL_REMOTE_WRITE 0x4UL + #define SQ_FR_PMR_ACCESS_CNTL_REMOTE_ATOMIC 0x8UL + #define SQ_FR_PMR_ACCESS_CNTL_WINDOW_BIND 0x10UL + u8 zero_based_page_size_log; + #define SQ_FR_PMR_PAGE_SIZE_LOG_MASK 0x1fUL + #define SQ_FR_PMR_PAGE_SIZE_LOG_SFT 0 + #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_4K 0x0UL + #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_8K 0x1UL + #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_16K 0x2UL + #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_32K 0x3UL + #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_64K 0x4UL + #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_128K 0x5UL + #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_256K 0x6UL + #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_512K 0x7UL + #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_1M 0x8UL + #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_2M 0x9UL + #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_4M 0xaUL + #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_8M 0xbUL + #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_16M 0xcUL + #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_32M 0xdUL + #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_64M 0xeUL + #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_128M 0xfUL + #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_256M 0x10UL + #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_512M 0x11UL + #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_1G 0x12UL + #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_2G 0x13UL + #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_4G 0x14UL + 
#define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_8G 0x15UL + #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_16G 0x16UL + #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_32G 0x17UL + #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_64G 0x18UL + #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_128G 0x19UL + #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_256G 0x1aUL + #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_512G 0x1bUL + #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_1T 0x1cUL + #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_2T 0x1dUL + #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_4T 0x1eUL + #define SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_8T 0x1fUL + #define SQ_FR_PMR_PAGE_SIZE_LOG_LAST SQ_FR_PMR_PAGE_SIZE_LOG_PGSZ_8T + #define SQ_FR_PMR_ZERO_BASED 0x20UL + __le32 l_key; + u8 length[5]; + u8 reserved8_1; + u8 reserved8_2; + u8 numlevels_pbl_page_size_log; + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_MASK 0x1fUL + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_SFT 0 + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_4K 0x0UL + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_8K 0x1UL + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_16K 0x2UL + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_32K 0x3UL + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_64K 0x4UL + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_128K 0x5UL + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_256K 0x6UL + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_512K 0x7UL + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_1M 0x8UL + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_2M 0x9UL + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_4M 0xaUL + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_8M 0xbUL + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_16M 0xcUL + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_32M 0xdUL + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_64M 0xeUL + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_128M 0xfUL + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_256M 0x10UL + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_512M 0x11UL + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_1G 0x12UL + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_2G 0x13UL + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_4G 0x14UL + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_8G 0x15UL + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_16G 0x16UL + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_32G 0x17UL + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_64G 0x18UL + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_128G 0x19UL + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_256G 0x1aUL + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_512G 0x1bUL + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_1T 0x1cUL + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_2T 0x1dUL + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_4T 0x1eUL + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_8T 0x1fUL + #define SQ_FR_PMR_PBL_PAGE_SIZE_LOG_LAST SQ_FR_PMR_PBL_PAGE_SIZE_LOG_PGSZ_8T + #define SQ_FR_PMR_NUMLEVELS_MASK 0xc0UL + #define SQ_FR_PMR_NUMLEVELS_SFT 6 + #define SQ_FR_PMR_NUMLEVELS_PHYSICAL (0x0UL << 6) + #define SQ_FR_PMR_NUMLEVELS_LAYER1 (0x1UL << 6) + #define SQ_FR_PMR_NUMLEVELS_LAYER2 (0x2UL << 6) + #define SQ_FR_PMR_NUMLEVELS_LAST SQ_FR_PMR_NUMLEVELS_LAYER2 + __le64 pblptr; + __le64 va; + __le32 data[24]; +}; + +/* sq_fr_pmr_hdr (size:256b/32B) */ +struct sq_fr_pmr_hdr { + u8 wqe_type; + #define SQ_FR_PMR_HDR_WQE_TYPE_FR_PMR 0xdUL + #define SQ_FR_PMR_HDR_WQE_TYPE_LAST SQ_FR_PMR_HDR_WQE_TYPE_FR_PMR + u8 flags; + #define SQ_FR_PMR_HDR_FLAGS_SIGNAL_COMP 0x1UL + #define SQ_FR_PMR_HDR_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL + #define SQ_FR_PMR_HDR_FLAGS_UC_FENCE 0x4UL + #define SQ_FR_PMR_HDR_FLAGS_SE 0x8UL + #define SQ_FR_PMR_HDR_FLAGS_INLINE 0x10UL + #define SQ_FR_PMR_HDR_FLAGS_WQE_TS_EN 0x20UL + #define SQ_FR_PMR_HDR_FLAGS_DEBUG_TRACE 0x40UL + u8 access_cntl; + #define 
SQ_FR_PMR_HDR_ACCESS_CNTL_LOCAL_WRITE 0x1UL + #define SQ_FR_PMR_HDR_ACCESS_CNTL_REMOTE_READ 0x2UL + #define SQ_FR_PMR_HDR_ACCESS_CNTL_REMOTE_WRITE 0x4UL + #define SQ_FR_PMR_HDR_ACCESS_CNTL_REMOTE_ATOMIC 0x8UL + #define SQ_FR_PMR_HDR_ACCESS_CNTL_WINDOW_BIND 0x10UL + u8 zero_based_page_size_log; + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_MASK 0x1fUL + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_SFT 0 + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_4K 0x0UL + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_8K 0x1UL + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_16K 0x2UL + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_32K 0x3UL + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_64K 0x4UL + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_128K 0x5UL + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_256K 0x6UL + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_512K 0x7UL + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_1M 0x8UL + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_2M 0x9UL + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_4M 0xaUL + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_8M 0xbUL + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_16M 0xcUL + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_32M 0xdUL + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_64M 0xeUL + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_128M 0xfUL + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_256M 0x10UL + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_512M 0x11UL + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_1G 0x12UL + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_2G 0x13UL + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_4G 0x14UL + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_8G 0x15UL + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_16G 0x16UL + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_32G 0x17UL + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_64G 0x18UL + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_128G 0x19UL + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_256G 0x1aUL + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_512G 0x1bUL + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_1T 0x1cUL + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_2T 0x1dUL + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_4T 0x1eUL + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_8T 0x1fUL + #define SQ_FR_PMR_HDR_PAGE_SIZE_LOG_LAST SQ_FR_PMR_HDR_PAGE_SIZE_LOG_PGSZ_8T + #define SQ_FR_PMR_HDR_ZERO_BASED 0x20UL + __le32 l_key; + u8 length[5]; + u8 reserved8_1; + u8 reserved8_2; + u8 numlevels_pbl_page_size_log; + #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_MASK 0x1fUL + #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_SFT 0 + #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_4K 0x0UL + #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_8K 0x1UL + #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_16K 0x2UL + #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_32K 0x3UL + #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_64K 0x4UL + #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_128K 0x5UL + #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_256K 0x6UL + #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_512K 0x7UL + #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_1M 0x8UL + #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_2M 0x9UL + #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_4M 0xaUL + #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_8M 0xbUL + #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_16M 0xcUL + #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_32M 0xdUL + #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_64M 0xeUL + #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_128M 0xfUL + #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_256M 0x10UL + #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_512M 0x11UL + #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_1G 0x12UL + #define 
SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_2G 0x13UL + #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_4G 0x14UL + #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_8G 0x15UL + #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_16G 0x16UL + #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_32G 0x17UL + #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_64G 0x18UL + #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_128G 0x19UL + #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_256G 0x1aUL + #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_512G 0x1bUL + #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_1T 0x1cUL + #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_2T 0x1dUL + #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_4T 0x1eUL + #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_8T 0x1fUL + #define SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_LAST SQ_FR_PMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_8T + #define SQ_FR_PMR_HDR_NUMLEVELS_MASK 0xc0UL + #define SQ_FR_PMR_HDR_NUMLEVELS_SFT 6 + #define SQ_FR_PMR_HDR_NUMLEVELS_PHYSICAL (0x0UL << 6) + #define SQ_FR_PMR_HDR_NUMLEVELS_LAYER1 (0x1UL << 6) + #define SQ_FR_PMR_HDR_NUMLEVELS_LAYER2 (0x2UL << 6) + #define SQ_FR_PMR_HDR_NUMLEVELS_LAST SQ_FR_PMR_HDR_NUMLEVELS_LAYER2 + __le64 pblptr; + __le64 va; +}; + +/* sq_fr_ppmr (size:1024b/128B) */ +struct sq_fr_ppmr { + u8 wqe_type; + #define SQ_FR_PPMR_WQE_TYPE_FR_PPMR 0xfUL + #define SQ_FR_PPMR_WQE_TYPE_LAST SQ_FR_PPMR_WQE_TYPE_FR_PPMR + u8 flags; + #define SQ_FR_PPMR_FLAGS_SIGNAL_COMP 0x1UL + #define SQ_FR_PPMR_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL + #define SQ_FR_PPMR_FLAGS_UC_FENCE 0x4UL + #define SQ_FR_PPMR_FLAGS_SE 0x8UL + #define SQ_FR_PPMR_FLAGS_INLINE 0x10UL + #define SQ_FR_PPMR_FLAGS_WQE_TS_EN 0x20UL + #define SQ_FR_PPMR_FLAGS_DEBUG_TRACE 0x40UL + u8 access_cntl; + #define SQ_FR_PPMR_ACCESS_CNTL_LOCAL_WRITE 0x1UL + #define SQ_FR_PPMR_ACCESS_CNTL_REMOTE_READ 0x2UL + #define SQ_FR_PPMR_ACCESS_CNTL_REMOTE_WRITE 0x4UL + #define SQ_FR_PPMR_ACCESS_CNTL_REMOTE_ATOMIC 0x8UL + #define SQ_FR_PPMR_ACCESS_CNTL_WINDOW_BIND 0x10UL + u8 zero_based_page_size_log; + #define SQ_FR_PPMR_PAGE_SIZE_LOG_MASK 0x1fUL + #define SQ_FR_PPMR_PAGE_SIZE_LOG_SFT 0 + #define SQ_FR_PPMR_PAGE_SIZE_LOG_PGSZ_4K 0x0UL + #define SQ_FR_PPMR_PAGE_SIZE_LOG_PGSZ_8K 0x1UL + #define SQ_FR_PPMR_PAGE_SIZE_LOG_PGSZ_16K 0x2UL + #define SQ_FR_PPMR_PAGE_SIZE_LOG_PGSZ_32K 0x3UL + #define SQ_FR_PPMR_PAGE_SIZE_LOG_PGSZ_64K 0x4UL + #define SQ_FR_PPMR_PAGE_SIZE_LOG_PGSZ_128K 0x5UL + #define SQ_FR_PPMR_PAGE_SIZE_LOG_PGSZ_256K 0x6UL + #define SQ_FR_PPMR_PAGE_SIZE_LOG_PGSZ_512K 0x7UL + #define SQ_FR_PPMR_PAGE_SIZE_LOG_PGSZ_1M 0x8UL + #define SQ_FR_PPMR_PAGE_SIZE_LOG_PGSZ_2M 0x9UL + #define SQ_FR_PPMR_PAGE_SIZE_LOG_PGSZ_4M 0xaUL + #define SQ_FR_PPMR_PAGE_SIZE_LOG_PGSZ_8M 0xbUL + #define SQ_FR_PPMR_PAGE_SIZE_LOG_PGSZ_16M 0xcUL + #define SQ_FR_PPMR_PAGE_SIZE_LOG_PGSZ_32M 0xdUL + #define SQ_FR_PPMR_PAGE_SIZE_LOG_PGSZ_64M 0xeUL + #define SQ_FR_PPMR_PAGE_SIZE_LOG_PGSZ_128M 0xfUL + #define SQ_FR_PPMR_PAGE_SIZE_LOG_PGSZ_256M 0x10UL + #define SQ_FR_PPMR_PAGE_SIZE_LOG_PGSZ_512M 0x11UL + #define SQ_FR_PPMR_PAGE_SIZE_LOG_PGSZ_1G 0x12UL + #define SQ_FR_PPMR_PAGE_SIZE_LOG_PGSZ_2G 0x13UL + #define SQ_FR_PPMR_PAGE_SIZE_LOG_PGSZ_4G 0x14UL + #define SQ_FR_PPMR_PAGE_SIZE_LOG_PGSZ_8G 0x15UL + #define SQ_FR_PPMR_PAGE_SIZE_LOG_PGSZ_16G 0x16UL + #define SQ_FR_PPMR_PAGE_SIZE_LOG_PGSZ_32G 0x17UL + #define SQ_FR_PPMR_PAGE_SIZE_LOG_PGSZ_64G 0x18UL + #define SQ_FR_PPMR_PAGE_SIZE_LOG_PGSZ_128G 0x19UL + #define SQ_FR_PPMR_PAGE_SIZE_LOG_PGSZ_256G 0x1aUL + #define SQ_FR_PPMR_PAGE_SIZE_LOG_PGSZ_512G 0x1bUL + #define SQ_FR_PPMR_PAGE_SIZE_LOG_PGSZ_1T 0x1cUL + #define 
SQ_FR_PPMR_PAGE_SIZE_LOG_PGSZ_2T 0x1dUL + #define SQ_FR_PPMR_PAGE_SIZE_LOG_PGSZ_4T 0x1eUL + #define SQ_FR_PPMR_PAGE_SIZE_LOG_PGSZ_8T 0x1fUL + #define SQ_FR_PPMR_PAGE_SIZE_LOG_LAST SQ_FR_PPMR_PAGE_SIZE_LOG_PGSZ_8T + #define SQ_FR_PPMR_ZERO_BASED 0x20UL + __le32 l_key; + __le32 length; + __le16 proxy_vfid; + u8 proxy_pfid; + u8 numlevels_pbl_page_size_log; + #define SQ_FR_PPMR_PBL_PAGE_SIZE_LOG_MASK 0x1fUL + #define SQ_FR_PPMR_PBL_PAGE_SIZE_LOG_SFT 0 + #define SQ_FR_PPMR_PBL_PAGE_SIZE_LOG_PGSZ_4K 0x0UL + #define SQ_FR_PPMR_PBL_PAGE_SIZE_LOG_PGSZ_8K 0x1UL + #define SQ_FR_PPMR_PBL_PAGE_SIZE_LOG_PGSZ_16K 0x2UL + #define SQ_FR_PPMR_PBL_PAGE_SIZE_LOG_PGSZ_32K 0x3UL + #define SQ_FR_PPMR_PBL_PAGE_SIZE_LOG_PGSZ_64K 0x4UL + #define SQ_FR_PPMR_PBL_PAGE_SIZE_LOG_PGSZ_128K 0x5UL + #define SQ_FR_PPMR_PBL_PAGE_SIZE_LOG_PGSZ_256K 0x6UL + #define SQ_FR_PPMR_PBL_PAGE_SIZE_LOG_PGSZ_512K 0x7UL + #define SQ_FR_PPMR_PBL_PAGE_SIZE_LOG_PGSZ_1M 0x8UL + #define SQ_FR_PPMR_PBL_PAGE_SIZE_LOG_PGSZ_2M 0x9UL + #define SQ_FR_PPMR_PBL_PAGE_SIZE_LOG_PGSZ_4M 0xaUL + #define SQ_FR_PPMR_PBL_PAGE_SIZE_LOG_PGSZ_8M 0xbUL + #define SQ_FR_PPMR_PBL_PAGE_SIZE_LOG_PGSZ_16M 0xcUL + #define SQ_FR_PPMR_PBL_PAGE_SIZE_LOG_PGSZ_32M 0xdUL + #define SQ_FR_PPMR_PBL_PAGE_SIZE_LOG_PGSZ_64M 0xeUL + #define SQ_FR_PPMR_PBL_PAGE_SIZE_LOG_PGSZ_128M 0xfUL + #define SQ_FR_PPMR_PBL_PAGE_SIZE_LOG_PGSZ_256M 0x10UL + #define SQ_FR_PPMR_PBL_PAGE_SIZE_LOG_PGSZ_512M 0x11UL + #define SQ_FR_PPMR_PBL_PAGE_SIZE_LOG_PGSZ_1G 0x12UL + #define SQ_FR_PPMR_PBL_PAGE_SIZE_LOG_PGSZ_2G 0x13UL + #define SQ_FR_PPMR_PBL_PAGE_SIZE_LOG_PGSZ_4G 0x14UL + #define SQ_FR_PPMR_PBL_PAGE_SIZE_LOG_PGSZ_8G 0x15UL + #define SQ_FR_PPMR_PBL_PAGE_SIZE_LOG_PGSZ_16G 0x16UL + #define SQ_FR_PPMR_PBL_PAGE_SIZE_LOG_PGSZ_32G 0x17UL + #define SQ_FR_PPMR_PBL_PAGE_SIZE_LOG_PGSZ_64G 0x18UL + #define SQ_FR_PPMR_PBL_PAGE_SIZE_LOG_PGSZ_128G 0x19UL + #define SQ_FR_PPMR_PBL_PAGE_SIZE_LOG_PGSZ_256G 0x1aUL + #define SQ_FR_PPMR_PBL_PAGE_SIZE_LOG_PGSZ_512G 0x1bUL + #define SQ_FR_PPMR_PBL_PAGE_SIZE_LOG_PGSZ_1T 0x1cUL + #define SQ_FR_PPMR_PBL_PAGE_SIZE_LOG_PGSZ_2T 0x1dUL + #define SQ_FR_PPMR_PBL_PAGE_SIZE_LOG_PGSZ_4T 0x1eUL + #define SQ_FR_PPMR_PBL_PAGE_SIZE_LOG_PGSZ_8T 0x1fUL + #define SQ_FR_PPMR_PBL_PAGE_SIZE_LOG_LAST SQ_FR_PPMR_PBL_PAGE_SIZE_LOG_PGSZ_8T + #define SQ_FR_PPMR_PROXY_VF_VALID 0x20UL + #define SQ_FR_PPMR_NUMLEVELS_MASK 0xc0UL + #define SQ_FR_PPMR_NUMLEVELS_SFT 6 + #define SQ_FR_PPMR_NUMLEVELS_PHYSICAL (0x0UL << 6) + #define SQ_FR_PPMR_NUMLEVELS_LAYER1 (0x1UL << 6) + #define SQ_FR_PPMR_NUMLEVELS_LAYER2 (0x2UL << 6) + #define SQ_FR_PPMR_NUMLEVELS_LAST SQ_FR_PPMR_NUMLEVELS_LAYER2 + __le64 pblptr; + __le64 va; + __le32 data[24]; +}; + +/* sq_fr_ppmr_hdr (size:256b/32B) */ +struct sq_fr_ppmr_hdr { + u8 wqe_type; + #define SQ_FR_PPMR_HDR_WQE_TYPE_FR_PPMR 0xfUL + #define SQ_FR_PPMR_HDR_WQE_TYPE_LAST SQ_FR_PPMR_HDR_WQE_TYPE_FR_PPMR + u8 flags; + #define SQ_FR_PPMR_HDR_FLAGS_SIGNAL_COMP 0x1UL + #define SQ_FR_PPMR_HDR_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL + #define SQ_FR_PPMR_HDR_FLAGS_UC_FENCE 0x4UL + #define SQ_FR_PPMR_HDR_FLAGS_SE 0x8UL + #define SQ_FR_PPMR_HDR_FLAGS_INLINE 0x10UL + #define SQ_FR_PPMR_HDR_FLAGS_WQE_TS_EN 0x20UL + #define SQ_FR_PPMR_HDR_FLAGS_DEBUG_TRACE 0x40UL + u8 access_cntl; + #define SQ_FR_PPMR_HDR_ACCESS_CNTL_LOCAL_WRITE 0x1UL + #define SQ_FR_PPMR_HDR_ACCESS_CNTL_REMOTE_READ 0x2UL + #define SQ_FR_PPMR_HDR_ACCESS_CNTL_REMOTE_WRITE 0x4UL + #define SQ_FR_PPMR_HDR_ACCESS_CNTL_REMOTE_ATOMIC 0x8UL + #define SQ_FR_PPMR_HDR_ACCESS_CNTL_WINDOW_BIND 0x10UL + u8 zero_based_page_size_log; + #define 
SQ_FR_PPMR_HDR_PAGE_SIZE_LOG_MASK 0x1fUL + #define SQ_FR_PPMR_HDR_PAGE_SIZE_LOG_SFT 0 + #define SQ_FR_PPMR_HDR_PAGE_SIZE_LOG_PGSZ_4K 0x0UL + #define SQ_FR_PPMR_HDR_PAGE_SIZE_LOG_PGSZ_8K 0x1UL + #define SQ_FR_PPMR_HDR_PAGE_SIZE_LOG_PGSZ_16K 0x2UL + #define SQ_FR_PPMR_HDR_PAGE_SIZE_LOG_PGSZ_32K 0x3UL + #define SQ_FR_PPMR_HDR_PAGE_SIZE_LOG_PGSZ_64K 0x4UL + #define SQ_FR_PPMR_HDR_PAGE_SIZE_LOG_PGSZ_128K 0x5UL + #define SQ_FR_PPMR_HDR_PAGE_SIZE_LOG_PGSZ_256K 0x6UL + #define SQ_FR_PPMR_HDR_PAGE_SIZE_LOG_PGSZ_512K 0x7UL + #define SQ_FR_PPMR_HDR_PAGE_SIZE_LOG_PGSZ_1M 0x8UL + #define SQ_FR_PPMR_HDR_PAGE_SIZE_LOG_PGSZ_2M 0x9UL + #define SQ_FR_PPMR_HDR_PAGE_SIZE_LOG_PGSZ_4M 0xaUL + #define SQ_FR_PPMR_HDR_PAGE_SIZE_LOG_PGSZ_8M 0xbUL + #define SQ_FR_PPMR_HDR_PAGE_SIZE_LOG_PGSZ_16M 0xcUL + #define SQ_FR_PPMR_HDR_PAGE_SIZE_LOG_PGSZ_32M 0xdUL + #define SQ_FR_PPMR_HDR_PAGE_SIZE_LOG_PGSZ_64M 0xeUL + #define SQ_FR_PPMR_HDR_PAGE_SIZE_LOG_PGSZ_128M 0xfUL + #define SQ_FR_PPMR_HDR_PAGE_SIZE_LOG_PGSZ_256M 0x10UL + #define SQ_FR_PPMR_HDR_PAGE_SIZE_LOG_PGSZ_512M 0x11UL + #define SQ_FR_PPMR_HDR_PAGE_SIZE_LOG_PGSZ_1G 0x12UL + #define SQ_FR_PPMR_HDR_PAGE_SIZE_LOG_PGSZ_2G 0x13UL + #define SQ_FR_PPMR_HDR_PAGE_SIZE_LOG_PGSZ_4G 0x14UL + #define SQ_FR_PPMR_HDR_PAGE_SIZE_LOG_PGSZ_8G 0x15UL + #define SQ_FR_PPMR_HDR_PAGE_SIZE_LOG_PGSZ_16G 0x16UL + #define SQ_FR_PPMR_HDR_PAGE_SIZE_LOG_PGSZ_32G 0x17UL + #define SQ_FR_PPMR_HDR_PAGE_SIZE_LOG_PGSZ_64G 0x18UL + #define SQ_FR_PPMR_HDR_PAGE_SIZE_LOG_PGSZ_128G 0x19UL + #define SQ_FR_PPMR_HDR_PAGE_SIZE_LOG_PGSZ_256G 0x1aUL + #define SQ_FR_PPMR_HDR_PAGE_SIZE_LOG_PGSZ_512G 0x1bUL + #define SQ_FR_PPMR_HDR_PAGE_SIZE_LOG_PGSZ_1T 0x1cUL + #define SQ_FR_PPMR_HDR_PAGE_SIZE_LOG_PGSZ_2T 0x1dUL + #define SQ_FR_PPMR_HDR_PAGE_SIZE_LOG_PGSZ_4T 0x1eUL + #define SQ_FR_PPMR_HDR_PAGE_SIZE_LOG_PGSZ_8T 0x1fUL + #define SQ_FR_PPMR_HDR_PAGE_SIZE_LOG_LAST SQ_FR_PPMR_HDR_PAGE_SIZE_LOG_PGSZ_8T + #define SQ_FR_PPMR_HDR_ZERO_BASED 0x20UL + __le32 l_key; + __le32 length; + __le16 proxy_vfid; + u8 proxy_pfid; + u8 numlevels_pbl_page_size_log; + #define SQ_FR_PPMR_HDR_PBL_PAGE_SIZE_LOG_MASK 0x1fUL + #define SQ_FR_PPMR_HDR_PBL_PAGE_SIZE_LOG_SFT 0 + #define SQ_FR_PPMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_4K 0x0UL + #define SQ_FR_PPMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_8K 0x1UL + #define SQ_FR_PPMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_16K 0x2UL + #define SQ_FR_PPMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_32K 0x3UL + #define SQ_FR_PPMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_64K 0x4UL + #define SQ_FR_PPMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_128K 0x5UL + #define SQ_FR_PPMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_256K 0x6UL + #define SQ_FR_PPMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_512K 0x7UL + #define SQ_FR_PPMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_1M 0x8UL + #define SQ_FR_PPMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_2M 0x9UL + #define SQ_FR_PPMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_4M 0xaUL + #define SQ_FR_PPMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_8M 0xbUL + #define SQ_FR_PPMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_16M 0xcUL + #define SQ_FR_PPMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_32M 0xdUL + #define SQ_FR_PPMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_64M 0xeUL + #define SQ_FR_PPMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_128M 0xfUL + #define SQ_FR_PPMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_256M 0x10UL + #define SQ_FR_PPMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_512M 0x11UL + #define SQ_FR_PPMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_1G 0x12UL + #define SQ_FR_PPMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_2G 0x13UL + #define SQ_FR_PPMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_4G 0x14UL + #define SQ_FR_PPMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_8G 0x15UL + #define SQ_FR_PPMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_16G 0x16UL + #define SQ_FR_PPMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_32G 0x17UL 
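/*
 * Illustrative sketch only, not part of this patch: one way a consumer of
 * the SQ_FR_PMR_* page-size encodings above might derive the PAGE_SIZE_LOG
 * index (PGSZ_4K == 0, PGSZ_8K == 1, ... PGSZ_8T == 0x1f, i.e. log2(size) - 12)
 * and pack it together with the ZERO_BASED and NUMLEVELS bits of a fast-register
 * WQE.  The helper names and parameters below are assumptions made for the
 * example; they do not exist in this driver.
 */
#include <linux/log2.h>
#include <linux/types.h>

static inline u8 example_page_size_log(u64 page_size)
{
	/* 4 KB maps to 0, each doubling adds one, up to 8 TB at 0x1f */
	return (u8)(ilog2(page_size) - 12);
}

static inline void example_fill_fr_pmr_sizes(struct sq_fr_pmr *wqe,
					     u64 mr_page_size,
					     u64 pbl_page_size,
					     bool zero_based, u8 numlevels)
{
	/* bits 4:0 = MR page size log index, bit 5 = zero-based VA */
	wqe->zero_based_page_size_log =
		(example_page_size_log(mr_page_size) &
		 SQ_FR_PMR_PAGE_SIZE_LOG_MASK) |
		(zero_based ? SQ_FR_PMR_ZERO_BASED : 0);
	/* bits 4:0 = PBL page size log index, bits 7:6 = PBL levels (0..2) */
	wqe->numlevels_pbl_page_size_log =
		(example_page_size_log(pbl_page_size) &
		 SQ_FR_PMR_PBL_PAGE_SIZE_LOG_MASK) |
		((numlevels << SQ_FR_PMR_NUMLEVELS_SFT) &
		 SQ_FR_PMR_NUMLEVELS_MASK);
}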
+ #define SQ_FR_PPMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_64G 0x18UL + #define SQ_FR_PPMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_128G 0x19UL + #define SQ_FR_PPMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_256G 0x1aUL + #define SQ_FR_PPMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_512G 0x1bUL + #define SQ_FR_PPMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_1T 0x1cUL + #define SQ_FR_PPMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_2T 0x1dUL + #define SQ_FR_PPMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_4T 0x1eUL + #define SQ_FR_PPMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_8T 0x1fUL + #define SQ_FR_PPMR_HDR_PBL_PAGE_SIZE_LOG_LAST SQ_FR_PPMR_HDR_PBL_PAGE_SIZE_LOG_PGSZ_8T + #define SQ_FR_PPMR_HDR_PROXY_VF_VALID 0x20UL + #define SQ_FR_PPMR_HDR_NUMLEVELS_MASK 0xc0UL + #define SQ_FR_PPMR_HDR_NUMLEVELS_SFT 6 + #define SQ_FR_PPMR_HDR_NUMLEVELS_PHYSICAL (0x0UL << 6) + #define SQ_FR_PPMR_HDR_NUMLEVELS_LAYER1 (0x1UL << 6) + #define SQ_FR_PPMR_HDR_NUMLEVELS_LAYER2 (0x2UL << 6) + #define SQ_FR_PPMR_HDR_NUMLEVELS_LAST SQ_FR_PPMR_HDR_NUMLEVELS_LAYER2 + __le64 pblptr; + __le64 va; +}; + +/* sq_bind (size:1024b/128B) */ +struct sq_bind { + u8 wqe_type; + #define SQ_BIND_WQE_TYPE_BIND 0xeUL + #define SQ_BIND_WQE_TYPE_LAST SQ_BIND_WQE_TYPE_BIND + u8 flags; + #define SQ_BIND_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_MASK 0xffUL + #define SQ_BIND_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_SFT 0 + #define SQ_BIND_FLAGS_SIGNAL_COMP 0x1UL + #define SQ_BIND_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL + #define SQ_BIND_FLAGS_UC_FENCE 0x4UL + #define SQ_BIND_FLAGS_SE 0x8UL + #define SQ_BIND_FLAGS_INLINE 0x10UL + #define SQ_BIND_FLAGS_WQE_TS_EN 0x20UL + #define SQ_BIND_FLAGS_DEBUG_TRACE 0x40UL + u8 access_cntl; + #define SQ_BIND_ACCESS_CNTL_WINDOW_BIND_REMOTE_ATOMIC_REMOTE_WRITE_REMOTE_READ_LOCAL_WRITE_MASK 0xffUL + #define SQ_BIND_ACCESS_CNTL_WINDOW_BIND_REMOTE_ATOMIC_REMOTE_WRITE_REMOTE_READ_LOCAL_WRITE_SFT 0 + #define SQ_BIND_ACCESS_CNTL_LOCAL_WRITE 0x1UL + #define SQ_BIND_ACCESS_CNTL_REMOTE_READ 0x2UL + #define SQ_BIND_ACCESS_CNTL_REMOTE_WRITE 0x4UL + #define SQ_BIND_ACCESS_CNTL_REMOTE_ATOMIC 0x8UL + #define SQ_BIND_ACCESS_CNTL_WINDOW_BIND 0x10UL + u8 reserved8_1; + u8 mw_type_zero_based; + #define SQ_BIND_ZERO_BASED 0x1UL + #define SQ_BIND_MW_TYPE 0x2UL + #define SQ_BIND_MW_TYPE_TYPE1 (0x0UL << 1) + #define SQ_BIND_MW_TYPE_TYPE2 (0x1UL << 1) + #define SQ_BIND_MW_TYPE_LAST SQ_BIND_MW_TYPE_TYPE2 + u8 reserved8_2; + __le16 reserved16; + __le32 parent_l_key; + __le32 l_key; + __le64 va; + u8 length[5]; + u8 reserved24[3]; + __le32 data[24]; +}; + +/* sq_bind_hdr (size:256b/32B) */ +struct sq_bind_hdr { + u8 wqe_type; + #define SQ_BIND_HDR_WQE_TYPE_BIND 0xeUL + #define SQ_BIND_HDR_WQE_TYPE_LAST SQ_BIND_HDR_WQE_TYPE_BIND + u8 flags; + #define SQ_BIND_HDR_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_MASK 0xffUL + #define SQ_BIND_HDR_FLAGS_INLINE_SE_UC_FENCE_RD_OR_ATOMIC_FENCE_SIGNAL_COMP_SFT 0 + #define SQ_BIND_HDR_FLAGS_SIGNAL_COMP 0x1UL + #define SQ_BIND_HDR_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL + #define SQ_BIND_HDR_FLAGS_UC_FENCE 0x4UL + #define SQ_BIND_HDR_FLAGS_SE 0x8UL + #define SQ_BIND_HDR_FLAGS_INLINE 0x10UL + #define SQ_BIND_HDR_FLAGS_WQE_TS_EN 0x20UL + #define SQ_BIND_HDR_FLAGS_DEBUG_TRACE 0x40UL + u8 access_cntl; + #define SQ_BIND_HDR_ACCESS_CNTL_WINDOW_BIND_REMOTE_ATOMIC_REMOTE_WRITE_REMOTE_READ_LOCAL_WRITE_MASK 0xffUL + #define SQ_BIND_HDR_ACCESS_CNTL_WINDOW_BIND_REMOTE_ATOMIC_REMOTE_WRITE_REMOTE_READ_LOCAL_WRITE_SFT 0 + #define SQ_BIND_HDR_ACCESS_CNTL_LOCAL_WRITE 0x1UL + #define SQ_BIND_HDR_ACCESS_CNTL_REMOTE_READ 0x2UL + #define SQ_BIND_HDR_ACCESS_CNTL_REMOTE_WRITE 0x4UL + #define 
SQ_BIND_HDR_ACCESS_CNTL_REMOTE_ATOMIC 0x8UL + #define SQ_BIND_HDR_ACCESS_CNTL_WINDOW_BIND 0x10UL + u8 reserved8_1; + u8 mw_type_zero_based; + #define SQ_BIND_HDR_ZERO_BASED 0x1UL + #define SQ_BIND_HDR_MW_TYPE 0x2UL + #define SQ_BIND_HDR_MW_TYPE_TYPE1 (0x0UL << 1) + #define SQ_BIND_HDR_MW_TYPE_TYPE2 (0x1UL << 1) + #define SQ_BIND_HDR_MW_TYPE_LAST SQ_BIND_HDR_MW_TYPE_TYPE2 + u8 reserved8_2; + __le16 reserved16; + __le32 parent_l_key; + __le32 l_key; + __le64 va; + u8 length[5]; + u8 reserved24[3]; +}; + +/* sq_msn_search_v3 (size:128b/16B) */ +struct sq_msn_search_v3 { + __le64 idx_psn; + #define SQ_MSN_SEARCH_V3_START_PSN_MASK 0xffffffUL + #define SQ_MSN_SEARCH_V3_START_PSN_SFT 0 + #define SQ_MSN_SEARCH_V3_NEXT_PSN_MASK 0xffffff000000ULL + #define SQ_MSN_SEARCH_V3_NEXT_PSN_SFT 24 + #define SQ_MSN_SEARCH_V3_START_IDX_MASK 0xffff000000000000ULL + #define SQ_MSN_SEARCH_V3_START_IDX_SFT 48 + __le32 wqe_opaque; + u8 wqe_size; + u8 signal; + #define SQ_MSN_SEARCH_V3_SGNLD 0x1UL + #define SQ_MSN_SEARCH_V3_PREV_SGNLD_LOCAL_MEM_WQE 0x2UL + __le16 reserved; +}; + +/* sq_send_v3 (size:1024b/128B) */ +struct sq_send_v3 { + u8 wqe_type; + #define SQ_SEND_V3_WQE_TYPE_SEND_V3 0x10UL + #define SQ_SEND_V3_WQE_TYPE_SEND_W_IMMED_V3 0x11UL + #define SQ_SEND_V3_WQE_TYPE_SEND_W_INVALID_V3 0x12UL + #define SQ_SEND_V3_WQE_TYPE_LAST SQ_SEND_V3_WQE_TYPE_SEND_W_INVALID_V3 + u8 flags; + #define SQ_SEND_V3_FLAGS_SIGNAL_COMP 0x1UL + #define SQ_SEND_V3_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL + #define SQ_SEND_V3_FLAGS_UC_FENCE 0x4UL + #define SQ_SEND_V3_FLAGS_SE 0x8UL + #define SQ_SEND_V3_FLAGS_INLINE 0x10UL + #define SQ_SEND_V3_FLAGS_WQE_TS_EN 0x20UL + #define SQ_SEND_V3_FLAGS_DEBUG_TRACE 0x40UL + u8 wqe_size; + #define SQ_SEND_V3_WQE_SIZE_MASK 0x3fUL + #define SQ_SEND_V3_WQE_SIZE_SFT 0 + u8 inline_length; + #define SQ_SEND_V3_INLINE_LENGTH_MASK 0xfUL + #define SQ_SEND_V3_INLINE_LENGTH_SFT 0 + __le32 opaque; + __le32 inv_key_or_imm_data; + __le32 timestamp; + #define SQ_SEND_V3_TIMESTAMP_MASK 0xffffffUL + #define SQ_SEND_V3_TIMESTAMP_SFT 0 + __le32 data[28]; +}; + +/* sq_send_hdr_v3 (size:128b/16B) */ +struct sq_send_hdr_v3 { + u8 wqe_type; + #define SQ_SEND_HDR_V3_WQE_TYPE_SEND_V3 0x10UL + #define SQ_SEND_HDR_V3_WQE_TYPE_SEND_W_IMMED_V3 0x11UL + #define SQ_SEND_HDR_V3_WQE_TYPE_SEND_W_INVALID_V3 0x12UL + #define SQ_SEND_HDR_V3_WQE_TYPE_LAST SQ_SEND_HDR_V3_WQE_TYPE_SEND_W_INVALID_V3 + u8 flags; + #define SQ_SEND_HDR_V3_FLAGS_SIGNAL_COMP 0x1UL + #define SQ_SEND_HDR_V3_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL + #define SQ_SEND_HDR_V3_FLAGS_UC_FENCE 0x4UL + #define SQ_SEND_HDR_V3_FLAGS_SE 0x8UL + #define SQ_SEND_HDR_V3_FLAGS_INLINE 0x10UL + #define SQ_SEND_HDR_V3_FLAGS_WQE_TS_EN 0x20UL + #define SQ_SEND_HDR_V3_FLAGS_DEBUG_TRACE 0x40UL + u8 wqe_size; + #define SQ_SEND_HDR_V3_WQE_SIZE_MASK 0x3fUL + #define SQ_SEND_HDR_V3_WQE_SIZE_SFT 0 + u8 inline_length; + #define SQ_SEND_HDR_V3_INLINE_LENGTH_MASK 0xfUL + #define SQ_SEND_HDR_V3_INLINE_LENGTH_SFT 0 + __le32 opaque; + __le32 inv_key_or_imm_data; + __le32 timestamp; + #define SQ_SEND_HDR_V3_TIMESTAMP_MASK 0xffffffUL + #define SQ_SEND_HDR_V3_TIMESTAMP_SFT 0 +}; + +/* sq_rawqp1send_v3 (size:1024b/128B) */ +struct sq_rawqp1send_v3 { + u8 wqe_type; + #define SQ_RAWQP1SEND_V3_WQE_TYPE_RAWQP1SEND_V3 0x1dUL + #define SQ_RAWQP1SEND_V3_WQE_TYPE_LAST SQ_RAWQP1SEND_V3_WQE_TYPE_RAWQP1SEND_V3 + u8 flags; + #define SQ_RAWQP1SEND_V3_FLAGS_SIGNAL_COMP 0x1UL + #define SQ_RAWQP1SEND_V3_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL + #define SQ_RAWQP1SEND_V3_FLAGS_UC_FENCE 0x4UL + #define SQ_RAWQP1SEND_V3_FLAGS_SE 0x8UL + #define 
SQ_RAWQP1SEND_V3_FLAGS_INLINE 0x10UL + #define SQ_RAWQP1SEND_V3_FLAGS_WQE_TS_EN 0x20UL + #define SQ_RAWQP1SEND_V3_FLAGS_DEBUG_TRACE 0x40UL + u8 wqe_size; + #define SQ_RAWQP1SEND_V3_WQE_SIZE_MASK 0x3fUL + #define SQ_RAWQP1SEND_V3_WQE_SIZE_SFT 0 + u8 inline_length; + #define SQ_RAWQP1SEND_V3_INLINE_LENGTH_MASK 0xfUL + #define SQ_RAWQP1SEND_V3_INLINE_LENGTH_SFT 0 + __le32 opaque; + __le16 lflags; + #define SQ_RAWQP1SEND_V3_LFLAGS_TCP_UDP_CHKSUM 0x1UL + #define SQ_RAWQP1SEND_V3_LFLAGS_IP_CHKSUM 0x2UL + #define SQ_RAWQP1SEND_V3_LFLAGS_NOCRC 0x4UL + #define SQ_RAWQP1SEND_V3_LFLAGS_T_IP_CHKSUM 0x10UL + #define SQ_RAWQP1SEND_V3_LFLAGS_OT_IP_CHKSUM 0x20UL + #define SQ_RAWQP1SEND_V3_LFLAGS_ROCE_CRC 0x100UL + #define SQ_RAWQP1SEND_V3_LFLAGS_FCOE_CRC 0x200UL + __le16 cfa_action; + __le16 cfa_action_high; + #define SQ_RAWQP1SEND_V3_CFA_ACTION_HIGH_MASK 0x3ffUL + #define SQ_RAWQP1SEND_V3_CFA_ACTION_HIGH_SFT 0 + __le16 reserved_2; + __le32 cfa_meta; + #define SQ_RAWQP1SEND_V3_CFA_META_VLAN_VID_MASK 0xfffUL + #define SQ_RAWQP1SEND_V3_CFA_META_VLAN_VID_SFT 0 + #define SQ_RAWQP1SEND_V3_CFA_META_VLAN_DE 0x1000UL + #define SQ_RAWQP1SEND_V3_CFA_META_VLAN_PRI_MASK 0xe000UL + #define SQ_RAWQP1SEND_V3_CFA_META_VLAN_PRI_SFT 13 + #define SQ_RAWQP1SEND_V3_CFA_META_VLAN_TPID_MASK 0x70000UL + #define SQ_RAWQP1SEND_V3_CFA_META_VLAN_TPID_SFT 16 + #define SQ_RAWQP1SEND_V3_CFA_META_VLAN_TPID_TPID88A8 (0x0UL << 16) + #define SQ_RAWQP1SEND_V3_CFA_META_VLAN_TPID_TPID8100 (0x1UL << 16) + #define SQ_RAWQP1SEND_V3_CFA_META_VLAN_TPID_TPID9100 (0x2UL << 16) + #define SQ_RAWQP1SEND_V3_CFA_META_VLAN_TPID_TPID9200 (0x3UL << 16) + #define SQ_RAWQP1SEND_V3_CFA_META_VLAN_TPID_TPID9300 (0x4UL << 16) + #define SQ_RAWQP1SEND_V3_CFA_META_VLAN_TPID_TPIDCFG (0x5UL << 16) + #define SQ_RAWQP1SEND_V3_CFA_META_VLAN_TPID_LAST SQ_RAWQP1SEND_V3_CFA_META_VLAN_TPID_TPIDCFG + #define SQ_RAWQP1SEND_V3_CFA_META_VLAN_RESERVED_MASK 0xff80000UL + #define SQ_RAWQP1SEND_V3_CFA_META_VLAN_RESERVED_SFT 19 + #define SQ_RAWQP1SEND_V3_CFA_META_KEY_MASK 0xf0000000UL + #define SQ_RAWQP1SEND_V3_CFA_META_KEY_SFT 28 + #define SQ_RAWQP1SEND_V3_CFA_META_KEY_NONE (0x0UL << 28) + #define SQ_RAWQP1SEND_V3_CFA_META_KEY_VLAN_TAG (0x1UL << 28) + #define SQ_RAWQP1SEND_V3_CFA_META_KEY_LAST SQ_RAWQP1SEND_V3_CFA_META_KEY_VLAN_TAG + __le32 timestamp; + #define SQ_RAWQP1SEND_V3_TIMESTAMP_MASK 0xffffffUL + #define SQ_RAWQP1SEND_V3_TIMESTAMP_SFT 0 + __le64 reserved_3; + __le32 data[24]; +}; + +/* sq_rawqp1send_hdr_v3 (size:256b/32B) */ +struct sq_rawqp1send_hdr_v3 { + u8 wqe_type; + #define SQ_RAWQP1SEND_HDR_V3_WQE_TYPE_RAWQP1SEND_V3 0x1dUL + #define SQ_RAWQP1SEND_HDR_V3_WQE_TYPE_LAST SQ_RAWQP1SEND_HDR_V3_WQE_TYPE_RAWQP1SEND_V3 + u8 flags; + #define SQ_RAWQP1SEND_HDR_V3_FLAGS_SIGNAL_COMP 0x1UL + #define SQ_RAWQP1SEND_HDR_V3_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL + #define SQ_RAWQP1SEND_HDR_V3_FLAGS_UC_FENCE 0x4UL + #define SQ_RAWQP1SEND_HDR_V3_FLAGS_SE 0x8UL + #define SQ_RAWQP1SEND_HDR_V3_FLAGS_INLINE 0x10UL + #define SQ_RAWQP1SEND_HDR_V3_FLAGS_WQE_TS_EN 0x20UL + #define SQ_RAWQP1SEND_HDR_V3_FLAGS_DEBUG_TRACE 0x40UL + u8 wqe_size; + #define SQ_RAWQP1SEND_HDR_V3_WQE_SIZE_MASK 0x3fUL + #define SQ_RAWQP1SEND_HDR_V3_WQE_SIZE_SFT 0 + u8 inline_length; + #define SQ_RAWQP1SEND_HDR_V3_INLINE_LENGTH_MASK 0xfUL + #define SQ_RAWQP1SEND_HDR_V3_INLINE_LENGTH_SFT 0 + __le32 opaque; + __le16 lflags; + #define SQ_RAWQP1SEND_HDR_V3_LFLAGS_TCP_UDP_CHKSUM 0x1UL + #define SQ_RAWQP1SEND_HDR_V3_LFLAGS_IP_CHKSUM 0x2UL + #define SQ_RAWQP1SEND_HDR_V3_LFLAGS_NOCRC 0x4UL + #define 
SQ_RAWQP1SEND_HDR_V3_LFLAGS_T_IP_CHKSUM 0x10UL + #define SQ_RAWQP1SEND_HDR_V3_LFLAGS_OT_IP_CHKSUM 0x20UL + #define SQ_RAWQP1SEND_HDR_V3_LFLAGS_ROCE_CRC 0x100UL + #define SQ_RAWQP1SEND_HDR_V3_LFLAGS_FCOE_CRC 0x200UL + __le16 cfa_action; + __le16 cfa_action_high; + #define SQ_RAWQP1SEND_HDR_V3_CFA_ACTION_HIGH_MASK 0x3ffUL + #define SQ_RAWQP1SEND_HDR_V3_CFA_ACTION_HIGH_SFT 0 + __le16 reserved_2; + __le32 cfa_meta; + #define SQ_RAWQP1SEND_HDR_V3_CFA_META_VLAN_VID_MASK 0xfffUL + #define SQ_RAWQP1SEND_HDR_V3_CFA_META_VLAN_VID_SFT 0 + #define SQ_RAWQP1SEND_HDR_V3_CFA_META_VLAN_DE 0x1000UL + #define SQ_RAWQP1SEND_HDR_V3_CFA_META_VLAN_PRI_MASK 0xe000UL + #define SQ_RAWQP1SEND_HDR_V3_CFA_META_VLAN_PRI_SFT 13 + #define SQ_RAWQP1SEND_HDR_V3_CFA_META_VLAN_TPID_MASK 0x70000UL + #define SQ_RAWQP1SEND_HDR_V3_CFA_META_VLAN_TPID_SFT 16 + #define SQ_RAWQP1SEND_HDR_V3_CFA_META_VLAN_TPID_TPID88A8 (0x0UL << 16) + #define SQ_RAWQP1SEND_HDR_V3_CFA_META_VLAN_TPID_TPID8100 (0x1UL << 16) + #define SQ_RAWQP1SEND_HDR_V3_CFA_META_VLAN_TPID_TPID9100 (0x2UL << 16) + #define SQ_RAWQP1SEND_HDR_V3_CFA_META_VLAN_TPID_TPID9200 (0x3UL << 16) + #define SQ_RAWQP1SEND_HDR_V3_CFA_META_VLAN_TPID_TPID9300 (0x4UL << 16) + #define SQ_RAWQP1SEND_HDR_V3_CFA_META_VLAN_TPID_TPIDCFG (0x5UL << 16) + #define SQ_RAWQP1SEND_HDR_V3_CFA_META_VLAN_TPID_LAST SQ_RAWQP1SEND_HDR_V3_CFA_META_VLAN_TPID_TPIDCFG + #define SQ_RAWQP1SEND_HDR_V3_CFA_META_VLAN_RESERVED_MASK 0xff80000UL + #define SQ_RAWQP1SEND_HDR_V3_CFA_META_VLAN_RESERVED_SFT 19 + #define SQ_RAWQP1SEND_HDR_V3_CFA_META_KEY_MASK 0xf0000000UL + #define SQ_RAWQP1SEND_HDR_V3_CFA_META_KEY_SFT 28 + #define SQ_RAWQP1SEND_HDR_V3_CFA_META_KEY_NONE (0x0UL << 28) + #define SQ_RAWQP1SEND_HDR_V3_CFA_META_KEY_VLAN_TAG (0x1UL << 28) + #define SQ_RAWQP1SEND_HDR_V3_CFA_META_KEY_LAST SQ_RAWQP1SEND_HDR_V3_CFA_META_KEY_VLAN_TAG + __le32 timestamp; + #define SQ_RAWQP1SEND_HDR_V3_TIMESTAMP_MASK 0xffffffUL + #define SQ_RAWQP1SEND_HDR_V3_TIMESTAMP_SFT 0 + __le64 reserved_3; +}; + +/* sq_udsend_v3 (size:1024b/128B) */ +struct sq_udsend_v3 { + u8 wqe_type; + #define SQ_UDSEND_V3_WQE_TYPE_UDSEND_V3 0x13UL + #define SQ_UDSEND_V3_WQE_TYPE_UDSEND_W_IMMED_V3 0x14UL + #define SQ_UDSEND_V3_WQE_TYPE_LAST SQ_UDSEND_V3_WQE_TYPE_UDSEND_W_IMMED_V3 + u8 flags; + #define SQ_UDSEND_V3_FLAGS_SIGNAL_COMP 0x1UL + #define SQ_UDSEND_V3_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL + #define SQ_UDSEND_V3_FLAGS_UC_FENCE 0x4UL + #define SQ_UDSEND_V3_FLAGS_SE 0x8UL + #define SQ_UDSEND_V3_FLAGS_INLINE 0x10UL + #define SQ_UDSEND_V3_FLAGS_WQE_TS_EN 0x20UL + #define SQ_UDSEND_V3_FLAGS_DEBUG_TRACE 0x40UL + u8 wqe_size; + #define SQ_UDSEND_V3_WQE_SIZE_MASK 0x3fUL + #define SQ_UDSEND_V3_WQE_SIZE_SFT 0 + u8 inline_length; + #define SQ_UDSEND_V3_INLINE_LENGTH_MASK 0xfUL + #define SQ_UDSEND_V3_INLINE_LENGTH_SFT 0 + __le32 opaque; + __le32 imm_data; + __le32 q_key; + __le32 dst_qp; + #define SQ_UDSEND_V3_DST_QP_MASK 0xffffffUL + #define SQ_UDSEND_V3_DST_QP_SFT 0 + __le32 avid; + #define SQ_UDSEND_V3_AVID_MASK 0x3ffUL + #define SQ_UDSEND_V3_AVID_SFT 0 + __le32 reserved2; + __le32 timestamp; + #define SQ_UDSEND_V3_TIMESTAMP_MASK 0xffffffUL + #define SQ_UDSEND_V3_TIMESTAMP_SFT 0 + __le32 data[24]; +}; + +/* sq_udsend_hdr_v3 (size:256b/32B) */ +struct sq_udsend_hdr_v3 { + u8 wqe_type; + #define SQ_UDSEND_HDR_V3_WQE_TYPE_UDSEND_V3 0x13UL + #define SQ_UDSEND_HDR_V3_WQE_TYPE_UDSEND_W_IMMED_V3 0x14UL + #define SQ_UDSEND_HDR_V3_WQE_TYPE_LAST SQ_UDSEND_HDR_V3_WQE_TYPE_UDSEND_W_IMMED_V3 + u8 flags; + #define SQ_UDSEND_HDR_V3_FLAGS_SIGNAL_COMP 0x1UL + #define 
SQ_UDSEND_HDR_V3_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL + #define SQ_UDSEND_HDR_V3_FLAGS_UC_FENCE 0x4UL + #define SQ_UDSEND_HDR_V3_FLAGS_SE 0x8UL + #define SQ_UDSEND_HDR_V3_FLAGS_INLINE 0x10UL + #define SQ_UDSEND_HDR_V3_FLAGS_WQE_TS_EN 0x20UL + #define SQ_UDSEND_HDR_V3_FLAGS_DEBUG_TRACE 0x40UL + u8 wqe_size; + #define SQ_UDSEND_HDR_V3_WQE_SIZE_MASK 0x3fUL + #define SQ_UDSEND_HDR_V3_WQE_SIZE_SFT 0 + u8 inline_length; + #define SQ_UDSEND_HDR_V3_INLINE_LENGTH_MASK 0xfUL + #define SQ_UDSEND_HDR_V3_INLINE_LENGTH_SFT 0 + __le32 opaque; + __le32 imm_data; + __le32 q_key; + __le32 dst_qp; + #define SQ_UDSEND_HDR_V3_DST_QP_MASK 0xffffffUL + #define SQ_UDSEND_HDR_V3_DST_QP_SFT 0 + __le32 avid; + #define SQ_UDSEND_HDR_V3_AVID_MASK 0x3ffUL + #define SQ_UDSEND_HDR_V3_AVID_SFT 0 + __le32 reserved2; + __le32 timestamp; + #define SQ_UDSEND_HDR_V3_TIMESTAMP_MASK 0xffffffUL + #define SQ_UDSEND_HDR_V3_TIMESTAMP_SFT 0 +}; + +/* sq_rdma_v3 (size:1024b/128B) */ +struct sq_rdma_v3 { + u8 wqe_type; + #define SQ_RDMA_V3_WQE_TYPE_WRITE_WQE_V3 0x15UL + #define SQ_RDMA_V3_WQE_TYPE_WRITE_W_IMMED_V3 0x16UL + #define SQ_RDMA_V3_WQE_TYPE_READ_WQE_V3 0x17UL + #define SQ_RDMA_V3_WQE_TYPE_LAST SQ_RDMA_V3_WQE_TYPE_READ_WQE_V3 + u8 flags; + #define SQ_RDMA_V3_FLAGS_SIGNAL_COMP 0x1UL + #define SQ_RDMA_V3_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL + #define SQ_RDMA_V3_FLAGS_UC_FENCE 0x4UL + #define SQ_RDMA_V3_FLAGS_SE 0x8UL + #define SQ_RDMA_V3_FLAGS_INLINE 0x10UL + #define SQ_RDMA_V3_FLAGS_WQE_TS_EN 0x20UL + #define SQ_RDMA_V3_FLAGS_DEBUG_TRACE 0x40UL + u8 wqe_size; + #define SQ_RDMA_V3_WQE_SIZE_MASK 0x3fUL + #define SQ_RDMA_V3_WQE_SIZE_SFT 0 + u8 inline_length; + #define SQ_RDMA_V3_INLINE_LENGTH_MASK 0xfUL + #define SQ_RDMA_V3_INLINE_LENGTH_SFT 0 + __le32 opaque; + __le32 imm_data; + __le32 reserved2; + __le64 remote_va; + __le32 remote_key; + __le32 timestamp; + #define SQ_RDMA_V3_TIMESTAMP_MASK 0xffffffUL + #define SQ_RDMA_V3_TIMESTAMP_SFT 0 + __le32 data[24]; +}; + +/* sq_rdma_hdr_v3 (size:256b/32B) */ +struct sq_rdma_hdr_v3 { + u8 wqe_type; + #define SQ_RDMA_HDR_V3_WQE_TYPE_WRITE_WQE_V3 0x15UL + #define SQ_RDMA_HDR_V3_WQE_TYPE_WRITE_W_IMMED_V3 0x16UL + #define SQ_RDMA_HDR_V3_WQE_TYPE_READ_WQE_V3 0x17UL + #define SQ_RDMA_HDR_V3_WQE_TYPE_LAST SQ_RDMA_HDR_V3_WQE_TYPE_READ_WQE_V3 + u8 flags; + #define SQ_RDMA_HDR_V3_FLAGS_SIGNAL_COMP 0x1UL + #define SQ_RDMA_HDR_V3_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL + #define SQ_RDMA_HDR_V3_FLAGS_UC_FENCE 0x4UL + #define SQ_RDMA_HDR_V3_FLAGS_SE 0x8UL + #define SQ_RDMA_HDR_V3_FLAGS_INLINE 0x10UL + #define SQ_RDMA_HDR_V3_FLAGS_WQE_TS_EN 0x20UL + #define SQ_RDMA_HDR_V3_FLAGS_DEBUG_TRACE 0x40UL + u8 wqe_size; + #define SQ_RDMA_HDR_V3_WQE_SIZE_MASK 0x3fUL + #define SQ_RDMA_HDR_V3_WQE_SIZE_SFT 0 + u8 inline_length; + #define SQ_RDMA_HDR_V3_INLINE_LENGTH_MASK 0xfUL + #define SQ_RDMA_HDR_V3_INLINE_LENGTH_SFT 0 + __le32 opaque; + __le32 imm_data; + __le32 reserved2; + __le64 remote_va; + __le32 remote_key; + __le32 timestamp; + #define SQ_RDMA_HDR_V3_TIMESTAMP_MASK 0xffffffUL + #define SQ_RDMA_HDR_V3_TIMESTAMP_SFT 0 +}; + +/* sq_atomic_v3 (size:448b/56B) */ +struct sq_atomic_v3 { + u8 wqe_type; + #define SQ_ATOMIC_V3_WQE_TYPE_ATOMIC_CS_V3 0x18UL + #define SQ_ATOMIC_V3_WQE_TYPE_ATOMIC_FA_V3 0x19UL + #define SQ_ATOMIC_V3_WQE_TYPE_LAST SQ_ATOMIC_V3_WQE_TYPE_ATOMIC_FA_V3 + u8 flags; + #define SQ_ATOMIC_V3_FLAGS_SIGNAL_COMP 0x1UL + #define SQ_ATOMIC_V3_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL + #define SQ_ATOMIC_V3_FLAGS_UC_FENCE 0x4UL + #define SQ_ATOMIC_V3_FLAGS_SE 0x8UL + #define SQ_ATOMIC_V3_FLAGS_INLINE 0x10UL + #define 
SQ_ATOMIC_V3_FLAGS_WQE_TS_EN 0x20UL + #define SQ_ATOMIC_V3_FLAGS_DEBUG_TRACE 0x40UL + u8 wqe_size; + #define SQ_ATOMIC_V3_WQE_SIZE_MASK 0x3fUL + #define SQ_ATOMIC_V3_WQE_SIZE_SFT 0 + u8 reserved1; + __le32 opaque; + __le32 remote_key; + __le32 reserved2; + __le64 remote_va; + __le64 swap_data; + __le64 cmp_data; + __le64 va_or_pa; + __le32 l_key; + __le32 size; +}; + +/* sq_atomic_hdr_v3 (size:320b/40B) */ +struct sq_atomic_hdr_v3 { + u8 wqe_type; + #define SQ_ATOMIC_HDR_V3_WQE_TYPE_ATOMIC_CS_V3 0x18UL + #define SQ_ATOMIC_HDR_V3_WQE_TYPE_ATOMIC_FA_V3 0x19UL + #define SQ_ATOMIC_HDR_V3_WQE_TYPE_LAST SQ_ATOMIC_HDR_V3_WQE_TYPE_ATOMIC_FA_V3 + u8 flags; + #define SQ_ATOMIC_HDR_V3_FLAGS_SIGNAL_COMP 0x1UL + #define SQ_ATOMIC_HDR_V3_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL + #define SQ_ATOMIC_HDR_V3_FLAGS_UC_FENCE 0x4UL + #define SQ_ATOMIC_HDR_V3_FLAGS_SE 0x8UL + #define SQ_ATOMIC_HDR_V3_FLAGS_INLINE 0x10UL + #define SQ_ATOMIC_HDR_V3_FLAGS_WQE_TS_EN 0x20UL + #define SQ_ATOMIC_HDR_V3_FLAGS_DEBUG_TRACE 0x40UL + u8 wqe_size; + #define SQ_ATOMIC_HDR_V3_WQE_SIZE_MASK 0x3fUL + #define SQ_ATOMIC_HDR_V3_WQE_SIZE_SFT 0 + u8 reserved1; + __le32 opaque; + __le32 remote_key; + __le32 reserved2; + __le64 remote_va; + __le64 swap_data; + __le64 cmp_data; +}; + +/* sq_localinvalidate_v3 (size:128b/16B) */ +struct sq_localinvalidate_v3 { + u8 wqe_type; + #define SQ_LOCALINVALIDATE_V3_WQE_TYPE_LOCAL_INVALID_V3 0x1aUL + #define SQ_LOCALINVALIDATE_V3_WQE_TYPE_LAST SQ_LOCALINVALIDATE_V3_WQE_TYPE_LOCAL_INVALID_V3 + u8 flags; + #define SQ_LOCALINVALIDATE_V3_FLAGS_SIGNAL_COMP 0x1UL + #define SQ_LOCALINVALIDATE_V3_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL + #define SQ_LOCALINVALIDATE_V3_FLAGS_UC_FENCE 0x4UL + #define SQ_LOCALINVALIDATE_V3_FLAGS_SE 0x8UL + #define SQ_LOCALINVALIDATE_V3_FLAGS_INLINE 0x10UL + #define SQ_LOCALINVALIDATE_V3_FLAGS_WQE_TS_EN 0x20UL + #define SQ_LOCALINVALIDATE_V3_FLAGS_DEBUG_TRACE 0x40UL + u8 wqe_size; + #define SQ_LOCALINVALIDATE_V3_WQE_SIZE_MASK 0x3fUL + #define SQ_LOCALINVALIDATE_V3_WQE_SIZE_SFT 0 + u8 reserved1; + __le32 opaque; + __le32 inv_l_key; + __le32 reserved2; +}; + +/* sq_localinvalidate_hdr_v3 (size:128b/16B) */ +struct sq_localinvalidate_hdr_v3 { + u8 wqe_type; + #define SQ_LOCALINVALIDATE_HDR_V3_WQE_TYPE_LOCAL_INVALID_V3 0x1aUL + #define SQ_LOCALINVALIDATE_HDR_V3_WQE_TYPE_LAST SQ_LOCALINVALIDATE_HDR_V3_WQE_TYPE_LOCAL_INVALID_V3 + u8 flags; + #define SQ_LOCALINVALIDATE_HDR_V3_FLAGS_SIGNAL_COMP 0x1UL + #define SQ_LOCALINVALIDATE_HDR_V3_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL + #define SQ_LOCALINVALIDATE_HDR_V3_FLAGS_UC_FENCE 0x4UL + #define SQ_LOCALINVALIDATE_HDR_V3_FLAGS_SE 0x8UL + #define SQ_LOCALINVALIDATE_HDR_V3_FLAGS_INLINE 0x10UL + #define SQ_LOCALINVALIDATE_HDR_V3_FLAGS_WQE_TS_EN 0x20UL + #define SQ_LOCALINVALIDATE_HDR_V3_FLAGS_DEBUG_TRACE 0x40UL + u8 wqe_size; + #define SQ_LOCALINVALIDATE_HDR_V3_WQE_SIZE_MASK 0x3fUL + #define SQ_LOCALINVALIDATE_HDR_V3_WQE_SIZE_SFT 0 + u8 reserved1; + __le32 opaque; + __le32 inv_l_key; + __le32 reserved2; +}; + +/* sq_fr_pmr_v3 (size:320b/40B) */ +struct sq_fr_pmr_v3 { + u8 wqe_type; + #define SQ_FR_PMR_V3_WQE_TYPE_FR_PMR_V3 0x1bUL + #define SQ_FR_PMR_V3_WQE_TYPE_LAST SQ_FR_PMR_V3_WQE_TYPE_FR_PMR_V3 + u8 flags; + #define SQ_FR_PMR_V3_FLAGS_SIGNAL_COMP 0x1UL + #define SQ_FR_PMR_V3_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL + #define SQ_FR_PMR_V3_FLAGS_UC_FENCE 0x4UL + #define SQ_FR_PMR_V3_FLAGS_SE 0x8UL + #define SQ_FR_PMR_V3_FLAGS_INLINE 0x10UL + #define SQ_FR_PMR_V3_FLAGS_WQE_TS_EN 0x20UL + #define SQ_FR_PMR_V3_FLAGS_DEBUG_TRACE 0x40UL + u8 wqe_size_zero_based; + #define 
SQ_FR_PMR_V3_WQE_SIZE_MASK 0x3fUL + #define SQ_FR_PMR_V3_WQE_SIZE_SFT 0 + #define SQ_FR_PMR_V3_ZERO_BASED 0x40UL + u8 access_cntl; + #define SQ_FR_PMR_V3_ACCESS_CNTL_LOCAL_WRITE 0x1UL + #define SQ_FR_PMR_V3_ACCESS_CNTL_REMOTE_READ 0x2UL + #define SQ_FR_PMR_V3_ACCESS_CNTL_REMOTE_WRITE 0x4UL + #define SQ_FR_PMR_V3_ACCESS_CNTL_REMOTE_ATOMIC 0x8UL + #define SQ_FR_PMR_V3_ACCESS_CNTL_WINDOW_BIND 0x10UL + __le32 opaque; + __le32 l_key; + __le16 page_size_log; + #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_MASK 0x1fUL + #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_SFT 0 + #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_4K 0x0UL + #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_8K 0x1UL + #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_16K 0x2UL + #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_32K 0x3UL + #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_64K 0x4UL + #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_128K 0x5UL + #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_256K 0x6UL + #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_512K 0x7UL + #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_1M 0x8UL + #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_2M 0x9UL + #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_4M 0xaUL + #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_8M 0xbUL + #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_16M 0xcUL + #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_32M 0xdUL + #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_64M 0xeUL + #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_128M 0xfUL + #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_256M 0x10UL + #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_512M 0x11UL + #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_1G 0x12UL + #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_2G 0x13UL + #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_4G 0x14UL + #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_8G 0x15UL + #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_16G 0x16UL + #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_32G 0x17UL + #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_64G 0x18UL + #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_128G 0x19UL + #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_256G 0x1aUL + #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_512G 0x1bUL + #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_1T 0x1cUL + #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_2T 0x1dUL + #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_4T 0x1eUL + #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_8T 0x1fUL + #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_LAST SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_8T + #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_MASK 0x3e0UL + #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_SFT 5 + #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_4K (0x0UL << 5) + #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_8K (0x1UL << 5) + #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_16K (0x2UL << 5) + #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_32K (0x3UL << 5) + #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_64K (0x4UL << 5) + #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_128K (0x5UL << 5) + #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_256K (0x6UL << 5) + #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_512K (0x7UL << 5) + #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_1M (0x8UL << 5) + #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_2M (0x9UL << 5) + #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_4M (0xaUL << 5) + #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_8M (0xbUL << 5) + #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_16M (0xcUL << 5) + #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_32M (0xdUL << 5) + #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_64M (0xeUL << 5) + #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_128M (0xfUL << 5) + #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_256M (0x10UL << 5) + #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_512M (0x11UL << 5) + #define 
SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_1G (0x12UL << 5) + #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_2G (0x13UL << 5) + #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_4G (0x14UL << 5) + #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_8G (0x15UL << 5) + #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_16G (0x16UL << 5) + #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_32G (0x17UL << 5) + #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_64G (0x18UL << 5) + #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_128G (0x19UL << 5) + #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_256G (0x1aUL << 5) + #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_512G (0x1bUL << 5) + #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_1T (0x1cUL << 5) + #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_2T (0x1dUL << 5) + #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_4T (0x1eUL << 5) + #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_8T (0x1fUL << 5) + #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_LAST SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_8T + #define SQ_FR_PMR_V3_NUMLEVELS_MASK 0xc00UL + #define SQ_FR_PMR_V3_NUMLEVELS_SFT 10 + #define SQ_FR_PMR_V3_NUMLEVELS_PHYSICAL (0x0UL << 10) + #define SQ_FR_PMR_V3_NUMLEVELS_LAYER1 (0x1UL << 10) + #define SQ_FR_PMR_V3_NUMLEVELS_LAYER2 (0x2UL << 10) + #define SQ_FR_PMR_V3_NUMLEVELS_LAST SQ_FR_PMR_V3_NUMLEVELS_LAYER2 + __le16 reserved; + __le64 va; + __le64 length; + __le64 pbl_ptr; +}; + +/* sq_fr_pmr_hdr_v3 (size:320b/40B) */ +struct sq_fr_pmr_hdr_v3 { + u8 wqe_type; + #define SQ_FR_PMR_HDR_V3_WQE_TYPE_FR_PMR_V3 0x1bUL + #define SQ_FR_PMR_HDR_V3_WQE_TYPE_LAST SQ_FR_PMR_HDR_V3_WQE_TYPE_FR_PMR_V3 + u8 flags; + #define SQ_FR_PMR_HDR_V3_FLAGS_SIGNAL_COMP 0x1UL + #define SQ_FR_PMR_HDR_V3_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL + #define SQ_FR_PMR_HDR_V3_FLAGS_UC_FENCE 0x4UL + #define SQ_FR_PMR_HDR_V3_FLAGS_SE 0x8UL + #define SQ_FR_PMR_HDR_V3_FLAGS_INLINE 0x10UL + #define SQ_FR_PMR_HDR_V3_FLAGS_WQE_TS_EN 0x20UL + #define SQ_FR_PMR_HDR_V3_FLAGS_DEBUG_TRACE 0x40UL + u8 wqe_size_zero_based; + #define SQ_FR_PMR_HDR_V3_WQE_SIZE_MASK 0x3fUL + #define SQ_FR_PMR_HDR_V3_WQE_SIZE_SFT 0 + #define SQ_FR_PMR_HDR_V3_ZERO_BASED 0x40UL + u8 access_cntl; + #define SQ_FR_PMR_HDR_V3_ACCESS_CNTL_LOCAL_WRITE 0x1UL + #define SQ_FR_PMR_HDR_V3_ACCESS_CNTL_REMOTE_READ 0x2UL + #define SQ_FR_PMR_HDR_V3_ACCESS_CNTL_REMOTE_WRITE 0x4UL + #define SQ_FR_PMR_HDR_V3_ACCESS_CNTL_REMOTE_ATOMIC 0x8UL + #define SQ_FR_PMR_HDR_V3_ACCESS_CNTL_WINDOW_BIND 0x10UL + __le32 opaque; + __le32 l_key; + __le16 page_size_log; + #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_MASK 0x1fUL + #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_SFT 0 + #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_4K 0x0UL + #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_8K 0x1UL + #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_16K 0x2UL + #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_32K 0x3UL + #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_64K 0x4UL + #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_128K 0x5UL + #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_256K 0x6UL + #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_512K 0x7UL + #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_1M 0x8UL + #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_2M 0x9UL + #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_4M 0xaUL + #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_8M 0xbUL + #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_16M 0xcUL + #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_32M 0xdUL + #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_64M 0xeUL + #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_128M 0xfUL + #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_256M 0x10UL + #define 
SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_512M 0x11UL + #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_1G 0x12UL + #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_2G 0x13UL + #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_4G 0x14UL + #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_8G 0x15UL + #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_16G 0x16UL + #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_32G 0x17UL + #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_64G 0x18UL + #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_128G 0x19UL + #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_256G 0x1aUL + #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_512G 0x1bUL + #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_1T 0x1cUL + #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_2T 0x1dUL + #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_4T 0x1eUL + #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_8T 0x1fUL + #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_LAST SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_8T + #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_MASK 0x3e0UL + #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_SFT 5 + #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_4K (0x0UL << 5) + #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_8K (0x1UL << 5) + #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_16K (0x2UL << 5) + #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_32K (0x3UL << 5) + #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_64K (0x4UL << 5) + #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_128K (0x5UL << 5) + #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_256K (0x6UL << 5) + #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_512K (0x7UL << 5) + #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_1M (0x8UL << 5) + #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_2M (0x9UL << 5) + #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_4M (0xaUL << 5) + #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_8M (0xbUL << 5) + #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_16M (0xcUL << 5) + #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_32M (0xdUL << 5) + #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_64M (0xeUL << 5) + #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_128M (0xfUL << 5) + #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_256M (0x10UL << 5) + #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_512M (0x11UL << 5) + #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_1G (0x12UL << 5) + #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_2G (0x13UL << 5) + #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_4G (0x14UL << 5) + #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_8G (0x15UL << 5) + #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_16G (0x16UL << 5) + #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_32G (0x17UL << 5) + #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_64G (0x18UL << 5) + #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_128G (0x19UL << 5) + #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_256G (0x1aUL << 5) + #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_512G (0x1bUL << 5) + #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_1T (0x1cUL << 5) + #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_2T (0x1dUL << 5) + #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_4T (0x1eUL << 5) + #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_8T (0x1fUL << 5) + #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_LAST SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_8T + #define SQ_FR_PMR_HDR_V3_NUMLEVELS_MASK 0xc00UL + #define SQ_FR_PMR_HDR_V3_NUMLEVELS_SFT 10 + #define SQ_FR_PMR_HDR_V3_NUMLEVELS_PHYSICAL (0x0UL << 10) + #define SQ_FR_PMR_HDR_V3_NUMLEVELS_LAYER1 (0x1UL << 10) + #define SQ_FR_PMR_HDR_V3_NUMLEVELS_LAYER2 (0x2UL << 10) + 
#define SQ_FR_PMR_HDR_V3_NUMLEVELS_LAST SQ_FR_PMR_HDR_V3_NUMLEVELS_LAYER2 + __le16 reserved; + __le64 va; + __le64 length; + __le64 pbl_ptr; +}; + +/* sq_bind_v3 (size:256b/32B) */ +struct sq_bind_v3 { + u8 wqe_type; + #define SQ_BIND_V3_WQE_TYPE_BIND_V3 0x1cUL + #define SQ_BIND_V3_WQE_TYPE_LAST SQ_BIND_V3_WQE_TYPE_BIND_V3 + u8 flags; + #define SQ_BIND_V3_FLAGS_SIGNAL_COMP 0x1UL + #define SQ_BIND_V3_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL + #define SQ_BIND_V3_FLAGS_UC_FENCE 0x4UL + #define SQ_BIND_V3_FLAGS_SE 0x8UL + #define SQ_BIND_V3_FLAGS_INLINE 0x10UL + #define SQ_BIND_V3_FLAGS_WQE_TS_EN 0x20UL + #define SQ_BIND_V3_FLAGS_DEBUG_TRACE 0x40UL + u8 wqe_size_zero_based_mw_type; + #define SQ_BIND_V3_WQE_SIZE_MASK 0x3fUL + #define SQ_BIND_V3_WQE_SIZE_SFT 0 + #define SQ_BIND_V3_ZERO_BASED 0x40UL + #define SQ_BIND_V3_MW_TYPE 0x80UL + #define SQ_BIND_V3__TYPE1 (0x0UL << 7) + #define SQ_BIND_V3__TYPE2 (0x1UL << 7) + #define SQ_BIND_V3__LAST SQ_BIND_V3__TYPE2 + u8 access_cntl; + #define SQ_BIND_V3_ACCESS_CNTL_LOCAL_WRITE 0x1UL + #define SQ_BIND_V3_ACCESS_CNTL_REMOTE_READ 0x2UL + #define SQ_BIND_V3_ACCESS_CNTL_REMOTE_WRITE 0x4UL + #define SQ_BIND_V3_ACCESS_CNTL_REMOTE_ATOMIC 0x8UL + #define SQ_BIND_V3_ACCESS_CNTL_WINDOW_BIND 0x10UL + __le32 opaque; + __le32 parent_l_key; + __le32 l_key; + __le64 va; + __le64 length; +}; + +/* sq_bind_hdr_v3 (size:256b/32B) */ +struct sq_bind_hdr_v3 { + u8 wqe_type; + #define SQ_BIND_HDR_V3_WQE_TYPE_BIND_V3 0x1cUL + #define SQ_BIND_HDR_V3_WQE_TYPE_LAST SQ_BIND_HDR_V3_WQE_TYPE_BIND_V3 + u8 flags; + #define SQ_BIND_HDR_V3_FLAGS_SIGNAL_COMP 0x1UL + #define SQ_BIND_HDR_V3_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL + #define SQ_BIND_HDR_V3_FLAGS_UC_FENCE 0x4UL + #define SQ_BIND_HDR_V3_FLAGS_SE 0x8UL + #define SQ_BIND_HDR_V3_FLAGS_INLINE 0x10UL + #define SQ_BIND_HDR_V3_FLAGS_WQE_TS_EN 0x20UL + #define SQ_BIND_HDR_V3_FLAGS_DEBUG_TRACE 0x40UL + u8 wqe_size_zero_based_mw_type; + #define SQ_BIND_HDR_V3_WQE_SIZE_MASK 0x3fUL + #define SQ_BIND_HDR_V3_WQE_SIZE_SFT 0 + #define SQ_BIND_HDR_V3_ZERO_BASED 0x40UL + #define SQ_BIND_HDR_V3_MW_TYPE 0x80UL + #define SQ_BIND_HDR_V3__TYPE1 (0x0UL << 7) + #define SQ_BIND_HDR_V3__TYPE2 (0x1UL << 7) + #define SQ_BIND_HDR_V3__LAST SQ_BIND_HDR_V3__TYPE2 + u8 access_cntl; + #define SQ_BIND_HDR_V3_ACCESS_CNTL_LOCAL_WRITE 0x1UL + #define SQ_BIND_HDR_V3_ACCESS_CNTL_REMOTE_READ 0x2UL + #define SQ_BIND_HDR_V3_ACCESS_CNTL_REMOTE_WRITE 0x4UL + #define SQ_BIND_HDR_V3_ACCESS_CNTL_REMOTE_ATOMIC 0x8UL + #define SQ_BIND_HDR_V3_ACCESS_CNTL_WINDOW_BIND 0x10UL + __le32 opaque; + __le32 parent_l_key; + __le32 l_key; + __le64 va; + __le64 length; +}; + +/* sq_change_udpsrcport_v3 (size:128b/16B) */ +struct sq_change_udpsrcport_v3 { + u8 wqe_type; + #define SQ_CHANGE_UDPSRCPORT_V3_WQE_TYPE_CHANGE_UDPSRCPORT_V3 0x1eUL + #define SQ_CHANGE_UDPSRCPORT_V3_WQE_TYPE_LAST SQ_CHANGE_UDPSRCPORT_V3_WQE_TYPE_CHANGE_UDPSRCPORT_V3 + u8 flags; + #define SQ_CHANGE_UDPSRCPORT_V3_FLAGS_SIGNAL_COMP 0x1UL + #define SQ_CHANGE_UDPSRCPORT_V3_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL + #define SQ_CHANGE_UDPSRCPORT_V3_FLAGS_UC_FENCE 0x4UL + #define SQ_CHANGE_UDPSRCPORT_V3_FLAGS_SE 0x8UL + #define SQ_CHANGE_UDPSRCPORT_V3_FLAGS_INLINE 0x10UL + #define SQ_CHANGE_UDPSRCPORT_V3_FLAGS_WQE_TS_EN 0x20UL + #define SQ_CHANGE_UDPSRCPORT_V3_FLAGS_DEBUG_TRACE 0x40UL + u8 wqe_size; + #define SQ_CHANGE_UDPSRCPORT_V3_WQE_SIZE_MASK 0x3fUL + #define SQ_CHANGE_UDPSRCPORT_V3_WQE_SIZE_SFT 0 + u8 reserved_1; + __le32 opaque; + __le16 udp_src_port; + __le16 reserved_2; + __le32 reserved_3; +}; + +/* sq_change_udpsrcport_hdr_v3 (size:128b/16B) 
*/ +struct sq_change_udpsrcport_hdr_v3 { + u8 wqe_type; + #define SQ_CHANGE_UDPSRCPORT_HDR_V3_WQE_TYPE_CHANGE_UDPSRCPORT_V3 0x1eUL + #define SQ_CHANGE_UDPSRCPORT_HDR_V3_WQE_TYPE_LAST SQ_CHANGE_UDPSRCPORT_HDR_V3_WQE_TYPE_CHANGE_UDPSRCPORT_V3 + u8 flags; + #define SQ_CHANGE_UDPSRCPORT_HDR_V3_FLAGS_SIGNAL_COMP 0x1UL + #define SQ_CHANGE_UDPSRCPORT_HDR_V3_FLAGS_RD_OR_ATOMIC_FENCE 0x2UL + #define SQ_CHANGE_UDPSRCPORT_HDR_V3_FLAGS_UC_FENCE 0x4UL + #define SQ_CHANGE_UDPSRCPORT_HDR_V3_FLAGS_SE 0x8UL + #define SQ_CHANGE_UDPSRCPORT_HDR_V3_FLAGS_INLINE 0x10UL + #define SQ_CHANGE_UDPSRCPORT_HDR_V3_FLAGS_WQE_TS_EN 0x20UL + #define SQ_CHANGE_UDPSRCPORT_HDR_V3_FLAGS_DEBUG_TRACE 0x40UL + u8 wqe_size; + #define SQ_CHANGE_UDPSRCPORT_HDR_V3_WQE_SIZE_MASK 0x3fUL + #define SQ_CHANGE_UDPSRCPORT_HDR_V3_WQE_SIZE_SFT 0 + u8 reserved_1; + __le32 opaque; + __le16 udp_src_port; + __le16 reserved_2; + __le32 reserved_3; +}; + +/* rq_wqe (size:1024b/128B) */ +struct rq_wqe { + u8 wqe_type; + #define RQ_WQE_WQE_TYPE_RCV 0x80UL + #define RQ_WQE_WQE_TYPE_LAST RQ_WQE_WQE_TYPE_RCV + u8 flags; + u8 wqe_size; + u8 reserved8; + __le32 reserved32; + __le32 wr_id[2]; + #define RQ_WQE_WR_ID_MASK 0xfffffUL + #define RQ_WQE_WR_ID_SFT 0 + u8 reserved128[16]; + __le32 data[24]; +}; + +/* rq_wqe_hdr (size:256b/32B) */ +struct rq_wqe_hdr { + u8 wqe_type; + #define RQ_WQE_HDR_WQE_TYPE_RCV 0x80UL + #define RQ_WQE_HDR_WQE_TYPE_LAST RQ_WQE_HDR_WQE_TYPE_RCV + u8 flags; + u8 wqe_size; + u8 reserved8; + __le32 reserved32; + __le32 wr_id[2]; + #define RQ_WQE_HDR_WR_ID_MASK 0xfffffUL + #define RQ_WQE_HDR_WR_ID_SFT 0 + u8 reserved128[16]; +}; + +/* rq_wqe_v3 (size:4096b/512B) */ +struct rq_wqe_v3 { + u8 wqe_type; + #define RQ_WQE_V3_WQE_TYPE_RCV_V3 0x90UL + #define RQ_WQE_V3_WQE_TYPE_LAST RQ_WQE_V3_WQE_TYPE_RCV_V3 + u8 flags; + u8 wqe_size; + u8 reserved1; + __le32 opaque; + __le64 reserved2; + __le32 data[124]; +}; + +/* rq_wqe_hdr_v3 (size:128b/16B) */ +struct rq_wqe_hdr_v3 { + u8 wqe_type; + #define RQ_WQE_HDR_V3_WQE_TYPE_RCV_V3 0x90UL + #define RQ_WQE_HDR_V3_WQE_TYPE_LAST RQ_WQE_HDR_V3_WQE_TYPE_RCV_V3 + u8 flags; + u8 wqe_size; + u8 reserved1; + __le32 opaque; + __le64 reserved2; +}; + +/* cq_base (size:256b/32B) */ +struct cq_base { + __le64 reserved64_1; + __le64 reserved64_2; + __le64 reserved64_3; + u8 cqe_type_toggle; + #define CQ_BASE_TOGGLE 0x1UL + #define CQ_BASE_CQE_TYPE_MASK 0x1eUL + #define CQ_BASE_CQE_TYPE_SFT 1 + #define CQ_BASE_CQE_TYPE_REQ (0x0UL << 1) + #define CQ_BASE_CQE_TYPE_RES_RC (0x1UL << 1) + #define CQ_BASE_CQE_TYPE_RES_UD (0x2UL << 1) + #define CQ_BASE_CQE_TYPE_RES_RAWETH_QP1 (0x3UL << 1) + #define CQ_BASE_CQE_TYPE_RES_UD_CFA (0x4UL << 1) + #define CQ_BASE_CQE_TYPE_REQ_V3 (0x8UL << 1) + #define CQ_BASE_CQE_TYPE_RES_RC_V3 (0x9UL << 1) + #define CQ_BASE_CQE_TYPE_RES_UD_V3 (0xaUL << 1) + #define CQ_BASE_CQE_TYPE_RES_RAWETH_QP1_V3 (0xbUL << 1) + #define CQ_BASE_CQE_TYPE_RES_UD_CFA_V3 (0xcUL << 1) + #define CQ_BASE_CQE_TYPE_NO_OP (0xdUL << 1) + #define CQ_BASE_CQE_TYPE_TERMINAL (0xeUL << 1) + #define CQ_BASE_CQE_TYPE_CUT_OFF (0xfUL << 1) + #define CQ_BASE_CQE_TYPE_LAST CQ_BASE_CQE_TYPE_CUT_OFF + u8 status; + #define CQ_BASE_STATUS_OK 0x0UL + #define CQ_BASE_STATUS_BAD_RESPONSE_ERR 0x1UL + #define CQ_BASE_STATUS_LOCAL_LENGTH_ERR 0x2UL + #define CQ_BASE_STATUS_HW_LOCAL_LENGTH_ERR 0x3UL + #define CQ_BASE_STATUS_LOCAL_QP_OPERATION_ERR 0x4UL + #define CQ_BASE_STATUS_LOCAL_PROTECTION_ERR 0x5UL + #define CQ_BASE_STATUS_LOCAL_ACCESS_ERROR 0x6UL + #define CQ_BASE_STATUS_MEMORY_MGT_OPERATION_ERR 0x7UL + #define 
CQ_BASE_STATUS_REMOTE_INVALID_REQUEST_ERR 0x8UL + #define CQ_BASE_STATUS_REMOTE_ACCESS_ERR 0x9UL + #define CQ_BASE_STATUS_REMOTE_OPERATION_ERR 0xaUL + #define CQ_BASE_STATUS_RNR_NAK_RETRY_CNT_ERR 0xbUL + #define CQ_BASE_STATUS_TRANSPORT_RETRY_CNT_ERR 0xcUL + #define CQ_BASE_STATUS_WORK_REQUEST_FLUSHED_ERR 0xdUL + #define CQ_BASE_STATUS_HW_FLUSH_ERR 0xeUL + #define CQ_BASE_STATUS_OVERFLOW_ERR 0xfUL + #define CQ_BASE_STATUS_LAST CQ_BASE_STATUS_OVERFLOW_ERR + __le16 reserved16; + __le32 opaque; +}; + +/* cq_req (size:256b/32B) */ +struct cq_req { + __le64 qp_handle; + __le16 sq_cons_idx; + __le16 reserved16_1; + __le32 reserved32_2; + __le64 reserved64; + u8 cqe_type_toggle; + #define CQ_REQ_TOGGLE 0x1UL + #define CQ_REQ_CQE_TYPE_MASK 0x1eUL + #define CQ_REQ_CQE_TYPE_SFT 1 + #define CQ_REQ_CQE_TYPE_REQ (0x0UL << 1) + #define CQ_REQ_CQE_TYPE_LAST CQ_REQ_CQE_TYPE_REQ + #define CQ_REQ_PUSH 0x20UL + u8 status; + #define CQ_REQ_STATUS_OK 0x0UL + #define CQ_REQ_STATUS_BAD_RESPONSE_ERR 0x1UL + #define CQ_REQ_STATUS_LOCAL_LENGTH_ERR 0x2UL + #define CQ_REQ_STATUS_LOCAL_QP_OPERATION_ERR 0x3UL + #define CQ_REQ_STATUS_LOCAL_PROTECTION_ERR 0x4UL + #define CQ_REQ_STATUS_MEMORY_MGT_OPERATION_ERR 0x5UL + #define CQ_REQ_STATUS_REMOTE_INVALID_REQUEST_ERR 0x6UL + #define CQ_REQ_STATUS_REMOTE_ACCESS_ERR 0x7UL + #define CQ_REQ_STATUS_REMOTE_OPERATION_ERR 0x8UL + #define CQ_REQ_STATUS_RNR_NAK_RETRY_CNT_ERR 0x9UL + #define CQ_REQ_STATUS_TRANSPORT_RETRY_CNT_ERR 0xaUL + #define CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR 0xbUL + #define CQ_REQ_STATUS_LAST CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR + __le16 reserved16_2; + __le32 reserved32_1; +}; + +/* cq_res_rc (size:256b/32B) */ +struct cq_res_rc { + __le32 length; + __le32 imm_data_or_inv_r_key; + __le64 qp_handle; + __le64 mr_handle; + u8 cqe_type_toggle; + #define CQ_RES_RC_TOGGLE 0x1UL + #define CQ_RES_RC_CQE_TYPE_MASK 0x1eUL + #define CQ_RES_RC_CQE_TYPE_SFT 1 + #define CQ_RES_RC_CQE_TYPE_RES_RC (0x1UL << 1) + #define CQ_RES_RC_CQE_TYPE_LAST CQ_RES_RC_CQE_TYPE_RES_RC + u8 status; + #define CQ_RES_RC_STATUS_OK 0x0UL + #define CQ_RES_RC_STATUS_LOCAL_ACCESS_ERROR 0x1UL + #define CQ_RES_RC_STATUS_LOCAL_LENGTH_ERR 0x2UL + #define CQ_RES_RC_STATUS_LOCAL_PROTECTION_ERR 0x3UL + #define CQ_RES_RC_STATUS_LOCAL_QP_OPERATION_ERR 0x4UL + #define CQ_RES_RC_STATUS_MEMORY_MGT_OPERATION_ERR 0x5UL + #define CQ_RES_RC_STATUS_REMOTE_INVALID_REQUEST_ERR 0x6UL + #define CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR 0x7UL + #define CQ_RES_RC_STATUS_HW_FLUSH_ERR 0x8UL + #define CQ_RES_RC_STATUS_LAST CQ_RES_RC_STATUS_HW_FLUSH_ERR + __le16 flags; + #define CQ_RES_RC_FLAGS_SRQ 0x1UL + #define CQ_RES_RC_FLAGS_SRQ_RQ 0x0UL + #define CQ_RES_RC_FLAGS_SRQ_SRQ 0x1UL + #define CQ_RES_RC_FLAGS_SRQ_LAST CQ_RES_RC_FLAGS_SRQ_SRQ + #define CQ_RES_RC_FLAGS_IMM 0x2UL + #define CQ_RES_RC_FLAGS_INV 0x4UL + #define CQ_RES_RC_FLAGS_RDMA 0x8UL + #define CQ_RES_RC_FLAGS_RDMA_SEND (0x0UL << 3) + #define CQ_RES_RC_FLAGS_RDMA_RDMA_WRITE (0x1UL << 3) + #define CQ_RES_RC_FLAGS_RDMA_LAST CQ_RES_RC_FLAGS_RDMA_RDMA_WRITE + __le32 srq_or_rq_wr_id; + #define CQ_RES_RC_SRQ_OR_RQ_WR_ID_MASK 0xfffffUL + #define CQ_RES_RC_SRQ_OR_RQ_WR_ID_SFT 0 +}; + +/* cq_res_ud (size:256b/32B) */ +struct cq_res_ud { + __le16 length; + #define CQ_RES_UD_LENGTH_MASK 0x3fffUL + #define CQ_RES_UD_LENGTH_SFT 0 + __le16 cfa_metadata; + #define CQ_RES_UD_CFA_METADATA_VID_MASK 0xfffUL + #define CQ_RES_UD_CFA_METADATA_VID_SFT 0 + #define CQ_RES_UD_CFA_METADATA_DE 0x1000UL + #define CQ_RES_UD_CFA_METADATA_PRI_MASK 0xe000UL + #define 
CQ_RES_UD_CFA_METADATA_PRI_SFT 13 + __le32 imm_data; + __le64 qp_handle; + __le16 src_mac[3]; + __le16 src_qp_low; + u8 cqe_type_toggle; + #define CQ_RES_UD_TOGGLE 0x1UL + #define CQ_RES_UD_CQE_TYPE_MASK 0x1eUL + #define CQ_RES_UD_CQE_TYPE_SFT 1 + #define CQ_RES_UD_CQE_TYPE_RES_UD (0x2UL << 1) + #define CQ_RES_UD_CQE_TYPE_LAST CQ_RES_UD_CQE_TYPE_RES_UD + u8 status; + #define CQ_RES_UD_STATUS_OK 0x0UL + #define CQ_RES_UD_STATUS_LOCAL_ACCESS_ERROR 0x1UL + #define CQ_RES_UD_STATUS_HW_LOCAL_LENGTH_ERR 0x2UL + #define CQ_RES_UD_STATUS_LOCAL_PROTECTION_ERR 0x3UL + #define CQ_RES_UD_STATUS_LOCAL_QP_OPERATION_ERR 0x4UL + #define CQ_RES_UD_STATUS_MEMORY_MGT_OPERATION_ERR 0x5UL + #define CQ_RES_UD_STATUS_WORK_REQUEST_FLUSHED_ERR 0x7UL + #define CQ_RES_UD_STATUS_HW_FLUSH_ERR 0x8UL + #define CQ_RES_UD_STATUS_LAST CQ_RES_UD_STATUS_HW_FLUSH_ERR + __le16 flags; + #define CQ_RES_UD_FLAGS_SRQ 0x1UL + #define CQ_RES_UD_FLAGS_SRQ_RQ 0x0UL + #define CQ_RES_UD_FLAGS_SRQ_SRQ 0x1UL + #define CQ_RES_UD_FLAGS_SRQ_LAST CQ_RES_UD_FLAGS_SRQ_SRQ + #define CQ_RES_UD_FLAGS_IMM 0x2UL + #define CQ_RES_UD_FLAGS_UNUSED_MASK 0xcUL + #define CQ_RES_UD_FLAGS_UNUSED_SFT 2 + #define CQ_RES_UD_FLAGS_ROCE_IP_VER_MASK 0x30UL + #define CQ_RES_UD_FLAGS_ROCE_IP_VER_SFT 4 + #define CQ_RES_UD_FLAGS_ROCE_IP_VER_V1 (0x0UL << 4) + #define CQ_RES_UD_FLAGS_ROCE_IP_VER_V2IPV4 (0x2UL << 4) + #define CQ_RES_UD_FLAGS_ROCE_IP_VER_V2IPV6 (0x3UL << 4) + #define CQ_RES_UD_FLAGS_ROCE_IP_VER_LAST CQ_RES_UD_FLAGS_ROCE_IP_VER_V2IPV6 + #define CQ_RES_UD_FLAGS_META_FORMAT_MASK 0x3c0UL + #define CQ_RES_UD_FLAGS_META_FORMAT_SFT 6 + #define CQ_RES_UD_FLAGS_META_FORMAT_NONE (0x0UL << 6) + #define CQ_RES_UD_FLAGS_META_FORMAT_VLAN (0x1UL << 6) + #define CQ_RES_UD_FLAGS_META_FORMAT_TUNNEL_ID (0x2UL << 6) + #define CQ_RES_UD_FLAGS_META_FORMAT_CHDR_DATA (0x3UL << 6) + #define CQ_RES_UD_FLAGS_META_FORMAT_HDR_OFFSET (0x4UL << 6) + #define CQ_RES_UD_FLAGS_META_FORMAT_LAST CQ_RES_UD_FLAGS_META_FORMAT_HDR_OFFSET + #define CQ_RES_UD_FLAGS_EXT_META_FORMAT_MASK 0xc00UL + #define CQ_RES_UD_FLAGS_EXT_META_FORMAT_SFT 10 + __le32 src_qp_high_srq_or_rq_wr_id; + #define CQ_RES_UD_SRQ_OR_RQ_WR_ID_MASK 0xfffffUL + #define CQ_RES_UD_SRQ_OR_RQ_WR_ID_SFT 0 + #define CQ_RES_UD_SRC_QP_HIGH_MASK 0xff000000UL + #define CQ_RES_UD_SRC_QP_HIGH_SFT 24 +}; + +/* cq_res_ud_v2 (size:256b/32B) */ +struct cq_res_ud_v2 { + __le16 length; + #define CQ_RES_UD_V2_LENGTH_MASK 0x3fffUL + #define CQ_RES_UD_V2_LENGTH_SFT 0 + __le16 cfa_metadata0; + #define CQ_RES_UD_V2_CFA_METADATA0_VID_MASK 0xfffUL + #define CQ_RES_UD_V2_CFA_METADATA0_VID_SFT 0 + #define CQ_RES_UD_V2_CFA_METADATA0_DE 0x1000UL + #define CQ_RES_UD_V2_CFA_METADATA0_PRI_MASK 0xe000UL + #define CQ_RES_UD_V2_CFA_METADATA0_PRI_SFT 13 + __le32 imm_data; + __le64 qp_handle; + __le16 src_mac[3]; + __le16 src_qp_low; + u8 cqe_type_toggle; + #define CQ_RES_UD_V2_TOGGLE 0x1UL + #define CQ_RES_UD_V2_CQE_TYPE_MASK 0x1eUL + #define CQ_RES_UD_V2_CQE_TYPE_SFT 1 + #define CQ_RES_UD_V2_CQE_TYPE_RES_UD (0x2UL << 1) + #define CQ_RES_UD_V2_CQE_TYPE_LAST CQ_RES_UD_V2_CQE_TYPE_RES_UD + u8 status; + #define CQ_RES_UD_V2_STATUS_OK 0x0UL + #define CQ_RES_UD_V2_STATUS_LOCAL_ACCESS_ERROR 0x1UL + #define CQ_RES_UD_V2_STATUS_HW_LOCAL_LENGTH_ERR 0x2UL + #define CQ_RES_UD_V2_STATUS_LOCAL_PROTECTION_ERR 0x3UL + #define CQ_RES_UD_V2_STATUS_LOCAL_QP_OPERATION_ERR 0x4UL + #define CQ_RES_UD_V2_STATUS_MEMORY_MGT_OPERATION_ERR 0x5UL + #define CQ_RES_UD_V2_STATUS_WORK_REQUEST_FLUSHED_ERR 0x7UL + #define CQ_RES_UD_V2_STATUS_HW_FLUSH_ERR 0x8UL + #define CQ_RES_UD_V2_STATUS_LAST 
CQ_RES_UD_V2_STATUS_HW_FLUSH_ERR + __le16 flags; + #define CQ_RES_UD_V2_FLAGS_SRQ 0x1UL + #define CQ_RES_UD_V2_FLAGS_SRQ_RQ 0x0UL + #define CQ_RES_UD_V2_FLAGS_SRQ_SRQ 0x1UL + #define CQ_RES_UD_V2_FLAGS_SRQ_LAST CQ_RES_UD_V2_FLAGS_SRQ_SRQ + #define CQ_RES_UD_V2_FLAGS_IMM 0x2UL + #define CQ_RES_UD_V2_FLAGS_UNUSED_MASK 0xcUL + #define CQ_RES_UD_V2_FLAGS_UNUSED_SFT 2 + #define CQ_RES_UD_V2_FLAGS_ROCE_IP_VER_MASK 0x30UL + #define CQ_RES_UD_V2_FLAGS_ROCE_IP_VER_SFT 4 + #define CQ_RES_UD_V2_FLAGS_ROCE_IP_VER_V1 (0x0UL << 4) + #define CQ_RES_UD_V2_FLAGS_ROCE_IP_VER_V2IPV4 (0x2UL << 4) + #define CQ_RES_UD_V2_FLAGS_ROCE_IP_VER_V2IPV6 (0x3UL << 4) + #define CQ_RES_UD_V2_FLAGS_ROCE_IP_VER_LAST CQ_RES_UD_V2_FLAGS_ROCE_IP_VER_V2IPV6 + #define CQ_RES_UD_V2_FLAGS_META_FORMAT_MASK 0x3c0UL + #define CQ_RES_UD_V2_FLAGS_META_FORMAT_SFT 6 + #define CQ_RES_UD_V2_FLAGS_META_FORMAT_NONE (0x0UL << 6) + #define CQ_RES_UD_V2_FLAGS_META_FORMAT_ACT_REC_PTR (0x1UL << 6) + #define CQ_RES_UD_V2_FLAGS_META_FORMAT_TUNNEL_ID (0x2UL << 6) + #define CQ_RES_UD_V2_FLAGS_META_FORMAT_CHDR_DATA (0x3UL << 6) + #define CQ_RES_UD_V2_FLAGS_META_FORMAT_HDR_OFFSET (0x4UL << 6) + #define CQ_RES_UD_V2_FLAGS_META_FORMAT_LAST CQ_RES_UD_V2_FLAGS_META_FORMAT_HDR_OFFSET + __le32 src_qp_high_srq_or_rq_wr_id; + #define CQ_RES_UD_V2_SRQ_OR_RQ_WR_ID_MASK 0xfffffUL + #define CQ_RES_UD_V2_SRQ_OR_RQ_WR_ID_SFT 0 + #define CQ_RES_UD_V2_CFA_METADATA1_MASK 0xf00000UL + #define CQ_RES_UD_V2_CFA_METADATA1_SFT 20 + #define CQ_RES_UD_V2_CFA_METADATA1_TPID_SEL_MASK 0x700000UL + #define CQ_RES_UD_V2_CFA_METADATA1_TPID_SEL_SFT 20 + #define CQ_RES_UD_V2_CFA_METADATA1_TPID_SEL_TPID88A8 (0x0UL << 20) + #define CQ_RES_UD_V2_CFA_METADATA1_TPID_SEL_TPID8100 (0x1UL << 20) + #define CQ_RES_UD_V2_CFA_METADATA1_TPID_SEL_TPID9100 (0x2UL << 20) + #define CQ_RES_UD_V2_CFA_METADATA1_TPID_SEL_TPID9200 (0x3UL << 20) + #define CQ_RES_UD_V2_CFA_METADATA1_TPID_SEL_TPID9300 (0x4UL << 20) + #define CQ_RES_UD_V2_CFA_METADATA1_TPID_SEL_TPIDCFG (0x5UL << 20) + #define CQ_RES_UD_V2_CFA_METADATA1_TPID_SEL_LAST CQ_RES_UD_V2_CFA_METADATA1_TPID_SEL_TPIDCFG + #define CQ_RES_UD_V2_CFA_METADATA1_VALID 0x800000UL + #define CQ_RES_UD_V2_SRC_QP_HIGH_MASK 0xff000000UL + #define CQ_RES_UD_V2_SRC_QP_HIGH_SFT 24 +}; + +/* cq_res_ud_cfa (size:256b/32B) */ +struct cq_res_ud_cfa { + __le16 length; + #define CQ_RES_UD_CFA_LENGTH_MASK 0x3fffUL + #define CQ_RES_UD_CFA_LENGTH_SFT 0 + __le16 cfa_code; + __le32 imm_data; + __le32 qid; + #define CQ_RES_UD_CFA_QID_MASK 0xfffffUL + #define CQ_RES_UD_CFA_QID_SFT 0 + __le32 cfa_metadata; + #define CQ_RES_UD_CFA_CFA_METADATA_VID_MASK 0xfffUL + #define CQ_RES_UD_CFA_CFA_METADATA_VID_SFT 0 + #define CQ_RES_UD_CFA_CFA_METADATA_DE 0x1000UL + #define CQ_RES_UD_CFA_CFA_METADATA_PRI_MASK 0xe000UL + #define CQ_RES_UD_CFA_CFA_METADATA_PRI_SFT 13 + #define CQ_RES_UD_CFA_CFA_METADATA_TPID_MASK 0xffff0000UL + #define CQ_RES_UD_CFA_CFA_METADATA_TPID_SFT 16 + __le16 src_mac[3]; + __le16 src_qp_low; + u8 cqe_type_toggle; + #define CQ_RES_UD_CFA_TOGGLE 0x1UL + #define CQ_RES_UD_CFA_CQE_TYPE_MASK 0x1eUL + #define CQ_RES_UD_CFA_CQE_TYPE_SFT 1 + #define CQ_RES_UD_CFA_CQE_TYPE_RES_UD_CFA (0x4UL << 1) + #define CQ_RES_UD_CFA_CQE_TYPE_LAST CQ_RES_UD_CFA_CQE_TYPE_RES_UD_CFA + u8 status; + #define CQ_RES_UD_CFA_STATUS_OK 0x0UL + #define CQ_RES_UD_CFA_STATUS_LOCAL_ACCESS_ERROR 0x1UL + #define CQ_RES_UD_CFA_STATUS_HW_LOCAL_LENGTH_ERR 0x2UL + #define CQ_RES_UD_CFA_STATUS_LOCAL_PROTECTION_ERR 0x3UL + #define CQ_RES_UD_CFA_STATUS_LOCAL_QP_OPERATION_ERR 0x4UL + #define 
CQ_RES_UD_CFA_STATUS_MEMORY_MGT_OPERATION_ERR 0x5UL + #define CQ_RES_UD_CFA_STATUS_WORK_REQUEST_FLUSHED_ERR 0x7UL + #define CQ_RES_UD_CFA_STATUS_HW_FLUSH_ERR 0x8UL + #define CQ_RES_UD_CFA_STATUS_LAST CQ_RES_UD_CFA_STATUS_HW_FLUSH_ERR + __le16 flags; + #define CQ_RES_UD_CFA_FLAGS_SRQ 0x1UL + #define CQ_RES_UD_CFA_FLAGS_SRQ_RQ 0x0UL + #define CQ_RES_UD_CFA_FLAGS_SRQ_SRQ 0x1UL + #define CQ_RES_UD_CFA_FLAGS_SRQ_LAST CQ_RES_UD_CFA_FLAGS_SRQ_SRQ + #define CQ_RES_UD_CFA_FLAGS_IMM 0x2UL + #define CQ_RES_UD_CFA_FLAGS_UNUSED_MASK 0xcUL + #define CQ_RES_UD_CFA_FLAGS_UNUSED_SFT 2 + #define CQ_RES_UD_CFA_FLAGS_ROCE_IP_VER_MASK 0x30UL + #define CQ_RES_UD_CFA_FLAGS_ROCE_IP_VER_SFT 4 + #define CQ_RES_UD_CFA_FLAGS_ROCE_IP_VER_V1 (0x0UL << 4) + #define CQ_RES_UD_CFA_FLAGS_ROCE_IP_VER_V2IPV4 (0x2UL << 4) + #define CQ_RES_UD_CFA_FLAGS_ROCE_IP_VER_V2IPV6 (0x3UL << 4) + #define CQ_RES_UD_CFA_FLAGS_ROCE_IP_VER_LAST CQ_RES_UD_CFA_FLAGS_ROCE_IP_VER_V2IPV6 + #define CQ_RES_UD_CFA_FLAGS_META_FORMAT_MASK 0x3c0UL + #define CQ_RES_UD_CFA_FLAGS_META_FORMAT_SFT 6 + #define CQ_RES_UD_CFA_FLAGS_META_FORMAT_NONE (0x0UL << 6) + #define CQ_RES_UD_CFA_FLAGS_META_FORMAT_VLAN (0x1UL << 6) + #define CQ_RES_UD_CFA_FLAGS_META_FORMAT_TUNNEL_ID (0x2UL << 6) + #define CQ_RES_UD_CFA_FLAGS_META_FORMAT_CHDR_DATA (0x3UL << 6) + #define CQ_RES_UD_CFA_FLAGS_META_FORMAT_HDR_OFFSET (0x4UL << 6) + #define CQ_RES_UD_CFA_FLAGS_META_FORMAT_LAST CQ_RES_UD_CFA_FLAGS_META_FORMAT_HDR_OFFSET + #define CQ_RES_UD_CFA_FLAGS_EXT_META_FORMAT_MASK 0xc00UL + #define CQ_RES_UD_CFA_FLAGS_EXT_META_FORMAT_SFT 10 + __le32 src_qp_high_srq_or_rq_wr_id; + #define CQ_RES_UD_CFA_SRQ_OR_RQ_WR_ID_MASK 0xfffffUL + #define CQ_RES_UD_CFA_SRQ_OR_RQ_WR_ID_SFT 0 + #define CQ_RES_UD_CFA_SRC_QP_HIGH_MASK 0xff000000UL + #define CQ_RES_UD_CFA_SRC_QP_HIGH_SFT 24 +}; + +/* cq_res_ud_cfa_v2 (size:256b/32B) */ +struct cq_res_ud_cfa_v2 { + __le16 length; + #define CQ_RES_UD_CFA_V2_LENGTH_MASK 0x3fffUL + #define CQ_RES_UD_CFA_V2_LENGTH_SFT 0 + __le16 cfa_metadata0; + #define CQ_RES_UD_CFA_V2_CFA_METADATA0_VID_MASK 0xfffUL + #define CQ_RES_UD_CFA_V2_CFA_METADATA0_VID_SFT 0 + #define CQ_RES_UD_CFA_V2_CFA_METADATA0_DE 0x1000UL + #define CQ_RES_UD_CFA_V2_CFA_METADATA0_PRI_MASK 0xe000UL + #define CQ_RES_UD_CFA_V2_CFA_METADATA0_PRI_SFT 13 + __le32 imm_data; + __le32 qid; + #define CQ_RES_UD_CFA_V2_QID_MASK 0xfffffUL + #define CQ_RES_UD_CFA_V2_QID_SFT 0 + __le32 cfa_metadata2; + __le16 src_mac[3]; + __le16 src_qp_low; + u8 cqe_type_toggle; + #define CQ_RES_UD_CFA_V2_TOGGLE 0x1UL + #define CQ_RES_UD_CFA_V2_CQE_TYPE_MASK 0x1eUL + #define CQ_RES_UD_CFA_V2_CQE_TYPE_SFT 1 + #define CQ_RES_UD_CFA_V2_CQE_TYPE_RES_UD_CFA (0x4UL << 1) + #define CQ_RES_UD_CFA_V2_CQE_TYPE_LAST CQ_RES_UD_CFA_V2_CQE_TYPE_RES_UD_CFA + u8 status; + #define CQ_RES_UD_CFA_V2_STATUS_OK 0x0UL + #define CQ_RES_UD_CFA_V2_STATUS_LOCAL_ACCESS_ERROR 0x1UL + #define CQ_RES_UD_CFA_V2_STATUS_HW_LOCAL_LENGTH_ERR 0x2UL + #define CQ_RES_UD_CFA_V2_STATUS_LOCAL_PROTECTION_ERR 0x3UL + #define CQ_RES_UD_CFA_V2_STATUS_LOCAL_QP_OPERATION_ERR 0x4UL + #define CQ_RES_UD_CFA_V2_STATUS_MEMORY_MGT_OPERATION_ERR 0x5UL + #define CQ_RES_UD_CFA_V2_STATUS_WORK_REQUEST_FLUSHED_ERR 0x7UL + #define CQ_RES_UD_CFA_V2_STATUS_HW_FLUSH_ERR 0x8UL + #define CQ_RES_UD_CFA_V2_STATUS_LAST CQ_RES_UD_CFA_V2_STATUS_HW_FLUSH_ERR + __le16 flags; + #define CQ_RES_UD_CFA_V2_FLAGS_SRQ 0x1UL + #define CQ_RES_UD_CFA_V2_FLAGS_SRQ_RQ 0x0UL + #define CQ_RES_UD_CFA_V2_FLAGS_SRQ_SRQ 0x1UL + #define CQ_RES_UD_CFA_V2_FLAGS_SRQ_LAST CQ_RES_UD_CFA_V2_FLAGS_SRQ_SRQ + #define 
CQ_RES_UD_CFA_V2_FLAGS_IMM 0x2UL + #define CQ_RES_UD_CFA_V2_FLAGS_UNUSED_MASK 0xcUL + #define CQ_RES_UD_CFA_V2_FLAGS_UNUSED_SFT 2 + #define CQ_RES_UD_CFA_V2_FLAGS_ROCE_IP_VER_MASK 0x30UL + #define CQ_RES_UD_CFA_V2_FLAGS_ROCE_IP_VER_SFT 4 + #define CQ_RES_UD_CFA_V2_FLAGS_ROCE_IP_VER_V1 (0x0UL << 4) + #define CQ_RES_UD_CFA_V2_FLAGS_ROCE_IP_VER_V2IPV4 (0x2UL << 4) + #define CQ_RES_UD_CFA_V2_FLAGS_ROCE_IP_VER_V2IPV6 (0x3UL << 4) + #define CQ_RES_UD_CFA_V2_FLAGS_ROCE_IP_VER_LAST CQ_RES_UD_CFA_V2_FLAGS_ROCE_IP_VER_V2IPV6 + #define CQ_RES_UD_CFA_V2_FLAGS_META_FORMAT_MASK 0x3c0UL + #define CQ_RES_UD_CFA_V2_FLAGS_META_FORMAT_SFT 6 + #define CQ_RES_UD_CFA_V2_FLAGS_META_FORMAT_NONE (0x0UL << 6) + #define CQ_RES_UD_CFA_V2_FLAGS_META_FORMAT_ACT_REC_PTR (0x1UL << 6) + #define CQ_RES_UD_CFA_V2_FLAGS_META_FORMAT_TUNNEL_ID (0x2UL << 6) + #define CQ_RES_UD_CFA_V2_FLAGS_META_FORMAT_CHDR_DATA (0x3UL << 6) + #define CQ_RES_UD_CFA_V2_FLAGS_META_FORMAT_HDR_OFFSET (0x4UL << 6) + #define CQ_RES_UD_CFA_V2_FLAGS_META_FORMAT_LAST CQ_RES_UD_CFA_V2_FLAGS_META_FORMAT_HDR_OFFSET + __le32 src_qp_high_srq_or_rq_wr_id; + #define CQ_RES_UD_CFA_V2_SRQ_OR_RQ_WR_ID_MASK 0xfffffUL + #define CQ_RES_UD_CFA_V2_SRQ_OR_RQ_WR_ID_SFT 0 + #define CQ_RES_UD_CFA_V2_CFA_METADATA1_MASK 0xf00000UL + #define CQ_RES_UD_CFA_V2_CFA_METADATA1_SFT 20 + #define CQ_RES_UD_CFA_V2_CFA_METADATA1_TPID_SEL_MASK 0x700000UL + #define CQ_RES_UD_CFA_V2_CFA_METADATA1_TPID_SEL_SFT 20 + #define CQ_RES_UD_CFA_V2_CFA_METADATA1_TPID_SEL_TPID88A8 (0x0UL << 20) + #define CQ_RES_UD_CFA_V2_CFA_METADATA1_TPID_SEL_TPID8100 (0x1UL << 20) + #define CQ_RES_UD_CFA_V2_CFA_METADATA1_TPID_SEL_TPID9100 (0x2UL << 20) + #define CQ_RES_UD_CFA_V2_CFA_METADATA1_TPID_SEL_TPID9200 (0x3UL << 20) + #define CQ_RES_UD_CFA_V2_CFA_METADATA1_TPID_SEL_TPID9300 (0x4UL << 20) + #define CQ_RES_UD_CFA_V2_CFA_METADATA1_TPID_SEL_TPIDCFG (0x5UL << 20) + #define CQ_RES_UD_CFA_V2_CFA_METADATA1_TPID_SEL_LAST CQ_RES_UD_CFA_V2_CFA_METADATA1_TPID_SEL_TPIDCFG + #define CQ_RES_UD_CFA_V2_CFA_METADATA1_VALID 0x800000UL + #define CQ_RES_UD_CFA_V2_SRC_QP_HIGH_MASK 0xff000000UL + #define CQ_RES_UD_CFA_V2_SRC_QP_HIGH_SFT 24 +}; + +/* cq_res_raweth_qp1 (size:256b/32B) */ +struct cq_res_raweth_qp1 { + __le16 length; + #define CQ_RES_RAWETH_QP1_LENGTH_MASK 0x3fffUL + #define CQ_RES_RAWETH_QP1_LENGTH_SFT 0 + __le16 raweth_qp1_flags; + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_MASK 0x3ffUL + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_SFT 0 + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ERROR 0x1UL + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_MASK 0x3c0UL + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_SFT 6 + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_NOT_KNOWN (0x0UL << 6) + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_IP (0x1UL << 6) + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_TCP (0x2UL << 6) + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_UDP (0x3UL << 6) + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_FCOE (0x4UL << 6) + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE (0x5UL << 6) + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ICMP (0x7UL << 6) + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_PTP_WO_TIMESTAMP (0x8UL << 6) + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_PTP_W_TIMESTAMP (0x9UL << 6) + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_LAST CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_PTP_W_TIMESTAMP + __le16 raweth_qp1_errors; + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_IP_CS_ERROR 0x10UL + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_L4_CS_ERROR 0x20UL + 
#define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_T_IP_CS_ERROR 0x40UL + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_T_L4_CS_ERROR 0x80UL + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_CRC_ERROR 0x100UL + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_T_PKT_ERROR_MASK 0xe00UL + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_T_PKT_ERROR_SFT 9 + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_T_PKT_ERROR_NO_ERROR (0x0UL << 9) + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_L3_BAD_VERSION (0x1UL << 9) + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_L3_BAD_HDR_LEN (0x2UL << 9) + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_T_PKT_ERROR_TUNNEL_TOTAL_ERROR (0x3UL << 9) + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_IP_TOTAL_ERROR (0x4UL << 9) + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_UDP_TOTAL_ERROR (0x5UL << 9) + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL (0x6UL << 9) + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_T_PKT_ERROR_LAST CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_PKT_ERROR_MASK 0xf000UL + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_PKT_ERROR_SFT 12 + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_PKT_ERROR_NO_ERROR (0x0UL << 12) + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_PKT_ERROR_L3_BAD_VERSION (0x1UL << 12) + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_PKT_ERROR_L3_BAD_HDR_LEN (0x2UL << 12) + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_PKT_ERROR_L3_BAD_TTL (0x3UL << 12) + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_PKT_ERROR_IP_TOTAL_ERROR (0x4UL << 12) + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_PKT_ERROR_UDP_TOTAL_ERROR (0x5UL << 12) + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN (0x6UL << 12) + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN_TOO_SMALL (0x7UL << 12) + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN (0x8UL << 12) + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_PKT_ERROR_LAST CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN + __le16 raweth_qp1_cfa_code; + __le64 qp_handle; + __le32 raweth_qp1_flags2; + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_CS_CALC 0x1UL + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_L4_CS_CALC 0x2UL + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_T_IP_CS_CALC 0x4UL + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_T_L4_CS_CALC 0x8UL + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_MASK 0xf0UL + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_SFT 4 + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_NONE (0x0UL << 4) + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_VLAN (0x1UL << 4) + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_TUNNEL_ID (0x2UL << 4) + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_CHDR_DATA (0x3UL << 4) + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_HDR_OFFSET (0x4UL << 4) + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_LAST CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_HDR_OFFSET + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_TYPE 0x100UL + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_COMPLETE_CHECKSUM_CALC 0x200UL + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_EXT_META_FORMAT_MASK 0xc00UL + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_EXT_META_FORMAT_SFT 10 + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_COMPLETE_CHECKSUM_MASK 0xffff0000UL + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_COMPLETE_CHECKSUM_SFT 16 + __le32 raweth_qp1_metadata; + #define 
CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_DE_VID_MASK 0xffffUL + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_DE_VID_SFT 0 + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_VID_MASK 0xfffUL + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_VID_SFT 0 + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_DE 0x1000UL + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_MASK 0xe000UL + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_SFT 13 + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_MASK 0xffff0000UL + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_SFT 16 + u8 cqe_type_toggle; + #define CQ_RES_RAWETH_QP1_TOGGLE 0x1UL + #define CQ_RES_RAWETH_QP1_CQE_TYPE_MASK 0x1eUL + #define CQ_RES_RAWETH_QP1_CQE_TYPE_SFT 1 + #define CQ_RES_RAWETH_QP1_CQE_TYPE_RES_RAWETH_QP1 (0x3UL << 1) + #define CQ_RES_RAWETH_QP1_CQE_TYPE_LAST CQ_RES_RAWETH_QP1_CQE_TYPE_RES_RAWETH_QP1 + u8 status; + #define CQ_RES_RAWETH_QP1_STATUS_OK 0x0UL + #define CQ_RES_RAWETH_QP1_STATUS_LOCAL_ACCESS_ERROR 0x1UL + #define CQ_RES_RAWETH_QP1_STATUS_HW_LOCAL_LENGTH_ERR 0x2UL + #define CQ_RES_RAWETH_QP1_STATUS_LOCAL_PROTECTION_ERR 0x3UL + #define CQ_RES_RAWETH_QP1_STATUS_LOCAL_QP_OPERATION_ERR 0x4UL + #define CQ_RES_RAWETH_QP1_STATUS_MEMORY_MGT_OPERATION_ERR 0x5UL + #define CQ_RES_RAWETH_QP1_STATUS_WORK_REQUEST_FLUSHED_ERR 0x7UL + #define CQ_RES_RAWETH_QP1_STATUS_HW_FLUSH_ERR 0x8UL + #define CQ_RES_RAWETH_QP1_STATUS_LAST CQ_RES_RAWETH_QP1_STATUS_HW_FLUSH_ERR + __le16 flags; + #define CQ_RES_RAWETH_QP1_FLAGS_SRQ 0x1UL + #define CQ_RES_RAWETH_QP1_FLAGS_SRQ_RQ 0x0UL + #define CQ_RES_RAWETH_QP1_FLAGS_SRQ_SRQ 0x1UL + #define CQ_RES_RAWETH_QP1_FLAGS_SRQ_LAST CQ_RES_RAWETH_QP1_FLAGS_SRQ_SRQ + __le32 raweth_qp1_payload_offset_srq_or_rq_wr_id; + #define CQ_RES_RAWETH_QP1_SRQ_OR_RQ_WR_ID_MASK 0xfffffUL + #define CQ_RES_RAWETH_QP1_SRQ_OR_RQ_WR_ID_SFT 0 + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_PAYLOAD_OFFSET_MASK 0xff000000UL + #define CQ_RES_RAWETH_QP1_RAWETH_QP1_PAYLOAD_OFFSET_SFT 24 +}; + +/* cq_res_raweth_qp1_v2 (size:256b/32B) */ +struct cq_res_raweth_qp1_v2 { + __le16 length; + #define CQ_RES_RAWETH_QP1_V2_LENGTH_MASK 0x3fffUL + #define CQ_RES_RAWETH_QP1_V2_LENGTH_SFT 0 + __le16 raweth_qp1_flags; + #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS_MASK 0x3ffUL + #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS_SFT 0 + #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS_ERROR 0x1UL + #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS_ITYPE_MASK 0x3c0UL + #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS_ITYPE_SFT 6 + #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS_ITYPE_NOT_KNOWN (0x0UL << 6) + #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS_ITYPE_IP (0x1UL << 6) + #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS_ITYPE_TCP (0x2UL << 6) + #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS_ITYPE_UDP (0x3UL << 6) + #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS_ITYPE_FCOE (0x4UL << 6) + #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS_ITYPE_ROCE (0x5UL << 6) + #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS_ITYPE_ICMP (0x7UL << 6) + #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS_ITYPE_PTP_WO_TIMESTAMP (0x8UL << 6) + #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS_ITYPE_PTP_W_TIMESTAMP (0x9UL << 6) + #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS_ITYPE_LAST CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS_ITYPE_PTP_W_TIMESTAMP + __le16 raweth_qp1_errors; + #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_IP_CS_ERROR 0x10UL + #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_L4_CS_ERROR 0x20UL + #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_T_IP_CS_ERROR 0x40UL + #define 
CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_T_L4_CS_ERROR 0x80UL + #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_CRC_ERROR 0x100UL + #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_T_PKT_ERROR_MASK 0xe00UL + #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_T_PKT_ERROR_SFT 9 + #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_T_PKT_ERROR_NO_ERROR (0x0UL << 9) + #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_L3_BAD_VERSION (0x1UL << 9) + #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_L3_BAD_HDR_LEN (0x2UL << 9) + #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_T_PKT_ERROR_TUNNEL_TOTAL_ERROR (0x3UL << 9) + #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_IP_TOTAL_ERROR (0x4UL << 9) + #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_UDP_TOTAL_ERROR (0x5UL << 9) + #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL (0x6UL << 9) + #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_T_PKT_ERROR_LAST CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL + #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_PKT_ERROR_MASK 0xf000UL + #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_PKT_ERROR_SFT 12 + #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_PKT_ERROR_NO_ERROR (0x0UL << 12) + #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_PKT_ERROR_L3_BAD_VERSION (0x1UL << 12) + #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_PKT_ERROR_L3_BAD_HDR_LEN (0x2UL << 12) + #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_PKT_ERROR_L3_BAD_TTL (0x3UL << 12) + #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_PKT_ERROR_IP_TOTAL_ERROR (0x4UL << 12) + #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_PKT_ERROR_UDP_TOTAL_ERROR (0x5UL << 12) + #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN (0x6UL << 12) + #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN_TOO_SMALL (0x7UL << 12) + #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN (0x8UL << 12) + #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_PKT_ERROR_LAST CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN + __le16 cfa_metadata0; + #define CQ_RES_RAWETH_QP1_V2_CFA_METADATA0_VID_MASK 0xfffUL + #define CQ_RES_RAWETH_QP1_V2_CFA_METADATA0_VID_SFT 0 + #define CQ_RES_RAWETH_QP1_V2_CFA_METADATA0_DE 0x1000UL + #define CQ_RES_RAWETH_QP1_V2_CFA_METADATA0_PRI_MASK 0xe000UL + #define CQ_RES_RAWETH_QP1_V2_CFA_METADATA0_PRI_SFT 13 + __le64 qp_handle; + __le32 raweth_qp1_flags2; + #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS2_CS_ALL_OK_MODE 0x8UL + #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS2_META_FORMAT_MASK 0xf0UL + #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS2_META_FORMAT_SFT 4 + #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS2_META_FORMAT_NONE (0x0UL << 4) + #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS2_META_FORMAT_ACT_REC_PTR (0x1UL << 4) + #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS2_META_FORMAT_TUNNEL_ID (0x2UL << 4) + #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS2_META_FORMAT_CHDR_DATA (0x3UL << 4) + #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS2_META_FORMAT_HDR_OFFSET (0x4UL << 4) + #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS2_META_FORMAT_LAST CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS2_META_FORMAT_HDR_OFFSET + #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS2_IP_TYPE 0x100UL + #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS2_COMPLETE_CHECKSUM_CALC 0x200UL + #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS2_CS_OK_MASK 0xfc00UL + #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS2_CS_OK_SFT 10 + #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS2_COMPLETE_CHECKSUM_MASK 
0xffff0000UL + #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS2_COMPLETE_CHECKSUM_SFT 16 + __le32 cfa_metadata2; + u8 cqe_type_toggle; + #define CQ_RES_RAWETH_QP1_V2_TOGGLE 0x1UL + #define CQ_RES_RAWETH_QP1_V2_CQE_TYPE_MASK 0x1eUL + #define CQ_RES_RAWETH_QP1_V2_CQE_TYPE_SFT 1 + #define CQ_RES_RAWETH_QP1_V2_CQE_TYPE_RES_RAWETH_QP1 (0x3UL << 1) + #define CQ_RES_RAWETH_QP1_V2_CQE_TYPE_LAST CQ_RES_RAWETH_QP1_V2_CQE_TYPE_RES_RAWETH_QP1 + u8 status; + #define CQ_RES_RAWETH_QP1_V2_STATUS_OK 0x0UL + #define CQ_RES_RAWETH_QP1_V2_STATUS_LOCAL_ACCESS_ERROR 0x1UL + #define CQ_RES_RAWETH_QP1_V2_STATUS_HW_LOCAL_LENGTH_ERR 0x2UL + #define CQ_RES_RAWETH_QP1_V2_STATUS_LOCAL_PROTECTION_ERR 0x3UL + #define CQ_RES_RAWETH_QP1_V2_STATUS_LOCAL_QP_OPERATION_ERR 0x4UL + #define CQ_RES_RAWETH_QP1_V2_STATUS_MEMORY_MGT_OPERATION_ERR 0x5UL + #define CQ_RES_RAWETH_QP1_V2_STATUS_WORK_REQUEST_FLUSHED_ERR 0x7UL + #define CQ_RES_RAWETH_QP1_V2_STATUS_HW_FLUSH_ERR 0x8UL + #define CQ_RES_RAWETH_QP1_V2_STATUS_LAST CQ_RES_RAWETH_QP1_V2_STATUS_HW_FLUSH_ERR + __le16 flags; + #define CQ_RES_RAWETH_QP1_V2_FLAGS_SRQ 0x1UL + #define CQ_RES_RAWETH_QP1_V2_FLAGS_SRQ_RQ 0x0UL + #define CQ_RES_RAWETH_QP1_V2_FLAGS_SRQ_SRQ 0x1UL + #define CQ_RES_RAWETH_QP1_V2_FLAGS_SRQ_LAST CQ_RES_RAWETH_QP1_V2_FLAGS_SRQ_SRQ + __le32 raweth_qp1_payload_offset_srq_or_rq_wr_id; + #define CQ_RES_RAWETH_QP1_V2_SRQ_OR_RQ_WR_ID_MASK 0xfffffUL + #define CQ_RES_RAWETH_QP1_V2_SRQ_OR_RQ_WR_ID_SFT 0 + #define CQ_RES_RAWETH_QP1_V2_CFA_METADATA1_MASK 0xf00000UL + #define CQ_RES_RAWETH_QP1_V2_CFA_METADATA1_SFT 20 + #define CQ_RES_RAWETH_QP1_V2_CFA_METADATA1_TPID_SEL_MASK 0x700000UL + #define CQ_RES_RAWETH_QP1_V2_CFA_METADATA1_TPID_SEL_SFT 20 + #define CQ_RES_RAWETH_QP1_V2_CFA_METADATA1_TPID_SEL_TPID88A8 (0x0UL << 20) + #define CQ_RES_RAWETH_QP1_V2_CFA_METADATA1_TPID_SEL_TPID8100 (0x1UL << 20) + #define CQ_RES_RAWETH_QP1_V2_CFA_METADATA1_TPID_SEL_TPID9100 (0x2UL << 20) + #define CQ_RES_RAWETH_QP1_V2_CFA_METADATA1_TPID_SEL_TPID9200 (0x3UL << 20) + #define CQ_RES_RAWETH_QP1_V2_CFA_METADATA1_TPID_SEL_TPID9300 (0x4UL << 20) + #define CQ_RES_RAWETH_QP1_V2_CFA_METADATA1_TPID_SEL_TPIDCFG (0x5UL << 20) + #define CQ_RES_RAWETH_QP1_V2_CFA_METADATA1_TPID_SEL_LAST CQ_RES_RAWETH_QP1_V2_CFA_METADATA1_TPID_SEL_TPIDCFG + #define CQ_RES_RAWETH_QP1_V2_CFA_METADATA1_VALID 0x800000UL + #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_PAYLOAD_OFFSET_MASK 0xff000000UL + #define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_PAYLOAD_OFFSET_SFT 24 +}; + +/* cq_terminal (size:256b/32B) */ +struct cq_terminal { + __le64 qp_handle; + __le16 sq_cons_idx; + __le16 rq_cons_idx; + __le32 reserved32_1; + __le64 reserved64_3; + u8 cqe_type_toggle; + #define CQ_TERMINAL_TOGGLE 0x1UL + #define CQ_TERMINAL_CQE_TYPE_MASK 0x1eUL + #define CQ_TERMINAL_CQE_TYPE_SFT 1 + #define CQ_TERMINAL_CQE_TYPE_TERMINAL (0xeUL << 1) + #define CQ_TERMINAL_CQE_TYPE_LAST CQ_TERMINAL_CQE_TYPE_TERMINAL + u8 status; + #define CQ_TERMINAL_STATUS_OK 0x0UL + #define CQ_TERMINAL_STATUS_LAST CQ_TERMINAL_STATUS_OK + __le16 reserved16; + __le32 reserved32_2; +}; + +/* cq_cutoff (size:256b/32B) */ +struct cq_cutoff { + __le64 reserved64_1; + __le64 reserved64_2; + __le64 reserved64_3; + u8 cqe_type_toggle; + #define CQ_CUTOFF_TOGGLE 0x1UL + #define CQ_CUTOFF_CQE_TYPE_MASK 0x1eUL + #define CQ_CUTOFF_CQE_TYPE_SFT 1 + #define CQ_CUTOFF_CQE_TYPE_CUT_OFF (0xfUL << 1) + #define CQ_CUTOFF_CQE_TYPE_LAST CQ_CUTOFF_CQE_TYPE_CUT_OFF + #define CQ_CUTOFF_RESIZE_TOGGLE_MASK 0x60UL + #define CQ_CUTOFF_RESIZE_TOGGLE_SFT 5 + u8 status; + #define CQ_CUTOFF_STATUS_OK 0x0UL + #define 
CQ_CUTOFF_STATUS_LAST CQ_CUTOFF_STATUS_OK + __le16 reserved16; + __le32 reserved32; +}; + +/* cq_no_op (size:256b/32B) */ +struct cq_no_op { + __le64 reserved64_1; + __le64 reserved64_2; + __le64 reserved64_3; + u8 cqe_type_toggle; + #define CQ_NO_OP_TOGGLE 0x1UL + #define CQ_NO_OP_CQE_TYPE_MASK 0x1eUL + #define CQ_NO_OP_CQE_TYPE_SFT 1 + #define CQ_NO_OP_CQE_TYPE_NO_OP (0xdUL << 1) + #define CQ_NO_OP_CQE_TYPE_LAST CQ_NO_OP_CQE_TYPE_NO_OP + u8 status; + #define CQ_NO_OP_STATUS_OK 0x0UL + #define CQ_NO_OP_STATUS_LAST CQ_NO_OP_STATUS_OK + __le16 reserved16; + __le32 reserved32; +}; + +/* cq_req_v3 (size:256b/32B) */ +struct cq_req_v3 { + __le64 qp_handle; + __le16 sq_cons_idx; + __le16 reserved1; + __le32 reserved2; + __le64 reserved3; + u8 cqe_type_toggle; + #define CQ_REQ_V3_TOGGLE 0x1UL + #define CQ_REQ_V3_CQE_TYPE_MASK 0x1eUL + #define CQ_REQ_V3_CQE_TYPE_SFT 1 + #define CQ_REQ_V3_CQE_TYPE_REQ_V3 (0x8UL << 1) + #define CQ_REQ_V3_CQE_TYPE_LAST CQ_REQ_V3_CQE_TYPE_REQ_V3 + #define CQ_REQ_V3_PUSH 0x20UL + u8 status; + #define CQ_REQ_V3_STATUS_OK 0x0UL + #define CQ_REQ_V3_STATUS_BAD_RESPONSE_ERR 0x1UL + #define CQ_REQ_V3_STATUS_LOCAL_LENGTH_ERR 0x2UL + #define CQ_REQ_V3_STATUS_LOCAL_QP_OPERATION_ERR 0x4UL + #define CQ_REQ_V3_STATUS_LOCAL_PROTECTION_ERR 0x5UL + #define CQ_REQ_V3_STATUS_MEMORY_MGT_OPERATION_ERR 0x7UL + #define CQ_REQ_V3_STATUS_REMOTE_INVALID_REQUEST_ERR 0x8UL + #define CQ_REQ_V3_STATUS_REMOTE_ACCESS_ERR 0x9UL + #define CQ_REQ_V3_STATUS_REMOTE_OPERATION_ERR 0xaUL + #define CQ_REQ_V3_STATUS_RNR_NAK_RETRY_CNT_ERR 0xbUL + #define CQ_REQ_V3_STATUS_TRANSPORT_RETRY_CNT_ERR 0xcUL + #define CQ_REQ_V3_STATUS_WORK_REQUEST_FLUSHED_ERR 0xdUL + #define CQ_REQ_V3_STATUS_OVERFLOW_ERR 0xfUL + #define CQ_REQ_V3_STATUS_LAST CQ_REQ_V3_STATUS_OVERFLOW_ERR + __le16 reserved4; + __le32 opaque; +}; + +/* cq_res_rc_v3 (size:256b/32B) */ +struct cq_res_rc_v3 { + __le32 length; + __le32 imm_data_or_inv_r_key; + __le64 qp_handle; + __le64 mr_handle; + u8 cqe_type_toggle; + #define CQ_RES_RC_V3_TOGGLE 0x1UL + #define CQ_RES_RC_V3_CQE_TYPE_MASK 0x1eUL + #define CQ_RES_RC_V3_CQE_TYPE_SFT 1 + #define CQ_RES_RC_V3_CQE_TYPE_RES_RC_V3 (0x9UL << 1) + #define CQ_RES_RC_V3_CQE_TYPE_LAST CQ_RES_RC_V3_CQE_TYPE_RES_RC_V3 + u8 status; + #define CQ_RES_RC_V3_STATUS_OK 0x0UL + #define CQ_RES_RC_V3_STATUS_LOCAL_LENGTH_ERR 0x2UL + #define CQ_RES_RC_V3_STATUS_LOCAL_QP_OPERATION_ERR 0x4UL + #define CQ_RES_RC_V3_STATUS_LOCAL_PROTECTION_ERR 0x5UL + #define CQ_RES_RC_V3_STATUS_LOCAL_ACCESS_ERROR 0x6UL + #define CQ_RES_RC_V3_STATUS_REMOTE_INVALID_REQUEST_ERR 0x8UL + #define CQ_RES_RC_V3_STATUS_WORK_REQUEST_FLUSHED_ERR 0xdUL + #define CQ_RES_RC_V3_STATUS_HW_FLUSH_ERR 0xeUL + #define CQ_RES_RC_V3_STATUS_OVERFLOW_ERR 0xfUL + #define CQ_RES_RC_V3_STATUS_LAST CQ_RES_RC_V3_STATUS_OVERFLOW_ERR + __le16 flags; + #define CQ_RES_RC_V3_FLAGS_SRQ 0x1UL + #define CQ_RES_RC_V3_FLAGS_SRQ_RQ 0x0UL + #define CQ_RES_RC_V3_FLAGS_SRQ_SRQ 0x1UL + #define CQ_RES_RC_V3_FLAGS_SRQ_LAST CQ_RES_RC_V3_FLAGS_SRQ_SRQ + #define CQ_RES_RC_V3_FLAGS_IMM 0x2UL + #define CQ_RES_RC_V3_FLAGS_INV 0x4UL + #define CQ_RES_RC_V3_FLAGS_RDMA 0x8UL + #define CQ_RES_RC_V3_FLAGS_RDMA_SEND (0x0UL << 3) + #define CQ_RES_RC_V3_FLAGS_RDMA_RDMA_WRITE (0x1UL << 3) + #define CQ_RES_RC_V3_FLAGS_RDMA_LAST CQ_RES_RC_V3_FLAGS_RDMA_RDMA_WRITE + __le32 opaque; +}; + +/* cq_res_ud_v3 (size:256b/32B) */ +struct cq_res_ud_v3 { + __le16 length; + #define CQ_RES_UD_V3_LENGTH_MASK 0x3fffUL + #define CQ_RES_UD_V3_LENGTH_SFT 0 + u8 reserved1; + u8 src_qp_high; + __le32 imm_data; + __le64 qp_handle; 
+ __le16 src_mac[3]; + __le16 src_qp_low; + u8 cqe_type_toggle; + #define CQ_RES_UD_V3_TOGGLE 0x1UL + #define CQ_RES_UD_V3_CQE_TYPE_MASK 0x1eUL + #define CQ_RES_UD_V3_CQE_TYPE_SFT 1 + #define CQ_RES_UD_V3_CQE_TYPE_RES_UD_V3 (0xaUL << 1) + #define CQ_RES_UD_V3_CQE_TYPE_LAST CQ_RES_UD_V3_CQE_TYPE_RES_UD_V3 + u8 status; + #define CQ_RES_UD_V3_STATUS_OK 0x0UL + #define CQ_RES_UD_V3_STATUS_HW_LOCAL_LENGTH_ERR 0x3UL + #define CQ_RES_UD_V3_STATUS_LOCAL_QP_OPERATION_ERR 0x4UL + #define CQ_RES_UD_V3_STATUS_LOCAL_PROTECTION_ERR 0x5UL + #define CQ_RES_UD_V3_STATUS_WORK_REQUEST_FLUSHED_ERR 0xdUL + #define CQ_RES_UD_V3_STATUS_HW_FLUSH_ERR 0xeUL + #define CQ_RES_UD_V3_STATUS_OVERFLOW_ERR 0xfUL + #define CQ_RES_UD_V3_STATUS_LAST CQ_RES_UD_V3_STATUS_OVERFLOW_ERR + __le16 flags; + #define CQ_RES_UD_V3_FLAGS_SRQ 0x1UL + #define CQ_RES_UD_V3_FLAGS_SRQ_RQ 0x0UL + #define CQ_RES_UD_V3_FLAGS_SRQ_SRQ 0x1UL + #define CQ_RES_UD_V3_FLAGS_SRQ_LAST CQ_RES_UD_V3_FLAGS_SRQ_SRQ + #define CQ_RES_UD_V3_FLAGS_IMM 0x2UL + #define CQ_RES_UD_V3_FLAGS_UNUSED_MASK 0xcUL + #define CQ_RES_UD_V3_FLAGS_UNUSED_SFT 2 + #define CQ_RES_UD_V3_FLAGS_ROCE_IP_VER_MASK 0x30UL + #define CQ_RES_UD_V3_FLAGS_ROCE_IP_VER_SFT 4 + #define CQ_RES_UD_V3_FLAGS_ROCE_IP_VER_V1 (0x0UL << 4) + #define CQ_RES_UD_V3_FLAGS_ROCE_IP_VER_V2IPV4 (0x2UL << 4) + #define CQ_RES_UD_V3_FLAGS_ROCE_IP_VER_V2IPV6 (0x3UL << 4) + #define CQ_RES_UD_V3_FLAGS_ROCE_IP_VER_LAST CQ_RES_UD_V3_FLAGS_ROCE_IP_VER_V2IPV6 + __le32 opaque; +}; + +/* cq_res_raweth_qp1_v3 (size:256b/32B) */ +struct cq_res_raweth_qp1_v3 { + __le16 length; + #define CQ_RES_RAWETH_QP1_V3_LENGTH_MASK 0x3fffUL + #define CQ_RES_RAWETH_QP1_V3_LENGTH_SFT 0 + __le16 raweth_qp1_flags_cfa_metadata1; + #define CQ_RES_RAWETH_QP1_V3_ERROR 0x1UL + #define CQ_RES_RAWETH_QP1_V3_ITYPE_MASK 0x3c0UL + #define CQ_RES_RAWETH_QP1_V3_ITYPE_SFT 6 + #define CQ_RES_RAWETH_QP1_V3_ITYPE_NOT_KNOWN (0x0UL << 6) + #define CQ_RES_RAWETH_QP1_V3_ITYPE_IP (0x1UL << 6) + #define CQ_RES_RAWETH_QP1_V3_ITYPE_TCP (0x2UL << 6) + #define CQ_RES_RAWETH_QP1_V3_ITYPE_UDP (0x3UL << 6) + #define CQ_RES_RAWETH_QP1_V3_ITYPE_FCOE (0x4UL << 6) + #define CQ_RES_RAWETH_QP1_V3_ITYPE_ROCE (0x5UL << 6) + #define CQ_RES_RAWETH_QP1_V3_ITYPE_ICMP (0x7UL << 6) + #define CQ_RES_RAWETH_QP1_V3_ITYPE_PTP_WO_TIMESTAMP (0x8UL << 6) + #define CQ_RES_RAWETH_QP1_V3_ITYPE_PTP_W_TIMESTAMP (0x9UL << 6) + #define CQ_RES_RAWETH_QP1_V3_ITYPE_LAST CQ_RES_RAWETH_QP1_V3_ITYPE_PTP_W_TIMESTAMP + #define CQ_RES_RAWETH_QP1_V3_CFA_METADATA1_MASK 0xf000UL + #define CQ_RES_RAWETH_QP1_V3_CFA_METADATA1_SFT 12 + #define CQ_RES_RAWETH_QP1_V3_CFA_METADATA1_TPID_SEL_MASK 0x7000UL + #define CQ_RES_RAWETH_QP1_V3_CFA_METADATA1_TPID_SEL_SFT 12 + #define CQ_RES_RAWETH_QP1_V3_CFA_METADATA1_VALID 0x8000UL + __le16 raweth_qp1_errors; + #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_IP_CS_ERROR 0x10UL + #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_L4_CS_ERROR 0x20UL + #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_T_IP_CS_ERROR 0x40UL + #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_T_L4_CS_ERROR 0x80UL + #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_CRC_ERROR 0x100UL + #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_T_PKT_ERROR_MASK 0xe00UL + #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_T_PKT_ERROR_SFT 9 + #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_T_PKT_ERROR_NO_ERROR (0x0UL << 9) + #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_L3_BAD_VERSION (0x1UL << 9) + #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_L3_BAD_HDR_LEN (0x2UL << 9) + #define 
CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_IP_TOTAL_ERROR (0x3UL << 9) + #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_UDP_TOTAL_ERROR (0x4UL << 9) + #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL (0x5UL << 9) + #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_TOTAL_ERROR (0x6UL << 9) + #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_T_PKT_ERROR_LAST CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_TOTAL_ERROR + #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_PKT_ERROR_MASK 0xf000UL + #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_PKT_ERROR_SFT 12 + #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_PKT_ERROR_NO_ERROR (0x0UL << 12) + #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_PKT_ERROR_L3_BAD_VERSION (0x1UL << 12) + #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_PKT_ERROR_L3_BAD_HDR_LEN (0x2UL << 12) + #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_PKT_ERROR_L3_BAD_TTL (0x3UL << 12) + #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_PKT_ERROR_IP_TOTAL_ERROR (0x4UL << 12) + #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_PKT_ERROR_UDP_TOTAL_ERROR (0x5UL << 12) + #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN (0x6UL << 12) + #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN_TOO_SMALL (0x7UL << 12) + #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN (0x8UL << 12) + #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_PKT_ERROR_LAST CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN + __le16 cfa_metadata0; + #define CQ_RES_RAWETH_QP1_V3_CFA_METADATA0_VID_MASK 0xfffUL + #define CQ_RES_RAWETH_QP1_V3_CFA_METADATA0_VID_SFT 0 + #define CQ_RES_RAWETH_QP1_V3_CFA_METADATA0_DE 0x1000UL + #define CQ_RES_RAWETH_QP1_V3_CFA_METADATA0_PRI_MASK 0xe000UL + #define CQ_RES_RAWETH_QP1_V3_CFA_METADATA0_PRI_SFT 13 + __le64 qp_handle; + __le32 raweth_qp1_flags2; + #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_FLAGS2_IP_CS_CALC 0x1UL + #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_FLAGS2_L4_CS_CALC 0x2UL + #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_FLAGS2_T_IP_CS_CALC 0x4UL + #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_FLAGS2_T_L4_CS_CALC 0x8UL + #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_FLAGS2_META_FORMAT_MASK 0xf0UL + #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_FLAGS2_META_FORMAT_SFT 4 + #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_FLAGS2_META_FORMAT_NONE (0x0UL << 4) + #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_FLAGS2_META_FORMAT_ACT_REC_PTR (0x1UL << 4) + #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_FLAGS2_META_FORMAT_TUNNEL_ID (0x2UL << 4) + #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_FLAGS2_META_FORMAT_CHDR_DATA (0x3UL << 4) + #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_FLAGS2_META_FORMAT_HDR_OFFSET (0x4UL << 4) + #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_FLAGS2_META_FORMAT_LAST CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_FLAGS2_META_FORMAT_HDR_OFFSET + #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_FLAGS2_IP_TYPE 0x100UL + #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_FLAGS2_COMPLETE_CHECKSUM_CALC 0x200UL + #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_FLAGS2_T_IP_TYPE 0x400UL + #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_FLAGS2_T_IP_TYPE_IPV4 (0x0UL << 10) + #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_FLAGS2_T_IP_TYPE_IPV6 (0x1UL << 10) + #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_FLAGS2_T_IP_TYPE_LAST CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_FLAGS2_T_IP_TYPE_IPV6 + #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_FLAGS2_COMPLETE_CHECKSUM_MASK 0xffff0000UL + #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_FLAGS2_COMPLETE_CHECKSUM_SFT 16 + __le32 cfa_metadata2; + u8 cqe_type_toggle; + 
#define CQ_RES_RAWETH_QP1_V3_TOGGLE 0x1UL + #define CQ_RES_RAWETH_QP1_V3_CQE_TYPE_MASK 0x1eUL + #define CQ_RES_RAWETH_QP1_V3_CQE_TYPE_SFT 1 + #define CQ_RES_RAWETH_QP1_V3_CQE_TYPE_RES_RAWETH_QP1_V3 (0xbUL << 1) + #define CQ_RES_RAWETH_QP1_V3_CQE_TYPE_LAST CQ_RES_RAWETH_QP1_V3_CQE_TYPE_RES_RAWETH_QP1_V3 + u8 status; + #define CQ_RES_RAWETH_QP1_V3_STATUS_OK 0x0UL + #define CQ_RES_RAWETH_QP1_V3_STATUS_HW_LOCAL_LENGTH_ERR 0x3UL + #define CQ_RES_RAWETH_QP1_V3_STATUS_LOCAL_QP_OPERATION_ERR 0x4UL + #define CQ_RES_RAWETH_QP1_V3_STATUS_LOCAL_PROTECTION_ERR 0x5UL + #define CQ_RES_RAWETH_QP1_V3_STATUS_WORK_REQUEST_FLUSHED_ERR 0xdUL + #define CQ_RES_RAWETH_QP1_V3_STATUS_HW_FLUSH_ERR 0xeUL + #define CQ_RES_RAWETH_QP1_V3_STATUS_OVERFLOW_ERR 0xfUL + #define CQ_RES_RAWETH_QP1_V3_STATUS_LAST CQ_RES_RAWETH_QP1_V3_STATUS_OVERFLOW_ERR + u8 flags; + #define CQ_RES_RAWETH_QP1_V3_FLAGS_SRQ 0x1UL + #define CQ_RES_RAWETH_QP1_V3_FLAGS_SRQ_RQ 0x0UL + #define CQ_RES_RAWETH_QP1_V3_FLAGS_SRQ_SRQ 0x1UL + #define CQ_RES_RAWETH_QP1_V3_FLAGS_SRQ_LAST CQ_RES_RAWETH_QP1_V3_FLAGS_SRQ_SRQ + u8 raweth_qp1_payload_offset; + __le32 opaque; +}; + +/* cq_res_ud_cfa_v3 (size:256b/32B) */ +struct cq_res_ud_cfa_v3 { + __le16 length; + #define CQ_RES_UD_CFA_V3_LENGTH_MASK 0x3fffUL + #define CQ_RES_UD_CFA_V3_LENGTH_SFT 0 + __le16 cfa_metadata0; + #define CQ_RES_UD_CFA_V3_CFA_METADATA0_VID_MASK 0xfffUL + #define CQ_RES_UD_CFA_V3_CFA_METADATA0_VID_SFT 0 + #define CQ_RES_UD_CFA_V3_CFA_METADATA0_DE 0x1000UL + #define CQ_RES_UD_CFA_V3_CFA_METADATA0_PRI_MASK 0xe000UL + #define CQ_RES_UD_CFA_V3_CFA_METADATA0_PRI_SFT 13 + __le32 imm_data; + __le32 qid_cfa_metadata1_src_qp_high; + #define CQ_RES_UD_CFA_V3_QID_MASK 0x7ffUL + #define CQ_RES_UD_CFA_V3_QID_SFT 0 + #define CQ_RES_UD_CFA_V3_UNUSED_MASK 0xff800UL + #define CQ_RES_UD_CFA_V3_UNUSED_SFT 11 + #define CQ_RES_UD_CFA_V3_CFA_METADATA1_MASK 0xf00000UL + #define CQ_RES_UD_CFA_V3_CFA_METADATA1_SFT 20 + #define CQ_RES_UD_CFA_V3_CFA_METADATA1_TPID_SEL_MASK 0x700000UL + #define CQ_RES_UD_CFA_V3_CFA_METADATA1_TPID_SEL_SFT 20 + #define CQ_RES_UD_CFA_V3_CFA_METADATA1_VALID 0x800000UL + #define CQ_RES_UD_CFA_V3_SRC_QP_HIGH_MASK 0xff000000UL + #define CQ_RES_UD_CFA_V3_SRC_QP_HIGH_SFT 24 + __le32 cfa_metadata2; + __le16 src_mac[3]; + __le16 src_qp_low; + u8 cqe_type_toggle; + #define CQ_RES_UD_CFA_V3_TOGGLE 0x1UL + #define CQ_RES_UD_CFA_V3_CQE_TYPE_MASK 0x1eUL + #define CQ_RES_UD_CFA_V3_CQE_TYPE_SFT 1 + #define CQ_RES_UD_CFA_V3_CQE_TYPE_RES_UD_CFA_V3 (0xcUL << 1) + #define CQ_RES_UD_CFA_V3_CQE_TYPE_LAST CQ_RES_UD_CFA_V3_CQE_TYPE_RES_UD_CFA_V3 + u8 status; + #define CQ_RES_UD_CFA_V3_STATUS_OK 0x0UL + #define CQ_RES_UD_CFA_V3_STATUS_HW_LOCAL_LENGTH_ERR 0x3UL + #define CQ_RES_UD_CFA_V3_STATUS_LOCAL_QP_OPERATION_ERR 0x4UL + #define CQ_RES_UD_CFA_V3_STATUS_LOCAL_PROTECTION_ERR 0x5UL + #define CQ_RES_UD_CFA_V3_STATUS_WORK_REQUEST_FLUSHED_ERR 0xdUL + #define CQ_RES_UD_CFA_V3_STATUS_HW_FLUSH_ERR 0xeUL + #define CQ_RES_UD_CFA_V3_STATUS_OVERFLOW_ERR 0xfUL + #define CQ_RES_UD_CFA_V3_STATUS_LAST CQ_RES_UD_CFA_V3_STATUS_OVERFLOW_ERR + __le16 flags; + #define CQ_RES_UD_CFA_V3_FLAGS_SRQ 0x1UL + #define CQ_RES_UD_CFA_V3_FLAGS_SRQ_RQ 0x0UL + #define CQ_RES_UD_CFA_V3_FLAGS_SRQ_SRQ 0x1UL + #define CQ_RES_UD_CFA_V3_FLAGS_SRQ_LAST CQ_RES_UD_CFA_V3_FLAGS_SRQ_SRQ + #define CQ_RES_UD_CFA_V3_FLAGS_IMM 0x2UL + #define CQ_RES_UD_CFA_V3_FLAGS_UNUSED_MASK 0xcUL + #define CQ_RES_UD_CFA_V3_FLAGS_UNUSED_SFT 2 + #define CQ_RES_UD_CFA_V3_FLAGS_ROCE_IP_VER_MASK 0x30UL + #define CQ_RES_UD_CFA_V3_FLAGS_ROCE_IP_VER_SFT 4 + #define 
CQ_RES_UD_CFA_V3_FLAGS_ROCE_IP_VER_V1 (0x0UL << 4) + #define CQ_RES_UD_CFA_V3_FLAGS_ROCE_IP_VER_V2IPV4 (0x2UL << 4) + #define CQ_RES_UD_CFA_V3_FLAGS_ROCE_IP_VER_V2IPV6 (0x3UL << 4) + #define CQ_RES_UD_CFA_V3_FLAGS_ROCE_IP_VER_LAST CQ_RES_UD_CFA_V3_FLAGS_ROCE_IP_VER_V2IPV6 + #define CQ_RES_UD_CFA_V3_FLAGS_META_FORMAT_MASK 0x3c0UL + #define CQ_RES_UD_CFA_V3_FLAGS_META_FORMAT_SFT 6 + #define CQ_RES_UD_CFA_V3_FLAGS_META_FORMAT_NONE (0x0UL << 6) + #define CQ_RES_UD_CFA_V3_FLAGS_META_FORMAT_ACT_REC_PTR (0x1UL << 6) + #define CQ_RES_UD_CFA_V3_FLAGS_META_FORMAT_TUNNEL_ID (0x2UL << 6) + #define CQ_RES_UD_CFA_V3_FLAGS_META_FORMAT_CHDR_DATA (0x3UL << 6) + #define CQ_RES_UD_CFA_V3_FLAGS_META_FORMAT_HDR_OFFSET (0x4UL << 6) + #define CQ_RES_UD_CFA_V3_FLAGS_META_FORMAT_LAST CQ_RES_UD_CFA_V3_FLAGS_META_FORMAT_HDR_OFFSET + __le32 opaque; +}; + +/* nq_base (size:128b/16B) */ +struct nq_base { + __le16 info10_type; + #define NQ_BASE_TYPE_MASK 0x3fUL + #define NQ_BASE_TYPE_SFT 0 + #define NQ_BASE_TYPE_CQ_NOTIFICATION 0x30UL + #define NQ_BASE_TYPE_SRQ_EVENT 0x32UL + #define NQ_BASE_TYPE_DBQ_EVENT 0x34UL + #define NQ_BASE_TYPE_QP_EVENT 0x38UL + #define NQ_BASE_TYPE_FUNC_EVENT 0x3aUL + #define NQ_BASE_TYPE_NQ_REASSIGN 0x3cUL + #define NQ_BASE_TYPE_LAST NQ_BASE_TYPE_NQ_REASSIGN + #define NQ_BASE_INFO10_MASK 0xffc0UL + #define NQ_BASE_INFO10_SFT 6 + __le16 info16; + __le32 info32; + __le32 info63_v[2]; + #define NQ_BASE_V 0x1UL + #define NQ_BASE_INFO63_MASK 0xfffffffeUL + #define NQ_BASE_INFO63_SFT 1 +}; + +/* nq_cn (size:128b/16B) */ +struct nq_cn { + __le16 type; + #define NQ_CN_TYPE_MASK 0x3fUL + #define NQ_CN_TYPE_SFT 0 + #define NQ_CN_TYPE_CQ_NOTIFICATION 0x30UL + #define NQ_CN_TYPE_LAST NQ_CN_TYPE_CQ_NOTIFICATION + #define NQ_CN_TOGGLE_MASK 0xc0UL + #define NQ_CN_TOGGLE_SFT 6 + __le16 reserved16; + __le32 cq_handle_low; + __le32 v; + #define NQ_CN_V 0x1UL + __le32 cq_handle_high; +}; + +/* nq_srq_event (size:128b/16B) */ +struct nq_srq_event { + u8 type; + #define NQ_SRQ_EVENT_TYPE_MASK 0x3fUL + #define NQ_SRQ_EVENT_TYPE_SFT 0 + #define NQ_SRQ_EVENT_TYPE_SRQ_EVENT 0x32UL + #define NQ_SRQ_EVENT_TYPE_LAST NQ_SRQ_EVENT_TYPE_SRQ_EVENT + #define NQ_SRQ_EVENT_TOGGLE_MASK 0xc0UL + #define NQ_SRQ_EVENT_TOGGLE_SFT 6 + u8 event; + #define NQ_SRQ_EVENT_EVENT_SRQ_THRESHOLD_EVENT 0x1UL + #define NQ_SRQ_EVENT_EVENT_LAST NQ_SRQ_EVENT_EVENT_SRQ_THRESHOLD_EVENT + __le16 reserved16; + __le32 srq_handle_low; + __le32 v; + #define NQ_SRQ_EVENT_V 0x1UL + __le32 srq_handle_high; +}; + +/* nq_dbq_event (size:128b/16B) */ +struct nq_dbq_event { + u8 type; + #define NQ_DBQ_EVENT_TYPE_MASK 0x3fUL + #define NQ_DBQ_EVENT_TYPE_SFT 0 + #define NQ_DBQ_EVENT_TYPE_DBQ_EVENT 0x34UL + #define NQ_DBQ_EVENT_TYPE_LAST NQ_DBQ_EVENT_TYPE_DBQ_EVENT + u8 event; + #define NQ_DBQ_EVENT_EVENT_DBQ_THRESHOLD_EVENT 0x1UL + #define NQ_DBQ_EVENT_EVENT_LAST NQ_DBQ_EVENT_EVENT_DBQ_THRESHOLD_EVENT + __le16 db_pfid; + #define NQ_DBQ_EVENT_DB_PFID_MASK 0xfUL + #define NQ_DBQ_EVENT_DB_PFID_SFT 0 + __le32 db_dpi; + #define NQ_DBQ_EVENT_DB_DPI_MASK 0xfffffUL + #define NQ_DBQ_EVENT_DB_DPI_SFT 0 + __le32 v; + #define NQ_DBQ_EVENT_V 0x1UL + __le32 db_type_db_xid; + #define NQ_DBQ_EVENT_DB_XID_MASK 0xfffffUL + #define NQ_DBQ_EVENT_DB_XID_SFT 0 + #define NQ_DBQ_EVENT_DB_TYPE_MASK 0xf0000000UL + #define NQ_DBQ_EVENT_DB_TYPE_SFT 28 +}; + +/* nq_reassign (size:128b/16B) */ +struct nq_reassign { + __le16 type; + #define NQ_REASSIGN_TYPE_MASK 0x3fUL + #define NQ_REASSIGN_TYPE_SFT 0 + #define NQ_REASSIGN_TYPE_NQ_REASSIGN 0x3cUL + #define NQ_REASSIGN_TYPE_LAST 
NQ_REASSIGN_TYPE_NQ_REASSIGN + __le16 reserved16; + __le32 cq_handle_low; + __le32 v; + #define NQ_REASSIGN_V 0x1UL + __le32 cq_handle_high; +}; + +/* xrrq_irrq (size:256b/32B) */ +struct xrrq_irrq { + __le16 credits_type; + #define XRRQ_IRRQ_TYPE 0x1UL + #define XRRQ_IRRQ_TYPE_READ_REQ 0x0UL + #define XRRQ_IRRQ_TYPE_ATOMIC_REQ 0x1UL + #define XRRQ_IRRQ_TYPE_LAST XRRQ_IRRQ_TYPE_ATOMIC_REQ + #define XRRQ_IRRQ_CREDITS_MASK 0xf800UL + #define XRRQ_IRRQ_CREDITS_SFT 11 + __le16 reserved16; + __le32 reserved32; + __le32 psn; + #define XRRQ_IRRQ_PSN_MASK 0xffffffUL + #define XRRQ_IRRQ_PSN_SFT 0 + __le32 msn; + #define XRRQ_IRRQ_MSN_MASK 0xffffffUL + #define XRRQ_IRRQ_MSN_SFT 0 + __le64 va_or_atomic_result; + __le32 rdma_r_key; + __le32 length; +}; + +/* xrrq_orrq (size:256b/32B) */ +struct xrrq_orrq { + __le16 num_sges_type; + #define XRRQ_ORRQ_TYPE 0x1UL + #define XRRQ_ORRQ_TYPE_READ_REQ 0x0UL + #define XRRQ_ORRQ_TYPE_ATOMIC_REQ 0x1UL + #define XRRQ_ORRQ_TYPE_LAST XRRQ_ORRQ_TYPE_ATOMIC_REQ + #define XRRQ_ORRQ_NUM_SGES_MASK 0xf800UL + #define XRRQ_ORRQ_NUM_SGES_SFT 11 + __le16 reserved16; + __le32 length; + __le32 psn; + #define XRRQ_ORRQ_PSN_MASK 0xffffffUL + #define XRRQ_ORRQ_PSN_SFT 0 + __le32 end_psn; + #define XRRQ_ORRQ_END_PSN_MASK 0xffffffUL + #define XRRQ_ORRQ_END_PSN_SFT 0 + __le64 first_sge_phy_or_sing_sge_va; + __le32 single_sge_l_key; + __le32 single_sge_size; +}; + +/* ptu_pte (size:64b/8B) */ +struct ptu_pte { + __le32 page_next_to_last_last_valid[2]; + #define PTU_PTE_VALID 0x1UL + #define PTU_PTE_LAST 0x2UL + #define PTU_PTE_NEXT_TO_LAST 0x4UL + #define PTU_PTE_UNUSED_MASK 0xff8UL + #define PTU_PTE_UNUSED_SFT 3 + #define PTU_PTE_PAGE_MASK 0xfffffffffffff000ULL + #define PTU_PTE_PAGE_SFT 12 +}; + +/* ptu_pde (size:64b/8B) */ +struct ptu_pde { + __le32 page_valid[2]; + #define PTU_PDE_VALID 0x1UL + #define PTU_PDE_UNUSED_MASK 0xffeUL + #define PTU_PDE_UNUSED_SFT 1 + #define PTU_PDE_PAGE_MASK 0xfffffffffffff000ULL + #define PTU_PDE_PAGE_SFT 12 +}; + +#endif /* _ROCE_HSI_H_ */ diff --git a/bnxt_re-1.10.3-229.0.139.0/stats.c b/bnxt_re-1.10.3-229.0.139.0/stats.c new file mode 100644 index 0000000..97afbdf --- /dev/null +++ b/bnxt_re-1.10.3-229.0.139.0/stats.c @@ -0,0 +1,476 @@ +/* + * Copyright (c) 2015-2023, Broadcom. All rights reserved. The term + * Broadcom refers to Broadcom Inc. and/or its subsidiaries. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Author: Devesh Sharma + * + * Description: statistics related functions + */ + +#include "bnxt_re.h" +#include "bnxt.h" + +int bnxt_re_get_flow_stats_from_service_pf(struct bnxt_re_dev *rdev, + struct bnxt_re_flow_counters *stats, + struct bnxt_qplib_query_stats_info *sinfo) +{ + struct hwrm_cfa_flow_stats_output resp = {}; + struct hwrm_cfa_flow_stats_input req = {}; + struct bnxt_en_dev *en_dev = rdev->en_dev; + struct bnxt_fw_msg fw_msg = {}; + u16 target_id; + int rc = 0; + + /* FIXME Workaround to avoid system hang when this + * thread is competing with device create/destroy + * sequence + */ + if (!bnxt_re_rtnl_trylock()) + /* Not querying stats. Return older values */ + return 0; + + if (sinfo->function_id == 0xFFFFFFFF) + target_id = -1; + else + target_id = sinfo->function_id + 1; + + /* Issue HWRM cmd to read flow counters for CNP tx and rx */ + bnxt_re_init_hwrm_hdr((void *)&req, HWRM_CFA_FLOW_STATS, target_id); + req.num_flows = cpu_to_le16(6); + req.flow_handle_0 = cpu_to_le16(CFA_FLOW_INFO_REQ_FLOW_HANDLE_CNP_CNT); + req.flow_handle_1 = cpu_to_le16(CFA_FLOW_INFO_REQ_FLOW_HANDLE_CNP_CNT | + CFA_FLOW_INFO_REQ_FLOW_HANDLE_DIR_RX); + req.flow_handle_2 = cpu_to_le16(CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV1_CNT); + req.flow_handle_3 = cpu_to_le16(CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV1_CNT | + CFA_FLOW_INFO_REQ_FLOW_HANDLE_DIR_RX); + req.flow_handle_4 = cpu_to_le16(CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV2_CNT); + req.flow_handle_5 = cpu_to_le16(CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV2_CNT | + CFA_FLOW_INFO_REQ_FLOW_HANDLE_DIR_RX); + bnxt_re_fill_fw_msg(&fw_msg, &req, sizeof(req), &resp, + sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev)); + rc = bnxt_send_msg(en_dev, &fw_msg); + if (rc) { + rtnl_unlock(); + dev_dbg(rdev_to_dev(rdev), + "Failed to get CFA Flow stats : rc = 0x%x", rc); + return rc; + } + + stats->cnp_stats.cnp_tx_pkts = le64_to_cpu(resp.packet_0); + stats->cnp_stats.cnp_tx_bytes = le64_to_cpu(resp.byte_0); + stats->cnp_stats.cnp_rx_pkts = le64_to_cpu(resp.packet_1); + stats->cnp_stats.cnp_rx_bytes = le64_to_cpu(resp.byte_1); + + stats->ro_stats.tx_pkts = le64_to_cpu(resp.packet_2) + + le64_to_cpu(resp.packet_4); + stats->ro_stats.tx_bytes = le64_to_cpu(resp.byte_2) + + le64_to_cpu(resp.byte_4); + stats->ro_stats.rx_pkts = le64_to_cpu(resp.packet_3) + + le64_to_cpu(resp.packet_5); + stats->ro_stats.rx_bytes = le64_to_cpu(resp.byte_3) + + le64_to_cpu(resp.byte_5); + + rtnl_unlock(); + return 0; +} + +int bnxt_re_get_qos_stats(struct bnxt_re_dev *rdev) +{ + struct bnxt_re_ro_counters roce_only_tmp[2] = {{}, {}}; + struct bnxt_re_cnp_counters tmp_counters[2] = {{}, {}}; + struct hwrm_cfa_flow_stats_output resp = {}; + struct hwrm_cfa_flow_stats_input req = {}; + struct bnxt_en_dev *en_dev = rdev->en_dev; + struct bnxt_fw_msg fw_msg = {}; + struct bnxt_re_cc_stat *cnps; + struct bnxt_re_rstat *dstat; + int rc = 0; + u64 bytes; + u64 pkts; + + /* FIXME Workaround to avoid system hang when this + * thread is competing with device create/destroy + * sequence + */ 
+ if (!bnxt_re_rtnl_trylock()) + /* Not querying stats. Return older values */ + return 0; + + /* Issue HWRM cmd to read flow counters for CNP tx and rx */ + bnxt_re_init_hwrm_hdr((void *)&req, HWRM_CFA_FLOW_STATS, -1); + req.num_flows = cpu_to_le16(6); + req.flow_handle_0 = cpu_to_le16(CFA_FLOW_INFO_REQ_FLOW_HANDLE_CNP_CNT); + req.flow_handle_1 = cpu_to_le16(CFA_FLOW_INFO_REQ_FLOW_HANDLE_CNP_CNT | + CFA_FLOW_INFO_REQ_FLOW_HANDLE_DIR_RX); + req.flow_handle_2 = cpu_to_le16(CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV1_CNT); + req.flow_handle_3 = cpu_to_le16(CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV1_CNT | + CFA_FLOW_INFO_REQ_FLOW_HANDLE_DIR_RX); + req.flow_handle_4 = cpu_to_le16(CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV2_CNT); + req.flow_handle_5 = cpu_to_le16(CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV2_CNT | + CFA_FLOW_INFO_REQ_FLOW_HANDLE_DIR_RX); + bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp, + sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev)); + rc = bnxt_send_msg(en_dev, &fw_msg); + if (rc) { + dev_dbg(rdev_to_dev(rdev), + "Failed to get CFA Flow stats : rc = 0x%x", rc); + goto done; + } + + tmp_counters[0].cnp_tx_pkts = le64_to_cpu(resp.packet_0); + tmp_counters[0].cnp_tx_bytes = le64_to_cpu(resp.byte_0); + tmp_counters[0].cnp_rx_pkts = le64_to_cpu(resp.packet_1); + tmp_counters[0].cnp_rx_bytes = le64_to_cpu(resp.byte_1); + + roce_only_tmp[0].tx_pkts = le64_to_cpu(resp.packet_2) + + le64_to_cpu(resp.packet_4); + roce_only_tmp[0].tx_bytes = le64_to_cpu(resp.byte_2) + + le64_to_cpu(resp.byte_4); + roce_only_tmp[0].rx_pkts = le64_to_cpu(resp.packet_3) + + le64_to_cpu(resp.packet_5); + roce_only_tmp[0].rx_bytes = le64_to_cpu(resp.byte_3) + + le64_to_cpu(resp.byte_5); + + if (rdev->binfo) { + memset(&req, 0, sizeof(req)); + memset(&fw_msg, 0, sizeof(fw_msg)); + bnxt_re_init_hwrm_hdr((void *)&req, HWRM_CFA_FLOW_STATS, + PCI_FUNC(rdev->binfo->pdev2->devfn) + 1); + req.num_flows = cpu_to_le16(6); + req.flow_handle_0 = cpu_to_le16(CFA_FLOW_INFO_REQ_FLOW_HANDLE_CNP_CNT); + req.flow_handle_1 = cpu_to_le16(CFA_FLOW_INFO_REQ_FLOW_HANDLE_CNP_CNT | + CFA_FLOW_INFO_REQ_FLOW_HANDLE_DIR_RX); + req.flow_handle_2 = cpu_to_le16(CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV1_CNT); + req.flow_handle_3 = cpu_to_le16(CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV1_CNT | + CFA_FLOW_INFO_REQ_FLOW_HANDLE_DIR_RX); + req.flow_handle_4 = cpu_to_le16(CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV2_CNT); + req.flow_handle_5 = cpu_to_le16(CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV2_CNT | + CFA_FLOW_INFO_REQ_FLOW_HANDLE_DIR_RX); + bnxt_re_fill_fw_msg(&fw_msg, &req, sizeof(req), &resp, + sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev)); + rc = bnxt_send_msg(en_dev, &fw_msg); + if (rc) { + dev_dbg(rdev_to_dev(rdev), + "Failed to get CFA 2nd port : rc = 0x%x", + rc); + /* Workaround for avoiding CFA query problem for bond */ + rc = 0; + goto done; + } + tmp_counters[1].cnp_tx_pkts = le64_to_cpu(resp.packet_0); + tmp_counters[1].cnp_tx_bytes = le64_to_cpu(resp.byte_0); + tmp_counters[1].cnp_rx_pkts = le64_to_cpu(resp.packet_1); + tmp_counters[1].cnp_rx_bytes = le64_to_cpu(resp.byte_1); + + roce_only_tmp[1].tx_pkts = le64_to_cpu(resp.packet_2) + + le64_to_cpu(resp.packet_4); + roce_only_tmp[1].tx_bytes = le64_to_cpu(resp.byte_2) + + le64_to_cpu(resp.byte_4); + roce_only_tmp[1].rx_pkts = le64_to_cpu(resp.packet_3) + + le64_to_cpu(resp.packet_5); + roce_only_tmp[1].rx_bytes = le64_to_cpu(resp.byte_3) + + le64_to_cpu(resp.byte_5); + } + + cnps = &rdev->stats.cnps; + dstat = &rdev->stats.dstat; + if (!cnps->is_first) { + /* First query done.. 
*/ + cnps->is_first = true; + cnps->prev[0].cnp_tx_pkts = tmp_counters[0].cnp_tx_pkts; + cnps->prev[0].cnp_tx_bytes = tmp_counters[0].cnp_tx_bytes; + cnps->prev[0].cnp_rx_pkts = tmp_counters[0].cnp_rx_pkts; + cnps->prev[0].cnp_rx_bytes = tmp_counters[0].cnp_rx_bytes; + + cnps->prev[1].cnp_tx_pkts = tmp_counters[1].cnp_tx_pkts; + cnps->prev[1].cnp_tx_bytes = tmp_counters[1].cnp_tx_bytes; + cnps->prev[1].cnp_rx_pkts = tmp_counters[1].cnp_rx_pkts; + cnps->prev[1].cnp_rx_bytes = tmp_counters[1].cnp_rx_bytes; + + dstat->prev[0].tx_pkts = roce_only_tmp[0].tx_pkts; + dstat->prev[0].tx_bytes = roce_only_tmp[0].tx_bytes; + dstat->prev[0].rx_pkts = roce_only_tmp[0].rx_pkts; + dstat->prev[0].rx_bytes = roce_only_tmp[0].rx_bytes; + + dstat->prev[1].tx_pkts = roce_only_tmp[1].tx_pkts; + dstat->prev[1].tx_bytes = roce_only_tmp[1].tx_bytes; + dstat->prev[1].rx_pkts = roce_only_tmp[1].rx_pkts; + dstat->prev[1].rx_bytes = roce_only_tmp[1].rx_bytes; + } else { + u64 byte_mask, pkts_mask; + u64 diff; + + byte_mask = bnxt_re_get_cfa_stat_mask(rdev->chip_ctx, + BYTE_MASK); + pkts_mask = bnxt_re_get_cfa_stat_mask(rdev->chip_ctx, + PKTS_MASK); + /* + * Calculate the number of cnp packets and use + * the value to calculate the CRC bytes. + * Multiply pkts with 4 and add it to total bytes + */ + pkts = bnxt_re_stat_diff(tmp_counters[0].cnp_tx_pkts, + &cnps->prev[0].cnp_tx_pkts, + pkts_mask); + cnps->cur[0].cnp_tx_pkts += pkts; + diff = bnxt_re_stat_diff(tmp_counters[0].cnp_tx_bytes, + &cnps->prev[0].cnp_tx_bytes, + byte_mask); + bytes = diff + pkts * 4; + cnps->cur[0].cnp_tx_bytes += bytes; + pkts = bnxt_re_stat_diff(tmp_counters[0].cnp_rx_pkts, + &cnps->prev[0].cnp_rx_pkts, + pkts_mask); + cnps->cur[0].cnp_rx_pkts += pkts; + bytes = bnxt_re_stat_diff(tmp_counters[0].cnp_rx_bytes, + &cnps->prev[0].cnp_rx_bytes, + byte_mask); + cnps->cur[0].cnp_rx_bytes += bytes; +
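Editor's note: the per-port accumulation in this else branch (port 0 above, port 1 and the roce-only counters below) relies on bnxt_re_stat_diff() and bnxt_re_get_cfa_stat_mask() from stats.h: the CFA byte counter is 36 bits wide (35 bits on P5/P7 chips) and the packet counter 28 bits (29 bits on P5/P7), so each delta is taken modulo the counter width to survive wrap-around, and 4 bytes of Ethernet FCS per packet are added back into the byte totals, which is what the "Multiply pkts with 4" comments refer to. A self-contained sketch of that arithmetic, with made-up sample values:

```c
/*
 * Standalone sketch of the wrap-safe delta used here. stat_diff() mirrors
 * bnxt_re_stat_diff() from stats.h; the mask widths are taken from
 * BNXT_RE_CFA_STAT_*_MASK. Names and sample values are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

#define CFA_BYTES_MASK 0xFFFFFFFFFULL /* 36-bit byte counter */
#define CFA_PKTS_MASK  0xFFFFFFFULL   /* 28-bit packet counter */

static uint64_t stat_diff(uint64_t cur, uint64_t *prev, uint64_t mask)
{
	uint64_t diff;

	if (!cur)                      /* counter not reported yet */
		return 0;
	diff = (cur - *prev) & mask;   /* modulo arithmetic absorbs wrap-around */
	if (diff)
		*prev = cur;
	return diff;
}

int main(void)
{
	/* Pretend the 28-bit packet counter wrapped between two polls. */
	uint64_t prev_pkts = 0xFFFFFF0ULL, cur_pkts = 0x0000010ULL;
	uint64_t prev_bytes = 0x0ULL, cur_bytes = 0x8000ULL;
	uint64_t pkts, bytes;

	pkts = stat_diff(cur_pkts, &prev_pkts, CFA_PKTS_MASK);
	bytes = stat_diff(cur_bytes, &prev_bytes, CFA_BYTES_MASK) + pkts * 4;

	printf("pkts delta %llu, bytes delta incl. FCS %llu\n",
	       (unsigned long long)pkts, (unsigned long long)bytes);
	return 0;
}
```

Note that bnxt_re_stat_diff() deliberately returns 0 while the current reading is still 0 and only advances *prev when the delta is non-zero, so a port that has not reported yet does not disturb the running totals.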
+ /* + * Calculate the number of cnp packets and use + * the value to calculate the CRC bytes. + * Multiply pkts with 4 and add it to total bytes + */ + pkts = bnxt_re_stat_diff(tmp_counters[1].cnp_tx_pkts, + &cnps->prev[1].cnp_tx_pkts, + pkts_mask); + cnps->cur[1].cnp_tx_pkts += pkts; + diff = bnxt_re_stat_diff(tmp_counters[1].cnp_tx_bytes, + &cnps->prev[1].cnp_tx_bytes, + byte_mask); + cnps->cur[1].cnp_tx_bytes += diff + pkts * 4; + pkts = bnxt_re_stat_diff(tmp_counters[1].cnp_rx_pkts, + &cnps->prev[1].cnp_rx_pkts, + pkts_mask); + cnps->cur[1].cnp_rx_pkts += pkts; + bytes = bnxt_re_stat_diff(tmp_counters[1].cnp_rx_bytes, + &cnps->prev[1].cnp_rx_bytes, + byte_mask); + cnps->cur[1].cnp_rx_bytes += bytes; + + pkts = bnxt_re_stat_diff(roce_only_tmp[0].tx_pkts, + &dstat->prev[0].tx_pkts, + pkts_mask); + dstat->cur[0].tx_pkts += pkts; + diff = bnxt_re_stat_diff(roce_only_tmp[0].tx_bytes, + &dstat->prev[0].tx_bytes, + byte_mask); + dstat->cur[0].tx_bytes += diff + pkts * 4; + pkts = bnxt_re_stat_diff(roce_only_tmp[0].rx_pkts, + &dstat->prev[0].rx_pkts, + pkts_mask); + dstat->cur[0].rx_pkts += pkts; + + bytes = bnxt_re_stat_diff(roce_only_tmp[0].rx_bytes, + &dstat->prev[0].rx_bytes, + byte_mask); + dstat->cur[0].rx_bytes += bytes; + pkts = bnxt_re_stat_diff(roce_only_tmp[1].tx_pkts, + &dstat->prev[1].tx_pkts, + pkts_mask); + dstat->cur[1].tx_pkts += pkts; + diff = bnxt_re_stat_diff(roce_only_tmp[1].tx_bytes, + &dstat->prev[1].tx_bytes, + byte_mask); + dstat->cur[1].tx_bytes += diff + pkts * 4; + pkts = bnxt_re_stat_diff(roce_only_tmp[1].rx_pkts, + &dstat->prev[1].rx_pkts, + pkts_mask); + dstat->cur[1].rx_pkts += pkts; + bytes = bnxt_re_stat_diff(roce_only_tmp[1].rx_bytes, + &dstat->prev[1].rx_bytes, + byte_mask); + dstat->cur[1].rx_bytes += bytes; + } +done: + rtnl_unlock(); + return rc; +} + +static void bnxt_re_copy_ext_stats(struct bnxt_re_dev *rdev, + u8 indx, struct bnxt_qplib_ext_stat *s) +{ + struct bnxt_re_ext_roce_stats *e_errs; + struct bnxt_re_cnp_counters *cnp; + struct bnxt_re_ext_rstat *ext_d; + struct bnxt_re_ro_counters *ro; + + cnp = &rdev->stats.cnps.cur[indx]; + ro = &rdev->stats.dstat.cur[indx]; + ext_d = &rdev->stats.dstat.ext_rstat[indx]; + e_errs = &rdev->stats.dstat.e_errs; + + cnp->cnp_tx_pkts = s->tx_cnp; + cnp->cnp_rx_pkts = s->rx_cnp; + cnp->ecn_marked = s->rx_ecn_marked; + + /* In bonding mode do not duplicate other stats */ + if (indx) + return; + + ro->tx_pkts = s->tx_roce_pkts; + ro->tx_bytes = s->tx_roce_bytes; + ro->rx_pkts = s->rx_roce_pkts; + ro->rx_bytes = s->rx_roce_bytes; + + ext_d->tx.atomic_req = s->tx_atomic_req; + ext_d->tx.read_req = s->tx_read_req; + ext_d->tx.read_resp = s->tx_read_res; + ext_d->tx.write_req = s->tx_write_req; + ext_d->tx.send_req = s->tx_send_req; + ext_d->rx.atomic_req = s->rx_atomic_req; + ext_d->rx.read_req = s->rx_read_req; + ext_d->rx.read_resp = s->rx_read_res; + ext_d->rx.write_req = s->rx_write_req; + ext_d->rx.send_req = s->rx_send_req; + ext_d->grx.rx_pkts = s->rx_roce_good_pkts; + ext_d->grx.rx_bytes = s->rx_roce_good_bytes; + ext_d->rx_dcn_payload_cut = s->rx_dcn_payload_cut; + ext_d->te_bypassed = s->te_bypassed; + e_errs->oob = s->rx_out_of_buffer; + e_errs->oos = s->rx_out_of_sequence; + e_errs->seq_err_naks_rcvd = s->seq_err_naks_rcvd; + e_errs->rnr_naks_rcvd = s->rnr_naks_rcvd; + e_errs->missing_resp = s->missing_resp; + e_errs->to_retransmits = s->to_retransmits; + e_errs->dup_req = s->dup_req; +} + +static int bnxt_re_get_ext_stat(struct bnxt_re_dev *rdev) +{ + struct bnxt_qplib_ext_stat estat[2] = {{}, {}}; + struct bnxt_qplib_query_stats_info sinfo; + u32 fid; + int rc; + + fid =
PCI_FUNC(rdev->en_dev->pdev->devfn); + /* Set default values for sinfo */ + sinfo.function_id = 0xFFFFFFFF; + sinfo.collection_id = 0xFF; + sinfo.vf_valid = false; + rc = bnxt_qplib_qext_stat(&rdev->rcfw, fid, &estat[0], &sinfo); + if (rc) + goto done; + bnxt_re_copy_ext_stats(rdev, 0, &estat[0]); + + if (rdev->binfo) { + fid = PCI_FUNC(rdev->binfo->pdev2->devfn); + rc = bnxt_qplib_qext_stat(&rdev->rcfw, fid, &estat[1], &sinfo); + if (rc) + goto done; + bnxt_re_copy_ext_stats(rdev, 1, &estat[1]); + } +done: + return rc; +} + +static void bnxt_re_copy_rstat(struct bnxt_re_rdata_counters *d, + struct ctx_hw_stats_ext *s, + bool is_thor) +{ + d->tx_ucast_pkts = le64_to_cpu(s->tx_ucast_pkts); + d->tx_mcast_pkts = le64_to_cpu(s->tx_mcast_pkts); + d->tx_bcast_pkts = le64_to_cpu(s->tx_bcast_pkts); + d->tx_discard_pkts = le64_to_cpu(s->tx_discard_pkts); + d->tx_error_pkts = le64_to_cpu(s->tx_error_pkts); + d->tx_ucast_bytes = le64_to_cpu(s->tx_ucast_bytes); + /* Add four bytes of CRC bytes per packet */ + d->tx_ucast_bytes += d->tx_ucast_pkts * 4; + d->tx_mcast_bytes = le64_to_cpu(s->tx_mcast_bytes); + d->tx_bcast_bytes = le64_to_cpu(s->tx_bcast_bytes); + d->rx_ucast_pkts = le64_to_cpu(s->rx_ucast_pkts); + d->rx_mcast_pkts = le64_to_cpu(s->rx_mcast_pkts); + d->rx_bcast_pkts = le64_to_cpu(s->rx_bcast_pkts); + d->rx_discard_pkts = le64_to_cpu(s->rx_discard_pkts); + d->rx_error_pkts = le64_to_cpu(s->rx_error_pkts); + d->rx_ucast_bytes = le64_to_cpu(s->rx_ucast_bytes); + d->rx_mcast_bytes = le64_to_cpu(s->rx_mcast_bytes); + d->rx_bcast_bytes = le64_to_cpu(s->rx_bcast_bytes); + if (is_thor) { + d->rx_agg_pkts = le64_to_cpu(s->rx_tpa_pkt); + d->rx_agg_bytes = le64_to_cpu(s->rx_tpa_bytes); + d->rx_agg_events = le64_to_cpu(s->rx_tpa_events); + d->rx_agg_aborts = le64_to_cpu(s->rx_tpa_errors); + } +} + +static void bnxt_re_get_roce_data_stats(struct bnxt_re_dev *rdev) +{ + bool is_thor = _is_chip_gen_p5_p7(rdev->chip_ctx); + struct bnxt_re_rdata_counters *rstat; + + rstat = &rdev->stats.dstat.rstat[0]; + bnxt_re_copy_rstat(rstat, rdev->qplib_res.hctx->stats.dma, is_thor); + + /* Query second port if LAG is enabled */ + if (rdev->binfo) { + rstat = &rdev->stats.dstat.rstat[1]; + bnxt_re_copy_rstat(rstat, rdev->qplib_res.hctx->stats2.dma, is_thor); + } +} + +int bnxt_re_get_device_stats(struct bnxt_re_dev *rdev) +{ + struct bnxt_qplib_query_stats_info sinfo; + int rc = 0; + + /* Stats are in 1s cadence */ + if (test_bit(BNXT_RE_FLAG_ISSUE_CFA_FLOW_STATS, &rdev->flags)) { + if (bnxt_ext_stats_supported(rdev->chip_ctx, rdev->dev_attr->dev_cap_flags, + rdev->is_virtfn)) + rc = bnxt_re_get_ext_stat(rdev); + else if (!_is_chip_gen_p5_p7(rdev->chip_ctx)) + rc = bnxt_re_get_qos_stats(rdev); + + if (rc && rc != -ENOMEM) + clear_bit(BNXT_RE_FLAG_ISSUE_CFA_FLOW_STATS, + &rdev->flags); + } + + if (test_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags)) { + bnxt_re_get_roce_data_stats(rdev); + + /* Set default values for sinfo */ + sinfo.function_id = 0xFFFFFFFF; + sinfo.collection_id = 0xFF; + sinfo.vf_valid = false; + rc = bnxt_qplib_get_roce_error_stats(&rdev->rcfw, + &rdev->stats.dstat.errs, + &sinfo); + if (rc && rc != -ENOMEM) + clear_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, + &rdev->flags); + } + + return rc; +} diff --git a/bnxt_re-1.10.3-229.0.139.0/stats.h b/bnxt_re-1.10.3-229.0.139.0/stats.h new file mode 100644 index 0000000..fbc0e37 --- /dev/null +++ b/bnxt_re-1.10.3-229.0.139.0/stats.h @@ -0,0 +1,232 @@ +/* + * Copyright (c) 2015-2022, Broadcom. All rights reserved. The term + * Broadcom refers to Broadcom Inc. 
and/or its subsidiaries. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Author: Devesh Sharma + * + * Description: statistics related data structures + */ + +#ifndef __STATS_H__ +#define __STATS_H__ + +#define BNXT_RE_CFA_STAT_BYTES_MASK 0xFFFFFFFFF +#define BNXT_RE_CFA_STAT_PKTS_MASK 0xFFFFFFF +enum{ + BYTE_MASK = 0, + PKTS_MASK = 1 +}; + +struct bnxt_re_cnp_counters { + u64 cnp_tx_pkts; + u64 cnp_tx_bytes; + u64 cnp_rx_pkts; + u64 cnp_rx_bytes; + u64 ecn_marked; +}; + +struct bnxt_re_ro_counters { + u64 tx_pkts; + u64 tx_bytes; + u64 rx_pkts; + u64 rx_bytes; +}; + +struct bnxt_re_flow_counters { + struct bnxt_re_ro_counters ro_stats; + struct bnxt_re_cnp_counters cnp_stats; +}; + +struct bnxt_re_ext_cntr { + u64 atomic_req; + u64 read_req; + u64 read_resp; + u64 write_req; + u64 send_req; +}; + +struct bnxt_re_ext_good { + u64 rx_pkts; + u64 rx_bytes; +}; + +struct bnxt_re_ext_rstat { + struct bnxt_re_ext_cntr tx; + struct bnxt_re_ext_cntr rx; + struct bnxt_re_ext_good grx; + u64 rx_dcn_payload_cut; + u64 te_bypassed; +}; + +struct bnxt_re_rdata_counters { + u64 tx_ucast_pkts; + u64 tx_mcast_pkts; + u64 tx_bcast_pkts; + u64 tx_discard_pkts; + u64 tx_error_pkts; + u64 tx_ucast_bytes; + u64 tx_mcast_bytes; + u64 tx_bcast_bytes; + u64 rx_ucast_pkts; + u64 rx_mcast_pkts; + u64 rx_bcast_pkts; + u64 rx_discard_pkts; + u64 rx_error_pkts; + u64 rx_ucast_bytes; + u64 rx_mcast_bytes; + u64 rx_bcast_bytes; + u64 rx_agg_pkts; + u64 rx_agg_bytes; + u64 rx_agg_events; + u64 rx_agg_aborts; +}; + +struct bnxt_re_cc_stat { + struct bnxt_re_cnp_counters prev[2]; + struct bnxt_re_cnp_counters cur[2]; + bool is_first; +}; + +struct bnxt_re_ext_roce_stats { + u64 oob; + u64 oos; + u64 seq_err_naks_rcvd; + u64 rnr_naks_rcvd; + u64 missing_resp; + u64 to_retransmits; + u64 dup_req; +}; + +struct bnxt_re_rstat { + struct bnxt_re_ro_counters prev[2]; + struct bnxt_re_ro_counters cur[2]; + struct bnxt_re_rdata_counters rstat[2]; + struct bnxt_re_ext_rstat ext_rstat[2]; + struct 
bnxt_re_ext_roce_stats e_errs; + struct bnxt_qplib_roce_stats errs; + unsigned long long prev_oob; +}; + +struct bnxt_re_res_cntrs { + atomic_t qp_count; + atomic_t rc_qp_count; + atomic_t ud_qp_count; + atomic_t cq_count; + atomic_t srq_count; + atomic_t mr_count; + atomic_t mw_count; + atomic_t ah_count; + atomic_t pd_count; + atomic_t resize_count; + atomic_t max_qp_count; + atomic_t max_rc_qp_count; + atomic_t max_ud_qp_count; + atomic_t max_cq_count; + atomic_t max_srq_count; + atomic_t max_mr_count; + atomic_t max_mw_count; + atomic_t max_ah_count; + atomic_t max_pd_count; +}; + +struct bnxt_re_device_stats { + struct bnxt_re_rstat dstat; + struct bnxt_re_res_cntrs rsors; + struct bnxt_re_cc_stat cnps; + unsigned long read_tstamp; + /* To be used in case to disable stats query from worker or change + * query interval. 0 means stats_query disabled. + */ + u32 stats_query_sec; + /* A free running counter to be used along with stats_query_sec to + * decide whether to issue the command to FW. + */ + u32 stats_query_counter; +}; + +static inline u64 bnxt_re_get_cfa_stat_mask(struct bnxt_qplib_chip_ctx *cctx, + bool type) +{ + u64 mask; + + if (type == BYTE_MASK) { + mask = BNXT_RE_CFA_STAT_BYTES_MASK; /* 36 bits */ + if (_is_chip_gen_p5_p7(cctx)) + mask >>= 0x01; /* 35 bits */ + } else { + mask = BNXT_RE_CFA_STAT_PKTS_MASK; /* 28 bits */ + if (_is_chip_gen_p5_p7(cctx)) + mask |= (0x10000000ULL); /* 29 bits */ + } + + return mask; +} + +static inline u64 bnxt_re_stat_diff(u64 cur, u64 *prev, u64 mask) +{ + u64 diff; + + if (!cur) + return 0; + diff = (cur - *prev) & mask; + if (diff) + *prev = cur; + return diff; +} + +static inline void bnxt_re_clear_rsors_stat(struct bnxt_re_res_cntrs *rsors) +{ + atomic_set(&rsors->qp_count, 0); + atomic_set(&rsors->cq_count, 0); + atomic_set(&rsors->srq_count, 0); + atomic_set(&rsors->mr_count, 0); + atomic_set(&rsors->mw_count, 0); + atomic_set(&rsors->ah_count, 0); + atomic_set(&rsors->pd_count, 0); + atomic_set(&rsors->resize_count, 0); + atomic_set(&rsors->max_qp_count, 0); + atomic_set(&rsors->max_cq_count, 0); + atomic_set(&rsors->max_srq_count, 0); + atomic_set(&rsors->max_mr_count, 0); + atomic_set(&rsors->max_mw_count, 0); + atomic_set(&rsors->max_ah_count, 0); + atomic_set(&rsors->max_pd_count, 0); + atomic_set(&rsors->max_rc_qp_count, 0); + atomic_set(&rsors->max_ud_qp_count, 0); +} + +int bnxt_re_get_device_stats(struct bnxt_re_dev *rdev); +int bnxt_re_get_flow_stats_from_service_pf(struct bnxt_re_dev *rdev, + struct bnxt_re_flow_counters *stats, + struct bnxt_qplib_query_stats_info *sinfo); +int bnxt_re_get_qos_stats(struct bnxt_re_dev *rdev); +#endif /* __STATS_H__ */ diff --git a/kmod-bnxt_en.spec b/kmod-bnxt.spec similarity index 48% rename from kmod-bnxt_en.spec rename to kmod-bnxt.spec index 87d1c46..289d08b 100644 --- a/kmod-bnxt_en.spec +++ b/kmod-bnxt.spec @@ -1,4 +1,4 @@ -%global pkg bnxt_en +%global pkg bnxt %global kernel kernel version %define pkg_version 1.10.3_229.0.139.0 %define anolis_release 1 @@ -6,12 +6,14 @@ Name: kmod-%{pkg} Version: %(echo %{kernel} | sed -E 's/-/~/g; s/\.(an|al)[0-9]+$//g') Release: %{pkg_version}~%{anolis_release}%{?dist} -Summary: Intel(R) 10GbE PCI Express Virtual Function Driver +Summary: Broadcom NetXtreme-C NetXtreme-E Ethernet Network Driver Source: kmod-%{pkg}-%{pkg_version}.tar.gz -Vendor: Intel Corporation +Vendor: Broadcom Corporation License: @ ExclusiveOS: linux Group: System Environment/Kernel +Provides: bnxt_en +Provides: bnxt_re URL: http://support.intel.com BuildRoot: 
%{_tmppath}/%{pkg}-%{pkgh_version}-root %global debug_package %{nil} @@ -25,10 +27,6 @@ Requires: kernel, findutils, gawk, bash, kmod Provides: kmod-%{pkg}-%{kernel}.%{_arch} = %{version}-%{release} Obsoletes: kmod-%{pkg}-%{kernel}.%{_arch} < %{version}-%{release} -Requires(post): kmod -Requires(postun): kmod -Requires(preun): kmod - %global __strip /bin/true %define kernel_ver %{kernel}.%{_arch} @@ -51,14 +49,37 @@ Requires(preun): kmod BuildRequires: %kernel_module_package_buildreqs_fixed %description -This package contains the Intel(R) 10GbE PCI Express Virtual Function Driver. +This package contains the bnxt_en Linux driver for the Broadcom NetXtreme-C +NetXtreme-E BCM573xx, BCM574xx, BCM575xx, NetXtreme-S BCM5880x +(up to 200 Gbps) Ethernet Network Controllers and Broadcom Nitro +BCM58700 4-port 1/2.5/10 Gbps Ethernet Network Controller and bnxt_re +Linux RoCE driver for the Broadcom NetXtreme-C and NetXtreme-E +10/25/40/50/100/200 Gbps Ethernet Network Controllers. + +%package -n kmod-bnxt_en +Summary: Broadcom NetXtreme-C NetXtreme-E Ethernet Network Driver + +%description -n kmod-bnxt_en +This package contains the bnxt_en Linux driver for the Broadcom NetXtreme-C +NetXtreme-E BCM573xx, BCM574xx, BCM575xx, NetXtreme-S BCM5880x +(up to 200 Gbps) Ethernet Network Controllers and Broadcom Nitro +BCM58700 4-port 1/2.5/10 Gbps Ethernet Network Controller. + +%package -n kmod-bnxt_re +Summary: Broadcom NetXtreme-C NetXtreme-E RoCE Driver +Requires: kmod-bnxt_en = %{version}-%{release} +Conflicts: kmod-bnxt_en < %{version}-%{release} + +%description -n kmod-bnxt_re +This package contains the bnxt_re Linux RoCE driver for the Broadcom +NetXtreme-C and NetXtreme-E 10/25/40/50/100/200 Gbps Ethernet Network Controllers. %prep %setup -q -n kmod-%{pkg}-%{pkg_version} %build -make -C src KDIR=/usr/src/kernels/%{kernel}.%{_arch}/ KVER=%{kernel}.%{_arch} clean -make -C src KDIR=/usr/src/kernels/%{kernel}.%{_arch}/ KVER=%{kernel}.%{_arch} -j32 +make KDIR=/usr/src/kernels/%{kernel}.%{_arch}/ KVER=%{kernel}.%{_arch} clean +make KDIR=/usr/src/kernels/%{kernel}.%{_arch}/ KVER=%{kernel}.%{_arch} -j32 %install rm -rf %{buildroot} @@ -70,27 +91,309 @@ done find %{buildroot}/lib/modules -type f -name \*.ko -exec chmod u+x \{\} \+ %{__install} -d %{buildroot}/%{_sysconfdir}/depmod.d/ -for kmod in $(find %{buildroot}/lib/modules/%{kernel}.%{_arch}/extra -type f -name \*.ko -printf "%%P\n" | sort) +for kmod in $(find %{buildroot}/lib/modules/%{kernel}.%{_arch}/extra -type f -name \bnxt_en.ko -printf "%%P\n" | sort) +do + echo "override $(basename $kmod .ko) * weak-updates/$(dirname $kmod)" >> %{buildroot}/%{_sysconfdir}/depmod.d/bnxt_en.conf + echo "override $(basename $kmod .ko) * extra/$(dirname $kmod)" >> %{buildroot}/%{_sysconfdir}/depmod.d/bnxt_en.conf +done +for kmod in $(find %{buildroot}/lib/modules/%{kernel}.%{_arch}/extra -type f -name \bnxt_re.ko -printf "%%P\n" | sort) do - echo "override $(basename $kmod .ko) * weak-updates/$(dirname $kmod)" >> %{buildroot}/%{_sysconfdir}/depmod.d/%{pkg}.conf - echo "override $(basename $kmod .ko) * extra/$(dirname $kmod)" >> %{buildroot}/%{_sysconfdir}/depmod.d/%{pkg}.conf + echo "override $(basename $kmod .ko) * weak-updates/$(dirname $kmod)" >> %{buildroot}/%{_sysconfdir}/depmod.d/bnxt_re.conf + echo "override $(basename $kmod .ko) * extra/$(dirname $kmod)" >> %{buildroot}/%{_sysconfdir}/depmod.d/bnxt_re.conf done %clean rm -rf %{buildroot} -%files -/lib/modules/%{kernel_ver}/extra/%{pkg}/%{pkg}.ko +%files -n kmod-bnxt_en
+/lib/modules/%{kernel_ver}/extra/%{pkg}/bnxt_en.ko %license licenses -%config(noreplace) %{_sysconfdir}/depmod.d/%{pkg}.conf +%config(noreplace) %{_sysconfdir}/depmod.d/bnxt_en.conf %dir /lib/modules/%{kernel_ver}/extra/%{pkg}/ -%post +%files -n kmod-bnxt_re +/lib/modules/%{kernel_ver}/extra/%{pkg}/bnxt_re.ko +%license licenses +%config(noreplace) %{_sysconfdir}/depmod.d/bnxt_re.conf + +%post -n kmod-bnxt_en + +depmod -a > /dev/null 2>&1 + +if [ -x "/usr/sbin/weak-modules" ]; then + printf '%s\n' "/lib/modules/%{kernel}.%{_arch}/extra/%{pkg}/bnxt_en.ko" | /usr/sbin/weak-modules --no-initramfs --add-modules +fi + +if [ -d /usr/local/share/%{name} ]; then + rm -rf /usr/local/share/%{name} +fi +mkdir /usr/local/share/%{name} +cp --parents %{pciids} /usr/local/share/%{name}/ +echo "original pci.ids saved in /usr/local/share/%{name}"; +if [ "%{pcitable}" != "/dev/null" ]; then + cp --parents %{pcitable} /usr/local/share/%{name}/ + echo "original pcitable saved in /usr/local/share/%{name}"; +fi + +LD="%{_docdir}/%{name}"; +if [ -d %{_docdir}/%{name}-%{version} ]; then + LD="%{_docdir}/%{name}-%{version}"; +fi + +<<"END" +#! /bin/bash +# Copyright (C) 2017 - 2023 Intel Corporation +# For licensing information, see the file 'LICENSE' in the root folder +# $1 = system pci.ids file to update +# $2 = system pcitable file to update +# $3 = file with new entries in pci.ids file format +# $4 = pci.ids output file +# $5 = pcitable output file +# $6 = driver name for use in pcitable file + +exec 3<$1 +exec 4<$2 +exec 5<$3 +exec 6>$4 +exec 7>$5 +driver=$6 +IFS= + +# pattern matching strings +ID="[[:xdigit:]][[:xdigit:]][[:xdigit:]][[:xdigit:]]" +VEN="${ID}*" +DEV=" ${ID}*" +SUB=" ${ID}*" +TABLE_DEV="0x${ID} 0x${ID} \"*" +TABLE_SUB="0x${ID} 0x${ID} 0x${ID} 0x${ID} \"*" + +line= +table_line= +ids_in= +table_in= +vendor= +device= +ids_device= +table_device= +subven= +ids_subven= +table_subven= +subdev= +ids_subdev= +table_subdev= +ven_str= +dev_str= +sub_str= + +# force a sub-shell to fork with a new stdin +# this is needed if the shell is reading these instructions from stdin +while true +do + # get the first line of each data file to jump start things + exec 0<&3 + read -r ids_in + if [ "$2" != "/dev/null" ];then + exec 0<&4 + read -r table_in + fi + + # outer loop reads lines from the updates file + exec 0<&5 + while read -r line + do + # vendor entry + if [[ $line == $VEN ]] + then + vendor=0x${line:0:4} + ven_str=${line#${line:0:6}} + # add entry to pci.ids + exec 0<&3 + exec 1>&6 + while [[ $ids_in != $VEN || + 0x${ids_in:0:4} < $vendor ]] + do + echo "$ids_in" + read -r ids_in + done + echo "$line" + if [[ 0x${ids_in:0:4} == $vendor ]] + then + read -r ids_in + fi + + # device entry + elif [[ $line == $DEV ]] + then + device=`echo ${line:1:4} | tr "[:upper:]" "[:lower:]"` + table_device=0x${line:1:4} + dev_str=${line#${line:0:7}} + ids_device=`echo ${ids_in:1:4} | tr "[:upper:]" "[:lower:]"` + table_line="$vendor $table_device \"$driver\" \"$ven_str|$dev_str\"" + # add entry to pci.ids + exec 0<&3 + exec 1>&6 + while [[ $ids_in != $DEV || + $ids_device < $device ]] + do + if [[ $ids_in == $VEN ]] + then + break + fi + if [[ $ids_device != ${ids_in:1:4} ]] + then + echo "${ids_in:0:1}$ids_device${ids_in#${ids_in:0:5}}" + else + echo "$ids_in" + fi + read -r ids_in + ids_device=`echo ${ids_in:1:4} | tr "[:upper:]" "[:lower:]"` + done + if [[ $device != ${line:1:4} ]] + then + echo "${line:0:1}$device${line#${line:0:5}}" + else + echo "$line" + fi + if [[ $ids_device == $device ]] + then + read -r ids_in 
+ fi + # add entry to pcitable + if [ "$2" != "/dev/null" ];then + exec 0<&4 + exec 1>&7 + while [[ $table_in != $TABLE_DEV || + ${table_in:0:6} < $vendor || + ( ${table_in:0:6} == $vendor && + ${table_in:7:6} < $table_device ) ]] + do + echo "$table_in" + read -r table_in + done + echo "$table_line" + if [[ ${table_in:0:6} == $vendor && + ${table_in:7:6} == $table_device ]] + then + read -r table_in + fi + fi + # subsystem entry + elif [[ $line == $SUB ]] + then + subven=`echo ${line:2:4} | tr "[:upper:]" "[:lower:]"` + subdev=`echo ${line:7:4} | tr "[:upper:]" "[:lower:]"` + table_subven=0x${line:2:4} + table_subdev=0x${line:7:4} + sub_str=${line#${line:0:13}} + ids_subven=`echo ${ids_in:2:4} | tr "[:upper:]" "[:lower:]"` + ids_subdev=`echo ${ids_in:7:4} | tr "[:upper:]" "[:lower:]"` + table_line="$vendor $table_device $table_subven $table_subdev \"$driver\" \"$ven_str|$sub_str\"" + # add entry to pci.ids + exec 0<&3 + exec 1>&6 + while [[ $ids_in != $SUB || + $ids_subven < $subven || + ( $ids_subven == $subven && + $ids_subdev < $subdev ) ]] + do + if [[ $ids_in == $VEN || + $ids_in == $DEV ]] + then + break + fi + if [[ ! (${ids_in:2:4} == "1014" && + ${ids_in:7:4} == "052C") ]] + then + if [[ $ids_subven != ${ids_in:2:4} || $ids_subdev != ${ids_in:7:4} ]] + then + echo "${ids_in:0:2}$ids_subven $ids_subdev${ids_in#${ids_in:0:11}}" + else + echo "$ids_in" + fi + fi + read -r ids_in + ids_subven=`echo ${ids_in:2:4} | tr "[:upper:]" "[:lower:]"` + ids_subdev=`echo ${ids_in:7:4} | tr "[:upper:]" "[:lower:]"` + done + if [[ $subven != ${line:2:4} || $subdev != ${line:7:4} ]] + then + echo "${line:0:2}$subven $subdev${line#${line:0:11}}" + else + echo "$line" + fi + if [[ $ids_subven == $subven && + $ids_subdev == $subdev ]] + then + read -r ids_in + fi + # add entry to pcitable + if [ "$2" != "/dev/null" ];then + exec 0<&4 + exec 1>&7 + while [[ $table_in != $TABLE_SUB || + ${table_in:14:6} < $table_subven || + ( ${table_in:14:6} == $table_subven && + ${table_in:21:6} < $table_subdev ) ]] + do + if [[ $table_in == $TABLE_DEV ]] + then + break + fi + if [[ ! 
(${table_in:14:6} == "0x1014" && + ${table_in:21:6} == "0x052C") ]] + then + echo "$table_in" + fi + read -r table_in + done + echo "$table_line" + if [[ ${table_in:14:6} == $table_subven && + ${table_in:21:6} == $table_subdev ]] + then + read -r table_in + fi + fi + fi + + exec 0<&5 + done + + # print the remainder of the original files + exec 0<&3 + exec 1>&6 + echo "$ids_in" + while read -r ids_in + do + echo "$ids_in" + done + + if [ "$2" != "/dev/null" ];then + exec 0<&4 + exec 1>&7 + echo "$table_in" + while read -r table_in + do + echo "$table_in" + done + fi + + break +done <&5 + +exec 3<&- +exec 4<&- +exec 5<&- +exec 6>&- +exec 7>&- + +END + +%post -n kmod-bnxt_re depmod -a > /dev/null 2>&1 if [ -x "/usr/sbin/weak-modules" ]; then - printf '%s\n' "/lib/modules/%{kernel}.%{_arch}/extra/bnxt_en/%{pkg}.ko" | /usr/sbin/weak-modules --no-initramfs --add-modules + printf '%s\n' "/lib/modules/%{kernel}.%{_arch}/extra/%{pkg}/bnxt_re.ko" | /usr/sbin/weak-modules --no-initramfs --add-modules fi if [ -d /usr/local/share/%{name} ]; then @@ -357,19 +660,53 @@ exec 7>&- END +%preun -n kmod-bnxt_en +echo "/lib/modules/%{kernel}.%{_arch}/extra/%{pkg}/bnxt_en.ko" >> /var/run/rpm-bnxt_en-modules.list -%preun +%preun -n kmod-bnxt_re +echo "/lib/modules/%{kernel}.%{_arch}/extra/%{pkg}/bnxt_re.ko" >> /var/run/rpm-bnxt_re-modules.list -echo "/lib/modules/%{kernel}.%{_arch}/extra/bnxt_en/%{pkg}.ko" >> /var/run/rpm-%{pkg}-modules.list +%postun -n kmod-bnxt_en +uname -r | grep BOOT || /sbin/depmod -a > /dev/null 2>&1 || true + +if [ -x "/usr/sbin/weak-modules" ]; then + modules=( $(cat /var/run/rpm-bnxt_en-modules.list) ) + printf '%s\n' "${modules[@]}" | /usr/sbin/weak-modules --no-initramfs --remove-modules +fi +rm /var/run/rpm-bnxt_en-modules.list + +if which dracut >/dev/null 2>&1; then + echo "Updating initramfs with dracut..." + if dracut --force ; then + echo "Successfully updated initramfs." + else + echo "Failed to update initramfs." + echo "You must update your initramfs image for changes to take place." + exit -1 + fi +elif which mkinitrd >/dev/null 2>&1; then + echo "Updating initrd with mkinitrd..." + if mkinitrd; then + echo "Successfully updated initrd." + else + echo "Failed to update initrd." + echo "You must update your initrd image for changes to take place." + exit -1 + fi +else + echo "Unable to determine utility to update initrd image." + echo "You must update your initrd manually for changes to take place." + exit -1 +fi -%postun +%postun -n kmod-bnxt_re uname -r | grep BOOT || /sbin/depmod -a > /dev/null 2>&1 || true if [ -x "/usr/sbin/weak-modules" ]; then - modules=( $(cat /var/run/rpm-%{pkg}-modules.list) ) + modules=( $(cat /var/run/rpm-bnxt_re-modules.list) ) printf '%s\n' "${modules[@]}" | /usr/sbin/weak-modules --no-initramfs --remove-modules fi -rm /var/run/rpm-%{pkg}-modules.list +rm /var/run/rpm-bnxt_re-modules.list if which dracut >/dev/null 2>&1; then echo "Updating initramfs with dracut..." @@ -396,5 +733,5 @@ else fi %changelog -* Mon Jul 29 2024 Bitao Hu - 1.10.3_229.0.139.0 -- init spec \ No newline at end of file +* Fri Oct 11 2024 wangkaiyuan - 1.10.3_229.0.139.0~1 +- Merge bnxt_en and bnxt_re build. -- Gitee
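Editor's note: the spec changes above split the old single-package scriptlets into per-module kmod-bnxt_en and kmod-bnxt_re variants, but the lifecycle itself is unchanged: %install writes a depmod.d override file per module, %post runs depmod and registers the .ko with weak-modules, %preun records the module path, and %postun unregisters it and rebuilds the initramfs with dracut (or mkinitrd). A condensed, hedged sketch of that flow for one module follows; MODULE, KVER and the literal paths are illustrative stand-ins, the real scriptlets use the RPM macros shown above.

```sh
#!/bin/bash
# Condensed sketch of the kmod scriptlet lifecycle used by this spec.
# MODULE, KVER and the paths below are illustrative stand-ins.
set -eu

MODULE=${MODULE:-bnxt_re}
KVER=${KVER:-$(uname -r)}
KO=/lib/modules/${KVER}/extra/bnxt/${MODULE}.ko

do_install() {
    # %install step: make depmod prefer the out-of-tree module
    cat > /etc/depmod.d/${MODULE}.conf <<EOF
override ${MODULE} * weak-updates/bnxt
override ${MODULE} * extra/bnxt
EOF
    # %post step: refresh module dependencies and register with weak-modules
    depmod -a > /dev/null 2>&1
    if [ -x /usr/sbin/weak-modules ]; then
        printf '%s\n' "$KO" | /usr/sbin/weak-modules --no-initramfs --add-modules
    fi
}

do_remove() {
    # %preun records the path; %postun unregisters it and rebuilds the initramfs
    if [ -x /usr/sbin/weak-modules ]; then
        printf '%s\n' "$KO" | /usr/sbin/weak-modules --no-initramfs --remove-modules
    fi
    depmod -a > /dev/null 2>&1
    if command -v dracut > /dev/null 2>&1; then
        dracut --force
    fi
}

case "${1:-}" in
    install) do_install ;;
    remove)  do_remove ;;
    *)       echo "usage: $0 install|remove" >&2; exit 1 ;;
esac
```

A hypothetical invocation would be `./kmod-lifecycle.sh install` after copying the module into place, or `./kmod-lifecycle.sh remove` before deleting it; the spec performs the same steps via the per-package %post/%preun/%postun scriptlets.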