Leon Romanovsky says:

====================
Mellanox shared branch that includes:

 * Removal of FPGA TLS code https://lore.kernel.org/all/cover.1649073691.git.leonro@nvidia.com

  Mellanox INNOVA TLS cards reached EOL in May 2018 [1]. As such, the code
  is unmaintained, untested and not in use by any upstream/distro-oriented
  customers. To reduce code complexity, drop the kernel code, clean up the
  build config options and delete the useless kTLS vs. TLS separation.

  [1] https://network.nvidia.com/related-docs/eol/LCR-000286.pdf

 * Removal of FPGA IPsec code https://lore.kernel.org/all/cover.1649232994.git.leonro@nvidia.com

  Together with FPGA TLS, the FPGA IPsec code reached EOL in November
  2019 [2]. Exactly as with FPGA TLS, no active customers exist for this
  upstream code, so all the complexity around that area can be deleted.

  [2] https://network.nvidia.com/related-docs/eol/LCR-000535.pdf

 * Fix to undefined behavior from Borislav https://lore.kernel.org/all/20220405151517.29753-11-bp@alien8.de
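
  For context, this is the class of bug such fixes address (a minimal,
  hedged sketch; CAP_FLAG is a hypothetical macro, not the exact
  constant touched in IB/mlx5). Shifting a signed int constant into bit
  31 overflows it, which is undefined behavior in C, and the usual fix
  is to force an unsigned type:

	/* before: 1 is a signed 32-bit int, so (1 << 31) overflows (UB) */
	#define CAP_FLAG(bit)	(1 << (bit))

	/* after: the unsigned shift is well-defined for bit <= 31
	 * (use 1ULL for wider masks)
	 */
	#define CAP_FLAG(bit)	(1UL << (bit))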
====================

* 'mlx5-next' of https://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux:
  net/mlx5: Remove not-implemented IPsec capabilities
  net/mlx5: Remove ipsec_ops function table
  net/mlx5: Reduce kconfig complexity while building crypto support
  net/mlx5: Move IPsec file to relevant directory
  net/mlx5: Remove not-needed IPsec config
  net/mlx5: Align flow steering allocation namespace to common style
  net/mlx5: Unify device IPsec capabilities check
  net/mlx5: Remove useless IPsec device checks
  net/mlx5: Remove ipsec vs. ipsec offload file separation
  RDMA/core: Delete IPsec flow action logic from the core
  RDMA/mlx5: Drop crypto flow steering API
  RDMA/mlx5: Delete never supported IPsec flow action
  net/mlx5: Remove FPGA ipsec specific statistics
  net/mlx5: Remove XFRM no_trailer flag
  net/mlx5: Remove not-used IDA field from IPsec struct
  net/mlx5: Delete metadata handling logic
  net/mlx5_fpga: Drop INNOVA IPsec support
  IB/mlx5: Fix undefined behavior due to shift overflowing the constant
  net/mlx5: Cleanup kTLS function names and their exposure
  net/mlx5: Remove tls vs. ktls separation as it is the same
  net/mlx5: Remove indirection in TLS build
  net/mlx5: Reliably return TLS device capabilities
  net/mlx5_fpga: Drop INNOVA TLS support

Link: https://lore.kernel.org/r/20220409055303.1223644-1-leon@kernel.org
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Committed by Jason Gunthorpe <jgg@nvidia.com> on 2022-04-12 10:43:36 -03:00
54 changed files with 365 additions and 5355 deletions


@@ -2613,7 +2613,6 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, create_counters);
SET_DEVICE_OP(dev_ops, create_cq);
SET_DEVICE_OP(dev_ops, create_flow);
SET_DEVICE_OP(dev_ops, create_flow_action_esp);
SET_DEVICE_OP(dev_ops, create_qp);
SET_DEVICE_OP(dev_ops, create_rwq_ind_table);
SET_DEVICE_OP(dev_ops, create_srq);
@@ -2676,7 +2675,6 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, modify_ah);
SET_DEVICE_OP(dev_ops, modify_cq);
SET_DEVICE_OP(dev_ops, modify_device);
SET_DEVICE_OP(dev_ops, modify_flow_action_esp);
SET_DEVICE_OP(dev_ops, modify_hw_stat);
SET_DEVICE_OP(dev_ops, modify_port);
SET_DEVICE_OP(dev_ops, modify_qp);


@@ -46,385 +46,6 @@ static int uverbs_free_flow_action(struct ib_uobject *uobject,
return action->device->ops.destroy_flow_action(action);
}
static u64 esp_flags_uverbs_to_verbs(struct uverbs_attr_bundle *attrs,
u32 flags, bool is_modify)
{
u64 verbs_flags = flags;
if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_FLOW_ACTION_ESP_ESN))
verbs_flags |= IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED;
if (is_modify && uverbs_attr_is_valid(attrs,
UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS))
verbs_flags |= IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS;
return verbs_flags;
};
static int validate_flow_action_esp_keymat_aes_gcm(struct ib_flow_action_attrs_esp_keymats *keymat)
{
struct ib_uverbs_flow_action_esp_keymat_aes_gcm *aes_gcm =
&keymat->keymat.aes_gcm;
if (aes_gcm->iv_algo > IB_UVERBS_FLOW_ACTION_IV_ALGO_SEQ)
return -EOPNOTSUPP;
if (aes_gcm->key_len != 32 &&
aes_gcm->key_len != 24 &&
aes_gcm->key_len != 16)
return -EINVAL;
if (aes_gcm->icv_len != 16 &&
aes_gcm->icv_len != 8 &&
aes_gcm->icv_len != 12)
return -EINVAL;
return 0;
}
static int (* const flow_action_esp_keymat_validate[])(struct ib_flow_action_attrs_esp_keymats *keymat) = {
[IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM] = validate_flow_action_esp_keymat_aes_gcm,
};
static int flow_action_esp_replay_none(struct ib_flow_action_attrs_esp_replays *replay,
bool is_modify)
{
/* This is used to modify an esp flow action with replay protection
 * enabled into one with it disabled. This is only supported via
 * modify, as with the create verb we can simply drop the REPLAY
 * attribute and achieve the same thing.
 */
return is_modify ? 0 : -EINVAL;
}
static int flow_action_esp_replay_def_ok(struct ib_flow_action_attrs_esp_replays *replay,
bool is_modify)
{
/* Some replay protections could always be enabled without validating
* anything.
*/
return 0;
}
static int (* const flow_action_esp_replay_validate[])(struct ib_flow_action_attrs_esp_replays *replay,
bool is_modify) = {
[IB_UVERBS_FLOW_ACTION_ESP_REPLAY_NONE] = flow_action_esp_replay_none,
[IB_UVERBS_FLOW_ACTION_ESP_REPLAY_BMP] = flow_action_esp_replay_def_ok,
};
static int parse_esp_ip(enum ib_flow_spec_type proto,
const void __user *val_ptr,
size_t len, union ib_flow_spec *out)
{
int ret;
const struct ib_uverbs_flow_ipv4_filter ipv4 = {
.src_ip = cpu_to_be32(0xffffffffUL),
.dst_ip = cpu_to_be32(0xffffffffUL),
.proto = 0xff,
.tos = 0xff,
.ttl = 0xff,
.flags = 0xff,
};
const struct ib_uverbs_flow_ipv6_filter ipv6 = {
.src_ip = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
.dst_ip = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
.flow_label = cpu_to_be32(0xffffffffUL),
.next_hdr = 0xff,
.traffic_class = 0xff,
.hop_limit = 0xff,
};
union {
struct ib_uverbs_flow_ipv4_filter ipv4;
struct ib_uverbs_flow_ipv6_filter ipv6;
} user_val = {};
const void *user_pmask;
size_t val_len;
/* If the IPv4/IPv6 flow specifications are extended, the masks
 * should be changed as well.
 */
BUILD_BUG_ON(offsetof(struct ib_uverbs_flow_ipv4_filter, flags) +
sizeof(ipv4.flags) != sizeof(ipv4));
BUILD_BUG_ON(offsetof(struct ib_uverbs_flow_ipv6_filter, reserved) +
sizeof(ipv6.reserved) != sizeof(ipv6));
switch (proto) {
case IB_FLOW_SPEC_IPV4:
if (len > sizeof(user_val.ipv4) &&
!ib_is_buffer_cleared(val_ptr + sizeof(user_val.ipv4),
len - sizeof(user_val.ipv4)))
return -EOPNOTSUPP;
val_len = min_t(size_t, len, sizeof(user_val.ipv4));
ret = copy_from_user(&user_val.ipv4, val_ptr,
val_len);
if (ret)
return -EFAULT;
user_pmask = &ipv4;
break;
case IB_FLOW_SPEC_IPV6:
if (len > sizeof(user_val.ipv6) &&
!ib_is_buffer_cleared(val_ptr + sizeof(user_val.ipv6),
len - sizeof(user_val.ipv6)))
return -EOPNOTSUPP;
val_len = min_t(size_t, len, sizeof(user_val.ipv6));
ret = copy_from_user(&user_val.ipv6, val_ptr,
val_len);
if (ret)
return -EFAULT;
user_pmask = &ipv6;
break;
default:
return -EOPNOTSUPP;
}
return ib_uverbs_kern_spec_to_ib_spec_filter(proto, user_pmask,
&user_val,
val_len, out);
}
static int flow_action_esp_get_encap(struct ib_flow_spec_list *out,
struct uverbs_attr_bundle *attrs)
{
struct ib_uverbs_flow_action_esp_encap uverbs_encap;
int ret;
ret = uverbs_copy_from(&uverbs_encap, attrs,
UVERBS_ATTR_FLOW_ACTION_ESP_ENCAP);
if (ret)
return ret;
/* We currently support only one encap */
if (uverbs_encap.next_ptr)
return -EOPNOTSUPP;
if (uverbs_encap.type != IB_FLOW_SPEC_IPV4 &&
uverbs_encap.type != IB_FLOW_SPEC_IPV6)
return -EOPNOTSUPP;
return parse_esp_ip(uverbs_encap.type,
u64_to_user_ptr(uverbs_encap.val_ptr),
uverbs_encap.len,
&out->spec);
}
struct ib_flow_action_esp_attr {
struct ib_flow_action_attrs_esp hdr;
struct ib_flow_action_attrs_esp_keymats keymat;
struct ib_flow_action_attrs_esp_replays replay;
/* We currently support only one spec */
struct ib_flow_spec_list encap;
};
#define ESP_LAST_SUPPORTED_FLAG IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW
static int parse_flow_action_esp(struct ib_device *ib_dev,
struct uverbs_attr_bundle *attrs,
struct ib_flow_action_esp_attr *esp_attr,
bool is_modify)
{
struct ib_uverbs_flow_action_esp uverbs_esp = {};
int ret;
/* Optional param, if it doesn't exist, we get -ENOENT and skip it */
ret = uverbs_copy_from(&esp_attr->hdr.esn, attrs,
UVERBS_ATTR_FLOW_ACTION_ESP_ESN);
if (IS_UVERBS_COPY_ERR(ret))
return ret;
/* This can be called from FLOW_ACTION_ESP_MODIFY where
* UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS is optional
*/
if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS)) {
ret = uverbs_copy_from_or_zero(&uverbs_esp, attrs,
UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS);
if (ret)
return ret;
if (uverbs_esp.flags & ~((ESP_LAST_SUPPORTED_FLAG << 1) - 1))
return -EOPNOTSUPP;
esp_attr->hdr.spi = uverbs_esp.spi;
esp_attr->hdr.seq = uverbs_esp.seq;
esp_attr->hdr.tfc_pad = uverbs_esp.tfc_pad;
esp_attr->hdr.hard_limit_pkts = uverbs_esp.hard_limit_pkts;
}
esp_attr->hdr.flags = esp_flags_uverbs_to_verbs(attrs, uverbs_esp.flags,
is_modify);
if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_FLOW_ACTION_ESP_KEYMAT)) {
esp_attr->keymat.protocol =
uverbs_attr_get_enum_id(attrs,
UVERBS_ATTR_FLOW_ACTION_ESP_KEYMAT);
ret = uverbs_copy_from_or_zero(&esp_attr->keymat.keymat,
attrs,
UVERBS_ATTR_FLOW_ACTION_ESP_KEYMAT);
if (ret)
return ret;
ret = flow_action_esp_keymat_validate[esp_attr->keymat.protocol](&esp_attr->keymat);
if (ret)
return ret;
esp_attr->hdr.keymat = &esp_attr->keymat;
}
if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_FLOW_ACTION_ESP_REPLAY)) {
esp_attr->replay.protocol =
uverbs_attr_get_enum_id(attrs,
UVERBS_ATTR_FLOW_ACTION_ESP_REPLAY);
ret = uverbs_copy_from_or_zero(&esp_attr->replay.replay,
attrs,
UVERBS_ATTR_FLOW_ACTION_ESP_REPLAY);
if (ret)
return ret;
ret = flow_action_esp_replay_validate[esp_attr->replay.protocol](&esp_attr->replay,
is_modify);
if (ret)
return ret;
esp_attr->hdr.replay = &esp_attr->replay;
}
if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_FLOW_ACTION_ESP_ENCAP)) {
ret = flow_action_esp_get_encap(&esp_attr->encap, attrs);
if (ret)
return ret;
esp_attr->hdr.encap = &esp_attr->encap;
}
return 0;
}
static int UVERBS_HANDLER(UVERBS_METHOD_FLOW_ACTION_ESP_CREATE)(
struct uverbs_attr_bundle *attrs)
{
struct ib_uobject *uobj = uverbs_attr_get_uobject(
attrs, UVERBS_ATTR_CREATE_FLOW_ACTION_ESP_HANDLE);
struct ib_device *ib_dev = attrs->context->device;
int ret;
struct ib_flow_action *action;
struct ib_flow_action_esp_attr esp_attr = {};
if (!ib_dev->ops.create_flow_action_esp)
return -EOPNOTSUPP;
ret = parse_flow_action_esp(ib_dev, attrs, &esp_attr, false);
if (ret)
return ret;
/* No need to check as this attribute is marked as MANDATORY */
action = ib_dev->ops.create_flow_action_esp(ib_dev, &esp_attr.hdr,
attrs);
if (IS_ERR(action))
return PTR_ERR(action);
uverbs_flow_action_fill_action(action, uobj, ib_dev,
IB_FLOW_ACTION_ESP);
return 0;
}
static int UVERBS_HANDLER(UVERBS_METHOD_FLOW_ACTION_ESP_MODIFY)(
struct uverbs_attr_bundle *attrs)
{
struct ib_uobject *uobj = uverbs_attr_get_uobject(
attrs, UVERBS_ATTR_MODIFY_FLOW_ACTION_ESP_HANDLE);
struct ib_flow_action *action = uobj->object;
int ret;
struct ib_flow_action_esp_attr esp_attr = {};
if (!action->device->ops.modify_flow_action_esp)
return -EOPNOTSUPP;
ret = parse_flow_action_esp(action->device, attrs, &esp_attr, true);
if (ret)
return ret;
if (action->type != IB_FLOW_ACTION_ESP)
return -EINVAL;
return action->device->ops.modify_flow_action_esp(action,
&esp_attr.hdr,
attrs);
}
static const struct uverbs_attr_spec uverbs_flow_action_esp_keymat[] = {
[IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM] = {
.type = UVERBS_ATTR_TYPE_PTR_IN,
UVERBS_ATTR_STRUCT(
struct ib_uverbs_flow_action_esp_keymat_aes_gcm,
aes_key),
},
};
static const struct uverbs_attr_spec uverbs_flow_action_esp_replay[] = {
[IB_UVERBS_FLOW_ACTION_ESP_REPLAY_NONE] = {
.type = UVERBS_ATTR_TYPE_PTR_IN,
UVERBS_ATTR_NO_DATA(),
},
[IB_UVERBS_FLOW_ACTION_ESP_REPLAY_BMP] = {
.type = UVERBS_ATTR_TYPE_PTR_IN,
UVERBS_ATTR_STRUCT(struct ib_uverbs_flow_action_esp_replay_bmp,
size),
},
};
DECLARE_UVERBS_NAMED_METHOD(
UVERBS_METHOD_FLOW_ACTION_ESP_CREATE,
UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_FLOW_ACTION_ESP_HANDLE,
UVERBS_OBJECT_FLOW_ACTION,
UVERBS_ACCESS_NEW,
UA_MANDATORY),
UVERBS_ATTR_PTR_IN(UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS,
UVERBS_ATTR_STRUCT(struct ib_uverbs_flow_action_esp,
hard_limit_pkts),
UA_MANDATORY),
UVERBS_ATTR_PTR_IN(UVERBS_ATTR_FLOW_ACTION_ESP_ESN,
UVERBS_ATTR_TYPE(__u32),
UA_OPTIONAL),
UVERBS_ATTR_ENUM_IN(UVERBS_ATTR_FLOW_ACTION_ESP_KEYMAT,
uverbs_flow_action_esp_keymat,
UA_MANDATORY),
UVERBS_ATTR_ENUM_IN(UVERBS_ATTR_FLOW_ACTION_ESP_REPLAY,
uverbs_flow_action_esp_replay,
UA_OPTIONAL),
UVERBS_ATTR_PTR_IN(
UVERBS_ATTR_FLOW_ACTION_ESP_ENCAP,
UVERBS_ATTR_TYPE(struct ib_uverbs_flow_action_esp_encap),
UA_OPTIONAL));
DECLARE_UVERBS_NAMED_METHOD(
UVERBS_METHOD_FLOW_ACTION_ESP_MODIFY,
UVERBS_ATTR_IDR(UVERBS_ATTR_MODIFY_FLOW_ACTION_ESP_HANDLE,
UVERBS_OBJECT_FLOW_ACTION,
UVERBS_ACCESS_WRITE,
UA_MANDATORY),
UVERBS_ATTR_PTR_IN(UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS,
UVERBS_ATTR_STRUCT(struct ib_uverbs_flow_action_esp,
hard_limit_pkts),
UA_OPTIONAL),
UVERBS_ATTR_PTR_IN(UVERBS_ATTR_FLOW_ACTION_ESP_ESN,
UVERBS_ATTR_TYPE(__u32),
UA_OPTIONAL),
UVERBS_ATTR_ENUM_IN(UVERBS_ATTR_FLOW_ACTION_ESP_KEYMAT,
uverbs_flow_action_esp_keymat,
UA_OPTIONAL),
UVERBS_ATTR_ENUM_IN(UVERBS_ATTR_FLOW_ACTION_ESP_REPLAY,
uverbs_flow_action_esp_replay,
UA_OPTIONAL),
UVERBS_ATTR_PTR_IN(
UVERBS_ATTR_FLOW_ACTION_ESP_ENCAP,
UVERBS_ATTR_TYPE(struct ib_uverbs_flow_action_esp_encap),
UA_OPTIONAL));
DECLARE_UVERBS_NAMED_METHOD_DESTROY(
UVERBS_METHOD_FLOW_ACTION_DESTROY,
UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_FLOW_ACTION_HANDLE,
@@ -435,9 +56,7 @@ DECLARE_UVERBS_NAMED_METHOD_DESTROY(
DECLARE_UVERBS_NAMED_OBJECT(
UVERBS_OBJECT_FLOW_ACTION,
UVERBS_TYPE_ALLOC_IDR(uverbs_free_flow_action),
&UVERBS_METHOD(UVERBS_METHOD_FLOW_ACTION_ESP_CREATE),
&UVERBS_METHOD(UVERBS_METHOD_FLOW_ACTION_DESTROY),
&UVERBS_METHOD(UVERBS_METHOD_FLOW_ACTION_ESP_MODIFY));
&UVERBS_METHOD(UVERBS_METHOD_FLOW_ACTION_DESTROY));
const struct uapi_definition uverbs_def_obj_flow_action[] = {
UAPI_DEF_CHAIN_OBJ_TREE_NAMED(


@@ -15,7 +15,6 @@
#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/fs_helpers.h>
#include <linux/mlx5/accel.h>
#include <linux/mlx5/eswitch.h>
#include <net/inet_ecn.h>
#include "mlx5_ib.h"
@@ -148,16 +147,6 @@ int parse_flow_flow_action(struct mlx5_ib_flow_action *maction,
{
switch (maction->ib_action.type) {
case IB_FLOW_ACTION_ESP:
if (action->action & (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
MLX5_FLOW_CONTEXT_ACTION_DECRYPT))
return -EINVAL;
/* Currently only AES_GCM keymat is supported by the driver */
action->esp_id = (uintptr_t)maction->esp_aes_gcm.ctx;
action->action |= is_egress ?
MLX5_FLOW_CONTEXT_ACTION_ENCRYPT :
MLX5_FLOW_CONTEXT_ACTION_DECRYPT;
return 0;
case IB_FLOW_ACTION_UNSPECIFIED:
if (maction->flow_action_raw.sub_type ==
MLX5_IB_FLOW_ACTION_MODIFY_HEADER) {
@@ -368,14 +357,7 @@ static int parse_flow_attr(struct mlx5_core_dev *mdev,
ib_spec->type & IB_FLOW_SPEC_INNER);
break;
case IB_FLOW_SPEC_ESP:
if (ib_spec->esp.mask.seq)
return -EOPNOTSUPP;
MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi,
ntohl(ib_spec->esp.mask.spi));
MLX5_SET(fte_match_set_misc, misc_params_v, outer_esp_spi,
ntohl(ib_spec->esp.val.spi));
break;
return -EOPNOTSUPP;
case IB_FLOW_SPEC_TCP:
if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask,
LAST_TCP_UDP_FIELD))
@@ -587,47 +569,6 @@ static bool flow_is_multicast_only(const struct ib_flow_attr *ib_attr)
return false;
}
enum valid_spec {
VALID_SPEC_INVALID,
VALID_SPEC_VALID,
VALID_SPEC_NA,
};
static enum valid_spec
is_valid_esp_aes_gcm(struct mlx5_core_dev *mdev,
const struct mlx5_flow_spec *spec,
const struct mlx5_flow_act *flow_act,
bool egress)
{
const u32 *match_c = spec->match_criteria;
bool is_crypto =
(flow_act->action & (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
MLX5_FLOW_CONTEXT_ACTION_DECRYPT));
bool is_ipsec = mlx5_fs_is_ipsec_flow(match_c);
bool is_drop = flow_act->action & MLX5_FLOW_CONTEXT_ACTION_DROP;
/*
 * Currently only crypto is supported in egress; when regular egress
 * rules are supported, always return VALID_SPEC_NA.
 */
if (!is_crypto)
return VALID_SPEC_NA;
return is_crypto && is_ipsec &&
(!egress || (!is_drop &&
!(spec->flow_context.flags & FLOW_CONTEXT_HAS_TAG))) ?
VALID_SPEC_VALID : VALID_SPEC_INVALID;
}
static bool is_valid_spec(struct mlx5_core_dev *mdev,
const struct mlx5_flow_spec *spec,
const struct mlx5_flow_act *flow_act,
bool egress)
{
/* We currently only support ipsec egress flow */
return is_valid_esp_aes_gcm(mdev, spec, flow_act, egress) != VALID_SPEC_INVALID;
}
static bool is_valid_ethertype(struct mlx5_core_dev *mdev,
const struct ib_flow_attr *flow_attr,
bool check_inner)
@@ -1154,8 +1095,7 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria);
if (is_egress &&
!is_valid_spec(dev->mdev, spec, &flow_act, is_egress)) {
if (is_egress) {
err = -EINVAL;
goto free;
}
@@ -1740,149 +1680,6 @@ unlock:
return ERR_PTR(err);
}
static u32 mlx5_ib_flow_action_flags_to_accel_xfrm_flags(u32 mlx5_flags)
{
u32 flags = 0;
if (mlx5_flags & MLX5_IB_UAPI_FLOW_ACTION_FLAGS_REQUIRE_METADATA)
flags |= MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA;
return flags;
}
#define MLX5_FLOW_ACTION_ESP_CREATE_LAST_SUPPORTED \
MLX5_IB_UAPI_FLOW_ACTION_FLAGS_REQUIRE_METADATA
static struct ib_flow_action *
mlx5_ib_create_flow_action_esp(struct ib_device *device,
const struct ib_flow_action_attrs_esp *attr,
struct uverbs_attr_bundle *attrs)
{
struct mlx5_ib_dev *mdev = to_mdev(device);
struct ib_uverbs_flow_action_esp_keymat_aes_gcm *aes_gcm;
struct mlx5_accel_esp_xfrm_attrs accel_attrs = {};
struct mlx5_ib_flow_action *action;
u64 action_flags;
u64 flags;
int err = 0;
err = uverbs_get_flags64(
&action_flags, attrs, MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS,
((MLX5_FLOW_ACTION_ESP_CREATE_LAST_SUPPORTED << 1) - 1));
if (err)
return ERR_PTR(err);
flags = mlx5_ib_flow_action_flags_to_accel_xfrm_flags(action_flags);
/* We currently support only a subset of the standard features: a
 * keymat of type AES_GCM, with icv_len == 16, iv_algo == SEQ and esn
 * (with overlap). Full offload mode isn't supported.
 */
if (!attr->keymat || attr->replay || attr->encap ||
attr->spi || attr->seq || attr->tfc_pad ||
attr->hard_limit_pkts ||
(attr->flags & ~(IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED |
IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ENCRYPT)))
return ERR_PTR(-EOPNOTSUPP);
if (attr->keymat->protocol !=
IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM)
return ERR_PTR(-EOPNOTSUPP);
aes_gcm = &attr->keymat->keymat.aes_gcm;
if (aes_gcm->icv_len != 16 ||
aes_gcm->iv_algo != IB_UVERBS_FLOW_ACTION_IV_ALGO_SEQ)
return ERR_PTR(-EOPNOTSUPP);
action = kmalloc(sizeof(*action), GFP_KERNEL);
if (!action)
return ERR_PTR(-ENOMEM);
action->esp_aes_gcm.ib_flags = attr->flags;
memcpy(&accel_attrs.keymat.aes_gcm.aes_key, &aes_gcm->aes_key,
sizeof(accel_attrs.keymat.aes_gcm.aes_key));
accel_attrs.keymat.aes_gcm.key_len = aes_gcm->key_len * 8;
memcpy(&accel_attrs.keymat.aes_gcm.salt, &aes_gcm->salt,
sizeof(accel_attrs.keymat.aes_gcm.salt));
memcpy(&accel_attrs.keymat.aes_gcm.seq_iv, &aes_gcm->iv,
sizeof(accel_attrs.keymat.aes_gcm.seq_iv));
accel_attrs.keymat.aes_gcm.icv_len = aes_gcm->icv_len * 8;
accel_attrs.keymat.aes_gcm.iv_algo = MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ;
accel_attrs.keymat_type = MLX5_ACCEL_ESP_KEYMAT_AES_GCM;
accel_attrs.esn = attr->esn;
if (attr->flags & IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED)
accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED;
if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW)
accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;
if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ENCRYPT)
accel_attrs.action |= MLX5_ACCEL_ESP_ACTION_ENCRYPT;
action->esp_aes_gcm.ctx =
mlx5_accel_esp_create_xfrm(mdev->mdev, &accel_attrs, flags);
if (IS_ERR(action->esp_aes_gcm.ctx)) {
err = PTR_ERR(action->esp_aes_gcm.ctx);
goto err_parse;
}
action->esp_aes_gcm.ib_flags = attr->flags;
return &action->ib_action;
err_parse:
kfree(action);
return ERR_PTR(err);
}
static int
mlx5_ib_modify_flow_action_esp(struct ib_flow_action *action,
const struct ib_flow_action_attrs_esp *attr,
struct uverbs_attr_bundle *attrs)
{
struct mlx5_ib_flow_action *maction = to_mflow_act(action);
struct mlx5_accel_esp_xfrm_attrs accel_attrs;
int err = 0;
if (attr->keymat || attr->replay || attr->encap ||
attr->spi || attr->seq || attr->tfc_pad ||
attr->hard_limit_pkts ||
(attr->flags & ~(IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED |
IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS |
IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW)))
return -EOPNOTSUPP;
/* Only the ESN value or the MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP can
* be modified.
*/
if (!(maction->esp_aes_gcm.ib_flags &
IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED) &&
attr->flags & (IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED |
IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW))
return -EINVAL;
memcpy(&accel_attrs, &maction->esp_aes_gcm.ctx->attrs,
sizeof(accel_attrs));
accel_attrs.esn = attr->esn;
if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW)
accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;
else
accel_attrs.flags &= ~MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;
err = mlx5_accel_esp_modify_xfrm(maction->esp_aes_gcm.ctx,
&accel_attrs);
if (err)
return err;
maction->esp_aes_gcm.ib_flags &=
~IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW;
maction->esp_aes_gcm.ib_flags |=
attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW;
return 0;
}
static void destroy_flow_action_raw(struct mlx5_ib_flow_action *maction)
{
switch (maction->flow_action_raw.sub_type) {
@@ -1906,13 +1703,6 @@ static int mlx5_ib_destroy_flow_action(struct ib_flow_action *action)
struct mlx5_ib_flow_action *maction = to_mflow_act(action);
switch (action->type) {
case IB_FLOW_ACTION_ESP:
/*
 * We only support aes_gcm for now, so we implicitly know this is
 * the underlying crypto.
 */
mlx5_accel_esp_destroy_xfrm(maction->esp_aes_gcm.ctx);
break;
case IB_FLOW_ACTION_UNSPECIFIED:
destroy_flow_action_raw(maction);
break;
@@ -2709,11 +2499,6 @@ static const struct ib_device_ops flow_ops = {
.destroy_flow_action = mlx5_ib_destroy_flow_action,
};
static const struct ib_device_ops flow_ipsec_ops = {
.create_flow_action_esp = mlx5_ib_create_flow_action_esp,
.modify_flow_action_esp = mlx5_ib_modify_flow_action_esp,
};
int mlx5_ib_fs_init(struct mlx5_ib_dev *dev)
{
dev->flow_db = kzalloc(sizeof(*dev->flow_db), GFP_KERNEL);
@@ -2724,9 +2509,5 @@ int mlx5_ib_fs_init(struct mlx5_ib_dev *dev)
mutex_init(&dev->flow_db->lock);
ib_set_device_ops(&dev->ib_dev, &flow_ops);
if (mlx5_accel_ipsec_device_caps(dev->mdev) &
MLX5_ACCEL_IPSEC_CAP_DEVICE)
ib_set_device_ops(&dev->ib_dev, &flow_ipsec_ops);
return 0;
}


@@ -41,7 +41,6 @@
#include "wr.h"
#include "restrack.h"
#include "counters.h"
#include <linux/mlx5/accel.h>
#include <rdma/uverbs_std_types.h>
#include <rdma/uverbs_ioctl.h>
#include <rdma/mlx5_user_ioctl_verbs.h>
@@ -906,10 +905,6 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
MLX5_RX_HASH_SRC_PORT_UDP |
MLX5_RX_HASH_DST_PORT_UDP |
MLX5_RX_HASH_INNER;
if (mlx5_accel_ipsec_device_caps(dev->mdev) &
MLX5_ACCEL_IPSEC_CAP_DEVICE)
resp.rss_caps.rx_hash_fields_mask |=
MLX5_RX_HASH_IPSEC_SPI;
resp.response_length += sizeof(resp.rss_caps);
}
} else {
@@ -1788,23 +1783,6 @@ static int set_ucontext_resp(struct ib_ucontext *uctx,
resp->num_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
MLX5_CAP_GEN(dev->mdev,
num_of_uars_per_page) : 1;
if (mlx5_accel_ipsec_device_caps(dev->mdev) &
MLX5_ACCEL_IPSEC_CAP_DEVICE) {
if (mlx5_get_flow_namespace(dev->mdev,
MLX5_FLOW_NAMESPACE_EGRESS))
resp->flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM;
if (mlx5_accel_ipsec_device_caps(dev->mdev) &
MLX5_ACCEL_IPSEC_CAP_REQUIRED_METADATA)
resp->flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_REQ_METADATA;
if (MLX5_CAP_FLOWTABLE(dev->mdev, flow_table_properties_nic_receive.ft_field_support.outer_esp_spi))
resp->flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_SPI_STEERING;
if (mlx5_accel_ipsec_device_caps(dev->mdev) &
MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN)
resp->flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_TX_IV_IS_ESN;
/* MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_FULL_OFFLOAD is currently always 0 */
}
resp->tot_bfregs = bfregi->lib_uar_dyn ? 0 :
bfregi->total_num_bfregs - bfregi->num_dyn_bfregs;
resp->num_ports = dev->num_ports;
@@ -3601,13 +3579,6 @@ DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_UAR,
&UVERBS_METHOD(MLX5_IB_METHOD_UAR_OBJ_ALLOC),
&UVERBS_METHOD(MLX5_IB_METHOD_UAR_OBJ_DESTROY));
ADD_UVERBS_ATTRIBUTES_SIMPLE(
mlx5_ib_flow_action,
UVERBS_OBJECT_FLOW_ACTION,
UVERBS_METHOD_FLOW_ACTION_ESP_CREATE,
UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS,
enum mlx5_ib_uapi_flow_action_flags));
ADD_UVERBS_ATTRIBUTES_SIMPLE(
mlx5_ib_query_context,
UVERBS_OBJECT_DEVICE,
@@ -3625,8 +3596,6 @@ static const struct uapi_definition mlx5_ib_defs[] = {
UAPI_DEF_CHAIN(mlx5_ib_std_types_defs),
UAPI_DEF_CHAIN(mlx5_ib_dm_defs),
UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_FLOW_ACTION,
&mlx5_ib_flow_action),
UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_DEVICE, &mlx5_ib_query_context),
UAPI_DEF_CHAIN_OBJ_TREE_NAMED(MLX5_IB_OBJECT_VAR,
UAPI_DEF_IS_OBJ_SUPPORTED(var_is_supported)),


@@ -16,13 +16,9 @@ config MLX5_CORE
Core driver for low level functionality of the ConnectX-4 and
Connect-IB cards by Mellanox Technologies.
config MLX5_ACCEL
bool
config MLX5_FPGA
bool "Mellanox Technologies Innova support"
depends on MLX5_CORE
select MLX5_ACCEL
help
Build support for the Innova family of network cards by Mellanox
Technologies. Innova network cards are comprised of a ConnectX chip
@@ -143,71 +139,21 @@ config MLX5_CORE_IPOIB
help
MLX5 IPoIB offloads & acceleration support.
config MLX5_FPGA_IPSEC
bool "Mellanox Technologies IPsec Innova support"
depends on MLX5_CORE
depends on MLX5_FPGA
help
Build IPsec support for the Innova family of network cards by Mellanox
Technologies. Innova network cards are comprised of a ConnectX chip
and an FPGA chip on one board. If you select this option, the
mlx5_core driver will include the Innova FPGA core and allow building
sandbox-specific client drivers.
config MLX5_IPSEC
config MLX5_EN_IPSEC
bool "Mellanox Technologies IPsec Connect-X support"
depends on MLX5_CORE_EN
depends on XFRM_OFFLOAD
depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD
select MLX5_ACCEL
help
Build IPsec support for the Connect-X family of network cards by Mellanox
Technologies.
Note: If you select this option, the mlx5_core driver will include
IPsec support for the Connect-X family.
config MLX5_EN_IPSEC
bool "IPSec XFRM cryptography-offload acceleration"
depends on MLX5_CORE_EN
depends on XFRM_OFFLOAD
depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD
depends on MLX5_FPGA_IPSEC || MLX5_IPSEC
help
Build support for IPsec cryptography-offload acceleration in the NIC.
Note: Support for hardware with this capability needs to be selected
for this option to become available.
config MLX5_FPGA_TLS
bool "Mellanox Technologies TLS Innova support"
depends on TLS_DEVICE
depends on TLS=y || MLX5_CORE=m
depends on MLX5_CORE_EN
depends on MLX5_FPGA
select MLX5_EN_TLS
help
Build TLS support for the Innova family of network cards by Mellanox
Technologies. Innova network cards are comprised of a ConnectX chip
and an FPGA chip on one board. If you select this option, the
mlx5_core driver will include the Innova FPGA core and allow building
sandbox-specific client drivers.
config MLX5_TLS
config MLX5_EN_TLS
bool "Mellanox Technologies TLS Connect-X support"
depends on TLS_DEVICE
depends on TLS=y || MLX5_CORE=m
depends on MLX5_CORE_EN
select MLX5_ACCEL
select MLX5_EN_TLS
help
Build TLS support for the Connect-X family of network cards by Mellanox
Technologies.
config MLX5_EN_TLS
bool
help
Build support for TLS cryptography-offload acceleration in the NIC.
Note: Support for hardware with this capability needs to be selected
for this option to become available.
config MLX5_SW_STEERING
bool "Mellanox Technologies software-managed steering"


@@ -28,7 +28,7 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += en/rqt.o en/tir.o en/rss.o en/rx_res.o \
en_selftest.o en/port.o en/monitor_stats.o en/health.o \
en/reporter_tx.o en/reporter_rx.o en/params.o en/xsk/pool.o \
en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o en/devlink.o en/ptp.o \
en/qos.o en/trap.o en/fs_tt_redirect.o en/selq.o
en/qos.o en/trap.o en/fs_tt_redirect.o en/selq.o lib/crypto.o
#
# Netdev extra
@@ -88,17 +88,13 @@ mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib/ipoib.o ipoib/ethtool.o ipoib/ipoib
#
# Accelerations & FPGA
#
mlx5_core-$(CONFIG_MLX5_IPSEC) += accel/ipsec_offload.o
mlx5_core-$(CONFIG_MLX5_FPGA_IPSEC) += fpga/ipsec.o
mlx5_core-$(CONFIG_MLX5_FPGA_TLS) += fpga/tls.o
mlx5_core-$(CONFIG_MLX5_ACCEL) += lib/crypto.o accel/tls.o accel/ipsec.o
mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o
mlx5_core-$(CONFIG_MLX5_EN_IPSEC) += en_accel/ipsec.o en_accel/ipsec_rxtx.o \
en_accel/ipsec_stats.o en_accel/ipsec_fs.o
en_accel/ipsec_stats.o en_accel/ipsec_fs.o \
en_accel/ipsec_offload.o
mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/tls.o en_accel/tls_rxtx.o en_accel/tls_stats.o \
mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/ktls_stats.o \
en_accel/fs_tcp.o en_accel/ktls.o en_accel/ktls_txrx.o \
en_accel/ktls_tx.o en_accel/ktls_rx.o


@@ -1,36 +0,0 @@
#ifndef __MLX5E_ACCEL_H__
#define __MLX5E_ACCEL_H__
#ifdef CONFIG_MLX5_ACCEL
#include <linux/skbuff.h>
#include <linux/netdevice.h>
static inline bool is_metadata_hdr_valid(struct sk_buff *skb)
{
__be16 *ethtype;
if (unlikely(skb->len < ETH_HLEN + MLX5E_METADATA_ETHER_LEN))
return false;
ethtype = (__be16 *)(skb->data + ETH_ALEN * 2);
if (*ethtype != cpu_to_be16(MLX5E_METADATA_ETHER_TYPE))
return false;
return true;
}
static inline void remove_metadata_hdr(struct sk_buff *skb)
{
struct ethhdr *old_eth;
struct ethhdr *new_eth;
/* Remove the metadata from the buffer */
old_eth = (struct ethhdr *)skb->data;
new_eth = (struct ethhdr *)(skb->data + MLX5E_METADATA_ETHER_LEN);
memmove(new_eth, old_eth, 2 * ETH_ALEN);
/* Ethertype is already in its new place */
skb_pull_inline(skb, MLX5E_METADATA_ETHER_LEN);
}
#endif /* CONFIG_MLX5_ACCEL */
#endif /* __MLX5E_ACCEL_H__ */


@@ -1,179 +0,0 @@
/*
* Copyright (c) 2017 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/mlx5/device.h>
#include "accel/ipsec.h"
#include "mlx5_core.h"
#include "fpga/ipsec.h"
#include "accel/ipsec_offload.h"
void mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev)
{
const struct mlx5_accel_ipsec_ops *ipsec_ops;
int err = 0;
ipsec_ops = (mlx5_ipsec_offload_ops(mdev)) ?
mlx5_ipsec_offload_ops(mdev) :
mlx5_fpga_ipsec_ops(mdev);
if (!ipsec_ops || !ipsec_ops->init) {
mlx5_core_dbg(mdev, "IPsec ops is not supported\n");
return;
}
err = ipsec_ops->init(mdev);
if (err) {
mlx5_core_warn_once(mdev, "Failed to start IPsec device, err = %d\n", err);
return;
}
mdev->ipsec_ops = ipsec_ops;
}
void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev)
{
const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
if (!ipsec_ops || !ipsec_ops->cleanup)
return;
ipsec_ops->cleanup(mdev);
}
u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev)
{
const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
if (!ipsec_ops || !ipsec_ops->device_caps)
return 0;
return ipsec_ops->device_caps(mdev);
}
EXPORT_SYMBOL_GPL(mlx5_accel_ipsec_device_caps);
unsigned int mlx5_accel_ipsec_counters_count(struct mlx5_core_dev *mdev)
{
const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
if (!ipsec_ops || !ipsec_ops->counters_count)
return -EOPNOTSUPP;
return ipsec_ops->counters_count(mdev);
}
int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
unsigned int count)
{
const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
if (!ipsec_ops || !ipsec_ops->counters_read)
return -EOPNOTSUPP;
return ipsec_ops->counters_read(mdev, counters, count);
}
void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
struct mlx5_accel_esp_xfrm *xfrm,
u32 *sa_handle)
{
const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
__be32 saddr[4] = {}, daddr[4] = {};
if (!ipsec_ops || !ipsec_ops->create_hw_context)
return ERR_PTR(-EOPNOTSUPP);
if (!xfrm->attrs.is_ipv6) {
saddr[3] = xfrm->attrs.saddr.a4;
daddr[3] = xfrm->attrs.daddr.a4;
} else {
memcpy(saddr, xfrm->attrs.saddr.a6, sizeof(saddr));
memcpy(daddr, xfrm->attrs.daddr.a6, sizeof(daddr));
}
return ipsec_ops->create_hw_context(mdev, xfrm, saddr, daddr, xfrm->attrs.spi,
xfrm->attrs.is_ipv6, sa_handle);
}
void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context)
{
const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
if (!ipsec_ops || !ipsec_ops->free_hw_context)
return;
ipsec_ops->free_hw_context(context);
}
struct mlx5_accel_esp_xfrm *
mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev,
const struct mlx5_accel_esp_xfrm_attrs *attrs,
u32 flags)
{
const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
struct mlx5_accel_esp_xfrm *xfrm;
if (!ipsec_ops || !ipsec_ops->esp_create_xfrm)
return ERR_PTR(-EOPNOTSUPP);
xfrm = ipsec_ops->esp_create_xfrm(mdev, attrs, flags);
if (IS_ERR(xfrm))
return xfrm;
xfrm->mdev = mdev;
return xfrm;
}
EXPORT_SYMBOL_GPL(mlx5_accel_esp_create_xfrm);
void mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm)
{
const struct mlx5_accel_ipsec_ops *ipsec_ops = xfrm->mdev->ipsec_ops;
if (!ipsec_ops || !ipsec_ops->esp_destroy_xfrm)
return;
ipsec_ops->esp_destroy_xfrm(xfrm);
}
EXPORT_SYMBOL_GPL(mlx5_accel_esp_destroy_xfrm);
int mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
const struct mlx5_accel_ipsec_ops *ipsec_ops = xfrm->mdev->ipsec_ops;
if (!ipsec_ops || !ipsec_ops->esp_modify_xfrm)
return -EOPNOTSUPP;
return ipsec_ops->esp_modify_xfrm(xfrm, attrs);
}
EXPORT_SYMBOL_GPL(mlx5_accel_esp_modify_xfrm);


@@ -1,96 +0,0 @@
/*
* Copyright (c) 2017 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef __MLX5_ACCEL_IPSEC_H__
#define __MLX5_ACCEL_IPSEC_H__
#include <linux/mlx5/driver.h>
#include <linux/mlx5/accel.h>
#ifdef CONFIG_MLX5_ACCEL
#define MLX5_IPSEC_DEV(mdev) (mlx5_accel_ipsec_device_caps(mdev) & \
MLX5_ACCEL_IPSEC_CAP_DEVICE)
unsigned int mlx5_accel_ipsec_counters_count(struct mlx5_core_dev *mdev);
int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
unsigned int count);
void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
struct mlx5_accel_esp_xfrm *xfrm,
u32 *sa_handle);
void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context);
void mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev);
void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev);
struct mlx5_accel_ipsec_ops {
u32 (*device_caps)(struct mlx5_core_dev *mdev);
unsigned int (*counters_count)(struct mlx5_core_dev *mdev);
int (*counters_read)(struct mlx5_core_dev *mdev, u64 *counters, unsigned int count);
void* (*create_hw_context)(struct mlx5_core_dev *mdev,
struct mlx5_accel_esp_xfrm *xfrm,
const __be32 saddr[4], const __be32 daddr[4],
const __be32 spi, bool is_ipv6, u32 *sa_handle);
void (*free_hw_context)(void *context);
int (*init)(struct mlx5_core_dev *mdev);
void (*cleanup)(struct mlx5_core_dev *mdev);
struct mlx5_accel_esp_xfrm* (*esp_create_xfrm)(struct mlx5_core_dev *mdev,
const struct mlx5_accel_esp_xfrm_attrs *attrs,
u32 flags);
int (*esp_modify_xfrm)(struct mlx5_accel_esp_xfrm *xfrm,
const struct mlx5_accel_esp_xfrm_attrs *attrs);
void (*esp_destroy_xfrm)(struct mlx5_accel_esp_xfrm *xfrm);
};
#else
#define MLX5_IPSEC_DEV(mdev) false
static inline void *
mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
struct mlx5_accel_esp_xfrm *xfrm,
u32 *sa_handle)
{
return NULL;
}
static inline void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context) {}
static inline void mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev) {}
static inline void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev) {}
#endif /* CONFIG_MLX5_ACCEL */
#endif /* __MLX5_ACCEL_IPSEC_H__ */


@@ -1,38 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
#ifndef __MLX5_IPSEC_OFFLOAD_H__
#define __MLX5_IPSEC_OFFLOAD_H__
#include <linux/mlx5/driver.h>
#include "accel/ipsec.h"
#ifdef CONFIG_MLX5_IPSEC
const struct mlx5_accel_ipsec_ops *mlx5_ipsec_offload_ops(struct mlx5_core_dev *mdev);
static inline bool mlx5_is_ipsec_device(struct mlx5_core_dev *mdev)
{
if (!MLX5_CAP_GEN(mdev, ipsec_offload))
return false;
if (!MLX5_CAP_GEN(mdev, log_max_dek))
return false;
if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) &
MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC))
return false;
return MLX5_CAP_IPSEC(mdev, ipsec_crypto_offload) &&
MLX5_CAP_ETH(mdev, insert_trailer);
}
#else
static inline const struct mlx5_accel_ipsec_ops *
mlx5_ipsec_offload_ops(struct mlx5_core_dev *mdev) { return NULL; }
static inline bool mlx5_is_ipsec_device(struct mlx5_core_dev *mdev)
{
return false;
}
#endif /* CONFIG_MLX5_IPSEC */
#endif /* __MLX5_IPSEC_OFFLOAD_H__ */


@@ -1,125 +0,0 @@
/*
* Copyright (c) 2018 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/mlx5/device.h>
#include "accel/tls.h"
#include "mlx5_core.h"
#include "lib/mlx5.h"
#ifdef CONFIG_MLX5_FPGA_TLS
#include "fpga/tls.h"
int mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
struct tls_crypto_info *crypto_info,
u32 start_offload_tcp_sn, u32 *p_swid,
bool direction_sx)
{
return mlx5_fpga_tls_add_flow(mdev, flow, crypto_info,
start_offload_tcp_sn, p_swid,
direction_sx);
}
void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
bool direction_sx)
{
mlx5_fpga_tls_del_flow(mdev, swid, GFP_KERNEL, direction_sx);
}
int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle,
u32 seq, __be64 rcd_sn)
{
return mlx5_fpga_tls_resync_rx(mdev, handle, seq, rcd_sn);
}
bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev)
{
return mlx5_fpga_is_tls_device(mdev) ||
mlx5_accel_is_ktls_device(mdev);
}
u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev)
{
return mlx5_fpga_tls_device_caps(mdev);
}
int mlx5_accel_tls_init(struct mlx5_core_dev *mdev)
{
return mlx5_fpga_tls_init(mdev);
}
void mlx5_accel_tls_cleanup(struct mlx5_core_dev *mdev)
{
mlx5_fpga_tls_cleanup(mdev);
}
#endif
#ifdef CONFIG_MLX5_TLS
int mlx5_ktls_create_key(struct mlx5_core_dev *mdev,
struct tls_crypto_info *crypto_info,
u32 *p_key_id)
{
u32 sz_bytes;
void *key;
switch (crypto_info->cipher_type) {
case TLS_CIPHER_AES_GCM_128: {
struct tls12_crypto_info_aes_gcm_128 *info =
(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
key = info->key;
sz_bytes = sizeof(info->key);
break;
}
case TLS_CIPHER_AES_GCM_256: {
struct tls12_crypto_info_aes_gcm_256 *info =
(struct tls12_crypto_info_aes_gcm_256 *)crypto_info;
key = info->key;
sz_bytes = sizeof(info->key);
break;
}
default:
return -EINVAL;
}
return mlx5_create_encryption_key(mdev, key, sz_bytes,
MLX5_ACCEL_OBJ_TLS_KEY,
p_key_id);
}
void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id)
{
mlx5_destroy_encryption_key(mdev, key_id);
}
#endif


@@ -1,156 +0,0 @@
/*
* Copyright (c) 2018 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef __MLX5_ACCEL_TLS_H__
#define __MLX5_ACCEL_TLS_H__
#include <linux/mlx5/driver.h>
#include <linux/tls.h>
#ifdef CONFIG_MLX5_TLS
int mlx5_ktls_create_key(struct mlx5_core_dev *mdev,
struct tls_crypto_info *crypto_info,
u32 *p_key_id);
void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id);
static inline bool mlx5_accel_is_ktls_tx(struct mlx5_core_dev *mdev)
{
return MLX5_CAP_GEN(mdev, tls_tx);
}
static inline bool mlx5_accel_is_ktls_rx(struct mlx5_core_dev *mdev)
{
return MLX5_CAP_GEN(mdev, tls_rx);
}
static inline bool mlx5_accel_is_ktls_device(struct mlx5_core_dev *mdev)
{
if (!mlx5_accel_is_ktls_tx(mdev) &&
!mlx5_accel_is_ktls_rx(mdev))
return false;
if (!MLX5_CAP_GEN(mdev, log_max_dek))
return false;
return MLX5_CAP_TLS(mdev, tls_1_2_aes_gcm_128);
}
static inline bool mlx5e_ktls_type_check(struct mlx5_core_dev *mdev,
struct tls_crypto_info *crypto_info)
{
switch (crypto_info->cipher_type) {
case TLS_CIPHER_AES_GCM_128:
if (crypto_info->version == TLS_1_2_VERSION)
return MLX5_CAP_TLS(mdev, tls_1_2_aes_gcm_128);
break;
}
return false;
}
#else
static inline bool mlx5_accel_is_ktls_tx(struct mlx5_core_dev *mdev)
{ return false; }
static inline bool mlx5_accel_is_ktls_rx(struct mlx5_core_dev *mdev)
{ return false; }
static inline int
mlx5_ktls_create_key(struct mlx5_core_dev *mdev,
struct tls_crypto_info *crypto_info,
u32 *p_key_id) { return -ENOTSUPP; }
static inline void
mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id) {}
static inline bool
mlx5_accel_is_ktls_device(struct mlx5_core_dev *mdev) { return false; }
static inline bool
mlx5e_ktls_type_check(struct mlx5_core_dev *mdev,
struct tls_crypto_info *crypto_info) { return false; }
#endif
enum {
MLX5_ACCEL_TLS_TX = BIT(0),
MLX5_ACCEL_TLS_RX = BIT(1),
MLX5_ACCEL_TLS_V12 = BIT(2),
MLX5_ACCEL_TLS_V13 = BIT(3),
MLX5_ACCEL_TLS_LRO = BIT(4),
MLX5_ACCEL_TLS_IPV6 = BIT(5),
MLX5_ACCEL_TLS_AES_GCM128 = BIT(30),
MLX5_ACCEL_TLS_AES_GCM256 = BIT(31),
};
struct mlx5_ifc_tls_flow_bits {
u8 src_port[0x10];
u8 dst_port[0x10];
union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits src_ipv4_src_ipv6;
union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits dst_ipv4_dst_ipv6;
u8 ipv6[0x1];
u8 direction_sx[0x1];
u8 reserved_at_2[0x1e];
};
#ifdef CONFIG_MLX5_FPGA_TLS
int mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
struct tls_crypto_info *crypto_info,
u32 start_offload_tcp_sn, u32 *p_swid,
bool direction_sx);
void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
bool direction_sx);
int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle,
u32 seq, __be64 rcd_sn);
bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev);
u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev);
int mlx5_accel_tls_init(struct mlx5_core_dev *mdev);
void mlx5_accel_tls_cleanup(struct mlx5_core_dev *mdev);
#else
static inline int
mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
struct tls_crypto_info *crypto_info,
u32 start_offload_tcp_sn, u32 *p_swid,
bool direction_sx) { return -ENOTSUPP; }
static inline void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
bool direction_sx) { }
static inline int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle,
u32 seq, __be64 rcd_sn) { return 0; }
static inline bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev)
{
return mlx5_accel_is_ktls_device(mdev);
}
static inline u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev) { return 0; }
static inline int mlx5_accel_tls_init(struct mlx5_core_dev *mdev) { return 0; }
static inline void mlx5_accel_tls_cleanup(struct mlx5_core_dev *mdev) { }
#endif
#endif /* __MLX5_ACCEL_TLS_H__ */


@@ -354,7 +354,6 @@ enum {
MLX5E_RQ_STATE_AM,
MLX5E_RQ_STATE_NO_CSUM_COMPLETE,
MLX5E_RQ_STATE_CSUM_FULL, /* cqe_csum_full hw bit is set */
MLX5E_RQ_STATE_FPGA_TLS, /* FPGA TLS enabled */
MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, /* set when mini_cqe_resp_stride_index cap is used */
MLX5E_RQ_STATE_SHAMPO, /* set when SHAMPO cap is used */
};


@@ -5,8 +5,7 @@
#include "en/txrx.h"
#include "en/port.h"
#include "en_accel/en_accel.h"
#include "accel/ipsec.h"
#include "fpga/ipsec.h"
#include "en_accel/ipsec_offload.h"
static bool mlx5e_rx_is_xdp(struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk)
@@ -207,7 +206,7 @@ u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *par
bool is_mpwqe = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE);
u16 stop_room;
stop_room = mlx5e_tls_get_stop_room(mdev, params);
stop_room = mlx5e_ktls_get_stop_room(mdev, params);
stop_room += mlx5e_stop_room_for_max_wqe(mdev);
if (is_mpwqe)
/* A MPWQE can take up to the maximum-sized WQE + all the normal
@@ -327,9 +326,6 @@ bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
if (!mlx5e_check_fragmented_striding_rq_cap(mdev))
return false;
if (mlx5_fpga_is_ipsec_device(mdev))
return false;
if (params->xdp_prog) {
/* XSK params are not considered here. If striding RQ is in use,
* and an XSK is being opened, mlx5e_rx_mpwqe_is_linear_skb will
@@ -423,9 +419,6 @@ static int mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
int max_mtu;
int i;
if (mlx5_fpga_is_ipsec_device(mdev))
byte_count += MLX5E_METADATA_ETHER_LEN;
if (mlx5e_rx_is_linear_skb(params, xsk)) {
int frag_stride;
@@ -696,8 +689,8 @@ void mlx5e_build_sq_param(struct mlx5_core_dev *mdev,
void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
bool allow_swp;
allow_swp = mlx5_geneve_tx_allowed(mdev) ||
!!MLX5_IPSEC_DEV(mdev);
allow_swp =
mlx5_geneve_tx_allowed(mdev) || !!mlx5_ipsec_device_caps(mdev);
mlx5e_build_sq_param_common(mdev, param);
MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
MLX5_SET(sqc, sqc, allow_swp, allow_swp);
@@ -804,7 +797,7 @@ static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5_core_dev *mdev,
static u8 mlx5e_build_async_icosq_log_wq_sz(struct mlx5_core_dev *mdev)
{
if (mlx5e_accel_is_ktls_rx(mdev))
if (mlx5e_is_ktls_rx(mdev))
return MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
@@ -833,7 +826,7 @@ static void mlx5e_build_async_icosq_param(struct mlx5_core_dev *mdev,
mlx5e_build_sq_param_common(mdev, param);
param->stop_room = mlx5e_stop_room_for_wqe(mdev, 1); /* for XSK NOP */
param->is_tls = mlx5e_accel_is_ktls_rx(mdev);
param->is_tls = mlx5e_is_ktls_rx(mdev);
if (param->is_tls)
param->stop_room += mlx5e_stop_room_for_wqe(mdev, 1); /* for TLS RX resync NOP */
MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(mdev, reg_umr_sq));


@@ -37,8 +37,8 @@
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/tls.h"
#include "en_accel/tls_rxtx.h"
#include "en_accel/ktls.h"
#include "en_accel/ktls_txrx.h"
#include "en.h"
#include "en/txrx.h"
@@ -124,8 +124,9 @@ static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
#ifdef CONFIG_MLX5_EN_TLS
/* May send SKBs and WQEs. */
if (mlx5e_tls_skb_offloaded(skb))
if (unlikely(!mlx5e_tls_handle_tx_skb(dev, sq, skb, &state->tls)))
if (mlx5e_ktls_skb_offloaded(skb))
if (unlikely(!mlx5e_ktls_handle_tx_skb(dev, sq, skb,
&state->tls)))
return false;
#endif
@@ -174,7 +175,7 @@ static inline void mlx5e_accel_tx_finish(struct mlx5e_txqsq *sq,
struct mlx5_wqe_inline_seg *inlseg)
{
#ifdef CONFIG_MLX5_EN_TLS
mlx5e_tls_handle_tx_wqe(&wqe->ctrl, &state->tls);
mlx5e_ktls_handle_tx_wqe(&wqe->ctrl, &state->tls);
#endif
#ifdef CONFIG_MLX5_EN_IPSEC


@@ -226,8 +226,7 @@ static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
return -EINVAL;
}
if (x->props.flags & XFRM_STATE_ESN &&
!(mlx5_accel_ipsec_device_caps(priv->mdev) &
MLX5_ACCEL_IPSEC_CAP_ESN)) {
!(mlx5_ipsec_device_caps(priv->mdev) & MLX5_ACCEL_IPSEC_CAP_ESN)) {
netdev_info(netdev, "Cannot offload ESN xfrm states\n");
return -EINVAL;
}
@@ -275,8 +274,7 @@ static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
return -EINVAL;
}
if (x->props.family == AF_INET6 &&
!(mlx5_accel_ipsec_device_caps(priv->mdev) &
MLX5_ACCEL_IPSEC_CAP_IPV6)) {
!(mlx5_ipsec_device_caps(priv->mdev) & MLX5_ACCEL_IPSEC_CAP_IPV6)) {
netdev_info(netdev, "IPv6 xfrm state offload is not supported by this device\n");
return -EINVAL;
}
@@ -286,9 +284,6 @@ static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
static int mlx5e_xfrm_fs_add_rule(struct mlx5e_priv *priv,
struct mlx5e_ipsec_sa_entry *sa_entry)
{
if (!mlx5_is_ipsec_device(priv->mdev))
return 0;
return mlx5e_accel_ipsec_fs_add_rule(priv, &sa_entry->xfrm->attrs,
sa_entry->ipsec_obj_id,
&sa_entry->ipsec_rule);
@@ -297,9 +292,6 @@ static int mlx5e_xfrm_fs_add_rule(struct mlx5e_priv *priv,
static void mlx5e_xfrm_fs_del_rule(struct mlx5e_priv *priv,
struct mlx5e_ipsec_sa_entry *sa_entry)
{
if (!mlx5_is_ipsec_device(priv->mdev))
return;
mlx5e_accel_ipsec_fs_del_rule(priv, &sa_entry->xfrm->attrs,
&sa_entry->ipsec_rule);
}
@@ -333,9 +325,7 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
/* create xfrm */
mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &attrs);
sa_entry->xfrm =
mlx5_accel_esp_create_xfrm(priv->mdev, &attrs,
MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA);
sa_entry->xfrm = mlx5_accel_esp_create_xfrm(priv->mdev, &attrs);
if (IS_ERR(sa_entry->xfrm)) {
err = PTR_ERR(sa_entry->xfrm);
goto err_sa_entry;
@@ -414,7 +404,7 @@ int mlx5e_ipsec_init(struct mlx5e_priv *priv)
{
struct mlx5e_ipsec *ipsec = NULL;
if (!MLX5_IPSEC_DEV(priv->mdev)) {
if (!mlx5_ipsec_device_caps(priv->mdev)) {
netdev_dbg(priv->netdev, "Not an IPSec offload device\n");
return 0;
}
@@ -425,10 +415,7 @@ int mlx5e_ipsec_init(struct mlx5e_priv *priv)
hash_init(ipsec->sadb_rx);
spin_lock_init(&ipsec->sadb_rx_lock);
ida_init(&ipsec->halloc);
ipsec->en_priv = priv;
ipsec->no_trailer = !!(mlx5_accel_ipsec_device_caps(priv->mdev) &
MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER);
ipsec->wq = alloc_ordered_workqueue("mlx5e_ipsec: %s", 0,
priv->netdev->name);
if (!ipsec->wq) {
@@ -452,7 +439,6 @@ void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
mlx5e_accel_ipsec_fs_cleanup(priv);
destroy_workqueue(ipsec->wq);
ida_destroy(&ipsec->halloc);
kfree(ipsec);
priv->ipsec = NULL;
}
@@ -531,7 +517,7 @@ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
struct mlx5_core_dev *mdev = priv->mdev;
struct net_device *netdev = priv->netdev;
if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_ESP) ||
if (!(mlx5_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_ESP) ||
!MLX5_CAP_ETH(mdev, swp)) {
mlx5_core_dbg(mdev, "mlx5e: ESP and SWP offload not supported\n");
return;
@@ -550,15 +536,13 @@ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
netdev->features |= NETIF_F_HW_ESP_TX_CSUM;
netdev->hw_enc_features |= NETIF_F_HW_ESP_TX_CSUM;
if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_LSO) ||
if (!(mlx5_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_LSO) ||
!MLX5_CAP_ETH(mdev, swp_lso)) {
mlx5_core_dbg(mdev, "mlx5e: ESP LSO not supported\n");
return;
}
if (mlx5_is_ipsec_device(mdev))
netdev->gso_partial_features |= NETIF_F_GSO_ESP;
netdev->gso_partial_features |= NETIF_F_GSO_ESP;
mlx5_core_dbg(mdev, "mlx5e: ESP GSO capability turned on\n");
netdev->features |= NETIF_F_GSO_ESP;
netdev->hw_features |= NETIF_F_GSO_ESP;


@@ -40,7 +40,7 @@
#include <net/xfrm.h>
#include <linux/idr.h>
#include "accel/ipsec.h"
#include "ipsec_offload.h"
#define MLX5E_IPSEC_SADB_RX_BITS 10
#define MLX5E_IPSEC_ESN_SCOPE_MID 0x80000000L
@@ -55,24 +55,6 @@ struct mlx5e_ipsec_sw_stats {
atomic64_t ipsec_tx_drop_no_state;
atomic64_t ipsec_tx_drop_not_ip;
atomic64_t ipsec_tx_drop_trailer;
atomic64_t ipsec_tx_drop_metadata;
};
struct mlx5e_ipsec_stats {
u64 ipsec_dec_in_packets;
u64 ipsec_dec_out_packets;
u64 ipsec_dec_bypass_packets;
u64 ipsec_enc_in_packets;
u64 ipsec_enc_out_packets;
u64 ipsec_enc_bypass_packets;
u64 ipsec_dec_drop_packets;
u64 ipsec_dec_auth_fail_packets;
u64 ipsec_enc_drop_packets;
u64 ipsec_add_sa_success;
u64 ipsec_add_sa_fail;
u64 ipsec_del_sa_success;
u64 ipsec_del_sa_fail;
u64 ipsec_cmd_drop;
};
struct mlx5e_accel_fs_esp;
@@ -81,11 +63,8 @@ struct mlx5e_ipsec_tx;
struct mlx5e_ipsec {
struct mlx5e_priv *en_priv;
DECLARE_HASHTABLE(sadb_rx, MLX5E_IPSEC_SADB_RX_BITS);
bool no_trailer;
spinlock_t sadb_rx_lock; /* Protects sadb_rx and halloc */
struct ida halloc;
spinlock_t sadb_rx_lock; /* Protects sadb_rx */
struct mlx5e_ipsec_sw_stats sw_stats;
struct mlx5e_ipsec_stats stats;
struct workqueue_struct *wq;
struct mlx5e_accel_fs_esp *rx_fs;
struct mlx5e_ipsec_tx *tx_fs;
@@ -116,7 +95,6 @@ struct mlx5e_ipsec_sa_entry {
struct mlx5e_ipsec_rule ipsec_rule;
};
void mlx5e_ipsec_build_inverse_table(void);
int mlx5e_ipsec_init(struct mlx5e_priv *priv);
void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv);
void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv);
@@ -125,11 +103,6 @@ struct xfrm_state *mlx5e_ipsec_sadb_rx_lookup(struct mlx5e_ipsec *dev,
unsigned int handle);
#else
static inline void mlx5e_ipsec_build_inverse_table(void)
{
}
static inline int mlx5e_ipsec_init(struct mlx5e_priv *priv)
{
return 0;


@@ -2,7 +2,7 @@
/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
#include <linux/netdevice.h>
#include "accel/ipsec_offload.h"
#include "ipsec_offload.h"
#include "ipsec_fs.h"
#include "fs_core.h"
@@ -700,9 +700,6 @@ int mlx5e_accel_ipsec_fs_init(struct mlx5e_priv *priv)
{
int err;
if (!mlx5_is_ipsec_device(priv->mdev) || !priv->ipsec)
return -EOPNOTSUPP;
err = fs_init_tx(priv);
if (err)
return err;


@@ -6,10 +6,9 @@
#include "en.h"
#include "ipsec.h"
#include "accel/ipsec_offload.h"
#include "ipsec_offload.h"
#include "en/fs.h"
#ifdef CONFIG_MLX5_EN_IPSEC
void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_priv *priv);
int mlx5e_accel_ipsec_fs_init(struct mlx5e_priv *priv);
int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_priv *priv,
@@ -19,8 +18,4 @@ int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_priv *priv,
void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_priv *priv,
struct mlx5_accel_esp_xfrm_attrs *attrs,
struct mlx5e_ipsec_rule *ipsec_rule);
#else
static inline void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_priv *priv) {}
static inline int mlx5e_accel_ipsec_fs_init(struct mlx5e_priv *priv) { return 0; }
#endif
#endif /* __MLX5_IPSEC_STEERING_H__ */


@@ -1,14 +1,11 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
/* Copyright (c) 2017, Mellanox Technologies inc. All rights reserved. */
#include "mlx5_core.h"
#include "ipsec_offload.h"
#include "lib/mlx5.h"
#include "en_accel/ipsec_fs.h"
#define MLX5_IPSEC_DEV_BASIC_CAPS (MLX5_ACCEL_IPSEC_CAP_DEVICE | MLX5_ACCEL_IPSEC_CAP_IPV6 | \
MLX5_ACCEL_IPSEC_CAP_LSO)
struct mlx5_ipsec_sa_ctx {
struct rhash_head hash;
u32 enc_key_id;
@@ -25,25 +22,37 @@ struct mlx5_ipsec_esp_xfrm {
struct mlx5_accel_esp_xfrm accel_xfrm;
};
static u32 mlx5_ipsec_offload_device_caps(struct mlx5_core_dev *mdev)
u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
{
u32 caps = MLX5_IPSEC_DEV_BASIC_CAPS;
u32 caps;
if (!mlx5_is_ipsec_device(mdev))
if (!MLX5_CAP_GEN(mdev, ipsec_offload))
return 0;
if (!MLX5_CAP_GEN(mdev, log_max_dek))
return 0;
if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) &
MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC))
return 0;
if (!MLX5_CAP_IPSEC(mdev, ipsec_crypto_offload) ||
!MLX5_CAP_ETH(mdev, insert_trailer))
return 0;
if (!MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ipsec_encrypt) ||
!MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ipsec_decrypt))
return 0;
caps = MLX5_ACCEL_IPSEC_CAP_DEVICE | MLX5_ACCEL_IPSEC_CAP_IPV6 |
MLX5_ACCEL_IPSEC_CAP_LSO;
if (MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_encrypt) &&
MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_decrypt))
caps |= MLX5_ACCEL_IPSEC_CAP_ESP;
if (MLX5_CAP_IPSEC(mdev, ipsec_esn)) {
if (MLX5_CAP_IPSEC(mdev, ipsec_esn))
caps |= MLX5_ACCEL_IPSEC_CAP_ESN;
caps |= MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN;
}
/* We can accommodate up to 2^24 different IPsec objects
* because we use up to 24 bits in flow table metadata
@@ -52,6 +61,7 @@ static u32 mlx5_ipsec_offload_device_caps(struct mlx5_core_dev *mdev)
WARN_ON_ONCE(MLX5_CAP_IPSEC(mdev, log_max_ipsec_offload) > 24);
return caps;
}
EXPORT_SYMBOL_GPL(mlx5_ipsec_device_caps);
static int
mlx5_ipsec_offload_esp_validate_xfrm_attrs(struct mlx5_core_dev *mdev,
@@ -94,8 +104,7 @@ mlx5_ipsec_offload_esp_validate_xfrm_attrs(struct mlx5_core_dev *mdev,
static struct mlx5_accel_esp_xfrm *
mlx5_ipsec_offload_esp_create_xfrm(struct mlx5_core_dev *mdev,
const struct mlx5_accel_esp_xfrm_attrs *attrs,
u32 flags)
const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
struct mlx5_ipsec_esp_xfrm *mxfrm;
int err = 0;
@@ -274,11 +283,6 @@ static void mlx5_ipsec_offload_delete_sa_ctx(void *context)
mutex_unlock(&mxfrm->lock);
}
static int mlx5_ipsec_offload_init(struct mlx5_core_dev *mdev)
{
return 0;
}
static int mlx5_modify_ipsec_obj(struct mlx5_core_dev *mdev,
struct mlx5_ipsec_obj_attrs *attrs,
u32 ipsec_id)
@@ -366,20 +370,51 @@ change_sw_xfrm_attrs:
return err;
}
static const struct mlx5_accel_ipsec_ops ipsec_offload_ops = {
.device_caps = mlx5_ipsec_offload_device_caps,
.create_hw_context = mlx5_ipsec_offload_create_sa_ctx,
.free_hw_context = mlx5_ipsec_offload_delete_sa_ctx,
.init = mlx5_ipsec_offload_init,
.esp_create_xfrm = mlx5_ipsec_offload_esp_create_xfrm,
.esp_destroy_xfrm = mlx5_ipsec_offload_esp_destroy_xfrm,
.esp_modify_xfrm = mlx5_ipsec_offload_esp_modify_xfrm,
};
const struct mlx5_accel_ipsec_ops *mlx5_ipsec_offload_ops(struct mlx5_core_dev *mdev)
void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
struct mlx5_accel_esp_xfrm *xfrm,
u32 *sa_handle)
{
if (!mlx5_ipsec_offload_device_caps(mdev))
return NULL;
__be32 saddr[4] = {}, daddr[4] = {};
return &ipsec_offload_ops;
if (!xfrm->attrs.is_ipv6) {
saddr[3] = xfrm->attrs.saddr.a4;
daddr[3] = xfrm->attrs.daddr.a4;
} else {
memcpy(saddr, xfrm->attrs.saddr.a6, sizeof(saddr));
memcpy(daddr, xfrm->attrs.daddr.a6, sizeof(daddr));
}
return mlx5_ipsec_offload_create_sa_ctx(mdev, xfrm, saddr, daddr,
xfrm->attrs.spi,
xfrm->attrs.is_ipv6, sa_handle);
}
void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context)
{
mlx5_ipsec_offload_delete_sa_ctx(context);
}
struct mlx5_accel_esp_xfrm *
mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev,
const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
struct mlx5_accel_esp_xfrm *xfrm;
xfrm = mlx5_ipsec_offload_esp_create_xfrm(mdev, attrs);
if (IS_ERR(xfrm))
return xfrm;
xfrm->mdev = mdev;
return xfrm;
}
void mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm)
{
mlx5_ipsec_offload_esp_destroy_xfrm(xfrm);
}
int mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
return mlx5_ipsec_offload_esp_modify_xfrm(xfrm, attrs);
}

Some files were not shown because too many files have changed in this diff.