[PATCH rdma-next 08/12] IB/uverbs: Add flow_action create and destroy verbs

From: Matan Barak <matanb@xxxxxxxxxxxx>

A verbs application may receive and transmit packets using a data
path pipeline. Sometimes, the first stage in the receive pipeline or
the last stage in the transmit pipeline involves transforming a
packet, either to make it easier for later stages to process or to
prepare it for transmission over the wire. Such a transformation could
be stripping/encapsulating the packet (e.g. VXLAN),
decrypting/encrypting it (e.g. IPsec), altering headers, performing
complex FPGA-based changes, etc.

Some hardware can perform such transformations without any software
data path intervention. The flow steering API supports steering a
packet (either to a QP or dropping it) and some simple actions that
leave the packet unchanged (e.g. tagging a packet). Complex actions
that may change the packet could bloat the flow steering API
extensively. Sometimes the same action should be applied to several
flows; in that case, it is easier to bind several flows to the same
action and modify that action than to change every matching flow.

Introduce a new flow_action object that abstracts any packet
transformation (drawn from a standard, well-defined set of actions).
This flow_action object can be tied to a flow steering rule via a new
specification.

Currently, we support the ESP flow_action, which encrypts or decrypts
a packet according to the given parameters. However, the schema
presented here is flexible enough to be used for other transformation
actions tied to flow rules.
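
For illustration only (not part of this patch), below is a rough
sketch of how a provider driver might implement the
create_flow_action_esp callback added here. The foo_* names and the
hardware-programming stub are hypothetical; only the
ib_flow_action_attrs_esp structures and the AES-GCM keymat layout come
from this patch, and the uverbs handler remains responsible for
filling in the returned object's device, type, uobject and usecnt.

#include <linux/err.h>
#include <linux/slab.h>
#include <rdma/ib_verbs.h>

/* Hypothetical hardware-programming helper, stubbed for illustration. */
static void foo_program_esp_context(u32 spi, const __u32 *key, u32 key_len,
				    u32 esn)
{
}

static struct ib_flow_action *
foo_create_flow_action_esp(struct ib_device *device,
			   const struct ib_flow_action_attrs_esp *attr,
			   struct uverbs_attr_bundle *attrs)
{
	const struct ib_flow_action_attrs_esp_keymat_aes_gcm *aes_gcm;
	struct ib_flow_action *action;

	/* AES-GCM is the only keymat defined by this patch */
	if (!attr->keymat ||
	    attr->keymat->protocol != IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM)
		return ERR_PTR(-EOPNOTSUPP);

	/* As the keymat header comment notes, container_of() recovers the
	 * full keymat from the embedded protocol header.
	 */
	aes_gcm = container_of(attr->keymat,
			       struct ib_flow_action_attrs_esp_keymat_aes_gcm,
			       keymat);

	action = kzalloc(sizeof(*action), GFP_KERNEL);
	if (!action)
		return ERR_PTR(-ENOMEM);

	/* Stand-in for programming SPI, key and ESN into the device */
	foo_program_esp_context(attr->spi, aes_gcm->attrs.aes_key,
				aes_gcm->attrs.key_len, attr->esn);

	/* The uverbs handler sets device, type, uobject and usecnt */
	return action;
}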

Reviewed-by: Yishai Hadas <yishaih@xxxxxxxxxxxx>
Signed-off-by: Matan Barak <matanb@xxxxxxxxxxxx>
Signed-off-by: Leon Romanovsky <leonro@xxxxxxxxxxxx>
---
 drivers/infiniband/core/uverbs_std_types.c | 329 ++++++++++++++++++++++++++++-
 include/rdma/ib_verbs.h                    |  96 +++++++++
 include/rdma/uverbs_ioctl.h                |   2 +
 include/rdma/uverbs_std_types.h            |   1 +
 include/uapi/rdma/ib_user_ioctl_cmds.h     |  19 ++
 include/uapi/rdma/ib_user_ioctl_verbs.h    |  59 ++++++
 6 files changed, 503 insertions(+), 3 deletions(-)

diff --git a/drivers/infiniband/core/uverbs_std_types.c b/drivers/infiniband/core/uverbs_std_types.c
index 4df277eb5855..36f3c8813ed7 100644
--- a/drivers/infiniband/core/uverbs_std_types.c
+++ b/drivers/infiniband/core/uverbs_std_types.c
@@ -51,6 +51,18 @@ static int uverbs_free_flow(struct ib_uobject *uobject,
 	return ib_destroy_flow((struct ib_flow *)uobject->object);
 }
 
+static int uverbs_free_flow_action(struct ib_uobject *uobject,
+				   enum rdma_remove_reason why)
+{
+	struct ib_flow_action *action = uobject->object;
+
+	if (why == RDMA_REMOVE_DESTROY &&
+	    atomic_read(&action->usecnt))
+		return -EBUSY;
+
+	return action->device->destroy_flow_action(action);
+}
+
 static int uverbs_free_mw(struct ib_uobject *uobject,
 			  enum rdma_remove_reason why)
 {
@@ -219,6 +231,13 @@ static int uverbs_hot_unplug_completion_event_file(struct ib_uobject_file *uobj_
 #define DECLARE_COMMON_OBJECT(id, ...)	\
 	DECLARE_UVERBS_OBJECT(UVERBS_OBJECT(id), id, ##__VA_ARGS__)
 
+static int uverbs_destroy_def_handler(struct ib_device *ib_dev,
+				      struct ib_uverbs_file *file,
+				      struct uverbs_attr_bundle *attrs)
+{
+	return 0;
+}
+
 /*
  * This spec is used in order to pass information to the hardware driver in a
  * legacy way. Every verb that could get driver specific data should get this
@@ -294,8 +313,8 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)(struct ib_device *ib_dev,
 		return ret;
 
 	/* Optional param, if it doesn't exist, we get -ENOENT and skip it */
-	if (uverbs_copy_from(&attr.flags, attrs,
-			     UVERBS_ATTR_CREATE_CQ_FLAGS) == -EFAULT)
+	if (IS_UVERBS_COPY_ERR(uverbs_copy_from(&attr.flags, attrs,
+						UVERBS_ATTR_CREATE_CQ_FLAGS)))
 		return -EFAULT;
 
 	ev_file_attr = uverbs_attr_get(attrs, UVERBS_ATTR_CREATE_CQ_COMP_CHANNEL);
@@ -410,6 +429,304 @@ static DECLARE_COMMON_METHOD(UVERBS_METHOD_CQ_DESTROY,
 			     UVERBS_ATTR_TYPE(struct ib_uverbs_destroy_cq_resp),
 			     UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)));
 
+static u64 esp_flags_uverbs_to_verbs(struct uverbs_attr_bundle *attrs,
+				     u32 flags)
+{
+	u64 verbs_flags = flags;
+
+	if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_FLOW_ACTION_ESP_ESN))
+		verbs_flags |= IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED;
+
+	return verbs_flags;
+};
+
+static int validate_flow_action_esp_keymat_aes_gcm(union ib_flow_action_attrs_esp_keymats *keymat)
+{
+	struct ib_flow_action_attrs_esp_keymat_aes_gcm *aes_gcm =
+		&keymat->aes_gcm;
+
+	if (aes_gcm->attrs.iv_algo > IB_UVERBS_FLOW_ACTION_IV_ALGO_SEQ)
+		return -EOPNOTSUPP;
+
+	if (aes_gcm->attrs.key_len != 32 &&
+	    aes_gcm->attrs.key_len != 24 &&
+	    aes_gcm->attrs.key_len != 16)
+		return -EINVAL;
+
+	if (aes_gcm->attrs.icv_len != 16 &&
+	    aes_gcm->attrs.icv_len != 8 &&
+	    aes_gcm->attrs.icv_len != 12)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int (*flow_action_esp_keymat_validate[])(union ib_flow_action_attrs_esp_keymats *keymat) = {
+	[IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM] = validate_flow_action_esp_keymat_aes_gcm,
+};
+
+static int parse_esp_ip(enum ib_flow_spec_type proto,
+			const void __user *val_ptr,
+			size_t len, union ib_flow_spec *out)
+{
+	int ret;
+	const struct ib_uverbs_flow_ipv4_filter ipv4 = {
+		.src_ip = cpu_to_be32(0xffffffffUL),
+		.dst_ip = cpu_to_be32(0xffffffffUL),
+		.proto = 0xff,
+		.tos = 0xff,
+		.ttl = 0xff,
+		.flags = 0xff,
+	};
+	const struct ib_uverbs_flow_ipv6_filter ipv6 = {
+		.src_ip = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+			   0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
+		.dst_ip = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+			   0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
+		.flow_label = cpu_to_be32(0xffffffffUL),
+		.next_hdr = 0xff,
+		.traffic_class = 0xff,
+		.hop_limit = 0xff,
+	};
+	union {
+		struct ib_uverbs_flow_ipv4_filter ipv4;
+		struct ib_uverbs_flow_ipv6_filter ipv6;
+	} user_val = {};
+	const void *user_pmask;
+	size_t val_len;
+
+	/* If the IPv4/IPv6 flow specifications are extended, the mask
+	 * should be changed as well.
+	 */
+	BUILD_BUG_ON(offsetof(struct ib_uverbs_flow_ipv4_filter, flags) +
+		     sizeof(ipv4.flags) != sizeof(ipv4));
+	BUILD_BUG_ON(offsetof(struct ib_uverbs_flow_ipv6_filter, reserved) +
+		     sizeof(ipv6.reserved) != sizeof(ipv6));
+
+	switch (proto) {
+	case IB_FLOW_SPEC_IPV4:
+		if (len > sizeof(user_val.ipv4) &&
+		    !ib_is_buffer_cleared(val_ptr + sizeof(user_val.ipv4),
+					  len - sizeof(user_val.ipv4)))
+			return -EOPNOTSUPP;
+
+		val_len = min_t(size_t, len, sizeof(user_val.ipv4));
+		ret = copy_from_user(&user_val.ipv4, val_ptr,
+				     val_len);
+		if (ret)
+			return -EFAULT;
+
+		user_pmask = &ipv4;
+		break;
+	case IB_FLOW_SPEC_IPV6:
+		if (len > sizeof(user_val.ipv6) &&
+		    !ib_is_buffer_cleared(val_ptr + sizeof(user_val.ipv6),
+					  len - sizeof(user_val.ipv6)))
+			return -EOPNOTSUPP;
+
+		val_len = min_t(size_t, len, sizeof(user_val.ipv6));
+		ret = copy_from_user(&user_val.ipv6, val_ptr,
+				     val_len);
+		if (ret)
+			return -EFAULT;
+
+		user_pmask = &ipv6;
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return ib_uverbs_kern_spec_to_ib_spec_filter(proto, user_pmask,
+						     &user_val,
+						     val_len, out);
+}
+
+static int flow_action_esp_get_encap(struct ib_flow_spec_list *out,
+				     struct uverbs_attr_bundle *attrs)
+{
+	struct ib_uverbs_flow_action_esp_encap uverbs_encap;
+	int ret;
+
+	ret = uverbs_copy_from(&uverbs_encap, attrs,
+			       UVERBS_ATTR_FLOW_ACTION_ESP_ENCAP);
+	if (ret)
+		return ret;
+
+	/* We currently support only one encap */
+	if (uverbs_encap.next_ptr)
+		return -EOPNOTSUPP;
+
+	if (uverbs_encap.type != IB_FLOW_SPEC_IPV4 &&
+	    uverbs_encap.type != IB_FLOW_SPEC_IPV6)
+		return -EOPNOTSUPP;
+
+	return parse_esp_ip(uverbs_encap.type,
+			    (__force const void __user *)uverbs_encap.val_ptr,
+			    uverbs_encap.len,
+			    &out->spec);
+}
+
+struct ib_flow_action_esp_attr {
+	struct	ib_flow_action_attrs_esp		hdr;
+	union	ib_flow_action_attrs_esp_keymats	keymat;
+	union	ib_flow_action_attrs_esp_replays	replay;
+	/* We currently support only one spec */
+	struct	ib_flow_spec_list			encap;
+};
+
+#define ESP_LAST_SUPPORTED_FLAG		IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW
+static int parse_flow_action_esp(struct ib_device *ib_dev,
+				 struct ib_uverbs_file *file,
+				 struct uverbs_attr_bundle *attrs,
+				 struct ib_flow_action_esp_attr *esp_attr)
+{
+	struct ib_uverbs_flow_action_esp uverbs_esp = {};
+	int ret;
+
+	/* Optional param, if it doesn't exist, we get -ENOENT and skip it */
+	ret = uverbs_copy_from(&esp_attr->hdr.esn, attrs,
+			       UVERBS_ATTR_FLOW_ACTION_ESP_ESN);
+	if (IS_UVERBS_COPY_ERR(ret))
+		return ret;
+
+	/* This can be called from FLOW_ACTION_ESP_MODIFY where
+	 * UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS is optional
+	 */
+	if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS)) {
+		ret = uverbs_copy_from_or_zero(&uverbs_esp, attrs,
+					       UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS);
+		if (ret)
+			return ret;
+
+		if (uverbs_esp.flags & ~((ESP_LAST_SUPPORTED_FLAG << 1) - 1))
+			return -EOPNOTSUPP;
+
+		esp_attr->hdr.spi = uverbs_esp.spi;
+		esp_attr->hdr.seq = uverbs_esp.seq;
+		esp_attr->hdr.tfc_pad = uverbs_esp.tfc_pad;
+		esp_attr->hdr.hard_limit_pkts = uverbs_esp.hard_limit_pkts;
+	}
+	esp_attr->hdr.flags = esp_flags_uverbs_to_verbs(attrs, uverbs_esp.flags);
+
+	if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_FLOW_ACTION_ESP_KEYMAT)) {
+		esp_attr->keymat.keymat.protocol =
+			uverbs_attr_get_enum_id(attrs,
+						UVERBS_ATTR_FLOW_ACTION_ESP_KEYMAT);
+		ret = _uverbs_copy_from_or_zero(&esp_attr->keymat.keymat + 1,
+						attrs,
+						UVERBS_ATTR_FLOW_ACTION_ESP_KEYMAT,
+						sizeof(esp_attr->keymat));
+		if (ret)
+			return ret;
+
+		ret = flow_action_esp_keymat_validate[esp_attr->keymat.keymat.protocol](&esp_attr->keymat);
+		if (ret)
+			return ret;
+
+		esp_attr->hdr.keymat = &esp_attr->keymat.keymat;
+	}
+
+	if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_FLOW_ACTION_ESP_REPLAY)) {
+		esp_attr->replay.replay.protocol =
+			uverbs_attr_get_enum_id(attrs,
+						UVERBS_ATTR_FLOW_ACTION_ESP_REPLAY);
+
+		ret = _uverbs_copy_from_or_zero(&esp_attr->replay.replay + 1,
+						attrs,
+						UVERBS_ATTR_FLOW_ACTION_ESP_REPLAY,
+						sizeof(esp_attr->replay));
+		if (ret)
+			return ret;
+
+		esp_attr->hdr.replay = &esp_attr->replay.replay;
+	}
+
+	if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_FLOW_ACTION_ESP_ENCAP)) {
+		ret = flow_action_esp_get_encap(&esp_attr->encap, attrs);
+		if (ret)
+			return ret;
+
+		esp_attr->hdr.encap = &esp_attr->encap;
+	}
+
+	return 0;
+}
+
+static int UVERBS_HANDLER(UVERBS_METHOD_FLOW_ACTION_ESP_CREATE)(struct ib_device *ib_dev,
+								struct ib_uverbs_file *file,
+								struct uverbs_attr_bundle *attrs)
+{
+	int				  ret;
+	struct ib_uobject		  *uobj;
+	struct ib_flow_action		  *action;
+	struct ib_flow_action_esp_attr	  esp_attr = {};
+
+	if (!ib_dev->create_flow_action_esp)
+		return -EOPNOTSUPP;
+
+	ret = parse_flow_action_esp(ib_dev, file, attrs, &esp_attr);
+	if (ret)
+		return ret;
+
+	/* No need to check as this attribute is marked as MANDATORY */
+	uobj = uverbs_attr_get(attrs, UVERBS_ATTR_FLOW_ACTION_ESP_HANDLE)->obj_attr.uobject;
+	action = ib_dev->create_flow_action_esp(ib_dev, &esp_attr.hdr, attrs);
+	if (IS_ERR(action))
+		return PTR_ERR(action);
+
+	atomic_set(&action->usecnt, 0);
+	action->device = ib_dev;
+	action->type = IB_FLOW_ACTION_ESP;
+	action->uobject = uobj;
+	uobj->object = action;
+
+	return 0;
+}
+
+static struct uverbs_attr_spec uverbs_flow_action_esp_keymat[] = {
+	[IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM] = {
+		.ptr = {
+			.type = UVERBS_ATTR_TYPE_PTR_IN,
+			UVERBS_ATTR_TYPE(struct ib_uverbs_flow_action_esp_keymat_aes_gcm),
+			.flags = UVERBS_ATTR_SPEC_F_MIN_SZ_OR_ZERO,
+		},
+	},
+};
+
+static struct uverbs_attr_spec uverbs_flow_action_esp_replay[] = {
+	[IB_UVERBS_FLOW_ACTION_ESP_REPLAY_BMP] = {
+		.ptr = {
+			.type = UVERBS_ATTR_TYPE_PTR_IN,
+			UVERBS_ATTR_STRUCT(struct ib_uverbs_flow_action_esp_replay_bmp, size),
+			.flags = UVERBS_ATTR_SPEC_F_MIN_SZ_OR_ZERO,
+		}
+	},
+};
+
+static DECLARE_COMMON_METHOD(UVERBS_METHOD_FLOW_ACTION_ESP_CREATE,
+	&UVERBS_ATTR_IDR(UVERBS_ATTR_FLOW_ACTION_ESP_HANDLE, UVERBS_OBJECT_FLOW_ACTION,
+			 UVERBS_ACCESS_NEW,
+			 UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
+	&UVERBS_ATTR_PTR_IN(UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS,
+			    UVERBS_ATTR_STRUCT(struct ib_uverbs_flow_action_esp, hard_limit_pkts),
+			    UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY |
+				     UVERBS_ATTR_SPEC_F_MIN_SZ_OR_ZERO)),
+	&UVERBS_ATTR_PTR_IN(UVERBS_ATTR_FLOW_ACTION_ESP_ESN, UVERBS_ATTR_TYPE(__u32)),
+	&UVERBS_ATTR_ENUM_IN(UVERBS_ATTR_FLOW_ACTION_ESP_KEYMAT,
+			     uverbs_flow_action_esp_keymat,
+			     UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
+	&UVERBS_ATTR_ENUM_IN(UVERBS_ATTR_FLOW_ACTION_ESP_REPLAY,
+			     uverbs_flow_action_esp_replay),
+	&UVERBS_ATTR_PTR_IN(UVERBS_ATTR_FLOW_ACTION_ESP_ENCAP,
+			    UVERBS_ATTR_STRUCT(struct ib_uverbs_flow_action_esp_encap, type)));
+
+static DECLARE_UVERBS_METHOD(UVERBS_METHOD(UVERBS_METHOD_FLOW_ACTION_DESTROY),
+	UVERBS_METHOD_FLOW_ACTION_DESTROY, uverbs_destroy_def_handler,
+	&UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_FLOW_ACTION_HANDLE,
+			 UVERBS_OBJECT_FLOW_ACTION,
+			 UVERBS_ACCESS_DESTROY,
+			 UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)));
+
 DECLARE_COMMON_OBJECT(UVERBS_OBJECT_COMP_CHANNEL,
 		      &UVERBS_TYPE_ALLOC_FD(0,
 					      sizeof(struct ib_uverbs_completion_event_file),
@@ -445,6 +762,11 @@ DECLARE_COMMON_OBJECT(UVERBS_OBJECT_AH,
 DECLARE_COMMON_OBJECT(UVERBS_OBJECT_FLOW,
 		      &UVERBS_TYPE_ALLOC_IDR(0, uverbs_free_flow));
 
+DECLARE_COMMON_OBJECT(UVERBS_OBJECT_FLOW_ACTION,
+		      &UVERBS_TYPE_ALLOC_IDR(0, uverbs_free_flow_action),
+		      &UVERBS_METHOD(UVERBS_METHOD_FLOW_ACTION_ESP_CREATE),
+		      &UVERBS_METHOD(UVERBS_METHOD_FLOW_ACTION_DESTROY));
+
 DECLARE_COMMON_OBJECT(UVERBS_OBJECT_WQ,
 		      &UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uwq_object), 0,
 						  uverbs_free_wq));
@@ -475,4 +797,5 @@ DECLARE_UVERBS_OBJECT_TREE(uverbs_default_objects,
 			   &UVERBS_OBJECT(UVERBS_OBJECT_FLOW),
 			   &UVERBS_OBJECT(UVERBS_OBJECT_WQ),
 			   &UVERBS_OBJECT(UVERBS_OBJECT_RWQ_IND_TBL),
-			   &UVERBS_OBJECT(UVERBS_OBJECT_XRCD));
+			   &UVERBS_OBJECT(UVERBS_OBJECT_XRCD),
+			   &UVERBS_OBJECT(UVERBS_OBJECT_FLOW_ACTION));
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index e7bb73f26eda..e4c7ad2a7d5c 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -65,6 +65,7 @@
 #include <uapi/rdma/ib_user_verbs.h>
 #include <rdma/restrack.h>
 #include <uapi/rdma/rdma_user_ioctl.h>
+#include <uapi/rdma/ib_user_ioctl_verbs.h>
 
 #define IB_FW_VERSION_NAME_MAX	ETHTOOL_FWVERS_LEN
 
@@ -1989,6 +1990,94 @@ struct ib_flow {
 	struct ib_uobject	*uobject;
 };
 
+enum ib_flow_action_type {
+	IB_FLOW_ACTION_UNSPECIFIED,
+	IB_FLOW_ACTION_ESP = 1,
+};
+
+/* We align this struct to u64 because the keymat data itself is placed right
+ * after this structure. We copy data into that trailing part via
+ * ret = _uverbs_copy_from_or_zero(&esp_attr->keymat.keymat + 1,
+ *				   attrs,
+ *				   UVERBS_ATTR_FLOW_ACTION_ESP_KEYMAT,
+ *				   sizeof(esp_attr->keymat));
+ * Since the structures after the keymat could have different alignment
+ * requirements, we chose to align the header. This way, we guarantee that all
+ * the structures that come right after would be aligned properly.
+ */
+struct ib_flow_action_attrs_esp_keymat {
+	enum ib_uverbs_flow_action_esp_keymat		protocol;
+} __aligned(__alignof__(u64));
+
+struct ib_flow_action_attrs_esp_keymat_aes_gcm {
+	struct ib_flow_action_attrs_esp_keymat			keymat;
+	struct ib_uverbs_flow_action_esp_keymat_aes_gcm		attrs;
+};
+
+union ib_flow_action_attrs_esp_keymats {
+	struct ib_flow_action_attrs_esp_keymat		keymat;
+	struct ib_flow_action_attrs_esp_keymat_aes_gcm	aes_gcm;
+};
+
+/* see __aligned explanation in the keymat section */
+struct ib_flow_action_attrs_esp_replay {
+	enum ib_uverbs_flow_action_esp_replay		protocol;
+} __aligned(__alignof__(u64));
+
+struct ib_flow_action_attrs_esp_replay_bmp {
+	struct ib_flow_action_attrs_esp_replay		replay;
+	struct ib_uverbs_flow_action_esp_replay_bmp	attrs;
+};
+
+union ib_flow_action_attrs_esp_replays {
+	struct ib_flow_action_attrs_esp_replay		replay;
+	struct ib_flow_action_attrs_esp_replay_bmp	bmp;
+};
+
+enum ib_flow_action_attrs_esp_flags {
+	/* All user-space flags at the top: Use enum ib_uverbs_flow_action_esp_flags
+	 * This is done in order to share the same flags between user-space and
+	 * kernel and spare an unnecessary translation.
+	 */
+
+	/* Kernel flags */
+	IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED	= 1ULL << 32,
+};
+
+struct ib_flow_spec_list {
+	struct ib_flow_spec_list	*next;
+	union ib_flow_spec		spec;
+};
+
+struct ib_flow_action_attrs_esp {
+	/* Pointer to esp_keymat struct. Use container_of to get the actual
+	 * keymat.
+	 */
+	struct ib_flow_action_attrs_esp_keymat		*keymat;
+	/* Pointer to esp_replay struct. Use container_of to get the actual
+	 * replay.
+	 */
+	struct ib_flow_action_attrs_esp_replay		*replay;
+	struct ib_flow_spec_list			*encap;
+	/* Used only if IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED is enabled.
+	 * Value of 0 is a valid value.
+	 */
+	u32						esn;
+	u32						spi;
+	u32						seq;
+	u32						tfc_pad;
+	/* Use enum ib_flow_action_attrs_esp_flags */
+	u64						flags;
+	u64						hard_limit_pkts;
+};
+
+struct ib_flow_action {
+	struct ib_device		*device;
+	struct ib_uobject		*uobject;
+	enum ib_flow_action_type	type;
+	atomic_t			usecnt;
+};
+
 struct ib_mad_hdr;
 struct ib_grh;
 
@@ -2065,6 +2154,8 @@ struct ib_port_pkey_list {
 	struct list_head              pkey_list;
 };
 
+struct uverbs_attr_bundle;
+
 struct ib_device {
 	/* Do not access @dma_device directly from ULP nor from HW drivers. */
 	struct device                *dma_device;
@@ -2320,6 +2411,11 @@ struct ib_device {
 							   struct ib_rwq_ind_table_init_attr *init_attr,
 							   struct ib_udata *udata);
 	int                        (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
+	struct ib_flow_action *	   (*create_flow_action_esp)(struct ib_device *device,
+							     const struct ib_flow_action_attrs_esp *attr,
+							     struct uverbs_attr_bundle *attrs);
+	int			   (*destroy_flow_action)(struct ib_flow_action *action);
+
 	/**
 	 * rdma netdev operation
 	 *
diff --git a/include/rdma/uverbs_ioctl.h b/include/rdma/uverbs_ioctl.h
index 264a07694f49..9745420f59aa 100644
--- a/include/rdma/uverbs_ioctl.h
+++ b/include/rdma/uverbs_ioctl.h
@@ -384,6 +384,8 @@ static inline bool uverbs_attr_is_valid(const struct uverbs_attr_bundle *attrs_b
 					    idx & ~UVERBS_ID_NS_MASK);
 }
 
+#define IS_UVERBS_COPY_ERR(_ret)		((_ret) == -EFAULT)
+
 static inline const struct uverbs_attr *uverbs_attr_get(const struct uverbs_attr_bundle *attrs_bundle,
 							u16 idx)
 {
diff --git a/include/rdma/uverbs_std_types.h b/include/rdma/uverbs_std_types.h
index 45ee7d1bfa32..e93bd7e1f1a0 100644
--- a/include/rdma/uverbs_std_types.h
+++ b/include/rdma/uverbs_std_types.h
@@ -54,6 +54,7 @@ extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_FLOW);
 extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_WQ);
 extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_RWQ_IND_TBL);
 extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_XRCD);
+extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_FLOW_ACTION);
 
 extern const struct uverbs_object_tree_def uverbs_default_objects;
 static inline const struct uverbs_object_tree_def *uverbs_default_get_objects(void)
diff --git a/include/uapi/rdma/ib_user_ioctl_cmds.h b/include/uapi/rdma/ib_user_ioctl_cmds.h
index 3c5bd45be188..8a80a2659968 100644
--- a/include/uapi/rdma/ib_user_ioctl_cmds.h
+++ b/include/uapi/rdma/ib_user_ioctl_cmds.h
@@ -54,6 +54,7 @@ enum uverbs_default_objects {
 	UVERBS_OBJECT_XRCD,
 	UVERBS_OBJECT_RWQ_IND_TBL,
 	UVERBS_OBJECT_WQ,
+	UVERBS_OBJECT_FLOW_ACTION,
 };
 
 enum {
@@ -76,9 +77,27 @@ enum uverbs_attrs_destroy_cq_cmd_attr_ids {
 	UVERBS_ATTR_DESTROY_CQ_RESP,
 };
 
+enum uverbs_attrs_create_flow_action_esp {
+	UVERBS_ATTR_FLOW_ACTION_ESP_HANDLE,
+	UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS,
+	UVERBS_ATTR_FLOW_ACTION_ESP_ESN,
+	UVERBS_ATTR_FLOW_ACTION_ESP_KEYMAT,
+	UVERBS_ATTR_FLOW_ACTION_ESP_REPLAY,
+	UVERBS_ATTR_FLOW_ACTION_ESP_ENCAP,
+};
+
+enum uverbs_attrs_destroy_flow_action_esp {
+	UVERBS_ATTR_DESTROY_FLOW_ACTION_HANDLE,
+};
+
 enum uverbs_methods_cq {
 	UVERBS_METHOD_CQ_CREATE,
 	UVERBS_METHOD_CQ_DESTROY,
 };
 
+enum uverbs_methods_actions_flow_action_ops {
+	UVERBS_METHOD_FLOW_ACTION_ESP_CREATE,
+	UVERBS_METHOD_FLOW_ACTION_DESTROY,
+};
+
 #endif
diff --git a/include/uapi/rdma/ib_user_ioctl_verbs.h b/include/uapi/rdma/ib_user_ioctl_verbs.h
index 3d3a2f017abc..8d71e7115611 100644
--- a/include/uapi/rdma/ib_user_ioctl_verbs.h
+++ b/include/uapi/rdma/ib_user_ioctl_verbs.h
@@ -40,4 +40,63 @@
 #define RDMA_UAPI_PTR(_type, _name)	_type __attribute__((aligned(8))) _name
 #endif
 
+enum ib_uverbs_flow_action_esp_keymat {
+	IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM,
+};
+
+enum ib_uverbs_flow_action_esp_keymat_aes_gcm_iv_algo {
+	IB_UVERBS_FLOW_ACTION_IV_ALGO_SEQ,
+};
+
+struct ib_uverbs_flow_action_esp_keymat_aes_gcm {
+	__aligned_u64	iv;
+	__u32		iv_algo; /* Use enum ib_uverbs_flow_action_esp_keymat_aes_gcm_iv_algo */
+
+	__u32		salt;
+	__u32		icv_len;
+
+	__u32		key_len;
+	__u32		aes_key[256 / 32];
+};
+
+enum ib_uverbs_flow_action_esp_replay {
+	IB_UVERBS_FLOW_ACTION_ESP_REPLAY_NONE,
+	IB_UVERBS_FLOW_ACTION_ESP_REPLAY_BMP,
+};
+
+struct ib_uverbs_flow_action_esp_replay_bmp {
+	__u32	size;
+};
+
+enum ib_uverbs_flow_action_esp_flags {
+	IB_UVERBS_FLOW_ACTION_ESP_FLAGS_INLINE_CRYPTO	= 0UL << 0,	/* Default */
+	IB_UVERBS_FLOW_ACTION_ESP_FLAGS_FULL_OFFLOAD	= 1UL << 0,
+
+	IB_UVERBS_FLOW_ACTION_ESP_FLAGS_TUNNEL		= 0UL << 1,	/* Default */
+	IB_UVERBS_FLOW_ACTION_ESP_FLAGS_TRANSPORT	= 1UL << 1,
+
+	IB_UVERBS_FLOW_ACTION_ESP_FLAGS_DECRYPT		= 0UL << 2,	/* Default */
+	IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ENCRYPT		= 1UL << 2,
+
+	IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW	= 1UL << 3,
+};
+
+struct ib_uverbs_flow_action_esp_encap {
+	/* This struct represents a list of pointers to flow_xxxx_filter that
+	 * encapsulates the payload in ESP tunnel mode.
+	 */
+	RDMA_UAPI_PTR(void *, val_ptr); /* pointer to a flow_xxxx_filter */
+	RDMA_UAPI_PTR(struct ib_uverbs_flow_action_esp_encap *, next_ptr);
+	__u16	len;		/* Len of the filter struct val_ptr points to */
+	__u16	type;		/* Use flow_spec_type enum */
+};
+
+struct ib_uverbs_flow_action_esp {
+	__u32		spi;
+	__u32		seq;
+	__u32		tfc_pad;
+	__u32		flags;
+	__aligned_u64	hard_limit_pkts;
+};
+
 #endif
-- 
2.16.2



