[PATCH net-next 12/20] net: ethernet: qualcomm: Add PPE RSS hash config

The PPE RSS hash is generated from the packet content using the
configured seed. The hash value is used to select the destination
queue and can also be passed to the EDMA RX descriptor.
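
As an illustration only (not part of this series), a consumer such as
the EDMA driver could program the hash through the queue ops added
below; the edma_setup_rss() helper name is hypothetical:

	static int edma_setup_rss(struct ppe_device *ppe_dev)
	{
		const struct ppe_queue_ops *ops = ppe_queue_config_ops_get();
		struct ppe_rss_hash_cfg cfg = {
			.hash_seed = get_random_u32(),
			.hash_mask = 0xfff,
			/* Mix and fin fields omitted for brevity. */
		};

		if (!ops->rss_hash_config_set)
			return -EINVAL;

		/* Both mode bits can be combined to program IPv4 and IPv6. */
		return ops->rss_hash_config_set(ppe_dev,
						PPE_RSS_HASH_MODE_IPV4 |
						PPE_RSS_HASH_MODE_IPV6,
						cfg);
	}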

Signed-off-by: Luo Jie <quic_luoj@xxxxxxxxxxx>
---
 drivers/net/ethernet/qualcomm/ppe/ppe.c      | 53 ++++++++++-
 drivers/net/ethernet/qualcomm/ppe/ppe_ops.c  | 97 ++++++++++++++++++++
 drivers/net/ethernet/qualcomm/ppe/ppe_ops.h  | 22 +++++
 drivers/net/ethernet/qualcomm/ppe/ppe_regs.h | 44 +++++++++
 4 files changed, 215 insertions(+), 1 deletion(-)

diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe.c b/drivers/net/ethernet/qualcomm/ppe/ppe.c
index bce0a9137c9f..746ef42fea5d 100644
--- a/drivers/net/ethernet/qualcomm/ppe/ppe.c
+++ b/drivers/net/ethernet/qualcomm/ppe/ppe.c
@@ -1172,6 +1172,53 @@ static int ppe_port_ctrl_init(struct ppe_device *ppe_dev)
 	return 0;
 }
 
+static int ppe_rss_hash_init(struct ppe_device *ppe_dev)
+{
+	u16 fins[5] = {0x205, 0x264, 0x227, 0x245, 0x201};
+	const struct ppe_queue_ops *ppe_queue_ops;
+	u8 ips[4] = {0x13, 0xb, 0x13, 0xb};
+	struct ppe_rss_hash_cfg hash_cfg;
+	int i, ret;
+
+	ppe_queue_ops = ppe_queue_config_ops_get();
+	if (!ppe_queue_ops->rss_hash_config_set)
+		return -EINVAL;
+
+	hash_cfg.hash_seed = get_random_u32();
+	hash_cfg.hash_mask = 0xfff;
+	hash_cfg.hash_fragment_mode = false;
+
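+	/* Each fin value packs a 5-bit inner and a 5-bit outer hash
+	 * selector, matching the INNER/OUTER fields of the FIN registers.
+	 */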
+	for (i = 0; i < ARRAY_SIZE(fins); i++) {
+		hash_cfg.hash_fin_inner[i] = fins[i] & 0x1f;
+		hash_cfg.hash_fin_outer[i] = fins[i] >> 5;
+	}
+
+	hash_cfg.hash_protocol_mix = 0x13;
+	hash_cfg.hash_dport_mix = 0xb;
+	hash_cfg.hash_sport_mix = 0x13;
+	hash_cfg.hash_sip_mix[0] = 0x13;
+	hash_cfg.hash_dip_mix[0] = 0xb;
+
+	ret = ppe_queue_ops->rss_hash_config_set(ppe_dev,
+						 PPE_RSS_HASH_MODE_IPV4,
+						 hash_cfg);
+	if (ret)
+		return ret;
+
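+	/* The IPv6 hash takes four SIP and four DIP mix values, so
+	 * populate all of the entries here.
+	 */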
+	for (i = 0; i < ARRAY_SIZE(ips); i++) {
+		hash_cfg.hash_sip_mix[i] = ips[i];
+		hash_cfg.hash_dip_mix[i] = ips[i];
+	}
+
+	return ppe_queue_ops->rss_hash_config_set(ppe_dev,
+						  PPE_RSS_HASH_MODE_IPV6,
+						  hash_cfg);
+}
+
 static int ppe_dev_hw_init(struct ppe_device *ppe_dev)
 {
 	int ret;
@@ -1184,7 +1231,11 @@ static int ppe_dev_hw_init(struct ppe_device *ppe_dev)
 	if (ret)
 		return ret;
 
-	return ppe_port_ctrl_init(ppe_dev);
+	ret = ppe_port_ctrl_init(ppe_dev);
+	if (ret)
+		return ret;
+
+	return ppe_rss_hash_init(ppe_dev);
 }
 
 static int qcom_ppe_probe(struct platform_device *pdev)
diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_ops.c b/drivers/net/ethernet/qualcomm/ppe/ppe_ops.c
index b017983e7cbf..0398a36d680a 100644
--- a/drivers/net/ethernet/qualcomm/ppe/ppe_ops.c
+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_ops.c
@@ -333,6 +333,102 @@ int ppe_counter_set(struct ppe_device *ppe_dev, int port, bool enable)
 			FIELD_PREP(PPE_PORT_EG_VLAN_TX_COUNTING_EN, enable));
 }
 
+static int ppe_rss_hash_config_set(struct ppe_device *ppe_dev,
+				   int mode,
+				   struct ppe_rss_hash_cfg cfg)
+{
+	u32 val;
+	int i;
+
+	if (mode & PPE_RSS_HASH_MODE_IPV4) {
+		val = FIELD_PREP(PPE_RSS_HASH_MASK_IPV4_HASH_MASK, cfg.hash_mask) |
+		      FIELD_PREP(PPE_RSS_HASH_MASK_IPV4_FRAGMENT,
+				 cfg.hash_fragment_mode);
+		ppe_write(ppe_dev, PPE_RSS_HASH_MASK_IPV4, val);
+
+		val = FIELD_PREP(PPE_RSS_HASH_SEED_IPV4_VAL, cfg.hash_seed);
+		ppe_write(ppe_dev, PPE_RSS_HASH_SEED_IPV4, val);
+
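+		/* Mix registers 0-4 take the SIP, DIP, protocol,
+		 * destination port and source port mix values respectively.
+		 */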
+		for (i = 0; i < PPE_RSS_HASH_MIX_IPV4_NUM; i++) {
+			switch (i) {
+			case 0:
+				val = FIELD_PREP(PPE_RSS_HASH_MIX_IPV4_VAL,
+						 cfg.hash_sip_mix[0]);
+				break;
+			case 1:
+				val = FIELD_PREP(PPE_RSS_HASH_MIX_IPV4_VAL,
+						 cfg.hash_dip_mix[0]);
+				break;
+			case 2:
+				val = FIELD_PREP(PPE_RSS_HASH_MIX_IPV4_VAL,
+						 cfg.hash_protocol_mix);
+				break;
+			case 3:
+				val = FIELD_PREP(PPE_RSS_HASH_MIX_IPV4_VAL,
+						 cfg.hash_dport_mix);
+				break;
+			case 4:
+				val = FIELD_PREP(PPE_RSS_HASH_MIX_IPV4_VAL,
+						 cfg.hash_sport_mix);
+				break;
+			default:
+				break;
+			}
+			ppe_write(ppe_dev, PPE_RSS_HASH_MIX_IPV4 + i * PPE_RSS_HASH_MIX_IPV4_INC,
+				  val);
+		}
+
+		for (i = 0; i < PPE_RSS_HASH_FIN_IPV4_NUM; i++) {
+			val = FIELD_PREP(PPE_RSS_HASH_FIN_IPV4_INNER, cfg.hash_fin_inner[i]) |
+			      FIELD_PREP(PPE_RSS_HASH_FIN_IPV4_OUTER,
+					 cfg.hash_fin_outer[i]);
+			ppe_write(ppe_dev, PPE_RSS_HASH_FIN_IPV4 + i * PPE_RSS_HASH_FIN_IPV4_INC,
+				  val);
+		}
+	}
+
+	if (mode & PPE_RSS_HASH_MODE_IPV6) {
+		val = FIELD_PREP(PPE_RSS_HASH_MASK_HASH_MASK, cfg.hash_mask) |
+		      FIELD_PREP(PPE_RSS_HASH_MASK_FRAGMENT, cfg.hash_fragment_mode);
+		ppe_write(ppe_dev, PPE_RSS_HASH_MASK, val);
+
+		val = FIELD_PREP(PPE_RSS_HASH_SEED_VAL, cfg.hash_seed);
+		ppe_write(ppe_dev, PPE_RSS_HASH_SEED, val);
+
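+		/* Mix registers 0-3 take the SIP words, 4-7 the DIP words,
+		 * followed by protocol, destination port and source port.
+		 */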
+		for (i = 0; i < PPE_RSS_HASH_MIX_NUM; i++) {
+			switch (i) {
+			case 0 ... 3:
+				val = FIELD_PREP(PPE_RSS_HASH_MIX_VAL, cfg.hash_sip_mix[i]);
+				break;
+			case 4 ... 7:
+				val = FIELD_PREP(PPE_RSS_HASH_MIX_VAL, cfg.hash_dip_mix[i - 4]);
+				break;
+			case 8:
+				val = FIELD_PREP(PPE_RSS_HASH_MIX_VAL, cfg.hash_protocol_mix);
+				break;
+			case 9:
+				val = FIELD_PREP(PPE_RSS_HASH_MIX_VAL, cfg.hash_dport_mix);
+				break;
+			case 10:
+				val = FIELD_PREP(PPE_RSS_HASH_MIX_VAL, cfg.hash_sport_mix);
+				break;
+			default:
+				break;
+			}
+			ppe_write(ppe_dev, PPE_RSS_HASH_MIX + i * PPE_RSS_HASH_MIX_INC, val);
+		}
+
+		for (i = 0; i < PPE_RSS_HASH_FIN_NUM; i++) {
+			val = FIELD_PREP(PPE_RSS_HASH_FIN_INNER, cfg.hash_fin_inner[i]) |
+			      FIELD_PREP(PPE_RSS_HASH_FIN_OUTER, cfg.hash_fin_outer[i]);
+
+			ppe_write(ppe_dev, PPE_RSS_HASH_FIN + i * PPE_RSS_HASH_FIN_INC, val);
+		}
+	}
+
+	return 0;
+}
+
 static const struct ppe_queue_ops qcom_ppe_queue_config_ops = {
 	.queue_scheduler_set = ppe_queue_scheduler_set,
 	.queue_scheduler_get = ppe_queue_scheduler_get,
@@ -340,6 +436,7 @@ static const struct ppe_queue_ops qcom_ppe_queue_config_ops = {
 	.queue_ucast_base_get = ppe_queue_ucast_base_get,
 	.queue_ucast_pri_class_set = ppe_queue_ucast_pri_class_set,
 	.queue_ucast_hash_class_set = ppe_queue_ucast_hash_class_set,
+	.rss_hash_config_set = ppe_rss_hash_config_set,
 };
 
 const struct ppe_queue_ops *ppe_queue_config_ops_get(void)
diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_ops.h b/drivers/net/ethernet/qualcomm/ppe/ppe_ops.h
index ab64a760b60b..da0f37323042 100644
--- a/drivers/net/ethernet/qualcomm/ppe/ppe_ops.h
+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_ops.h
@@ -12,6 +12,8 @@
 
 #define PPE_QUEUE_PRI_MAX		16
 #define PPE_QUEUE_HASH_MAX		256
+#define PPE_RSS_HASH_MODE_IPV4		BIT(0)
+#define PPE_RSS_HASH_MODE_IPV6		BIT(1)
 
 /* PPE hardware QoS configurations used to dispatch the packet passed
  * through PPE, the scheduler supports DRR(deficit round robin with the
@@ -148,6 +150,23 @@ struct ppe_servcode_cfg {
 	int offset_sel;
 };
 
+/* PPE RSS hash configuration. The hash value is generated from the
+ * packet 5-tuple using the configured seed and mix values, and is
+ * used to decide the final queue ID.
+ */
+struct ppe_rss_hash_cfg {
+	u32 hash_mask;
+	bool hash_fragment_mode;
+	u32 hash_seed;
+	u8 hash_sip_mix[4];
+	u8 hash_dip_mix[4];
+	u8 hash_protocol_mix;
+	u8 hash_sport_mix;
+	u8 hash_dport_mix;
+	u8 hash_fin_inner[5];
+	u8 hash_fin_outer[5];
+};
+
 /* The operations are used to configure the PPE queue related resource */
 struct ppe_queue_ops {
 	int (*queue_scheduler_set)(struct ppe_device *ppe_dev,
@@ -176,6 +195,9 @@ struct ppe_queue_ops {
 					  int profile_id,
 					  int rss_hash,
 					  int class_offset);
+	int (*rss_hash_config_set)(struct ppe_device *ppe_dev,
+				   int mode,
+				   struct ppe_rss_hash_cfg hash_cfg);
 };
 
 const struct ppe_queue_ops *ppe_queue_config_ops_get(void);
diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
index 3e61de54f921..b42089599cc9 100644
--- a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
@@ -19,6 +19,50 @@
 #define PPE_RX_FIFO_CFG_INC					4
 #define PPE_RX_FIFO_CFG_THRSH					GENMASK(2, 0)
 
+#define PPE_RSS_HASH_MASK					0xb4318
+#define PPE_RSS_HASH_MASK_NUM					1
+#define PPE_RSS_HASH_MASK_INC					4
+#define PPE_RSS_HASH_MASK_HASH_MASK				GENMASK(20, 0)
+#define PPE_RSS_HASH_MASK_FRAGMENT				BIT(28)
+
+#define PPE_RSS_HASH_SEED					0xb431c
+#define PPE_RSS_HASH_SEED_NUM					1
+#define PPE_RSS_HASH_SEED_INC					4
+#define PPE_RSS_HASH_SEED_VAL					GENMASK(31, 0)
+
+#define PPE_RSS_HASH_MIX					0xb4320
+#define PPE_RSS_HASH_MIX_NUM					11
+#define PPE_RSS_HASH_MIX_INC					4
+#define PPE_RSS_HASH_MIX_VAL					GENMASK(4, 0)
+
+#define PPE_RSS_HASH_FIN					0xb4350
+#define PPE_RSS_HASH_FIN_NUM					5
+#define PPE_RSS_HASH_FIN_INC					4
+#define PPE_RSS_HASH_FIN_INNER					GENMASK(4, 0)
+#define PPE_RSS_HASH_FIN_OUTER					GENMASK(9, 5)
+
+#define PPE_RSS_HASH_MASK_IPV4					0xb4380
+#define PPE_RSS_HASH_MASK_IPV4_NUM				1
+#define PPE_RSS_HASH_MASK_IPV4_INC				4
+#define PPE_RSS_HASH_MASK_IPV4_HASH_MASK			GENMASK(20, 0)
+#define PPE_RSS_HASH_MASK_IPV4_FRAGMENT				BIT(28)
+
+#define PPE_RSS_HASH_SEED_IPV4					0xb4384
+#define PPE_RSS_HASH_SEED_IPV4_NUM				1
+#define PPE_RSS_HASH_SEED_IPV4_INC				4
+#define PPE_RSS_HASH_SEED_IPV4_VAL				GENMASK(31, 0)
+
+#define PPE_RSS_HASH_MIX_IPV4					0xb4390
+#define PPE_RSS_HASH_MIX_IPV4_NUM				5
+#define PPE_RSS_HASH_MIX_IPV4_INC				4
+#define PPE_RSS_HASH_MIX_IPV4_VAL				GENMASK(4, 0)
+
+#define PPE_RSS_HASH_FIN_IPV4					0xb43b0
+#define PPE_RSS_HASH_FIN_IPV4_NUM				5
+#define PPE_RSS_HASH_FIN_IPV4_INC				4
+#define PPE_RSS_HASH_FIN_IPV4_INNER				GENMASK(4, 0)
+#define PPE_RSS_HASH_FIN_IPV4_OUTER				GENMASK(9, 5)
+
 #define PPE_BM_TDM_CFG_TBL					0xc000
 #define PPE_BM_TDM_CFG_TBL_NUM					128
 #define PPE_BM_TDM_CFG_TBL_INC					0x10
-- 
2.42.0




