Re: [PATCH 0/9] staging: octeon: multi rx group (queue) support

Aaro Koskinen wrote:
> Oops, looks like I tested without CONFIG_NET_POLL_CONTROLLER enabled
> and that seems to be broken. Sorry.

I'm not using CONFIG_NET_POLL_CONTROLLER either; the problem is in the
normal cvm_oct_napi_poll() path.

Here's my workaround:

--- a/drivers/staging/octeon/ethernet-rx.c
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -159,7 +159,7 @@ static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
 	return 0;
 }

-static int cvm_oct_poll(struct oct_rx_group *rx_group, int budget)
+static int cvm_oct_poll(int group, int budget)
 {
 	const int	coreid = cvmx_get_core_num();
 	u64	old_group_mask;
@@ -181,13 +181,13 @@ static int cvm_oct_poll(struct oct_rx_group *rx_group, int budget)
 	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
 		old_group_mask = cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid));
 		cvmx_write_csr(CVMX_SSO_PPX_GRP_MSK(coreid),
-			       BIT(rx_group->group));
+			       BIT(group));
 		cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid)); /* Flush */
 	} else {
 		old_group_mask = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(coreid));
 		cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid),
 			       (old_group_mask & ~0xFFFFull) |
-			       BIT(rx_group->group));
+			       BIT(group));
 	}

 	if (USE_ASYNC_IOBDMA) {
@@ -212,15 +212,15 @@ static int cvm_oct_poll(struct oct_rx_group *rx_group, int budget)
 		if (!work) {
 			if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
 				cvmx_write_csr(CVMX_SSO_WQ_IQ_DIS,
-					       BIT(rx_group->group));
+					       BIT(group));
 				cvmx_write_csr(CVMX_SSO_WQ_INT,
-					       BIT(rx_group->group));
+					       BIT(group));
 			} else {
 				union cvmx_pow_wq_int wq_int;

 				wq_int.u64 = 0;
-				wq_int.s.iq_dis = BIT(rx_group->group);
-				wq_int.s.wq_int = BIT(rx_group->group);
+				wq_int.s.iq_dis = BIT(group);
+				wq_int.s.wq_int = BIT(group);
 				cvmx_write_csr(CVMX_POW_WQ_INT, wq_int.u64);
 			}
 			break;
@@ -447,7 +447,7 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
 						     napi);
 	int rx_count;

-	rx_count = cvm_oct_poll(rx_group, budget);
+	rx_count = cvm_oct_poll(rx_group->group, budget);

 	if (rx_count < budget) {
 		/* No more work */
@@ -466,7 +466,7 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
  */
 void cvm_oct_poll_controller(struct net_device *dev)
 {
-	cvm_oct_poll(oct_rx_group, 16);
+	cvm_oct_poll(oct_rx_group->group, 16);
 }
 #endif
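
For reference, the only functional change above is that cvm_oct_poll()
now takes the group number by value instead of a struct oct_rx_group
pointer, so the poll loop itself never dereferences rx_group. The
pointer still comes from container_of() on the napi member in
cvm_oct_napi_poll(); a rough sketch of that pattern (the struct layout
is assumed here, it isn't visible in the hunks above):

	struct oct_rx_group {
		int group;	/* POW/SSO group serviced by this NAPI context */
		struct napi_struct napi;
	};

	static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
	{
		struct oct_rx_group *rx_group =
			container_of(napi, struct oct_rx_group, napi);

		/* Hand over just the group number; cvm_oct_poll() no
		 * longer touches rx_group itself.
		 */
		return cvm_oct_poll(rx_group->group, budget);
	}

(The sketch drops the rx_count/napi_complete handling of the real
function to keep it short.)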

> Can you see multiple ethernet IRQs in /proc/interrupts and their
> counters increasing?
> 
> With receive_group_order=4 you should see 16 IRQs.

I see the 16 IRQs, and the first one's counter does increase, but packets
don't make it to the application.

--Ed