The Octeon Ethernet driver currently tries to wake up multiple CPU cores
for parallel NAPI processing when needed, to cope with the fact that all
network interfaces are handled through the same receive queue processed
by a single core/interrupt (see
http://marc.info/?l=linux-kernel&m=137209784914565&w=2). This partially
overlaps with the generic RPS functionality. Commit a6a39a7fcff5
("staging: octeon-ethernet: disable load balance for receiving packet
when CONFIG_RPS is enabled") already disabled this when RPS is enabled.
Let's delete this code altogether, as it has issues such as packet
reordering and potential problems with CPU offlining/onlining.

Tested on EdgeRouter Lite.

Signed-off-by: Aaro Koskinen <aaro.koskinen@xxxxxx>
---
 drivers/staging/octeon/ethernet-rx.c     | 109 +++----------------------
 drivers/staging/octeon/ethernet.c        |   6 --
 drivers/staging/octeon/octeon-ethernet.h |   1 -
 3 files changed, 10 insertions(+), 106 deletions(-)

diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
index 44e372f..1789a12 100644
--- a/drivers/staging/octeon/ethernet-rx.c
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -61,66 +61,7 @@
 
 #include <asm/octeon/cvmx-gmxx-defs.h>
 
-struct cvm_napi_wrapper {
-	struct napi_struct napi;
-} ____cacheline_aligned_in_smp;
-
-static struct cvm_napi_wrapper cvm_oct_napi[NR_CPUS] __cacheline_aligned_in_smp;
-
-struct cvm_oct_core_state {
-	int baseline_cores;
-	/*
-	 * The number of additional cores that could be processing
-	 * input packets.
-	 */
-	atomic_t available_cores;
-	cpumask_t cpu_state;
-} ____cacheline_aligned_in_smp;
-
-static struct cvm_oct_core_state core_state __cacheline_aligned_in_smp;
-
-static int cvm_irq_cpu;
-
-static void cvm_oct_enable_napi(void *_)
-{
-	int cpu = smp_processor_id();
-	napi_schedule(&cvm_oct_napi[cpu].napi);
-}
-
-static void cvm_oct_enable_one_cpu(void)
-{
-	int v;
-	int cpu;
-
-	/* Check to see if more CPUs are available for receive processing... */
-	v = atomic_sub_if_positive(1, &core_state.available_cores);
-	if (v < 0)
-		return;
-
-	/* ... if a CPU is available, Turn on NAPI polling for that CPU. */
-	for_each_online_cpu(cpu) {
-		if (!cpu_test_and_set(cpu, core_state.cpu_state)) {
-			v = smp_call_function_single(cpu, cvm_oct_enable_napi,
-						     NULL, 0);
-			if (v)
-				panic("Can't enable NAPI.");
-			break;
-		}
-	}
-}
-
-static void cvm_oct_no_more_work(void)
-{
-	int cpu = smp_processor_id();
-
-	if (cpu == cvm_irq_cpu) {
-		enable_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group);
-		return;
-	}
-
-	cpu_clear(cpu, core_state.cpu_state);
-	atomic_add(1, &core_state.available_cores);
-}
+static struct napi_struct cvm_oct_napi;
 
 /**
  * cvm_oct_do_interrupt - interrupt handler.
@@ -132,8 +73,7 @@ static irqreturn_t cvm_oct_do_interrupt(int cpl, void *dev_id)
 {
 	/* Disable the IRQ and start napi_poll. */
 	disable_irq_nosync(OCTEON_IRQ_WORKQ0 + pow_receive_group);
-	cvm_irq_cpu = smp_processor_id();
-	cvm_oct_enable_napi(NULL);
+	napi_schedule(&cvm_oct_napi);
 
 	return IRQ_HANDLED;
 }
@@ -285,23 +225,6 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
 			cvmx_pow_work_request_async_nocheck(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT);
 			did_work_request = 1;
 		}
-
-#ifndef CONFIG_RPS
-		if (rx_count == 0) {
-			/*
-			 * First time through, see if there is enough
-			 * work waiting to merit waking another
-			 * CPU.
-			 */
-			union cvmx_pow_wq_int_cntx counts;
-			int backlog;
-			int cores_in_use = core_state.baseline_cores - atomic_read(&core_state.available_cores);
-			counts.u64 = cvmx_read_csr(CVMX_POW_WQ_INT_CNTX(pow_receive_group));
-			backlog = counts.s.iq_cnt + counts.s.ds_cnt;
-			if (backlog > budget * cores_in_use && napi != NULL)
-				cvm_oct_enable_one_cpu();
-		}
-#endif
 		rx_count++;
 
 		skb_in_hw = USE_SKBUFFS_IN_HW && work->word2.s.bufs == 1;
@@ -478,7 +401,7 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
 	if (rx_count < budget && napi != NULL) {
 		/* No more work */
 		napi_complete(napi);
-		cvm_oct_no_more_work();
+		enable_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group);
 	}
 	return rx_count;
 }
@@ -513,18 +436,10 @@ void cvm_oct_rx_initialize(void)
 	if (NULL == dev_for_napi)
 		panic("No net_devices were allocated.");
 
-	if (max_rx_cpus >= 1 && max_rx_cpus < num_online_cpus())
-		atomic_set(&core_state.available_cores, max_rx_cpus);
-	else
-		atomic_set(&core_state.available_cores, num_online_cpus());
-	core_state.baseline_cores = atomic_read(&core_state.available_cores);
-
-	core_state.cpu_state = CPU_MASK_NONE;
-	for_each_possible_cpu(i) {
-		netif_napi_add(dev_for_napi, &cvm_oct_napi[i].napi,
-			       cvm_oct_napi_poll, rx_napi_weight);
-		napi_enable(&cvm_oct_napi[i].napi);
-	}
+	netif_napi_add(dev_for_napi, &cvm_oct_napi, cvm_oct_napi_poll,
+		       rx_napi_weight);
+	napi_enable(&cvm_oct_napi);
+
 	/* Register an IRQ handler to receive POW interrupts */
 	i = request_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group,
 			cvm_oct_do_interrupt, 0, "Ethernet", cvm_oct_device);
@@ -545,15 +460,11 @@ void cvm_oct_rx_initialize(void)
 
 	int_pc.s.pc_thr = 5;
 	cvmx_write_csr(CVMX_POW_WQ_INT_PC, int_pc.u64);
-
-	/* Scheduld NAPI now. This will indirectly enable interrupts. */
-	cvm_oct_enable_one_cpu();
+	/* Schedule NAPI now. This will indirectly enable the interrupt. */
+	napi_schedule(&cvm_oct_napi);
 }
 
 void cvm_oct_rx_shutdown(void)
 {
-	int i;
-	/* Shutdown all of the NAPIs */
-	for_each_possible_cpu(i)
-		netif_napi_del(&cvm_oct_napi[i].napi);
+	netif_napi_del(&cvm_oct_napi);
 }
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index 47d4277..ed1bdea 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -98,12 +98,6 @@ MODULE_PARM_DESC(pow_send_list, "\n"
 	"\t\"eth2,spi3,spi7\" would cause these three devices to transmit\n"
 	"\tusing the pow_send_group.");
 
-int max_rx_cpus = -1;
-module_param(max_rx_cpus, int, 0444);
-MODULE_PARM_DESC(max_rx_cpus, "\n"
-	"\t\tThe maximum number of CPUs to use for packet reception.\n"
-	"\t\tUse -1 to use all available CPUs.");
-
 int rx_napi_weight = 32;
 module_param(rx_napi_weight, int, 0444);
 MODULE_PARM_DESC(rx_napi_weight, "The NAPI WEIGHT parameter.");
diff --git a/drivers/staging/octeon/octeon-ethernet.h b/drivers/staging/octeon/octeon-ethernet.h
index d0e3211..f48dc76 100644
--- a/drivers/staging/octeon/octeon-ethernet.h
+++ b/drivers/staging/octeon/octeon-ethernet.h
@@ -99,7 +99,6 @@ extern struct workqueue_struct *cvm_oct_poll_queue;
 extern atomic_t cvm_oct_poll_queue_stopping;
 extern u64 cvm_oct_tx_poll_interval;
 
-extern int max_rx_cpus;
 extern int rx_napi_weight;
 
 #endif
-- 
2.1.2
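
For reference, the receive model this patch converts to is the standard
single-instance NAPI pattern: the hard-IRQ handler masks the receive
interrupt and schedules one napi_struct, and the poll function unmasks
the interrupt only once a poll round completes under budget. Below is a
minimal generic sketch of that pattern; the my_* names and MY_RX_IRQ are
made up for illustration (they are not octeon driver symbols), and
netif_napi_add() is shown with the four-argument signature of this
kernel era:

#include <linux/interrupt.h>
#include <linux/netdevice.h>

#define MY_RX_IRQ 42	/* hypothetical IRQ line for this sketch */

static struct napi_struct my_napi;

/* Hypothetical hardware helper: pulls one frame, returns false when empty. */
static bool my_dev_rx_one_packet(void);

static irqreturn_t my_rx_interrupt(int irq, void *dev_id)
{
	/* Mask the RX interrupt and defer all work to softirq context. */
	disable_irq_nosync(irq);
	napi_schedule(&my_napi);
	return IRQ_HANDLED;
}

static int my_napi_poll(struct napi_struct *napi, int budget)
{
	int rx_count = 0;

	while (rx_count < budget && my_dev_rx_one_packet())
		rx_count++;

	if (rx_count < budget) {
		/* Queue drained: stop polling and unmask the interrupt. */
		napi_complete(napi);
		enable_irq(MY_RX_IRQ);
	}
	/* Returning the full budget keeps NAPI polling this context. */
	return rx_count;
}

static void my_rx_init(struct net_device *dev)
{
	netif_napi_add(dev, &my_napi, my_napi_poll, 64);
	napi_enable(&my_napi);
}

Because only one poll context services the receive group, packets from a
given queue are delivered in order; spreading the load across CPUs is
left to RPS (see Documentation/networking/scaling.txt).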