[pm:idle-tick 6/7] drivers/cpuidle//governors/menu.c:317:36: error: too many arguments to function 'tick_nohz_get_sleep_length'

tree:   https://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git idle-tick
head:   40331e907dcafa83382cdda585d9bf5a9e6b8a67
commit: e5233f0d22c78a47563c9c49d2ee4fe836772899 [6/7] cpuidle: menu: Refine idle state selection for running tick
config: x86_64-randconfig-h0-03200551 (attached as .config)
compiler: gcc-7 (Debian 7.3.0-1) 7.3.0
reproduce:
        git checkout e5233f0d22c78a47563c9c49d2ee4fe836772899
        # save the attached .config to linux build tree
        make ARCH=x86_64 

All errors (new ones prefixed by >>):

   drivers/cpuidle//governors/menu.c: In function 'menu_select':
>> drivers/cpuidle//governors/menu.c:317:36: error: too many arguments to function 'tick_nohz_get_sleep_length'
     data->next_timer_us = ktime_to_us(tick_nohz_get_sleep_length(&tick_time));
                                       ^~~~~~~~~~~~~~~~~~~~~~~~~~
   In file included from drivers/cpuidle//governors/menu.c:19:0:
   include/linux/tick.h:145:23: note: declared here
    static inline ktime_t tick_nohz_get_sleep_length(void)
                          ^~~~~~~~~~~~~~~~~~~~~~~~~~

vim +/tick_nohz_get_sleep_length +317 drivers/cpuidle//governors/menu.c

   279	
   280	/**
   281	 * menu_select - selects the next idle state to enter
   282	 * @drv: cpuidle driver containing state data
   283	 * @dev: the CPU
   284	 * @nohz_ret: indication on whether or not to stop the tick
   285	 */
   286	static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
   287			       bool *nohz_ret)
   288	{
   289		struct menu_device *data = this_cpu_ptr(&menu_devices);
   290		struct device *device = get_cpu_device(dev->cpu);
   291		int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
   292		int i;
   293		int first_idx;
   294		int idx;
   295		unsigned int interactivity_req;
   296		unsigned int expected_interval;
   297		unsigned long nr_iowaiters, cpu_load;
   298		int resume_latency = dev_pm_qos_raw_read_value(device);
   299		ktime_t tick_time;
   300	
   301		if (data->needs_update) {
   302			menu_update(drv, dev);
   303			data->needs_update = 0;
   304		}
   305	
   306		if (resume_latency < latency_req &&
   307		    resume_latency != PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
   308			latency_req = resume_latency;
   309	
   310		/* Special case when user has set very strict latency requirement */
   311		if (unlikely(latency_req == 0)) {
   312			*nohz_ret = false;
   313			return 0;
   314		}
   315	
   316		/* determine the expected residency time, round up */
 > 317		data->next_timer_us = ktime_to_us(tick_nohz_get_sleep_length(&tick_time));
   318	
   319		get_iowait_load(&nr_iowaiters, &cpu_load);
   320		data->bucket = which_bucket(data->next_timer_us, nr_iowaiters);
   321	
   322		/*
   323		 * Force the result of multiplication to be 64 bits even if both
   324		 * operands are 32 bits.
   325		 * Make sure to round up for half microseconds.
   326		 */
   327		data->predicted_us = DIV_ROUND_CLOSEST_ULL((uint64_t)data->next_timer_us *
   328						 data->correction_factor[data->bucket],
   329						 RESOLUTION * DECAY);
   330	
   331		expected_interval = get_typical_interval(data);
   332		expected_interval = min(expected_interval, data->next_timer_us);
   333	
   334		first_idx = 0;
   335		if (drv->states[0].flags & CPUIDLE_FLAG_POLLING) {
   336			struct cpuidle_state *s = &drv->states[1];
   337			unsigned int polling_threshold;
   338	
   339			/*
   340			 * We want to default to C1 (hlt), not to busy polling
   341			 * unless the timer is happening really really soon, or
   342			 * C1's exit latency exceeds the user configured limit.
   343			 */
   344			polling_threshold = max_t(unsigned int, 20, s->target_residency);
   345			if (data->next_timer_us > polling_threshold &&
   346			    latency_req > s->exit_latency && !s->disabled &&
   347			    !dev->states_usage[1].disable)
   348				first_idx = 1;
   349		}
   350	
   351		/*
   352		 * Use the lowest expected idle interval to pick the idle state.
   353		 */
   354		data->predicted_us = min(data->predicted_us, expected_interval);
   355	
   356		/*
   357		 * Use the performance multiplier and the user-configurable
   358		 * latency_req to determine the maximum exit latency.
   359		 */
   360		interactivity_req = data->predicted_us / performance_multiplier(nr_iowaiters, cpu_load);
   361		if (latency_req > interactivity_req)
   362			latency_req = interactivity_req;
   363	
   364		expected_interval = TICK_USEC_HZ;
   365		/*
   366		 * Find the idle state with the lowest power while satisfying
   367		 * our constraints.
   368		 */
   369		idx = -1;
   370		for (i = first_idx; i < drv->state_count; i++) {
   371			struct cpuidle_state *s = &drv->states[i];
   372			struct cpuidle_state_usage *su = &dev->states_usage[i];
   373	
   374			if (s->disabled || su->disable)
   375				continue;
   376			if (idx == -1)
   377				idx = i; /* first enabled state */
   378			if (s->target_residency > data->predicted_us) {
   379				/*
   380				 * Retain the tick if the selected state is shallower
   381				 * than the deepest available one with target residency
   382				 * within the tick period range.
   383				 *
   384				 * This allows the tick to be stopped even if the
   385				 * predicted idle duration is within the tick period
   386				 * range to counter the effect by which the prediction
   387				 * may be skewed towards lower values due to the tick
   388				 * bias.
   389				 */
   390				expected_interval = s->target_residency;
   391				break;
   392			}
   393			if (s->exit_latency > latency_req) {
   394				/*
   395				 * If we break out of the loop for latency reasons,
   396				 * retain the tick unless the target residency of the
   397				 * selected state is too high.
   398				 */
   399				expected_interval = drv->states[idx].target_residency;
   400				break;
   401			}
   402			idx = i;
   403		}
   404	
   405		if (idx == -1)
   406			idx = 0; /* No states enabled. Must use 0. */
   407	
   408		/*
   409		 * Don't stop the tick if the selected state is a polling one or it is
   410		 * not deep enough.
   411		 */
   412		if (drv->states[idx].flags & CPUIDLE_FLAG_POLLING) {
   413			*nohz_ret = false;
   414		} else if (expected_interval < TICK_USEC_HZ) {
   415			*nohz_ret = false;
   416	
   417			if (!tick_nohz_tick_stopped()) {
   418				unsigned int tick_us = ktime_to_us(tick_time);
   419	
   420				/*
   421				 * Because the tick is not going to be stopped, make
   422				 * sure that the target residency of the state to be
   423				 * returned is within the time to the next timer event
   424				 * including the tick.
   425				 */
   426				while (idx > 0 &&
   427				    (drv->states[idx].target_residency > tick_us ||
   428				     drv->states[idx].disabled ||
   429				     dev->states_usage[idx].disable))
   430					idx--;
   431			}
   432		}
   433	
   434		data->last_state_idx = idx;
   435	
   436		return data->last_state_idx;
   437	}
   438	
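
The failure looks like a stub mismatch rather than a problem in menu_select() itself: the
call at line 317 now passes a ktime_t pointer, while the !CONFIG_NO_HZ_COMMON version of
tick_nohz_get_sleep_length() declared at include/linux/tick.h:145 still has the old
no-argument prototype, so the randconfig (which apparently builds with the periodic tick)
breaks. A minimal sketch of one way to keep that stub in step with the new signature --
illustrative only, assuming the periodic-tick case should report one tick period both as
the return value and through the pointer:

/* include/linux/tick.h, !CONFIG_NO_HZ_COMMON branch -- sketch, not the actual fix */
static inline ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next)
{
	/* With the tick always running, the next event is at most one tick away. */
	*delta_next = TICK_NSEC;
	return *delta_next;
}

With a stub along those lines, ktime_to_us(tick_nohz_get_sleep_length(&tick_time)) at
line 317 evaluates to the tick period on periodic-tick kernels, which is also what the
tick_us fallback loop near the end of menu_select() expects.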

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation

Attachment: .config.gz
Description: application/gzip

