Re: [PATCH 1/1] scsi_debug: randomize command duration option + %p

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



Hi Douglas,

Thank you for the patch! Here is something to improve:

[auto build test ERROR on scsi/for-next]
[cannot apply to v5.3 next-20190925]
[if your patch is applied to the wrong git tree, please drop us a note to help
improve the system. By the way, we also suggest using the '--base' option to
specify the base tree in git format-patch; please see https://stackoverflow.com/a/37406982]

url:    https://github.com/0day-ci/linux/commits/Douglas-Gilbert/scsi_debug-randomize-command-duration-option-p/20190927-094954
base:   https://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi.git for-next
config: i386-randconfig-f004-201938 (attached as .config)
compiler: gcc-7 (Debian 7.4.0-13) 7.4.0
reproduce:
        # save the attached .config to linux build tree
        make ARCH=i386 

If you fix the issue, kindly add the following tag
Reported-by: kbuild test robot <lkp@xxxxxxxxx>

All errors (new ones prefixed by >>):

   ld: drivers/scsi/scsi_debug.o: in function `schedule_resp':
>> drivers/scsi/scsi_debug.c:4365: undefined reference to `__udivdi3'

vim +4365 drivers/scsi/scsi_debug.c

  4251	
  4252	/* Complete the processing of the thread that queued a SCSI command to this
  4253	 * driver. It either completes the command by calling cmnd_done() or
  4254	 * schedules a hr timer or work queue then returns 0. Returns
  4255	 * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
  4256	 */
  4257	static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
  4258				 int scsi_result,
  4259				 int (*pfp)(struct scsi_cmnd *,
  4260					    struct sdebug_dev_info *),
  4261				 int delta_jiff, int ndelay)
  4262	{
  4263		unsigned long iflags;
  4264		int k, num_in_q, qdepth, inject;
  4265		struct sdebug_queue *sqp;
  4266		struct sdebug_queued_cmd *sqcp;
  4267		struct scsi_device *sdp;
  4268		struct sdebug_defer *sd_dp;
  4269	
  4270		if (unlikely(devip == NULL)) {
  4271			if (scsi_result == 0)
  4272				scsi_result = DID_NO_CONNECT << 16;
  4273			goto respond_in_thread;
  4274		}
  4275		sdp = cmnd->device;
  4276	
  4277		if (delta_jiff == 0)
  4278			goto respond_in_thread;
  4279	
  4280		/* schedule the response at a later time if resources permit */
  4281		sqp = get_queue(cmnd);
  4282		spin_lock_irqsave(&sqp->qc_lock, iflags);
  4283		if (unlikely(atomic_read(&sqp->blocked))) {
  4284			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
  4285			return SCSI_MLQUEUE_HOST_BUSY;
  4286		}
  4287		num_in_q = atomic_read(&devip->num_in_q);
  4288		qdepth = cmnd->device->queue_depth;
  4289		inject = 0;
  4290		if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
  4291			if (scsi_result) {
  4292				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
  4293				goto respond_in_thread;
  4294			} else
  4295				scsi_result = device_qfull_result;
  4296		} else if (unlikely(sdebug_every_nth &&
  4297				    (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
  4298				    (scsi_result == 0))) {
  4299			if ((num_in_q == (qdepth - 1)) &&
  4300			    (atomic_inc_return(&sdebug_a_tsf) >=
  4301			     abs(sdebug_every_nth))) {
  4302				atomic_set(&sdebug_a_tsf, 0);
  4303				inject = 1;
  4304				scsi_result = device_qfull_result;
  4305			}
  4306		}
  4307	
  4308		k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
  4309		if (unlikely(k >= sdebug_max_queue)) {
  4310			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
  4311			if (scsi_result)
  4312				goto respond_in_thread;
  4313			else if (SDEBUG_OPT_ALL_TSF & sdebug_opts)
  4314				scsi_result = device_qfull_result;
  4315			if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
  4316				sdev_printk(KERN_INFO, sdp,
  4317					    "%s: max_queue=%d exceeded, %s\n",
  4318					    __func__, sdebug_max_queue,
  4319					    (scsi_result ?  "status: TASK SET FULL" :
  4320							    "report: host busy"));
  4321			if (scsi_result)
  4322				goto respond_in_thread;
  4323			else
  4324				return SCSI_MLQUEUE_HOST_BUSY;
  4325		}
  4326		__set_bit(k, sqp->in_use_bm);
  4327		atomic_inc(&devip->num_in_q);
  4328		sqcp = &sqp->qc_arr[k];
  4329		sqcp->a_cmnd = cmnd;
  4330		cmnd->host_scribble = (unsigned char *)sqcp;
  4331		sd_dp = sqcp->sd_dp;
  4332		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
  4333		if (unlikely(sdebug_every_nth && sdebug_any_injecting_opt))
  4334			setup_inject(sqp, sqcp);
  4335		if (sd_dp == NULL) {
  4336			sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
  4337			if (sd_dp == NULL)
  4338				return SCSI_MLQUEUE_HOST_BUSY;
  4339		}
  4340	
  4341		cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
  4342		if (cmnd->result & SDEG_RES_IMMED_MASK) {
  4343			/*
  4344			 * This is the F_DELAY_OVERR case. No delay.
  4345			 */
  4346			cmnd->result &= ~SDEG_RES_IMMED_MASK;
  4347			delta_jiff = ndelay = 0;
  4348		}
  4349		if (cmnd->result == 0 && scsi_result != 0)
  4350			cmnd->result = scsi_result;
  4351	
  4352		if (unlikely(sdebug_verbose && cmnd->result))
  4353			sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
  4354				    __func__, cmnd->result);
  4355	
  4356		if (delta_jiff > 0 || ndelay > 0) {
  4357			ktime_t kt;
  4358	
  4359			if (delta_jiff > 0) {
  4360				u64 ns = (u64)delta_jiff * (NSEC_PER_SEC / HZ);
  4361	
  4362				if (sdebug_random && ns < U32_MAX) {
  4363					ns = prandom_u32_max((u32)ns);
  4364				} else if (sdebug_random) {
> 4365					ns /= 1000;
  4366					if (ns < U32_MAX)  /* an hour and a bit */
  4367						ns = prandom_u32_max((u32)ns);
  4368					ns *= 1000;
  4369				}
  4370				kt = ns_to_ktime(ns);
  4371			} else {
  4372				kt = sdebug_random ? prandom_u32_max((u32)ndelay) :
  4373						     (u32)ndelay;
  4374			}
  4375			if (!sd_dp->init_hrt) {
  4376				sd_dp->init_hrt = true;
  4377				sqcp->sd_dp = sd_dp;
  4378				hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
  4379					     HRTIMER_MODE_REL_PINNED);
  4380				sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
  4381				sd_dp->sqa_idx = sqp - sdebug_q_arr;
  4382				sd_dp->qc_idx = k;
  4383			}
  4384			if (sdebug_statistics)
  4385				sd_dp->issuing_cpu = raw_smp_processor_id();
  4386			sd_dp->defer_t = SDEB_DEFER_HRT;
  4387			hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
  4388		} else {	/* jdelay < 0, use work queue */
  4389			if (!sd_dp->init_wq) {
  4390				sd_dp->init_wq = true;
  4391				sqcp->sd_dp = sd_dp;
  4392				sd_dp->sqa_idx = sqp - sdebug_q_arr;
  4393				sd_dp->qc_idx = k;
  4394				INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
  4395			}
  4396			if (sdebug_statistics)
  4397				sd_dp->issuing_cpu = raw_smp_processor_id();
  4398			sd_dp->defer_t = SDEB_DEFER_WQ;
  4399			if (unlikely(sqcp->inj_cmd_abort))
  4400				sd_dp->aborted = true;
  4401			schedule_work(&sd_dp->ew.work);
  4402			if (unlikely(sqcp->inj_cmd_abort)) {
  4403				sdev_printk(KERN_INFO, sdp, "abort request tag %d\n",
  4404					    cmnd->request->tag);
  4405				blk_abort_request(cmnd->request);
  4406			}
  4407		}
  4408		if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) &&
  4409			     (scsi_result == device_qfull_result)))
  4410			sdev_printk(KERN_INFO, sdp,
  4411				    "%s: num_in_q=%d +1, %s%s\n", __func__,
  4412				    num_in_q, (inject ? "<inject> " : ""),
  4413				    "status: TASK SET FULL");
  4414		return 0;
  4415	

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation

Attachment: .config.gz
Description: application/gzip


[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[Index of Archives]     [SCSI Target Devel]     [Linux SCSI Target Infrastructure]     [Kernel Newbies]     [IDE]     [Security]     [Git]     [Netfilter]     [Bugtraq]     [Yosemite News]     [MIPS Linux]     [ARM Linux]     [Linux Security]     [Linux RAID]     [Linux ATA RAID]     [Linux IIO]     [Samba]     [Device Mapper]

  Powered by Linux