[linux-next:master 5461/12695] drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c:125:5: warning: no previous prototype for 'kgd_arcturus_hqd_sdma_load'

tree:   https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git master
head:   1c8ce959b41a18b9657eaafd7a1215a8da67d3ab
commit: 5073506c7eff55b9599ba9d5b52bf45b86a5df4f [5461/12695] drm/amdkfd: add aldebaran kfd2kgd callbacks to kfd device (v2)
config: arm64-randconfig-r024-20210414 (attached as .config)
compiler: aarch64-linux-gcc (GCC) 9.3.0
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git/commit/?id=5073506c7eff55b9599ba9d5b52bf45b86a5df4f
        git remote add linux-next https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
        git fetch --no-tags linux-next master
        git checkout 5073506c7eff55b9599ba9d5b52bf45b86a5df4f
        # save the attached .config to linux build tree
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-9.3.0 make.cross W=1 ARCH=arm64 

If you fix the issue, kindly add the following tag as appropriate:
Reported-by: kernel test robot <lkp@xxxxxxxxx>

All warnings (new ones prefixed by >>):

>> drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c:125:5: warning: no previous prototype for 'kgd_arcturus_hqd_sdma_load' [-Wmissing-prototypes]
     125 | int kgd_arcturus_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
         |     ^~~~~~~~~~~~~~~~~~~~~~~~~~
>> drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c:195:5: warning: no previous prototype for 'kgd_arcturus_hqd_sdma_dump' [-Wmissing-prototypes]
     195 | int kgd_arcturus_hqd_sdma_dump(struct kgd_dev *kgd,
         |     ^~~~~~~~~~~~~~~~~~~~~~~~~~
>> drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c:227:6: warning: no previous prototype for 'kgd_arcturus_hqd_sdma_is_occupied' [-Wmissing-prototypes]
     227 | bool kgd_arcturus_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
         |      ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
>> drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c:246:5: warning: no previous prototype for 'kgd_arcturus_hqd_sdma_destroy' [-Wmissing-prototypes]
     246 | int kgd_arcturus_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
         |     ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~
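
The four functions flagged above are defined non-static but have no prior
declaration in scope, which is what -Wmissing-prototypes reports. A minimal
sketch of one way to address it, assuming these helpers are intended to be
shared (for example with the aldebaran kfd2kgd code this commit adds); the
header name and layout below are illustrative, not necessarily what the
tree actually uses:

        /* illustrative header, e.g. amdgpu_amdkfd_arcturus.h;
         * assumes <linux/types.h> and the usual __user annotation support
         * are available via the normal amdgpu includes.
         */
        struct kgd_dev;
        struct mm_struct;

        int kgd_arcturus_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
                                       uint32_t __user *wptr, struct mm_struct *mm);
        int kgd_arcturus_hqd_sdma_dump(struct kgd_dev *kgd,
                                       uint32_t engine_id, uint32_t queue_id,
                                       uint32_t (**dump)[2], uint32_t *n_regs);
        bool kgd_arcturus_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
        int kgd_arcturus_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
                                          unsigned int utimeout);

If the functions were instead meant to stay local to
amdgpu_amdkfd_arcturus.c, marking them static would silence the warning as
well.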


vim +/kgd_arcturus_hqd_sdma_load +125 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c

   124	
 > 125	int kgd_arcturus_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
   126				     uint32_t __user *wptr, struct mm_struct *mm)
   127	{
   128		struct amdgpu_device *adev = get_amdgpu_device(kgd);
   129		struct v9_sdma_mqd *m;
   130		uint32_t sdma_rlc_reg_offset;
   131		unsigned long end_jiffies;
   132		uint32_t data;
   133		uint64_t data64;
   134		uint64_t __user *wptr64 = (uint64_t __user *)wptr;
   135	
   136		m = get_sdma_mqd(mqd);
   137		sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
   138						    m->sdma_queue_id);
   139	
   140		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
   141			m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
   142	
   143		end_jiffies = msecs_to_jiffies(2000) + jiffies;
   144		while (true) {
   145			data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
   146			if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
   147				break;
   148			if (time_after(jiffies, end_jiffies)) {
   149				pr_err("SDMA RLC not idle in %s\n", __func__);
   150				return -ETIME;
   151			}
   152			usleep_range(500, 1000);
   153		}
   154	
   155		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL_OFFSET,
   156		       m->sdmax_rlcx_doorbell_offset);
   157	
   158		data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
   159				     ENABLE, 1);
   160		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
   161		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
   162					m->sdmax_rlcx_rb_rptr);
   163		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI,
   164					m->sdmax_rlcx_rb_rptr_hi);
   165	
   166		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
   167		if (read_user_wptr(mm, wptr64, data64)) {
   168			WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
   169			       lower_32_bits(data64));
   170			WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
   171			       upper_32_bits(data64));
   172		} else {
   173			WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
   174			       m->sdmax_rlcx_rb_rptr);
   175			WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
   176			       m->sdmax_rlcx_rb_rptr_hi);
   177		}
   178		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);
   179	
   180		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
   181		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
   182				m->sdmax_rlcx_rb_base_hi);
   183		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
   184				m->sdmax_rlcx_rb_rptr_addr_lo);
   185		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
   186				m->sdmax_rlcx_rb_rptr_addr_hi);
   187	
   188		data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
   189				     RB_ENABLE, 1);
   190		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);
   191	
   192		return 0;
   193	}
   194	
 > 195	int kgd_arcturus_hqd_sdma_dump(struct kgd_dev *kgd,
   196				     uint32_t engine_id, uint32_t queue_id,
   197				     uint32_t (**dump)[2], uint32_t *n_regs)
   198	{
   199		struct amdgpu_device *adev = get_amdgpu_device(kgd);
   200		uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev,
   201				engine_id, queue_id);
   202		uint32_t i = 0, reg;
   203	#undef HQD_N_REGS
   204	#define HQD_N_REGS (19+6+7+10)
   205	
   206		*dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
   207		if (*dump == NULL)
   208			return -ENOMEM;
   209	
   210		for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
   211			DUMP_REG(sdma_rlc_reg_offset + reg);
   212		for (reg = mmSDMA0_RLC0_STATUS; reg <= mmSDMA0_RLC0_CSA_ADDR_HI; reg++)
   213			DUMP_REG(sdma_rlc_reg_offset + reg);
   214		for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN;
   215		     reg <= mmSDMA0_RLC0_MINOR_PTR_UPDATE; reg++)
   216			DUMP_REG(sdma_rlc_reg_offset + reg);
   217		for (reg = mmSDMA0_RLC0_MIDCMD_DATA0;
   218		     reg <= mmSDMA0_RLC0_MIDCMD_CNTL; reg++)
   219			DUMP_REG(sdma_rlc_reg_offset + reg);
   220	
   221		WARN_ON_ONCE(i != HQD_N_REGS);
   222		*n_regs = i;
   223	
   224		return 0;
   225	}
   226	
 > 227	bool kgd_arcturus_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
   228	{
   229		struct amdgpu_device *adev = get_amdgpu_device(kgd);
   230		struct v9_sdma_mqd *m;
   231		uint32_t sdma_rlc_reg_offset;
   232		uint32_t sdma_rlc_rb_cntl;
   233	
   234		m = get_sdma_mqd(mqd);
   235		sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
   236						    m->sdma_queue_id);
   237	
   238		sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
   239	
   240		if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
   241			return true;
   242	
   243		return false;
   244	}
   245	
 > 246	int kgd_arcturus_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
   247					unsigned int utimeout)
   248	{
   249		struct amdgpu_device *adev = get_amdgpu_device(kgd);
   250		struct v9_sdma_mqd *m;
   251		uint32_t sdma_rlc_reg_offset;
   252		uint32_t temp;
   253		unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
   254	
   255		m = get_sdma_mqd(mqd);
   256		sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
   257						    m->sdma_queue_id);
   258	
   259		temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
   260		temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
   261		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);
   262	
   263		while (true) {
   264			temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
   265			if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
   266				break;
   267			if (time_after(jiffies, end_jiffies)) {
   268				pr_err("SDMA RLC not idle in %s\n", __func__);
   269				return -ETIME;
   270			}
   271			usleep_range(500, 1000);
   272		}
   273	
   274		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
   275		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
   276			RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
   277			SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
   278	
   279		m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);
   280		m->sdmax_rlcx_rb_rptr_hi =
   281			RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI);
   282	
   283		return 0;
   284	}
   285	
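
With prototypes along the lines of the sketch above in place, the warning
should disappear once amdgpu_amdkfd_arcturus.c pulls them in ahead of these
definitions; the include below uses the hypothetical header name from that
sketch:

        /* near the top of amdgpu_amdkfd_arcturus.c, with the other includes */
        #include "amdgpu_amdkfd_arcturus.h"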

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@xxxxxxxxxxxx

Attachment: .config.gz
Description: application/gzip

