	if (start < elfcorebuf_sz) {
		u64 pfn;

-		tsz = elfcorebuf_sz - start;
-		if (size < tsz)
-			tsz = size;
+		tsz = min(elfcorebuf_sz - (size_t)start, size);
		pfn = __pa(elfcorebuf + start) >> PAGE_SHIFT;
		if (remap_pfn_range(vma, vma->vm_start, pfn, tsz,
				    vma->vm_page_prot))
			return -EAGAIN;
		size -= tsz;
		start += tsz;
		len += tsz;

		if (size == 0)
			return 0;
	}
	if (start < elfcorebuf_sz + elfnotes_sz) {
		void *kaddr;

-		tsz = elfcorebuf_sz + elfnotes_sz - start;
-		if (size < tsz)
-			tsz = size;
+		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size);
		kaddr = elfnotes_buf + start - elfcorebuf_sz;
		if (remap_vmalloc_range_partial(vma, vma->vm_start + len,
-						kaddr, tsz)) {
-			do_munmap(vma->vm_mm, vma->vm_start, len);
-			return -EAGAIN;
-		}
+						kaddr, tsz))
+			goto fail;
		size -= tsz;
		start += tsz;
		len += tsz;

		if (size == 0)
			return 0;
	}

	list_for_each_entry(m, &vmcore_list, list) {
		if (start < m->offset + m->size) {
			u64 paddr = 0;

-			tsz = m->offset + m->size - start;
-			if (size < tsz)
-				tsz = size;
+			tsz = min_t(size_t, m->offset + m->size - start, size);
			paddr = m->paddr + start - m->offset;
			if (remap_pfn_range(vma, vma->vm_start + len,
					    paddr >> PAGE_SHIFT, tsz,
-					    vma->vm_page_prot)) {
-				do_munmap(vma->vm_mm, vma->vm_start, len);
-				return -EAGAIN;
-			}
+					    vma->vm_page_prot))
+				goto fail;
			size -= tsz;
			start += tsz;
			len += tsz;

			if (size == 0)
				return 0;
		}
	}

	return 0;
+fail:
+	do_munmap(vma->vm_mm, vma->vm_start, len);
+	return -EAGAIN;
}

static const struct file_operations proc_vmcore_operations = {