Merge tag 'mm-nonmm-stable-2022-05-26' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull misc updates from Andrew Morton:
 "The non-MM patch queue for this merge window.

  Not a lot of material this cycle. Many singleton patches against
  various subsystems. Most notably some maintenance work in ocfs2
  and initramfs"

* tag 'mm-nonmm-stable-2022-05-26' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (65 commits)
  kcov: update pos before writing pc in trace function
  ocfs2: dlmfs: fix error handling of user_dlm_destroy_lock
  ocfs2: dlmfs: don't clear USER_LOCK_ATTACHED when destroying lock
  fs/ntfs: remove redundant variable idx
  fat: remove time truncations in vfat_create/vfat_mkdir
  fat: report creation time in statx
  fat: ignore ctime updates, and keep ctime identical to mtime in memory
  fat: split fat_truncate_time() into separate functions
  MAINTAINERS: add Muchun as a memcg reviewer
  proc/sysctl: make protected_* world readable
  ia64: mca: drop redundant spinlock initialization
  tty: fix deadlock caused by calling printk() under tty_port->lock
  relay: remove redundant assignment to pointer buf
  fs/ntfs3: validate BOOT sectors_per_clusters
  lib/string_helpers: fix not adding strarray to device's resource list
  kernel/crash_core.c: remove redundant check of ck_cmdline
  ELF, uapi: fixup ELF_ST_TYPE definition
  ipc/mqueue: use get_tree_nodev() in mqueue_get_tree()
  ipc: update semtimedop() to use hrtimer
  ipc/sem: remove redundant assignments
  ...
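Most of the diff below is mechanical: every architecture's copy_oldmem_page() drops its (buf, userbuf) pair in favour of a struct iov_iter destination, so the per-arch copy_to_user()/memcpy() branching collapses into a single copy_to_iter() call. A minimal sketch of the converged shape, condensed from the hunks below (illustrative only, not a literal file from the tree):

#include <linux/crash_dump.h>
#include <linux/highmem.h>
#include <linux/uio.h>

/*
 * Old shape, removed by these hunks:
 *   ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
 *                            size_t csize, unsigned long offset, int userbuf);
 *
 * New shape: the destination is described by an iov_iter, so the
 * user-vs-kernel distinction is handled inside copy_to_iter().
 */
ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
                         size_t csize, unsigned long offset)
{
        void *vaddr;

        if (!csize)
                return 0;

        /* kmap_local_pfn() is only one of the mapping primitives used below;
         * other architectures use memremap()/ioremap() variants instead. */
        vaddr = kmap_local_pfn(pfn);
        csize = copy_to_iter(vaddr + offset, csize, iter);
        kunmap_local(vaddr);

        return csize;
}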
@@ -5056,6 +5056,7 @@ M: Johannes Weiner <hannes@cmpxchg.org>
M: Michal Hocko <mhocko@kernel.org>
M: Roman Gushchin <roman.gushchin@linux.dev>
M: Shakeel Butt <shakeelb@google.com>
R: Muchun Song <songmuchun@bytedance.com>
L: cgroups@vger.kernel.org
L: linux-mm@kvack.org
S: Maintained

@@ -16097,7 +16098,6 @@ F: include/asm-generic/syscall.h
F: include/linux/ptrace.h
F: include/linux/regset.h
F: include/uapi/linux/ptrace.h
F: include/uapi/linux/ptrace.h
F: kernel/ptrace.c

PULSE8-CEC DRIVER

@@ -353,7 +353,6 @@ csum_and_copy_from_user(const void __user *src, void *dst, int len)
                return 0;
        return __csum_and_copy(src, dst, len);
}
EXPORT_SYMBOL(csum_and_copy_from_user);

__wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len)

@@ -14,22 +14,10 @@
#include <linux/crash_dump.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/uio.h>

/**
 * copy_oldmem_page() - copy one page from old kernel memory
 * @pfn: page frame number to be copied
 * @buf: buffer where the copied page is placed
 * @csize: number of bytes to copy
 * @offset: offset in bytes into the page
 * @userbuf: if set, @buf is int he user address space
 *
 * This function copies one page from old kernel memory into buffer pointed by
 * @buf. If @buf is in userspace, set @userbuf to %1. Returns number of bytes
 * copied or negative error in case of failure.
 */
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
                         size_t csize, unsigned long offset,
                         int userbuf)
ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
                         size_t csize, unsigned long offset)
{
        void *vaddr;

@@ -40,14 +28,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
        if (!vaddr)
                return -ENOMEM;

        if (userbuf) {
                if (copy_to_user(buf, vaddr + offset, csize)) {
                        iounmap(vaddr);
                        return -EFAULT;
                }
        } else {
                memcpy(buf, vaddr + offset, csize);
        }
        csize = copy_to_iter(vaddr + offset, csize, iter);

        iounmap(vaddr);
        return csize;
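For context, the caller side lives in fs/proc/vmcore.c, which now hands copy_oldmem_page() an iterator instead of a raw pointer plus userbuf flag. A sketch of how a user-space destination would be wrapped (the helper name here is hypothetical; iov_iter_init() and copy_oldmem_page() are the interfaces shown in this diff):

#include <linux/crash_dump.h>
#include <linux/uio.h>

/* Hypothetical wrapper: read one old-kernel page into a user buffer by
 * describing that buffer as a single-segment iov_iter. */
static ssize_t read_one_oldmem_page(char __user *ubuf, size_t len,
                                    unsigned long pfn, unsigned long offset)
{
        struct iovec iov = { .iov_base = ubuf, .iov_len = len };
        struct iov_iter iter;

        iov_iter_init(&iter, READ, &iov, 1, len);
        return copy_oldmem_page(&iter, pfn, len, offset);
}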
@@ -9,25 +9,11 @@
#include <linux/crash_dump.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/uaccess.h>
#include <linux/uio.h>
#include <asm/memory.h>

/**
 * copy_oldmem_page() - copy one page from old kernel memory
 * @pfn: page frame number to be copied
 * @buf: buffer where the copied page is placed
 * @csize: number of bytes to copy
 * @offset: offset in bytes into the page
 * @userbuf: if set, @buf is in a user address space
 *
 * This function copies one page from old kernel memory into buffer pointed by
 * @buf. If @buf is in userspace, set @userbuf to %1. Returns number of bytes
 * copied or negative error in case of failure.
 */
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
                         size_t csize, unsigned long offset,
                         int userbuf)
ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
                         size_t csize, unsigned long offset)
{
        void *vaddr;

@@ -38,14 +24,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
        if (!vaddr)
                return -ENOMEM;

        if (userbuf) {
                if (copy_to_user((char __user *)buf, vaddr + offset, csize)) {
                        memunmap(vaddr);
                        return -EFAULT;
                }
        } else {
                memcpy(buf, vaddr + offset, csize);
        }
        csize = copy_to_iter(vaddr + offset, csize, iter);

        memunmap(vaddr);

@@ -10,42 +10,18 @@
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/crash_dump.h>

#include <linux/uio.h>
#include <asm/page.h>
#include <linux/uaccess.h>

/**
 * copy_oldmem_page - copy one page from "oldmem"
 * @pfn: page frame number to be copied
 * @buf: target memory address for the copy; this can be in kernel address
 *       space or user address space (see @userbuf)
 * @csize: number of bytes to copy
 * @offset: offset in bytes into the page (based on pfn) to begin the copy
 * @userbuf: if set, @buf is in user address space, use copy_to_user(),
 *       otherwise @buf is in kernel address space, use memcpy().
 *
 * Copy a page from "oldmem". For this page, there is no pte mapped
 * in the current kernel. We stitch up a pte, similar to kmap_atomic.
 *
 * Calling copy_to_user() in atomic context is not desirable. Hence first
 * copying the data to a pre-allocated kernel page and then copying to user
 * space in non-atomic context.
 */
ssize_t
copy_oldmem_page(unsigned long pfn, char *buf,
                size_t csize, unsigned long offset, int userbuf)
ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
                size_t csize, unsigned long offset)
{
        void *vaddr;

        if (!csize)
                return 0;
        vaddr = __va(pfn<<PAGE_SHIFT);
        if (userbuf) {
                if (copy_to_user(buf, (vaddr + offset), csize)) {
                        return -EFAULT;
                }
        } else
                memcpy(buf, (vaddr + offset), csize);
        csize = copy_to_iter(vaddr + offset, csize, iter);
        return csize;
}

@@ -29,38 +29,38 @@ struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};

enum instruction_type {A, I, M, F, B, L, X, u};
static enum instruction_type bundle_encoding[32][3] = {
        { M, I, I }, /* 00 */
        { M, I, I }, /* 01 */
        { M, I, I }, /* 02 */
        { M, I, I }, /* 03 */
        { M, L, X }, /* 04 */
        { M, L, X }, /* 05 */
        { u, u, u }, /* 06 */
        { u, u, u }, /* 07 */
        { M, M, I }, /* 08 */
        { M, M, I }, /* 09 */
        { M, M, I }, /* 0A */
        { M, M, I }, /* 0B */
        { M, F, I }, /* 0C */
        { M, F, I }, /* 0D */
        { M, M, F }, /* 0E */
        { M, M, F }, /* 0F */
        { M, I, B }, /* 10 */
        { M, I, B }, /* 11 */
        { M, B, B }, /* 12 */
        { M, B, B }, /* 13 */
        { u, u, u }, /* 14 */
        { u, u, u }, /* 15 */
        { B, B, B }, /* 16 */
        { B, B, B }, /* 17 */
        { M, M, B }, /* 18 */
        { M, M, B }, /* 19 */
        { u, u, u }, /* 1A */
        { u, u, u }, /* 1B */
        { M, F, B }, /* 1C */
        { M, F, B }, /* 1D */
        { u, u, u }, /* 1E */
        { u, u, u }, /* 1F */
        [0x00] = { M, I, I },
        [0x01] = { M, I, I },
        [0x02] = { M, I, I },
        [0x03] = { M, I, I },
        [0x04] = { M, L, X },
        [0x05] = { M, L, X },
        [0x06] = { u, u, u },
        [0x07] = { u, u, u },
        [0x08] = { M, M, I },
        [0x09] = { M, M, I },
        [0x0A] = { M, M, I },
        [0x0B] = { M, M, I },
        [0x0C] = { M, F, I },
        [0x0D] = { M, F, I },
        [0x0E] = { M, M, F },
        [0x0F] = { M, M, F },
        [0x10] = { M, I, B },
        [0x11] = { M, I, B },
        [0x12] = { M, B, B },
        [0x13] = { M, B, B },
        [0x14] = { u, u, u },
        [0x15] = { u, u, u },
        [0x16] = { B, B, B },
        [0x17] = { B, B, B },
        [0x18] = { M, M, B },
        [0x19] = { M, M, B },
        [0x1A] = { u, u, u },
        [0x1B] = { u, u, u },
        [0x1C] = { M, F, B },
        [0x1D] = { M, F, B },
        [0x1E] = { u, u, u },
        [0x1F] = { u, u, u },
};

/* Insert a long branch code */
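The kprobes hunk above converts bundle_encoding from positional initializers, where the slot index lives only in comments, to C99 designated initializers, which make each index explicit and resilient to reordering. A tiny self-contained illustration of the construct (names made up for the example):

enum demo_kind { KIND_DEFAULT, KIND_A, KIND_B };

/* Unlisted indices are zero-initialized (KIND_DEFAULT here), which is why
 * the kernel table above still spells out all 32 entries explicitly. */
static const enum demo_kind demo_table[4] = {
        [0x0] = KIND_A,
        [0x1] = KIND_B,
        [0x3] = KIND_A,
};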
@@ -290,7 +290,6 @@ static void ia64_mlogbuf_finish(int wait)
{
        BREAK_LOGLEVEL(console_loglevel);

        spin_lock_init(&mlogbuf_rlock);
        ia64_mlogbuf_dump();
        printk(KERN_EMERG "mlogbuf_finish: printing switched to urgent mode, "
                "MCA/INIT might be dodgy or fail.\n");

@@ -120,7 +120,7 @@ static const char *mem_attrib[]={
 * Input:
 *      - a pointer to a buffer to hold the string
 *      - a 64-bit vector
 * Ouput:
 * Output:
 *      - a pointer to the end of the buffer
 *
 */

@@ -2025,7 +2025,7 @@ static void syscall_get_args_cb(struct unw_frame_info *info, void *data)
 *      - epsinstruction: cfm is set by br.call
 *        locals don't exist.
 *
 * For both cases argguments are reachable in cfm.sof - cfm.sol.
 * For both cases arguments are reachable in cfm.sof - cfm.sol.
 * CFM: [ ... | sor: 17..14 | sol : 13..7 | sof : 6..0 ]
 */
        cfm = pt->cr_ifs;

@@ -309,7 +309,7 @@ handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr)
        /*
         * Lower 4 bits are used as a count. Upper bits are a sequence
         * number that is updated when count is reset. The cmpxchg will
         * fail is seqno has changed. This minimizes mutiple cpus
         * fail is seqno has changed. This minimizes multiple cpus
         * resetting the count.
         */
        if (current_jiffies > last.time)

@@ -449,7 +449,7 @@ mem_init (void)
        memblock_free_all();

/*
 * For fsyscall entrpoints with no light-weight handler, use the ordinary
 * For fsyscall entrypoints with no light-weight handler, use the ordinary
 * (heavy-weight) handler, but mark it by setting bit 0, so the fsyscall entry
 * code can tell them apart.
 */

@@ -174,7 +174,7 @@ __setup("nptcg=", set_nptcg);
 * override table (in which case we should ignore the value from
 * PAL_VM_SUMMARY).
 *
 * Kernel parameter "nptcg=" overrides maximum number of simultanesous ptc.g
 * Kernel parameter "nptcg=" overrides maximum number of simultaneous ptc.g
 * purges defined in either PAL_VM_SUMMARY or PAL override table. In this case,
 * we should ignore the value from either PAL_VM_SUMMARY or PAL override table.
 *

@@ -516,7 +516,7 @@ found:
        if (i >= per_cpu(ia64_tr_num, cpu))
                return -EBUSY;

        /*Record tr info for mca hander use!*/
        /*Record tr info for mca handler use!*/
        if (i > per_cpu(ia64_tr_used, cpu))
                per_cpu(ia64_tr_used, cpu) = i;

@@ -265,8 +265,6 @@ csum_and_copy_from_user(const void __user *src, void *dst, int len)
        return sum;
}

EXPORT_SYMBOL(csum_and_copy_from_user);

/*
 * copy from kernel space while checksumming, otherwise like csum_partial

@@ -1,22 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/highmem.h>
#include <linux/crash_dump.h>
#include <linux/uio.h>

/**
 * copy_oldmem_page - copy one page from "oldmem"
 * @pfn: page frame number to be copied
 * @buf: target memory address for the copy; this can be in kernel address
 *       space or user address space (see @userbuf)
 * @csize: number of bytes to copy
 * @offset: offset in bytes into the page (based on pfn) to begin the copy
 * @userbuf: if set, @buf is in user address space, use copy_to_user(),
 *       otherwise @buf is in kernel address space, use memcpy().
 *
 * Copy a page from "oldmem". For this page, there is no pte mapped
 * in the current kernel.
 */
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
                         size_t csize, unsigned long offset, int userbuf)
ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
                         size_t csize, unsigned long offset)
{
        void *vaddr;

@@ -24,14 +12,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
                return 0;

        vaddr = kmap_local_pfn(pfn);

        if (!userbuf) {
                memcpy(buf, vaddr + offset, csize);
        } else {
                if (copy_to_user(buf, vaddr + offset, csize))
                        csize = -EFAULT;
        }

        csize = copy_to_iter(vaddr + offset, csize, iter);
        kunmap_local(vaddr);

        return csize;

@@ -16,7 +16,7 @@
#include <asm/kdump.h>
#include <asm/prom.h>
#include <asm/firmware.h>
#include <linux/uaccess.h>
#include <linux/uio.h>
#include <asm/rtas.h>
#include <asm/inst.h>

@@ -68,33 +68,8 @@ void __init setup_kdump_trampoline(void)
}
#endif /* CONFIG_NONSTATIC_KERNEL */

static size_t copy_oldmem_vaddr(void *vaddr, char *buf, size_t csize,
                                unsigned long offset, int userbuf)
{
        if (userbuf) {
                if (copy_to_user((char __user *)buf, (vaddr + offset), csize))
                        return -EFAULT;
        } else
                memcpy(buf, (vaddr + offset), csize);

        return csize;
}

/**
 * copy_oldmem_page - copy one page from "oldmem"
 * @pfn: page frame number to be copied
 * @buf: target memory address for the copy; this can be in kernel address
 *       space or user address space (see @userbuf)
 * @csize: number of bytes to copy
 * @offset: offset in bytes into the page (based on pfn) to begin the copy
 * @userbuf: if set, @buf is in user address space, use copy_to_user(),
 *       otherwise @buf is in kernel address space, use memcpy().
 *
 * Copy a page from "oldmem". For this page, there is no pte mapped
 * in the current kernel. We stitch up a pte, similar to kmap_atomic.
 */
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
                         size_t csize, unsigned long offset, int userbuf)
ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
                         size_t csize, unsigned long offset)
{
        void *vaddr;
        phys_addr_t paddr;
@@ -107,10 +82,10 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,

        if (memblock_is_region_memory(paddr, csize)) {
                vaddr = __va(paddr);
                csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
                csize = copy_to_iter(vaddr + offset, csize, iter);
        } else {
                vaddr = ioremap_cache(paddr, PAGE_SIZE);
                csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
                csize = copy_to_iter(vaddr + offset, csize, iter);
                iounmap(vaddr);
        }

@@ -24,7 +24,6 @@ __wsum csum_and_copy_from_user(const void __user *src, void *dst,
        user_read_access_end();
        return csum;
}
EXPORT_SYMBOL(csum_and_copy_from_user);

__wsum csum_and_copy_to_user(const void *src, void __user *dst, int len)
{
@@ -38,4 +37,3 @@ __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len)
        user_write_access_end();
        return csum;
}
EXPORT_SYMBOL(csum_and_copy_to_user);

@@ -7,22 +7,10 @@

#include <linux/crash_dump.h>
#include <linux/io.h>
#include <linux/uio.h>

/**
 * copy_oldmem_page() - copy one page from old kernel memory
 * @pfn: page frame number to be copied
 * @buf: buffer where the copied page is placed
 * @csize: number of bytes to copy
 * @offset: offset in bytes into the page
 * @userbuf: if set, @buf is in a user address space
 *
 * This function copies one page from old kernel memory into buffer pointed by
 * @buf. If @buf is in userspace, set @userbuf to %1. Returns number of bytes
 * copied or negative error in case of failure.
 */
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
                         size_t csize, unsigned long offset,
                         int userbuf)
ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
                         size_t csize, unsigned long offset)
{
        void *vaddr;

@@ -33,13 +21,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
        if (!vaddr)
                return -ENOMEM;

        if (userbuf) {
                if (copy_to_user((char __user *)buf, vaddr + offset, csize)) {
                        memunmap(vaddr);
                        return -EFAULT;
                }
        } else
                memcpy(buf, vaddr + offset, csize);
        csize = copy_to_iter(vaddr + offset, csize, iter);

        memunmap(vaddr);
        return csize;

@@ -15,6 +15,7 @@
#include <linux/slab.h>
#include <linux/memblock.h>
#include <linux/elf.h>
#include <linux/uio.h>
#include <asm/asm-offsets.h>
#include <asm/os_info.h>
#include <asm/elf.h>
@@ -212,8 +213,8 @@ static int copy_oldmem_user(void __user *dst, unsigned long src, size_t count)
/*
 * Copy one page from "oldmem"
 */
ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
                         unsigned long offset, int userbuf)
ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn, size_t csize,
                         unsigned long offset)
{
        unsigned long src;
        int rc;
@@ -221,10 +222,12 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
        if (!csize)
                return 0;
        src = pfn_to_phys(pfn) + offset;
        if (userbuf)
                rc = copy_oldmem_user((void __force __user *) buf, src, csize);

        /* XXX: pass the iov_iter down to a common function */
        if (iter_is_iovec(iter))
                rc = copy_oldmem_user(iter->iov->iov_base, src, csize);
        else
                rc = copy_oldmem_kernel((void *) buf, src, csize);
                rc = copy_oldmem_kernel(iter->kvec->iov_base, src, csize);
        return rc;
}
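Unlike the other architectures, the s390 hunk above keeps separate user and kernel copy routines and reaches into iter->iov / iter->kvec directly, as its XXX comment admits. A kernel-buffer destination would reach it as an ITER_KVEC iterator, built roughly as follows (a sketch; the wrapper function is hypothetical):

#include <linux/crash_dump.h>
#include <linux/uio.h>

/* Hypothetical helper: describe a kernel buffer as a one-segment kvec
 * iterator and let copy_oldmem_page() fill it. */
static ssize_t read_oldmem_into_kernel(void *kbuf, size_t len,
                                       unsigned long pfn, unsigned long offset)
{
        struct kvec kvec = { .iov_base = kbuf, .iov_len = len };
        struct iov_iter iter;

        iov_iter_kvec(&iter, READ, &kvec, 1, len);
        return copy_oldmem_page(&iter, pfn, len, offset);
}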
@@ -8,23 +8,11 @@
#include <linux/errno.h>
#include <linux/crash_dump.h>
#include <linux/io.h>
#include <linux/uio.h>
#include <linux/uaccess.h>

/**
 * copy_oldmem_page - copy one page from "oldmem"
 * @pfn: page frame number to be copied
 * @buf: target memory address for the copy; this can be in kernel address
 *       space or user address space (see @userbuf)
 * @csize: number of bytes to copy
 * @offset: offset in bytes into the page (based on pfn) to begin the copy
 * @userbuf: if set, @buf is in user address space, use copy_to_user(),
 *       otherwise @buf is in kernel address space, use memcpy().
 *
 * Copy a page from "oldmem". For this page, there is no pte mapped
 * in the current kernel. We stitch up a pte, similar to kmap_atomic.
 */
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
                         size_t csize, unsigned long offset, int userbuf)
ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
                         size_t csize, unsigned long offset)
{
        void __iomem *vaddr;

@@ -32,15 +20,8 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
                return 0;

        vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);

        if (userbuf) {
                if (copy_to_user((void __user *)buf, (vaddr + offset), csize)) {
                        iounmap(vaddr);
                        return -EFAULT;
                }
        } else
                memcpy(buf, (vaddr + offset), csize);

        csize = copy_to_iter(vaddr + offset, csize, iter);
        iounmap(vaddr);

        return csize;
}

@@ -10,8 +10,7 @@
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/crash_dump.h>

#include <linux/uaccess.h>
#include <linux/uio.h>

static inline bool is_crashed_pfn_valid(unsigned long pfn)
{
@@ -29,21 +28,8 @@ static inline bool is_crashed_pfn_valid(unsigned long pfn)
#endif
}

/**
 * copy_oldmem_page - copy one page from "oldmem"
 * @pfn: page frame number to be copied
 * @buf: target memory address for the copy; this can be in kernel address
 *       space or user address space (see @userbuf)
 * @csize: number of bytes to copy
 * @offset: offset in bytes into the page (based on pfn) to begin the copy
 * @userbuf: if set, @buf is in user address space, use copy_to_user(),
 *       otherwise @buf is in kernel address space, use memcpy().
 *
 * Copy a page from "oldmem". For this page, there might be no pte mapped
 * in the current kernel.
 */
ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
                         unsigned long offset, int userbuf)
ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn, size_t csize,
                         unsigned long offset)
{
        void *vaddr;

@@ -54,14 +40,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
                return -EFAULT;

        vaddr = kmap_local_pfn(pfn);

        if (!userbuf) {
                memcpy(buf, vaddr + offset, csize);
        } else {
                if (copy_to_user(buf, vaddr + offset, csize))
                        csize = -EFAULT;
        }

        csize = copy_to_iter(vaddr + offset, csize, iter);
        kunmap_local(vaddr);

        return csize;