Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
@@ -50,11 +50,11 @@ ksmbd.mountd (user space daemon)
 --------------------------------

 ksmbd.mountd is userspace process to, transfer user account and password that
-are registered using ksmbd.adduser(part of utils for user space). Further it
+are registered using ksmbd.adduser (part of utils for user space). Further it
 allows sharing information parameters that parsed from smb.conf to ksmbd in
 kernel. For the execution part it has a daemon which is continuously running
 and connected to the kernel interface using netlink socket, it waits for the
-requests(dcerpc and share/user info). It handles RPC calls (at a minimum few
+requests (dcerpc and share/user info). It handles RPC calls (at a minimum few
 dozen) that are most important for file server from NetShareEnum and
 NetServerGetInfo. Complete DCE/RPC response is prepared from the user space
 and passed over to the associated kernel thread for the client.
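
For illustration only (not part of the patch), the netlink pattern that paragraph describes looks roughly like this from userspace; the protocol number and the message handling below are placeholders, not ksmbd's real interface::

	#include <string.h>
	#include <unistd.h>
	#include <sys/socket.h>
	#include <linux/netlink.h>

	#define NETLINK_KSMBD_PLACEHOLDER 31	/* stand-in protocol number */

	int main(void)
	{
		struct sockaddr_nl addr;
		char buf[8192];
		int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_KSMBD_PLACEHOLDER);

		if (fd < 0)
			return 1;

		memset(&addr, 0, sizeof(addr));
		addr.nl_family = AF_NETLINK;
		addr.nl_pid = getpid();	/* unicast address of this daemon */
		if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
			return 1;

		/* Loop like the daemon described above: block on kernel
		 * requests (dcerpc, share/user info), then reply. */
		for (;;) {
			ssize_t n = recv(fd, buf, sizeof(buf), 0);
			if (n <= 0)
				break;
			/* parse struct nlmsghdr, build and send() a reply */
		}
		close(fd);
		return 0;
	}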
@@ -154,11 +154,11 @@ Each layer
 1. Enable all component prints
 	# sudo ksmbd.control -d "all"

-2. Enable one of components(smb, auth, vfs, oplock, ipc, conn, rdma)
+2. Enable one of components (smb, auth, vfs, oplock, ipc, conn, rdma)
 	# sudo ksmbd.control -d "smb"

-3. Show what prints are enable.
-	# cat/sys/class/ksmbd-control/debug
+3. Show what prints are enabled.
+	# cat /sys/class/ksmbd-control/debug
 	[smb] auth vfs oplock ipc conn [rdma]

 4. Disable prints:
@@ -1,7 +1,7 @@
 .. SPDX-License-Identifier: GPL-2.0

 =================================
-NETWORK FILESYSTEM HELPER LIBRARY
+Network Filesystem Helper Library
 =================================

 .. Contents:
@@ -37,22 +37,22 @@ into a common call framework.

 The following services are provided:

- * Handles transparent huge pages (THPs).
+ * Handle folios that span multiple pages.

- * Insulates the netfs from VM interface changes.
+ * Insulate the netfs from VM interface changes.

- * Allows the netfs to arbitrarily split reads up into pieces, even ones that
-   don't match page sizes or page alignments and that may cross pages.
+ * Allow the netfs to arbitrarily split reads up into pieces, even ones that
+   don't match folio sizes or folio alignments and that may cross folios.

- * Allows the netfs to expand a readahead request in both directions to meet
-   its needs.
+ * Allow the netfs to expand a readahead request in both directions to meet its
+   needs.

- * Allows the netfs to partially fulfil a read, which will then be resubmitted.
+ * Allow the netfs to partially fulfil a read, which will then be resubmitted.

- * Handles local caching, allowing cached data and server-read data to be
+ * Handle local caching, allowing cached data and server-read data to be
   interleaved for a single request.

- * Handles clearing of bufferage that aren't on the server.
+ * Handle clearing of bufferage that aren't on the server.

 * Handle retrying of reads that failed, switching reads from the cache to the
   server as necessary.
@@ -70,22 +70,22 @@ Read Helper Functions

 Three read helpers are provided::

- * void netfs_readahead(struct readahead_control *ractl,
-			const struct netfs_read_request_ops *ops,
-			void *netfs_priv);``
- * int netfs_readpage(struct file *file,
-		      struct page *page,
-		      const struct netfs_read_request_ops *ops,
-		      void *netfs_priv);
- * int netfs_write_begin(struct file *file,
-			 struct address_space *mapping,
-			 loff_t pos,
-			 unsigned int len,
-			 unsigned int flags,
-			 struct page **_page,
-			 void **_fsdata,
-			 const struct netfs_read_request_ops *ops,
-			 void *netfs_priv);
+	void netfs_readahead(struct readahead_control *ractl,
+			     const struct netfs_read_request_ops *ops,
+			     void *netfs_priv);
+	int netfs_readpage(struct file *file,
+			   struct folio *folio,
+			   const struct netfs_read_request_ops *ops,
+			   void *netfs_priv);
+	int netfs_write_begin(struct file *file,
+			      struct address_space *mapping,
+			      loff_t pos,
+			      unsigned int len,
+			      unsigned int flags,
+			      struct folio **_folio,
+			      void **_fsdata,
+			      const struct netfs_read_request_ops *ops,
+			      void *netfs_priv);

 Each corresponds to a VM operation, with the addition of a couple of parameters
 for the use of the read helpers:
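
As an aside (not from the patch), a hypothetical filesystem "myfs" might wire its VM operations into these helpers roughly as follows, using the post-foliation signatures above::

	/* Ops table implementing struct netfs_read_request_ops. */
	static const struct netfs_read_request_ops myfs_req_ops;

	static void myfs_readahead(struct readahead_control *ractl)
	{
		/* The helper expands/slices the request and drives the I/O. */
		netfs_readahead(ractl, &myfs_req_ops, NULL);
	}

	static int myfs_readpage(struct file *file, struct page *page)
	{
		struct folio *folio = page_folio(page);

		/* The helper unlocks the folio and marks it uptodate. */
		return netfs_readpage(file, folio, &myfs_req_ops, NULL);
	}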
@@ -103,8 +103,8 @@ Both of these values will be stored into the read request structure.
 For ->readahead() and ->readpage(), the network filesystem should just jump
 into the corresponding read helper; whereas for ->write_begin(), it may be a
 little more complicated as the network filesystem might want to flush
-conflicting writes or track dirty data and needs to put the acquired page if an
-error occurs after calling the helper.
+conflicting writes or track dirty data and needs to put the acquired folio if
+an error occurs after calling the helper.

 The helpers manage the read request, calling back into the network filesystem
 through the suppplied table of operations.  Waits will be performed as
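
A sketch of that ->write_begin() shape (hypothetical "myfs"; myfs_track_dirty() is invented for illustration), showing the folio being put on failure as described above::

	static int myfs_write_begin(struct file *file,
				    struct address_space *mapping,
				    loff_t pos, unsigned int len,
				    unsigned int flags,
				    struct page **pagep, void **fsdata)
	{
		struct folio *folio;
		int ret;

		ret = netfs_write_begin(file, mapping, pos, len, flags,
					&folio, fsdata, &myfs_req_ops, NULL);
		if (ret < 0)
			return ret;

		ret = myfs_track_dirty(file, folio, pos, len); /* invented */
		if (ret < 0) {
			/* Put the folio the helper acquired, as noted above. */
			folio_unlock(folio);
			folio_put(folio);
			return ret;
		}

		*pagep = &folio->page;
		return 0;
	}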
@@ -253,7 +253,7 @@ through which it can issue requests and negotiate::
 	void (*issue_op)(struct netfs_read_subrequest *subreq);
 	bool (*is_still_valid)(struct netfs_read_request *rreq);
 	int (*check_write_begin)(struct file *file, loff_t pos, unsigned len,
-				 struct page *page, void **_fsdata);
+				 struct folio *folio, void **_fsdata);
 	void (*done)(struct netfs_read_request *rreq);
 	void (*cleanup)(struct address_space *mapping, void *netfs_priv);
 };
@@ -313,13 +313,14 @@ The operations are as follows:

   There is no return value; the netfs_subreq_terminated() function should be
   called to indicate whether or not the operation succeeded and how much data
-  it transferred.  The filesystem also should not deal with setting pages
+  it transferred.  The filesystem also should not deal with setting folios
   uptodate, unlocking them or dropping their refs - the helpers need to deal
   with this as they have to coordinate with copying to the local cache.

-  Note that the helpers have the pages locked, but not pinned.  It is possible
-  to use the ITER_XARRAY iov iterator to refer to the range of the inode that
-  is being operated upon without the need to allocate large bvec tables.
+  Note that the helpers have the folios locked, but not pinned.  It is
+  possible to use the ITER_XARRAY iov iterator to refer to the range of the
+  inode that is being operated upon without the need to allocate large bvec
+  tables.

 * ``is_still_valid()``
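
A sketch of such an ``issue_op()`` (hypothetical "myfs"; the transport call is invented, and the termination call follows the transferred-bytes-or-error convention described above)::

	static void myfs_req_issue_op(struct netfs_read_subrequest *subreq)
	{
		/* Fetch the subrequest's byte range from the server; folio
		 * state is left entirely to the helpers. */
		ssize_t ret = myfs_server_read(subreq);	/* invented */

		netfs_subreq_terminated(subreq, ret, false);
	}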
@@ -330,15 +331,15 @@ The operations are as follows:
 * ``check_write_begin()``

   [Optional] This is called from the netfs_write_begin() helper once it has
-  allocated/grabbed the page to be modified to allow the filesystem to flush
+  allocated/grabbed the folio to be modified to allow the filesystem to flush
   conflicting state before allowing it to be modified.

-  It should return 0 if everything is now fine, -EAGAIN if the page should be
+  It should return 0 if everything is now fine, -EAGAIN if the folio should be
   regrabbed and any other error code to abort the operation.

 * ``done``

-  [Optional] This is called after the pages in the request have all been
+  [Optional] This is called after the folios in the request have all been
   unlocked (and marked uptodate if applicable).

 * ``cleanup``
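
A sketch of the ``check_write_begin()`` contract (the conflict test and flush are invented stand-ins for per-filesystem state)::

	static int myfs_check_write_begin(struct file *file, loff_t pos,
					  unsigned len, struct folio *folio,
					  void **_fsdata)
	{
		if (myfs_has_conflicting_write(file, pos, len)) { /* invented */
			myfs_flush_conflicts(file);		  /* invented */
			return -EAGAIN;	/* have the helper regrab the folio */
		}
		return 0;		/* fine to modify */
	}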
@@ -390,7 +391,7 @@ The read helpers work by the following general procedure:
     * If NETFS_SREQ_CLEAR_TAIL was set, a short read will be cleared to the
       end of the slice instead of reissuing.

- * Once the data is read, the pages that have been fully read/cleared:
+ * Once the data is read, the folios that have been fully read/cleared:

   * Will be marked uptodate.
@@ -398,11 +399,11 @@ The read helpers work by the following general procedure:

   * Unlocked

- * Any pages that need writing to the cache will then have DIO writes issued.
+ * Any folios that need writing to the cache will then have DIO writes issued.

 * Synchronous operations will wait for reading to be complete.

- * Writes to the cache will proceed asynchronously and the pages will have the
+ * Writes to the cache will proceed asynchronously and the folios will have the
   PG_fscache mark removed when that completes.

 * The request structures will be cleaned up when everything has completed.
@@ -452,6 +453,9 @@ operation table looks like the following::
 			    netfs_io_terminated_t term_func,
 			    void *term_func_priv);

+	int (*prepare_write)(struct netfs_cache_resources *cres,
+			     loff_t *_start, size_t *_len, loff_t i_size);
+
 	int (*write)(struct netfs_cache_resources *cres,
 		     loff_t start_pos,
 		     struct iov_iter *iter,
@@ -509,6 +513,14 @@ The methods defined in the table are:
   indicating whether the termination is definitely happening in the caller's
   context.

+ * ``prepare_write()``
+
+   [Required] Called to adjust a write to the cache and check that there is
+   sufficient space in the cache.  The start and length values indicate the
+   size of the write that netfslib is proposing, and this can be adjusted by
+   the cache to respect DIO boundaries.  The file size is passed for
+   information.
+
 * ``write()``

   [Required] Called to write to the cache.  The start file offset is given
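
A sketch of such a ``prepare_write()`` (the 512-byte DIO boundary and the space check are assumptions for illustration)::

	static int mycache_prepare_write(struct netfs_cache_resources *cres,
					 loff_t *_start, size_t *_len,
					 loff_t i_size)
	{
		/* Round the proposed write out to DIO-aligned boundaries. */
		loff_t start = round_down(*_start, 512);
		size_t len = round_up(*_start - start + *_len, 512);

		if (!mycache_has_space(cres, len))	/* invented */
			return -ENOBUFS;

		*_start = start;
		*_len = len;
		return 0;
	}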
@@ -525,4 +537,9 @@ not the read request structure as they could be used in other situations where
 there isn't a read request structure as well, such as writing dirty data to the
 cache.


+API Function Reference
+======================
+
+.. kernel-doc:: include/linux/netfs.h
+.. kernel-doc:: fs/netfs/read_helper.c
@@ -15994,6 +15994,7 @@ F: arch/mips/generic/board-ranchu.c

 RANDOM NUMBER DRIVER
 M:	"Theodore Ts'o" <tytso@mit.edu>
+M:	Jason A. Donenfeld <Jason@zx2c4.com>
 S:	Maintained
 F:	drivers/char/random.c

@@ -16638,7 +16639,8 @@ F: drivers/iommu/s390-iommu.c

 S390 IUCV NETWORK LAYER
-M:	Julian Wiedmann <jwi@linux.ibm.com>
-M:	Karsten Graul <kgraul@linux.ibm.com>
+M:	Alexandra Winter <wintera@linux.ibm.com>
+M:	Wenjia Zhang <wenjia@linux.ibm.com>
 L:	linux-s390@vger.kernel.org
+L:	netdev@vger.kernel.org
 S:	Supported
@@ -16649,7 +16651,8 @@ F: net/iucv/

 S390 NETWORK DRIVERS
-M:	Julian Wiedmann <jwi@linux.ibm.com>
-M:	Karsten Graul <kgraul@linux.ibm.com>
+M:	Alexandra Winter <wintera@linux.ibm.com>
+M:	Wenjia Zhang <wenjia@linux.ibm.com>
 L:	linux-s390@vger.kernel.org
+L:	netdev@vger.kernel.org
 S:	Supported
Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 16
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc3
 NAME = Gobble Gobble

 # *DOCUMENTATION*
arch/Kconfig
@@ -991,6 +991,16 @@ config HAVE_ARCH_COMPAT_MMAP_BASES
 	  and vice-versa 32-bit applications to call 64-bit mmap().
 	  Required for applications doing different bitness syscalls.

+config PAGE_SIZE_LESS_THAN_64KB
+	def_bool y
+	depends on !ARM64_64K_PAGES
+	depends on !IA64_PAGE_SIZE_64KB
+	depends on !PAGE_SIZE_64KB
+	depends on !PARISC_PAGE_SIZE_64KB
+	depends on !PPC_64K_PAGES
+	depends on !PPC_256K_PAGES
+	depends on !PAGE_SIZE_256KB
+
 # This allows to use a set of generic functions to determine mmap base
 # address by giving priority to top-down scheme only if the process
 # is not in legacy mode (compat task, unlimited stack size or
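
As an aside (not part of the patch), a consumer might key off the new symbol instead of enumerating per-architecture page-size options; mydrv_check() is invented for illustration::

	#include <linux/build_bug.h>
	#include <linux/mm.h>
	#include <linux/sizes.h>

	#ifdef CONFIG_PAGE_SIZE_LESS_THAN_64KB
	static inline void mydrv_check(void)
	{
		/* Guaranteed by the Kconfig dependencies added above. */
		BUILD_BUG_ON(PAGE_SIZE >= SZ_64K);
	}
	#endif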
@@ -91,7 +91,7 @@
 #define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)

 /* TCR_EL2 Registers bits */
-#define TCR_EL2_RES1		((1 << 31) | (1 << 23))
+#define TCR_EL2_RES1		((1U << 31) | (1 << 23))
 #define TCR_EL2_TBI		(1 << 20)
 #define TCR_EL2_PS_SHIFT	16
 #define TCR_EL2_PS_MASK		(7 << TCR_EL2_PS_SHIFT)
@@ -276,7 +276,7 @@
 #define CPTR_EL2_TFP_SHIFT 10

 /* Hyp Coprocessor Trap Register */
-#define CPTR_EL2_TCPAC	(1 << 31)
+#define CPTR_EL2_TCPAC	(1U << 31)
 #define CPTR_EL2_TAM	(1 << 30)
 #define CPTR_EL2_TTA	(1 << 20)
 #define CPTR_EL2_TFP	(1 << CPTR_EL2_TFP_SHIFT)
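
As an aside (not part of the patch), the reason for the added 1U in both hunks: shifting 1 into bit 31 of a 32-bit signed int overflows the signed type, which is undefined behaviour in C, while the unsigned shift is well-defined. A standalone illustration::

	#include <stdio.h>

	int main(void)
	{
		unsigned int res1 = (1U << 31) | (1 << 23); /* well-defined */
		/* int bad = 1 << 31;	UB: shifts into the sign bit */

		printf("TCR_EL2_RES1 = 0x%08x\n", res1);    /* 0x80800000 */
		return 0;
	}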
@@ -403,6 +403,8 @@ typedef bool (*exit_handler_fn)(struct kvm_vcpu *, u64 *);

 static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu);

+static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code);
+
 /*
  * Allow the hypervisor to handle the exit with an exit handler if it has one.
  *
@@ -429,6 +431,18 @@ static inline bool kvm_hyp_handle_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
  */
 static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
 {
+	/*
+	 * Save PSTATE early so that we can evaluate the vcpu mode
+	 * early on.
+	 */
+	vcpu->arch.ctxt.regs.pstate = read_sysreg_el2(SYS_SPSR);
+
+	/*
+	 * Check whether we want to repaint the state one way or
+	 * another.
+	 */
+	early_exit_filter(vcpu, exit_code);
+
 	if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
 		vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR);
@@ -70,7 +70,12 @@ static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
 static inline void __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt)
 {
 	ctxt->regs.pc = read_sysreg_el2(SYS_ELR);
-	ctxt->regs.pstate = read_sysreg_el2(SYS_SPSR);
+	/*
+	 * Guest PSTATE gets saved at guest fixup time in all
+	 * cases. We still need to handle the nVHE host side here.
+	 */
+	if (!has_vhe() && ctxt->__hyp_running_vcpu)
+		ctxt->regs.pstate = read_sysreg_el2(SYS_SPSR);

 	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
 		ctxt_sys_reg(ctxt, DISR_EL1) = read_sysreg_s(SYS_VDISR_EL2);
@@ -233,7 +233,7 @@ static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
  * Returns false if the guest ran in AArch32 when it shouldn't have, and
  * thus should exit to the host, or true if a the guest run loop can continue.
  */
-static bool handle_aarch32_guest(struct kvm_vcpu *vcpu, u64 *exit_code)
+static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
 {
 	struct kvm *kvm = kern_hyp_va(vcpu->kvm);

@@ -248,10 +248,7 @@ static bool handle_aarch32_guest(struct kvm_vcpu *vcpu, u64 *exit_code)
 		vcpu->arch.target = -1;
 		*exit_code &= BIT(ARM_EXIT_WITH_SERROR_BIT);
 		*exit_code |= ARM_EXCEPTION_IL;
-		return false;
 	}
-
-	return true;
 }

 /* Switch to the guest for legacy non-VHE systems */
@@ -316,9 +313,6 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 		/* Jump in the fire! */
 		exit_code = __guest_enter(vcpu);

-		if (unlikely(!handle_aarch32_guest(vcpu, &exit_code)))
-			break;
-
 		/* And we're baaack! */
 	} while (fixup_guest_exit(vcpu, &exit_code));
@@ -112,6 +112,10 @@ static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
 	return hyp_exit_handlers;
 }

+static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
+{
+}
+
 /* Switch to the guest for VHE systems running in EL2 */
 static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
 {
@@ -3097,7 +3097,7 @@ config STACKTRACE_SUPPORT
 config PGTABLE_LEVELS
 	int
 	default 4 if PAGE_SIZE_4KB && MIPS_VA_BITS_48
-	default 3 if 64BIT && !PAGE_SIZE_64KB
+	default 3 if 64BIT && (!PAGE_SIZE_64KB || MIPS_VA_BITS_48)
 	default 2

 config MIPS_AUTO_PFN_OFFSET
@@ -52,7 +52,7 @@ endif

 vmlinuzobjs-$(CONFIG_KERNEL_XZ) += $(obj)/ashldi3.o

-vmlinuzobjs-$(CONFIG_KERNEL_ZSTD) += $(obj)/bswapdi.o
+vmlinuzobjs-$(CONFIG_KERNEL_ZSTD) += $(obj)/bswapdi.o $(obj)/ashldi3.o

 targets := $(notdir $(vmlinuzobjs-y))
@@ -1734,8 +1734,6 @@ static inline void decode_cpucfg(struct cpuinfo_mips *c)

 static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
 {
-	decode_configs(c);
-
 	/* All Loongson processors covered here define ExcCode 16 as GSExc. */
 	c->options |= MIPS_CPU_GSEXCEX;

@@ -1796,6 +1794,8 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
 		panic("Unknown Loongson Processor ID!");
 		break;
 	}
+
+	decode_configs(c);
 }
 #else
 static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu) { }
@@ -185,7 +185,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 		seq_puts(m, " tx39_cache");
 	if (cpu_has_octeon_cache)
 		seq_puts(m, " octeon_cache");
-	if (cpu_has_fpu)
+	if (raw_cpu_has_fpu)
 		seq_puts(m, " fpu");
 	if (cpu_has_32fpr)
 		seq_puts(m, " 32fpr");
@@ -202,11 +202,11 @@ vmap_stack_overflow:
 	mfspr	r1, SPRN_SPRG_THREAD
 	lwz	r1, TASK_CPU - THREAD(r1)
 	slwi	r1, r1, 3
-	addis	r1, r1, emergency_ctx@ha
+	addis	r1, r1, emergency_ctx-PAGE_OFFSET@ha
 #else
-	lis	r1, emergency_ctx@ha
+	lis	r1, emergency_ctx-PAGE_OFFSET@ha
 #endif
-	lwz	r1, emergency_ctx@l(r1)
+	lwz	r1, emergency_ctx-PAGE_OFFSET@l(r1)
 	addi	r1, r1, THREAD_SIZE - INT_FRAME_SIZE
 	EXCEPTION_PROLOG_2 0 vmap_stack_overflow
 	prepare_transfer_to_handler
@@ -695,6 +695,7 @@ static void flush_guest_tlb(struct kvm *kvm)
 				     "r" (0) : "memory");
 		}
 		asm volatile("ptesync": : :"memory");
+		// POWER9 congruence-class TLBIEL leaves ERAT. Flush it now.
 		asm volatile(PPC_RADIX_INVALIDATE_ERAT_GUEST : : :"memory");
 	} else {
 		for (set = 0; set < kvm->arch.tlb_sets; ++set) {
@@ -705,7 +706,9 @@ static void flush_guest_tlb(struct kvm *kvm)
 			rb += PPC_BIT(51);	/* increment set number */
 		}
 		asm volatile("ptesync": : :"memory");
-		asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT : : :"memory");
+		// POWER9 congruence-class TLBIEL leaves ERAT. Flush it now.
+		if (cpu_has_feature(CPU_FTR_ARCH_300))
+			asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT : : :"memory");
 	}
 }
@@ -12,14 +12,12 @@
 #include <linux/types.h>
 #include <linux/kvm.h>
 #include <linux/kvm_types.h>
+#include <asm/csr.h>
 #include <asm/kvm_vcpu_fp.h>
 #include <asm/kvm_vcpu_timer.h>

-#ifdef CONFIG_64BIT
-#define KVM_MAX_VCPUS			(1U << 16)
-#else
-#define KVM_MAX_VCPUS			(1U << 9)
-#endif
+#define KVM_MAX_VCPUS \
+	((HGATP_VMID_MASK >> HGATP_VMID_SHIFT) + 1)

 #define KVM_HALT_POLL_NS_DEFAULT	500000
@@ -453,6 +453,12 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm)
 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
 				   struct kvm_memory_slot *slot)
 {
+	gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
+	phys_addr_t size = slot->npages << PAGE_SHIFT;
+
+	spin_lock(&kvm->mmu_lock);
+	stage2_unmap_range(kvm, gpa, size, false);
+	spin_unlock(&kvm->mmu_lock);
 }

 void kvm_arch_commit_memory_region(struct kvm *kvm,
@@ -102,12 +102,6 @@ extern void switch_fpu_return(void);
  */
 extern int cpu_has_xfeatures(u64 xfeatures_mask, const char **feature_name);

-/*
- * Tasks that are not using SVA have mm->pasid set to zero to note that they
- * will not have the valid bit set in MSR_IA32_PASID while they are running.
- */
-#define PASID_DISABLED	0
-
 /* Trap handling */
 extern int fpu__exception_code(struct fpu *fpu, int trap_nr);
 extern void fpu_sync_fpstate(struct fpu *fpu);
@@ -742,7 +742,7 @@ dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
 	return 0;
 }

-static char *prepare_command_line(void)
+static char * __init prepare_command_line(void)
 {
 #ifdef CONFIG_CMDLINE_BOOL
 #ifdef CONFIG_CMDLINE_OVERRIDE
Some files were not shown because too many files have changed in this diff.