Merge tag 'please-pull-paravirt' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux
Pull ia64 paravirt removal from Tony Luck:
 "Nobody cares about paravirtualization on ia64 anymore"

* tag 'please-pull-paravirt' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux:
  ia64: remove paravirt code
@@ -137,29 +137,6 @@ config AUDIT_ARCH
 	bool
 	default y
 
-menuconfig PARAVIRT_GUEST
-	bool "Paravirtualized guest support"
-	depends on BROKEN
-	help
-	  Say Y here to get to see options related to running Linux under
-	  various hypervisors. This option alone does not add any kernel code.
-
-	  If you say N, all options in this submenu will be skipped and disabled.
-
-if PARAVIRT_GUEST
-
-config PARAVIRT
-	bool "Enable paravirtualization code"
-	depends on PARAVIRT_GUEST
-	default y
-	help
-	  This changes the kernel so it can modify itself when it is run
-	  under a hypervisor, potentially improving performance significantly
-	  over full virtualization. However, when run without a hypervisor
-	  the kernel is theoretically slower and slightly larger.
-
-endif
-
 choice
 	prompt "System type"
 	default IA64_GENERIC
@@ -15,11 +15,7 @@
 #include <asm/ptrace.h>
 #include <asm/smp.h>
 
-#ifndef CONFIG_PARAVIRT
 typedef u8 ia64_vector;
-#else
-typedef u16 ia64_vector;
-#endif
 
 /*
  * 0 special
@@ -114,15 +110,11 @@ DECLARE_PER_CPU(int[IA64_NUM_VECTORS], vector_irq);
 
 extern struct irq_chip irq_type_ia64_lsapic;	/* CPU-internal interrupt controller */
 
-#ifdef CONFIG_PARAVIRT_GUEST
-#include <asm/paravirt.h>
-#else
 #define ia64_register_ipi	ia64_native_register_ipi
 #define assign_irq_vector	ia64_native_assign_irq_vector
 #define free_irq_vector		ia64_native_free_irq_vector
 #define register_percpu_irq	ia64_native_register_percpu_irq
 #define ia64_resend_irq		ia64_native_resend_irq
-#endif
 
 extern void ia64_native_register_ipi(void);
 extern int bind_irq_vector(int irq, int vector, cpumask_t domain);
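The removed #ifdef block above is the heart of the ia64 paravirt plumbing: with CONFIG_PARAVIRT_GUEST set, asm/paravirt.h turned these IRQ entry points into indirect calls through an ops table, while the native build simply aliased them to the ia64_native_* implementations, which is the only path that survives this commit. A minimal, self-contained C sketch of that pattern (the CONFIG_PV_GUEST_DEMO symbol and the function names here are illustrative, not the kernel's):

```c
#include <stdio.h>

/* A stand-in for the kernel's table of overridable IRQ operations. */
struct irq_ops {
	int (*assign_irq_vector)(int irq);
};

/* "Native" implementation, analogous to ia64_native_assign_irq_vector(). */
static int native_assign_irq_vector(int irq)
{
	return 0x30 + irq;	/* pretend vectors start at 0x30 */
}

#ifdef CONFIG_PV_GUEST_DEMO
/* Paravirt build: every call goes through the ops table, which a
 * hypervisor backend may repoint at boot. */
static struct irq_ops pv_irq_ops = {
	.assign_irq_vector = native_assign_irq_vector,
};
#define assign_irq_vector(irq)	pv_irq_ops.assign_irq_vector(irq)
#else
/* Native-only build (what remains after this commit): the alias is
 * resolved at compile time, with no indirection at run time. */
#define assign_irq_vector(irq)	native_assign_irq_vector(irq)
#endif

int main(void)
{
	printf("irq 9 -> vector 0x%x\n", assign_irq_vector(9));
	return 0;
}
```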
@@ -7,19 +7,6 @@
 #ifndef _ASM_IA64_INTRINSICS_H
 #define _ASM_IA64_INTRINSICS_H
 
-#include <asm/paravirt_privop.h>
 #include <uapi/asm/intrinsics.h>
 
-#ifndef __ASSEMBLY__
-#if defined(CONFIG_PARAVIRT)
-# undef IA64_INTRINSIC_API
-# undef IA64_INTRINSIC_MACRO
-# ifdef ASM_SUPPORTED
-#  define IA64_INTRINSIC_API(name)	paravirt_ ## name
-# else
-#  define IA64_INTRINSIC_API(name)	pv_cpu_ops.name
-# endif
-#define IA64_INTRINSIC_MACRO(name)	paravirt_ ## name
-#endif
-#endif /* !__ASSEMBLY__ */
 #endif /* _ASM_IA64_INTRINSICS_H */
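The block removed here redefined the IA64_INTRINSIC_API() / IA64_INTRINSIC_MACRO() wrappers so that intrinsics resolved to paravirt_* stubs (or to pv_cpu_ops callbacks when the assembler path was unavailable) instead of the ia64_native_* versions supplied by the uapi header. A hedged sketch of that token-pasting dispatch, with simplified, illustrative names:

```c
#include <stdio.h>

/* Default mapping, as a uapi-style header would provide it. */
#define INTRINSIC_API(name)	native_ ## name

#ifdef PARAVIRT_DEMO
/* Paravirt build: re-point the wrapper at the paravirt stubs. */
#undef INTRINSIC_API
#define INTRINSIC_API(name)	paravirt_ ## name
#endif

/* The public name callers use expands to one backend or the other. */
#define my_thash(addr)		INTRINSIC_API(thash)(addr)

unsigned long native_thash(unsigned long addr)
{
	return addr ^ 0x1000;	/* placeholder for the real hash */
}

unsigned long paravirt_thash(unsigned long addr)
{
	return addr ^ 0x2000;	/* would trap to the hypervisor instead */
}

int main(void)
{
	printf("%lx\n", my_thash(0xdead0000UL));
	return 0;
}
```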
@@ -55,14 +55,10 @@
 
 #define NR_IOSAPICS			256
 
-#ifdef CONFIG_PARAVIRT_GUEST
-#include <asm/paravirt.h>
-#else
 #define iosapic_pcat_compat_init	ia64_native_iosapic_pcat_compat_init
 #define __iosapic_read			__ia64_native_iosapic_read
 #define __iosapic_write			__ia64_native_iosapic_write
 #define iosapic_get_irq_chip		ia64_native_iosapic_get_irq_chip
-#endif
 
 extern void __init ia64_native_iosapic_pcat_compat_init(void);
 extern struct irq_chip *ia64_native_iosapic_get_irq_chip(unsigned long trigger);
@@ -18,12 +18,6 @@ struct mod_arch_specific {
 	struct elf64_shdr *got;		/* global offset table */
 	struct elf64_shdr *opd;		/* official procedure descriptors */
 	struct elf64_shdr *unwind;	/* unwind-table section */
-#ifdef CONFIG_PARAVIRT
-	struct elf64_shdr *paravirt_bundles;
-					/* paravirt_alt_bundle_patch table */
-	struct elf64_shdr *paravirt_insts;
-					/* paravirt_alt_inst_patch table */
-#endif
 	unsigned long gp;		/* global-pointer for module */
 
 	void *core_unw_table;		/* core unwind-table cookie returned by unwinder */
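The two fields dropped from struct mod_arch_specific cached the per-module patch tables: at load time the arch module code located the .paravirt_bundles and .paravirt_insts ELF sections so the binary patcher could rewrite the module's text. A hedged sketch of the "find a named section among a module's section headers" step (illustrative only; not the kernel's actual module_frob_arch_sections()):

```c
#include <elf.h>
#include <string.h>

/*
 * Walk a module's section headers and return the one whose name matches,
 * the way arch module code located the (now removed) .paravirt_bundles
 * and .paravirt_insts patch tables.
 */
Elf64_Shdr *find_section(const Elf64_Ehdr *hdr, Elf64_Shdr *sechdrs,
			 const char *secstrings, const char *name)
{
	unsigned int i;

	for (i = 1; i < hdr->e_shnum; i++)
		if (!strcmp(secstrings + sechdrs[i].sh_name, name))
			return &sechdrs[i];
	return NULL;	/* section absent: nothing to patch */
}
```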
@@ -22,32 +22,6 @@
|
||||
|
||||
#define DO_SAVE_MIN IA64_NATIVE_DO_SAVE_MIN
|
||||
|
||||
#define __paravirt_switch_to ia64_native_switch_to
|
||||
#define __paravirt_leave_syscall ia64_native_leave_syscall
|
||||
#define __paravirt_work_processed_syscall ia64_native_work_processed_syscall
|
||||
#define __paravirt_leave_kernel ia64_native_leave_kernel
|
||||
#define __paravirt_pending_syscall_end ia64_work_pending_syscall_end
|
||||
#define __paravirt_work_processed_syscall_target \
|
||||
ia64_work_processed_syscall
|
||||
|
||||
#define paravirt_fsyscall_table ia64_native_fsyscall_table
|
||||
#define paravirt_fsys_bubble_down ia64_native_fsys_bubble_down
|
||||
|
||||
#ifdef CONFIG_PARAVIRT_GUEST_ASM_CLOBBER_CHECK
|
||||
# define PARAVIRT_POISON 0xdeadbeefbaadf00d
|
||||
# define CLOBBER(clob) \
|
||||
;; \
|
||||
movl clob = PARAVIRT_POISON; \
|
||||
;;
|
||||
# define CLOBBER_PRED(pred_clob) \
|
||||
;; \
|
||||
cmp.eq pred_clob, p0 = r0, r0 \
|
||||
;;
|
||||
#else
|
||||
# define CLOBBER(clob) /* nothing */
|
||||
# define CLOBBER_PRED(pred_clob) /* nothing */
|
||||
#endif
|
||||
|
||||
#define MOV_FROM_IFA(reg) \
|
||||
mov reg = cr.ifa
|
||||
|
||||
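The CLOBBER()/CLOBBER_PRED() helpers above only do something when CONFIG_PARAVIRT_GUEST_ASM_CLOBBER_CHECK is set: in that debug configuration the native macros deliberately overwrite their declared scratch registers with PARAVIRT_POISON, so a caller that wrongly assumed a register survived one of these macros broke loudly on native hardware rather than only under a hypervisor. The same "poison whatever you claim to clobber" idea in plain C (a sketch; CLOBBER_CHECK_DEMO is an assumed stand-in for the real config option):

```c
#include <stdio.h>

#define POISON	0xdeadbeefbaadf00dULL

#ifdef CLOBBER_CHECK_DEMO
/* Debug build: deliberately destroy anything declared as clobbered. */
#define CLOBBER(var)	((var) = POISON)
#else
#define CLOBBER(var)	((void)0)	/* normal build: no cost */
#endif

/* Analogue of MOV_FROM_IVR(reg, clob): produces a value and is allowed
 * to trash one caller-named scratch location. */
static unsigned long long read_vector(unsigned long long *scratch)
{
	CLOBBER(*scratch);
	return 42;		/* stand-in for "mov reg = cr.ivr" */
}

int main(void)
{
	unsigned long long scratch = 7;
	unsigned long long vec = read_vector(&scratch);

	/* A caller that still trusts 'scratch' here is buggy; with
	 * CLOBBER_CHECK_DEMO defined it now sees POISON, so the bug is
	 * caught on native hardware, not only under a hypervisor. */
	printf("vec=%llu scratch=%#llx\n", vec, scratch);
	return 0;
}
```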
@@ -70,106 +44,76 @@
|
||||
mov reg = cr.iip
|
||||
|
||||
#define MOV_FROM_IVR(reg, clob) \
|
||||
mov reg = cr.ivr \
|
||||
CLOBBER(clob)
|
||||
mov reg = cr.ivr
|
||||
|
||||
#define MOV_FROM_PSR(pred, reg, clob) \
|
||||
(pred) mov reg = psr \
|
||||
CLOBBER(clob)
|
||||
(pred) mov reg = psr
|
||||
|
||||
#define MOV_FROM_ITC(pred, pred_clob, reg, clob) \
|
||||
(pred) mov reg = ar.itc \
|
||||
CLOBBER(clob) \
|
||||
CLOBBER_PRED(pred_clob)
|
||||
(pred) mov reg = ar.itc
|
||||
|
||||
#define MOV_TO_IFA(reg, clob) \
|
||||
mov cr.ifa = reg \
|
||||
CLOBBER(clob)
|
||||
mov cr.ifa = reg
|
||||
|
||||
#define MOV_TO_ITIR(pred, reg, clob) \
|
||||
(pred) mov cr.itir = reg \
|
||||
CLOBBER(clob)
|
||||
(pred) mov cr.itir = reg
|
||||
|
||||
#define MOV_TO_IHA(pred, reg, clob) \
|
||||
(pred) mov cr.iha = reg \
|
||||
CLOBBER(clob)
|
||||
(pred) mov cr.iha = reg
|
||||
|
||||
#define MOV_TO_IPSR(pred, reg, clob) \
|
||||
(pred) mov cr.ipsr = reg \
|
||||
CLOBBER(clob)
|
||||
(pred) mov cr.ipsr = reg
|
||||
|
||||
#define MOV_TO_IFS(pred, reg, clob) \
|
||||
(pred) mov cr.ifs = reg \
|
||||
CLOBBER(clob)
|
||||
(pred) mov cr.ifs = reg
|
||||
|
||||
#define MOV_TO_IIP(reg, clob) \
|
||||
mov cr.iip = reg \
|
||||
CLOBBER(clob)
|
||||
mov cr.iip = reg
|
||||
|
||||
#define MOV_TO_KR(kr, reg, clob0, clob1) \
|
||||
mov IA64_KR(kr) = reg \
|
||||
CLOBBER(clob0) \
|
||||
CLOBBER(clob1)
|
||||
mov IA64_KR(kr) = reg
|
||||
|
||||
#define ITC_I(pred, reg, clob) \
|
||||
(pred) itc.i reg \
|
||||
CLOBBER(clob)
|
||||
(pred) itc.i reg
|
||||
|
||||
#define ITC_D(pred, reg, clob) \
|
||||
(pred) itc.d reg \
|
||||
CLOBBER(clob)
|
||||
(pred) itc.d reg
|
||||
|
||||
#define ITC_I_AND_D(pred_i, pred_d, reg, clob) \
|
||||
(pred_i) itc.i reg; \
|
||||
(pred_d) itc.d reg \
|
||||
CLOBBER(clob)
|
||||
(pred_d) itc.d reg
|
||||
|
||||
#define THASH(pred, reg0, reg1, clob) \
|
||||
(pred) thash reg0 = reg1 \
|
||||
CLOBBER(clob)
|
||||
(pred) thash reg0 = reg1
|
||||
|
||||
#define SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(clob0, clob1) \
|
||||
ssm psr.ic | PSR_DEFAULT_BITS \
|
||||
CLOBBER(clob0) \
|
||||
CLOBBER(clob1) \
|
||||
;; \
|
||||
srlz.i /* guarantee that interruption collectin is on */ \
|
||||
;;
|
||||
|
||||
#define SSM_PSR_IC_AND_SRLZ_D(clob0, clob1) \
|
||||
ssm psr.ic \
|
||||
CLOBBER(clob0) \
|
||||
CLOBBER(clob1) \
|
||||
;; \
|
||||
srlz.d
|
||||
|
||||
#define RSM_PSR_IC(clob) \
|
||||
rsm psr.ic \
|
||||
CLOBBER(clob)
|
||||
rsm psr.ic
|
||||
|
||||
#define SSM_PSR_I(pred, pred_clob, clob) \
|
||||
(pred) ssm psr.i \
|
||||
CLOBBER(clob) \
|
||||
CLOBBER_PRED(pred_clob)
|
||||
(pred) ssm psr.i
|
||||
|
||||
#define RSM_PSR_I(pred, clob0, clob1) \
|
||||
(pred) rsm psr.i \
|
||||
CLOBBER(clob0) \
|
||||
CLOBBER(clob1)
|
||||
(pred) rsm psr.i
|
||||
|
||||
#define RSM_PSR_I_IC(clob0, clob1, clob2) \
|
||||
rsm psr.i | psr.ic \
|
||||
CLOBBER(clob0) \
|
||||
CLOBBER(clob1) \
|
||||
CLOBBER(clob2)
|
||||
rsm psr.i | psr.ic
|
||||
|
||||
#define RSM_PSR_DT \
|
||||
rsm psr.dt
|
||||
|
||||
#define RSM_PSR_BE_I(clob0, clob1) \
|
||||
rsm psr.be | psr.i \
|
||||
CLOBBER(clob0) \
|
||||
CLOBBER(clob1)
|
||||
rsm psr.be | psr.i
|
||||
|
||||
#define SSM_PSR_DT_AND_SRLZ_I \
|
||||
ssm psr.dt \
|
||||
@@ -177,15 +121,10 @@
|
||||
srlz.i
|
||||
|
||||
#define BSW_0(clob0, clob1, clob2) \
|
||||
bsw.0 \
|
||||
CLOBBER(clob0) \
|
||||
CLOBBER(clob1) \
|
||||
CLOBBER(clob2)
|
||||
bsw.0
|
||||
|
||||
#define BSW_1(clob0, clob1) \
|
||||
bsw.1 \
|
||||
CLOBBER(clob0) \
|
||||
CLOBBER(clob1)
|
||||
bsw.1
|
||||
|
||||
#define COVER \
|
||||
cover
|
||||
|
||||
@@ -1,271 +0,0 @@
|
||||
#ifndef _ASM_NATIVE_PVCHK_INST_H
|
||||
#define _ASM_NATIVE_PVCHK_INST_H
|
||||
|
||||
/******************************************************************************
|
||||
* arch/ia64/include/asm/native/pvchk_inst.h
|
||||
* Checker for paravirtualizations of privileged operations.
|
||||
*
|
||||
* Copyright (C) 2005 Hewlett-Packard Co
|
||||
* Dan Magenheimer <dan.magenheimer@hp.com>
|
||||
*
|
||||
* Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
|
||||
* VA Linux Systems Japan K.K.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||
*
|
||||
*/
|
||||
|
||||
/**********************************************
|
||||
* Instructions paravirtualized for correctness
|
||||
**********************************************/
|
||||
|
||||
/* "fc" and "thash" are privilege-sensitive instructions, meaning they
|
||||
* may have different semantics depending on whether they are executed
|
||||
* at PL0 vs PL!=0. When paravirtualized, these instructions mustn't
|
||||
* be allowed to execute directly, lest incorrect semantics result.
|
||||
*/
|
||||
|
||||
#define fc .error "fc should not be used directly."
|
||||
#define thash .error "thash should not be used directly."
|
||||
|
||||
/* Note that "ttag" and "cover" are also privilege-sensitive; "ttag"
|
||||
* is not currently used (though it may be in a long-format VHPT system!)
|
||||
* and the semantics of cover only change if psr.ic is off which is very
|
||||
* rare (and currently non-existent outside of assembly code
|
||||
*/
|
||||
#define ttag .error "ttag should not be used directly."
|
||||
#define cover .error "cover should not be used directly."
|
||||
|
||||
/* There are also privilege-sensitive registers. These registers are
|
||||
* readable at any privilege level but only writable at PL0.
|
||||
*/
|
||||
#define cpuid .error "cpuid should not be used directly."
|
||||
#define pmd .error "pmd should not be used directly."
|
||||
|
||||
/*
|
||||
* mov ar.eflag =
|
||||
* mov = ar.eflag
|
||||
*/
|
||||
|
||||
/**********************************************
|
||||
* Instructions paravirtualized for performance
|
||||
**********************************************/
|
||||
/*
|
||||
* Those instructions include '.' which can't be handled by cpp.
|
||||
* or can't be handled by cpp easily.
|
||||
* They are handled by sed instead of cpp.
|
||||
*/
|
||||
|
||||
/* for .S
|
||||
* itc.i
|
||||
* itc.d
|
||||
*
|
||||
* bsw.0
|
||||
* bsw.1
|
||||
*
|
||||
* ssm psr.ic | PSR_DEFAULT_BITS
|
||||
* ssm psr.ic
|
||||
* rsm psr.ic
|
||||
* ssm psr.i
|
||||
* rsm psr.i
|
||||
* rsm psr.i | psr.ic
|
||||
* rsm psr.dt
|
||||
* ssm psr.dt
|
||||
*
|
||||
* mov = cr.ifa
|
||||
* mov = cr.itir
|
||||
* mov = cr.isr
|
||||
* mov = cr.iha
|
||||
* mov = cr.ipsr
|
||||
* mov = cr.iim
|
||||
* mov = cr.iip
|
||||
* mov = cr.ivr
|
||||
* mov = psr
|
||||
*
|
||||
* mov cr.ifa =
|
||||
* mov cr.itir =
|
||||
* mov cr.iha =
|
||||
* mov cr.ipsr =
|
||||
* mov cr.ifs =
|
||||
* mov cr.iip =
|
||||
* mov cr.kr =
|
||||
*/
|
||||
|
||||
/* for intrinsics
|
||||
* ssm psr.i
|
||||
* rsm psr.i
|
||||
* mov = psr
|
||||
* mov = ivr
|
||||
* mov = tpr
|
||||
* mov cr.itm =
|
||||
* mov eoi =
|
||||
* mov rr[] =
|
||||
* mov = rr[]
|
||||
* mov = kr
|
||||
* mov kr =
|
||||
* ptc.ga
|
||||
*/
|
||||
|
||||
/*************************************************************
|
||||
* define paravirtualized instruction macros as nops to ignore them
|
||||
* and check whether arguments are appropriate.
|
||||
*************************************************************/
|
||||
|
||||
/* check whether reg is a regular register */
|
||||
.macro is_rreg_in reg
|
||||
.ifc "\reg", "r0"
|
||||
nop 0
|
||||
.exitm
|
||||
.endif
|
||||
;;
|
||||
mov \reg = r0
|
||||
;;
|
||||
.endm
|
||||
#define IS_RREG_IN(reg) is_rreg_in reg ;
|
||||
|
||||
#define IS_RREG_OUT(reg) \
|
||||
;; \
|
||||
mov reg = r0 \
|
||||
;;
|
||||
|
||||
#define IS_RREG_CLOB(reg) IS_RREG_OUT(reg)
|
||||
|
||||
/* check whether pred is a predicate register */
|
||||
#define IS_PRED_IN(pred) \
|
||||
;; \
|
||||
(pred) nop 0 \
|
||||
;;
|
||||
|
||||
#define IS_PRED_OUT(pred) \
|
||||
;; \
|
||||
cmp.eq pred, p0 = r0, r0 \
|
||||
;;
|
||||
|
||||
#define IS_PRED_CLOB(pred) IS_PRED_OUT(pred)
|
||||
|
||||
|
||||
#define DO_SAVE_MIN(__COVER, SAVE_IFS, EXTRA, WORKAROUND) \
|
||||
nop 0
|
||||
#define MOV_FROM_IFA(reg) \
|
||||
IS_RREG_OUT(reg)
|
||||
#define MOV_FROM_ITIR(reg) \
|
||||
IS_RREG_OUT(reg)
|
||||
#define MOV_FROM_ISR(reg) \
|
||||
IS_RREG_OUT(reg)
|
||||
#define MOV_FROM_IHA(reg) \
|
||||
IS_RREG_OUT(reg)
|
||||
#define MOV_FROM_IPSR(pred, reg) \
|
||||
IS_PRED_IN(pred) \
|
||||
IS_RREG_OUT(reg)
|
||||
#define MOV_FROM_IIM(reg) \
|
||||
IS_RREG_OUT(reg)
|
||||
#define MOV_FROM_IIP(reg) \
|
||||
IS_RREG_OUT(reg)
|
||||
#define MOV_FROM_IVR(reg, clob) \
|
||||
IS_RREG_OUT(reg) \
|
||||
IS_RREG_CLOB(clob)
|
||||
#define MOV_FROM_PSR(pred, reg, clob) \
|
||||
IS_PRED_IN(pred) \
|
||||
IS_RREG_OUT(reg) \
|
||||
IS_RREG_CLOB(clob)
|
||||
#define MOV_FROM_ITC(pred, pred_clob, reg, clob) \
|
||||
IS_PRED_IN(pred) \
|
||||
IS_PRED_CLOB(pred_clob) \
|
||||
IS_RREG_OUT(reg) \
|
||||
IS_RREG_CLOB(clob)
|
||||
#define MOV_TO_IFA(reg, clob) \
|
||||
IS_RREG_IN(reg) \
|
||||
IS_RREG_CLOB(clob)
|
||||
#define MOV_TO_ITIR(pred, reg, clob) \
|
||||
IS_PRED_IN(pred) \
|
||||
IS_RREG_IN(reg) \
|
||||
IS_RREG_CLOB(clob)
|
||||
#define MOV_TO_IHA(pred, reg, clob) \
|
||||
IS_PRED_IN(pred) \
|
||||
IS_RREG_IN(reg) \
|
||||
IS_RREG_CLOB(clob)
|
||||
#define MOV_TO_IPSR(pred, reg, clob) \
|
||||
IS_PRED_IN(pred) \
|
||||
IS_RREG_IN(reg) \
|
||||
IS_RREG_CLOB(clob)
|
||||
#define MOV_TO_IFS(pred, reg, clob) \
|
||||
IS_PRED_IN(pred) \
|
||||
IS_RREG_IN(reg) \
|
||||
IS_RREG_CLOB(clob)
|
||||
#define MOV_TO_IIP(reg, clob) \
|
||||
IS_RREG_IN(reg) \
|
||||
IS_RREG_CLOB(clob)
|
||||
#define MOV_TO_KR(kr, reg, clob0, clob1) \
|
||||
IS_RREG_IN(reg) \
|
||||
IS_RREG_CLOB(clob0) \
|
||||
IS_RREG_CLOB(clob1)
|
||||
#define ITC_I(pred, reg, clob) \
|
||||
IS_PRED_IN(pred) \
|
||||
IS_RREG_IN(reg) \
|
||||
IS_RREG_CLOB(clob)
|
||||
#define ITC_D(pred, reg, clob) \
|
||||
IS_PRED_IN(pred) \
|
||||
IS_RREG_IN(reg) \
|
||||
IS_RREG_CLOB(clob)
|
||||
#define ITC_I_AND_D(pred_i, pred_d, reg, clob) \
|
||||
IS_PRED_IN(pred_i) \
|
||||
IS_PRED_IN(pred_d) \
|
||||
IS_RREG_IN(reg) \
|
||||
IS_RREG_CLOB(clob)
|
||||
#define THASH(pred, reg0, reg1, clob) \
|
||||
IS_PRED_IN(pred) \
|
||||
IS_RREG_OUT(reg0) \
|
||||
IS_RREG_IN(reg1) \
|
||||
IS_RREG_CLOB(clob)
|
||||
#define SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(clob0, clob1) \
|
||||
IS_RREG_CLOB(clob0) \
|
||||
IS_RREG_CLOB(clob1)
|
||||
#define SSM_PSR_IC_AND_SRLZ_D(clob0, clob1) \
|
||||
IS_RREG_CLOB(clob0) \
|
||||
IS_RREG_CLOB(clob1)
|
||||
#define RSM_PSR_IC(clob) \
|
||||
IS_RREG_CLOB(clob)
|
||||
#define SSM_PSR_I(pred, pred_clob, clob) \
|
||||
IS_PRED_IN(pred) \
|
||||
IS_PRED_CLOB(pred_clob) \
|
||||
IS_RREG_CLOB(clob)
|
||||
#define RSM_PSR_I(pred, clob0, clob1) \
|
||||
IS_PRED_IN(pred) \
|
||||
IS_RREG_CLOB(clob0) \
|
||||
IS_RREG_CLOB(clob1)
|
||||
#define RSM_PSR_I_IC(clob0, clob1, clob2) \
|
||||
IS_RREG_CLOB(clob0) \
|
||||
IS_RREG_CLOB(clob1) \
|
||||
IS_RREG_CLOB(clob2)
|
||||
#define RSM_PSR_DT \
|
||||
nop 0
|
||||
#define RSM_PSR_BE_I(clob0, clob1) \
|
||||
IS_RREG_CLOB(clob0) \
|
||||
IS_RREG_CLOB(clob1)
|
||||
#define SSM_PSR_DT_AND_SRLZ_I \
|
||||
nop 0
|
||||
#define BSW_0(clob0, clob1, clob2) \
|
||||
IS_RREG_CLOB(clob0) \
|
||||
IS_RREG_CLOB(clob1) \
|
||||
IS_RREG_CLOB(clob2)
|
||||
#define BSW_1(clob0, clob1) \
|
||||
IS_RREG_CLOB(clob0) \
|
||||
IS_RREG_CLOB(clob1)
|
||||
#define COVER \
|
||||
nop 0
|
||||
#define RFI \
|
||||
br.ret.sptk.many rp /* defining nop causes dependency error */
|
||||
|
||||
#endif /* _ASM_NATIVE_PVCHK_INST_H */
|
||||
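The deleted pvchk_inst.h drove a build-time checker: the pvchk-% objects set up in the (also deleted) Makefile rules were assembled against these definitions, so any hand-written assembly that still used a privilege-sensitive instruction (fc, thash, ttag, cover, ...) directly hit a deliberate .error and failed the build, while the sanctioned macro forms collapsed to harmless moves that merely type-checked their operands. A loose C analogue of that scheme (hedged; here the poisoned name fails at link time rather than via an assembler .error, and PVCHK_DEMO stands in for the checker build flag):

```c
#ifdef PVCHK_DEMO
/*
 * Checker build: poison the raw primitive's name so any direct use fails
 * the build with an unmistakable symbol, and reduce the sanctioned
 * wrapper to a harmless operand check, mirroring IS_RREG_*()/IS_PRED_*().
 */
#define raw_thash	error_raw_thash_used_directly
#define THASH(dst, src)	((dst) = (src))
#else
unsigned long raw_thash(unsigned long addr);	/* privileged primitive */
#define THASH(dst, src)	((dst) = raw_thash(src))
#endif
```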
@@ -1,321 +0,0 @@
|
||||
/******************************************************************************
|
||||
* Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
|
||||
* VA Linux Systems Japan K.K.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||
*
|
||||
*/
|
||||
|
||||
|
||||
#ifndef __ASM_PARAVIRT_H
|
||||
#define __ASM_PARAVIRT_H
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
/******************************************************************************
|
||||
* fsys related addresses
|
||||
*/
|
||||
struct pv_fsys_data {
|
||||
unsigned long *fsyscall_table;
|
||||
void *fsys_bubble_down;
|
||||
};
|
||||
|
||||
extern struct pv_fsys_data pv_fsys_data;
|
||||
|
||||
unsigned long *paravirt_get_fsyscall_table(void);
|
||||
char *paravirt_get_fsys_bubble_down(void);
|
||||
|
||||
/******************************************************************************
|
||||
* patchlist addresses for gate page
|
||||
*/
|
||||
enum pv_gate_patchlist {
|
||||
PV_GATE_START_FSYSCALL,
|
||||
PV_GATE_END_FSYSCALL,
|
||||
|
||||
PV_GATE_START_BRL_FSYS_BUBBLE_DOWN,
|
||||
PV_GATE_END_BRL_FSYS_BUBBLE_DOWN,
|
||||
|
||||
PV_GATE_START_VTOP,
|
||||
PV_GATE_END_VTOP,
|
||||
|
||||
PV_GATE_START_MCKINLEY_E9,
|
||||
PV_GATE_END_MCKINLEY_E9,
|
||||
};
|
||||
|
||||
struct pv_patchdata {
|
||||
unsigned long start_fsyscall_patchlist;
|
||||
unsigned long end_fsyscall_patchlist;
|
||||
unsigned long start_brl_fsys_bubble_down_patchlist;
|
||||
unsigned long end_brl_fsys_bubble_down_patchlist;
|
||||
unsigned long start_vtop_patchlist;
|
||||
unsigned long end_vtop_patchlist;
|
||||
unsigned long start_mckinley_e9_patchlist;
|
||||
unsigned long end_mckinley_e9_patchlist;
|
||||
|
||||
void *gate_section;
|
||||
};
|
||||
|
||||
extern struct pv_patchdata pv_patchdata;
|
||||
|
||||
unsigned long paravirt_get_gate_patchlist(enum pv_gate_patchlist type);
|
||||
void *paravirt_get_gate_section(void);
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_PARAVIRT_GUEST
|
||||
|
||||
#define PARAVIRT_HYPERVISOR_TYPE_DEFAULT 0
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
#include <asm/hw_irq.h>
|
||||
#include <asm/meminit.h>
|
||||
|
||||
/******************************************************************************
|
||||
* general info
|
||||
*/
|
||||
struct pv_info {
|
||||
unsigned int kernel_rpl;
|
||||
int paravirt_enabled;
|
||||
const char *name;
|
||||
};
|
||||
|
||||
extern struct pv_info pv_info;
|
||||
|
||||
static inline int paravirt_enabled(void)
|
||||
{
|
||||
return pv_info.paravirt_enabled;
|
||||
}
|
||||
|
||||
static inline unsigned int get_kernel_rpl(void)
|
||||
{
|
||||
return pv_info.kernel_rpl;
|
||||
}
|
||||
|
||||
/******************************************************************************
|
||||
* initialization hooks.
|
||||
*/
|
||||
struct rsvd_region;
|
||||
|
||||
struct pv_init_ops {
|
||||
void (*banner)(void);
|
||||
|
||||
int (*reserve_memory)(struct rsvd_region *region);
|
||||
|
||||
void (*arch_setup_early)(void);
|
||||
void (*arch_setup_console)(char **cmdline_p);
|
||||
int (*arch_setup_nomca)(void);
|
||||
|
||||
void (*post_smp_prepare_boot_cpu)(void);
|
||||
|
||||
#ifdef ASM_SUPPORTED
|
||||
unsigned long (*patch_bundle)(void *sbundle, void *ebundle,
|
||||
unsigned long type);
|
||||
unsigned long (*patch_inst)(unsigned long stag, unsigned long etag,
|
||||
unsigned long type);
|
||||
#endif
|
||||
void (*patch_branch)(unsigned long tag, unsigned long type);
|
||||
};
|
||||
|
||||
extern struct pv_init_ops pv_init_ops;
|
||||
|
||||
static inline void paravirt_banner(void)
|
||||
{
|
||||
if (pv_init_ops.banner)
|
||||
pv_init_ops.banner();
|
||||
}
|
||||
|
||||
static inline int paravirt_reserve_memory(struct rsvd_region *region)
|
||||
{
|
||||
if (pv_init_ops.reserve_memory)
|
||||
return pv_init_ops.reserve_memory(region);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void paravirt_arch_setup_early(void)
|
||||
{
|
||||
if (pv_init_ops.arch_setup_early)
|
||||
pv_init_ops.arch_setup_early();
|
||||
}
|
||||
|
||||
static inline void paravirt_arch_setup_console(char **cmdline_p)
|
||||
{
|
||||
if (pv_init_ops.arch_setup_console)
|
||||
pv_init_ops.arch_setup_console(cmdline_p);
|
||||
}
|
||||
|
||||
static inline int paravirt_arch_setup_nomca(void)
|
||||
{
|
||||
if (pv_init_ops.arch_setup_nomca)
|
||||
return pv_init_ops.arch_setup_nomca();
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void paravirt_post_smp_prepare_boot_cpu(void)
|
||||
{
|
||||
if (pv_init_ops.post_smp_prepare_boot_cpu)
|
||||
pv_init_ops.post_smp_prepare_boot_cpu();
|
||||
}
|
||||
|
||||
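pv_init_ops illustrates the convention used throughout this header: every hook is optional, and the static inline wrappers test the function pointer before calling it, so a native boot that leaves the pointers NULL pays only an inlined NULL check. A compact, self-contained version of that wrapper pattern (illustrative names):

```c
#include <stddef.h>

struct init_ops {
	void (*banner)(void);
	int  (*reserve_memory)(unsigned long start, unsigned long len);
};

/* Native boot leaves the hooks NULL; a hypervisor backend fills them in. */
static struct init_ops init_ops;

static inline void boot_banner(void)
{
	if (init_ops.banner)
		init_ops.banner();
}

static inline int boot_reserve_memory(unsigned long start, unsigned long len)
{
	if (init_ops.reserve_memory)
		return init_ops.reserve_memory(start, len);
	return 0;	/* default: nothing special to reserve */
}
```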
/******************************************************************************
|
||||
* replacement of iosapic operations.
|
||||
*/
|
||||
|
||||
struct pv_iosapic_ops {
|
||||
void (*pcat_compat_init)(void);
|
||||
|
||||
struct irq_chip *(*__get_irq_chip)(unsigned long trigger);
|
||||
|
||||
unsigned int (*__read)(char __iomem *iosapic, unsigned int reg);
|
||||
void (*__write)(char __iomem *iosapic, unsigned int reg, u32 val);
|
||||
};
|
||||
|
||||
extern struct pv_iosapic_ops pv_iosapic_ops;
|
||||
|
||||
static inline void
|
||||
iosapic_pcat_compat_init(void)
|
||||
{
|
||||
if (pv_iosapic_ops.pcat_compat_init)
|
||||
pv_iosapic_ops.pcat_compat_init();
|
||||
}
|
||||
|
||||
static inline struct irq_chip*
|
||||
iosapic_get_irq_chip(unsigned long trigger)
|
||||
{
|
||||
return pv_iosapic_ops.__get_irq_chip(trigger);
|
||||
}
|
||||
|
||||
static inline unsigned int
|
||||
__iosapic_read(char __iomem *iosapic, unsigned int reg)
|
||||
{
|
||||
return pv_iosapic_ops.__read(iosapic, reg);
|
||||
}
|
||||
|
||||
static inline void
|
||||
__iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val)
|
||||
{
|
||||
return pv_iosapic_ops.__write(iosapic, reg, val);
|
||||
}
|
||||
|
||||
/******************************************************************************
|
||||
* replacement of irq operations.
|
||||
*/
|
||||
|
||||
struct pv_irq_ops {
|
||||
void (*register_ipi)(void);
|
||||
|
||||
int (*assign_irq_vector)(int irq);
|
||||
void (*free_irq_vector)(int vector);
|
||||
|
||||
void (*register_percpu_irq)(ia64_vector vec,
|
||||
struct irqaction *action);
|
||||
|
||||
void (*resend_irq)(unsigned int vector);
|
||||
};
|
||||
|
||||
extern struct pv_irq_ops pv_irq_ops;
|
||||
|
||||
static inline void
|
||||
ia64_register_ipi(void)
|
||||
{
|
||||
pv_irq_ops.register_ipi();
|
||||
}
|
||||
|
||||
static inline int
|
||||
assign_irq_vector(int irq)
|
||||
{
|
||||
return pv_irq_ops.assign_irq_vector(irq);
|
||||
}
|
||||
|
||||
static inline void
|
||||
free_irq_vector(int vector)
|
||||
{
|
||||
return pv_irq_ops.free_irq_vector(vector);
|
||||
}
|
||||
|
||||
static inline void
|
||||
register_percpu_irq(ia64_vector vec, struct irqaction *action)
|
||||
{
|
||||
pv_irq_ops.register_percpu_irq(vec, action);
|
||||
}
|
||||
|
||||
static inline void
|
||||
ia64_resend_irq(unsigned int vector)
|
||||
{
|
||||
pv_irq_ops.resend_irq(vector);
|
||||
}
|
||||
|
||||
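Unlike the init hooks, the IRQ operations are called unconditionally, so the native side had to populate pv_irq_ops with the ia64_native_* routines; that initializer lived in the deleted arch/ia64/kernel/paravirt.c and presumably looked roughly like the trimmed sketch below (register_percpu_irq omitted to keep it self-contained):

```c
/* Prototypes as declared by the (removed) paravirt plumbing. */
void ia64_native_register_ipi(void);
int  ia64_native_assign_irq_vector(int irq);
void ia64_native_free_irq_vector(int vector);
void ia64_native_resend_irq(unsigned int vector);

struct pv_irq_ops {
	void (*register_ipi)(void);
	int  (*assign_irq_vector)(int irq);
	void (*free_irq_vector)(int vector);
	void (*resend_irq)(unsigned int vector);
};

/* Native backend: every hook points straight at the native routine. */
struct pv_irq_ops pv_irq_ops = {
	.register_ipi      = ia64_native_register_ipi,
	.assign_irq_vector = ia64_native_assign_irq_vector,
	.free_irq_vector   = ia64_native_free_irq_vector,
	.resend_irq        = ia64_native_resend_irq,
};
```

A hypervisor backend would install its own table before IRQ setup ran, which is exactly the indirection this commit deletes.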
/******************************************************************************
|
||||
* replacement of time operations.
|
||||
*/
|
||||
|
||||
extern struct itc_jitter_data_t itc_jitter_data;
|
||||
extern volatile int time_keeper_id;
|
||||
|
||||
struct pv_time_ops {
|
||||
void (*init_missing_ticks_accounting)(int cpu);
|
||||
int (*do_steal_accounting)(unsigned long *new_itm);
|
||||
|
||||
void (*clocksource_resume)(void);
|
||||
|
||||
unsigned long long (*sched_clock)(void);
|
||||
};
|
||||
|
||||
extern struct pv_time_ops pv_time_ops;
|
||||
|
||||
static inline void
|
||||
paravirt_init_missing_ticks_accounting(int cpu)
|
||||
{
|
||||
if (pv_time_ops.init_missing_ticks_accounting)
|
||||
pv_time_ops.init_missing_ticks_accounting(cpu);
|
||||
}
|
||||
|
||||
struct static_key;
|
||||
extern struct static_key paravirt_steal_enabled;
|
||||
extern struct static_key paravirt_steal_rq_enabled;
|
||||
|
||||
static inline int
|
||||
paravirt_do_steal_accounting(unsigned long *new_itm)
|
||||
{
|
||||
return pv_time_ops.do_steal_accounting(new_itm);
|
||||
}
|
||||
|
||||
static inline unsigned long long paravirt_sched_clock(void)
|
||||
{
|
||||
return pv_time_ops.sched_clock();
|
||||
}
|
||||
|
||||
#endif /* !__ASSEMBLY__ */
|
||||
|
||||
#else
|
||||
/* fallback for native case */
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
#define paravirt_banner() do { } while (0)
|
||||
#define paravirt_reserve_memory(region) 0
|
||||
|
||||
#define paravirt_arch_setup_early() do { } while (0)
|
||||
#define paravirt_arch_setup_console(cmdline_p) do { } while (0)
|
||||
#define paravirt_arch_setup_nomca() 0
|
||||
#define paravirt_post_smp_prepare_boot_cpu() do { } while (0)
|
||||
|
||||
#define paravirt_init_missing_ticks_accounting(cpu) do { } while (0)
|
||||
#define paravirt_do_steal_accounting(new_itm) 0
|
||||
|
||||
#endif /* __ASSEMBLY__ */
|
||||
|
||||
|
||||
#endif /* CONFIG_PARAVIRT_GUEST */
|
||||
|
||||
#endif /* __ASM_PARAVIRT_H */
|
||||
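The time hooks above let a hypervisor report "stolen" CPU time, with paravirt_steal_enabled / paravirt_steal_rq_enabled gating whether the scheduler bothers to account it. A generic sketch of how a consumer of such a hook pair typically folds steal time into its accounting (a plain bool stands in for the static key, and both names here are assumptions, not the ia64 scheduler code):

```c
#include <stdbool.h>

/* Stand-ins for the static key and hook declared in the header above. */
static bool steal_accounting_enabled;			/* paravirt_steal_enabled */
static unsigned long (*steal_clock_hook)(int cpu);	/* hypervisor-provided */

/*
 * Called from the timer path: return how much time the hypervisor ran
 * other guests since *last, so it is charged as steal time rather than
 * misreported as idle or system time.
 */
static inline unsigned long steal_delta(int cpu, unsigned long *last)
{
	unsigned long now, delta;

	if (!steal_accounting_enabled || !steal_clock_hook)
		return 0;		/* native case: nothing stolen */

	now = steal_clock_hook(cpu);
	delta = now - *last;
	*last = now;
	return delta;
}
```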
@@ -1,143 +0,0 @@
|
||||
/******************************************************************************
|
||||
* Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
|
||||
* VA Linux Systems Japan K.K.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef __ASM_PARAVIRT_PATCH_H
|
||||
#define __ASM_PARAVIRT_PATCH_H
|
||||
|
||||
#ifdef __ASSEMBLY__
|
||||
|
||||
.section .paravirt_branches, "a"
|
||||
.previous
|
||||
#define PARAVIRT_PATCH_SITE_BR(type) \
|
||||
{ \
|
||||
[1:] ; \
|
||||
br.cond.sptk.many 2f ; \
|
||||
nop.b 0 ; \
|
||||
nop.b 0;; ; \
|
||||
} ; \
|
||||
2: \
|
||||
.xdata8 ".paravirt_branches", 1b, type
|
||||
|
||||
#else
|
||||
|
||||
#include <linux/stringify.h>
|
||||
#include <asm/intrinsics.h>
|
||||
|
||||
/* for binary patch */
|
||||
struct paravirt_patch_site_bundle {
|
||||
void *sbundle;
|
||||
void *ebundle;
|
||||
unsigned long type;
|
||||
};
|
||||
|
||||
/* label means the beginning of new bundle */
|
||||
#define paravirt_alt_bundle(instr, privop) \
|
||||
"\t998:\n" \
|
||||
"\t" instr "\n" \
|
||||
"\t999:\n" \
|
||||
"\t.pushsection .paravirt_bundles, \"a\"\n" \
|
||||
"\t.popsection\n" \
|
||||
"\t.xdata8 \".paravirt_bundles\", 998b, 999b, " \
|
||||
__stringify(privop) "\n"
|
||||
|
||||
|
||||
struct paravirt_patch_bundle_elem {
|
||||
const void *sbundle;
|
||||
const void *ebundle;
|
||||
unsigned long type;
|
||||
};
|
||||
|
||||
|
||||
struct paravirt_patch_site_inst {
|
||||
unsigned long stag;
|
||||
unsigned long etag;
|
||||
unsigned long type;
|
||||
};
|
||||
|
||||
#define paravirt_alt_inst(instr, privop) \
|
||||
"\t[998:]\n" \
|
||||
"\t" instr "\n" \
|
||||
"\t[999:]\n" \
|
||||
"\t.pushsection .paravirt_insts, \"a\"\n" \
|
||||
"\t.popsection\n" \
|
||||
"\t.xdata8 \".paravirt_insts\", 998b, 999b, " \
|
||||
__stringify(privop) "\n"
|
||||
|
||||
struct paravirt_patch_site_branch {
|
||||
unsigned long tag;
|
||||
unsigned long type;
|
||||
};
|
||||
|
||||
struct paravirt_patch_branch_target {
|
||||
const void *entry;
|
||||
unsigned long type;
|
||||
};
|
||||
|
||||
void
|
||||
__paravirt_patch_apply_branch(
|
||||
unsigned long tag, unsigned long type,
|
||||
const struct paravirt_patch_branch_target *entries,
|
||||
unsigned int nr_entries);
|
||||
|
||||
void
|
||||
paravirt_patch_reloc_br(unsigned long tag, const void *target);
|
||||
|
||||
void
|
||||
paravirt_patch_reloc_brl(unsigned long tag, const void *target);
|
||||
|
||||
|
||||
#if defined(ASM_SUPPORTED) && defined(CONFIG_PARAVIRT)
|
||||
unsigned long
|
||||
ia64_native_patch_bundle(void *sbundle, void *ebundle, unsigned long type);
|
||||
|
||||
unsigned long
|
||||
__paravirt_patch_apply_bundle(void *sbundle, void *ebundle, unsigned long type,
|
||||
const struct paravirt_patch_bundle_elem *elems,
|
||||
unsigned long nelems,
|
||||
const struct paravirt_patch_bundle_elem **found);
|
||||
|
||||
void
|
||||
paravirt_patch_apply_bundle(const struct paravirt_patch_site_bundle *start,
|
||||
const struct paravirt_patch_site_bundle *end);
|
||||
|
||||
void
|
||||
paravirt_patch_apply_inst(const struct paravirt_patch_site_inst *start,
|
||||
const struct paravirt_patch_site_inst *end);
|
||||
|
||||
void paravirt_patch_apply(void);
|
||||
#else
|
||||
#define paravirt_patch_apply_bundle(start, end) do { } while (0)
|
||||
#define paravirt_patch_apply_inst(start, end) do { } while (0)
|
||||
#define paravirt_patch_apply() do { } while (0)
|
||||
#endif
|
||||
|
||||
#endif /* !__ASSEMBLY__ */
|
||||
|
||||
#endif /* __ASM_PARAVIRT_PATCH_H */
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* mode: C
|
||||
* c-set-style: "linux"
|
||||
* c-basic-offset: 8
|
||||
* tab-width: 8
|
||||
* indent-tabs-mode: t
|
||||
* End:
|
||||
*/
|
||||
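The deleted patch header recorded every annotated call or branch site in dedicated ELF sections (.paravirt_bundles, .paravirt_insts, .paravirt_branches) via the .xdata8 directives above; at boot the kernel walked those tables and rewrote each site in place for the detected hypervisor. A minimal C model of that "walk a table of (address, type) records and patch each site" step (illustrative; the real code rewrites IA-64 instruction bundles and resynchronizes the instruction stream, not plain bytes):

```c
#include <stddef.h>
#include <string.h>

struct patch_site {
	void          *addr;	/* instruction (bundle) to rewrite */
	unsigned long  type;	/* which operation this site performs */
};

/* Lookup table: replacement code for each operation type. */
struct patch_target {
	unsigned long  type;
	const void    *code;
	size_t         len;
};

void apply_patches(const struct patch_site *start,
		   const struct patch_site *end,
		   const struct patch_target *targets, size_t ntargets)
{
	const struct patch_site *site;
	size_t i;

	for (site = start; site < end; site++)
		for (i = 0; i < ntargets; i++)
			if (targets[i].type == site->type) {
				/* Real code would also flush the I-cache
				 * and serialize the instruction stream. */
				memcpy(site->addr, targets[i].code,
				       targets[i].len);
				break;
			}
}
```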
@@ -1,479 +0,0 @@
|
||||
/******************************************************************************
|
||||
* Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
|
||||
* VA Linux Systems Japan K.K.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef _ASM_IA64_PARAVIRT_PRIVOP_H
|
||||
#define _ASM_IA64_PARAVIRT_PRIVOP_H
|
||||
|
||||
#ifdef CONFIG_PARAVIRT
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <asm/kregs.h> /* for IA64_PSR_I */
|
||||
|
||||
/******************************************************************************
|
||||
* replacement of intrinsics operations.
|
||||
*/
|
||||
|
||||
struct pv_cpu_ops {
|
||||
void (*fc)(void *addr);
|
||||
unsigned long (*thash)(unsigned long addr);
|
||||
unsigned long (*get_cpuid)(int index);
|
||||
unsigned long (*get_pmd)(int index);
|
||||
unsigned long (*getreg)(int reg);
|
||||
void (*setreg)(int reg, unsigned long val);
|
||||
void (*ptcga)(unsigned long addr, unsigned long size);
|
||||
unsigned long (*get_rr)(unsigned long index);
|
||||
void (*set_rr)(unsigned long index, unsigned long val);
|
||||
void (*set_rr0_to_rr4)(unsigned long val0, unsigned long val1,
|
||||
unsigned long val2, unsigned long val3,
|
||||
unsigned long val4);
|
||||
void (*ssm_i)(void);
|
||||
void (*rsm_i)(void);
|
||||
unsigned long (*get_psr_i)(void);
|
||||
void (*intrin_local_irq_restore)(unsigned long flags);
|
||||
};
|
||||
|
||||
extern struct pv_cpu_ops pv_cpu_ops;
|
||||
|
||||
extern void ia64_native_setreg_func(int regnum, unsigned long val);
|
||||
extern unsigned long ia64_native_getreg_func(int regnum);
|
||||
|
||||
/************************************************/
|
||||
/* Instructions paravirtualized for performance */
|
||||
/************************************************/
|
||||
|
||||
#ifndef ASM_SUPPORTED
|
||||
#define paravirt_ssm_i() pv_cpu_ops.ssm_i()
|
||||
#define paravirt_rsm_i() pv_cpu_ops.rsm_i()
|
||||
#define __paravirt_getreg() pv_cpu_ops.getreg()
|
||||
#endif
|
||||
|
||||
/* mask for ia64_native_ssm/rsm() must be constant ("i" constraint).
|
||||
* static inline function doesn't satisfy it. */
|
||||
#define paravirt_ssm(mask) \
|
||||
do { \
|
||||
if ((mask) == IA64_PSR_I) \
|
||||
paravirt_ssm_i(); \
|
||||
else \
|
||||
ia64_native_ssm(mask); \
|
||||
} while (0)
|
||||
|
||||
#define paravirt_rsm(mask) \
|
||||
do { \
|
||||
if ((mask) == IA64_PSR_I) \
|
||||
paravirt_rsm_i(); \
|
||||
else \
|
||||
ia64_native_rsm(mask); \
|
||||
} while (0)
|
||||
|
||||
/* returned ip value should be the one in the caller,
|
||||
* not in __paravirt_getreg() */
|
||||
#define paravirt_getreg(reg) \
|
||||
({ \
|
||||
unsigned long res; \
|
||||
if ((reg) == _IA64_REG_IP) \
|
||||
res = ia64_native_getreg(_IA64_REG_IP); \
|
||||
else \
|
||||
res = __paravirt_getreg(reg); \
|
||||
res; \
|
||||
})
|
||||
|
||||
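The comment above explains why paravirt_ssm()/paravirt_rsm() and paravirt_getreg() stay macros: ia64_native_ssm() requires a compile-time-constant mask (the "i" asm constraint), so the dispatch on IA64_PSR_I or _IA64_REG_IP must happen before the intrinsic is instantiated, which an ordinary inline function cannot guarantee. A small C illustration of the same constant-dispatch requirement (hedged; __builtin_constant_p is used here for clarity, while the removed macros simply compared the mask):

```c
/*
 * The fast-path vs. generic-path choice must be made on a compile-time
 * constant, as the removed paravirt_ssm()/paravirt_rsm() macros did for
 * the IA64_PSR_I bit.
 */
#define PSR_I_BIT	(1UL << 14)		/* stand-in for IA64_PSR_I */

void enable_irq_fast(void);			/* cheap, dedicated hook */
void set_psr_bits_generic(unsigned long mask);	/* needs a constant mask */

#define set_psr_bits(mask)					\
do {								\
	if (__builtin_constant_p(mask) && (mask) == PSR_I_BIT)	\
		enable_irq_fast();				\
	else							\
		set_psr_bits_generic(mask);			\
} while (0)
```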
/******************************************************************************
|
||||
* replacement of hand written assembly codes.
|
||||
*/
|
||||
struct pv_cpu_asm_switch {
|
||||
unsigned long switch_to;
|
||||
unsigned long leave_syscall;
|
||||
unsigned long work_processed_syscall;
|
||||
unsigned long leave_kernel;
|
||||
};
|
||||
void paravirt_cpu_asm_init(const struct pv_cpu_asm_switch *cpu_asm_switch);
|
||||
|
||||
#endif /* __ASSEMBLY__ */
|
||||
|
||||
#define IA64_PARAVIRT_ASM_FUNC(name) paravirt_ ## name
|
||||
|
||||
#else
|
||||
|
||||
/* fallback for native case */
|
||||
#define IA64_PARAVIRT_ASM_FUNC(name) ia64_native_ ## name
|
||||
|
||||
#endif /* CONFIG_PARAVIRT */
|
||||
|
||||
#if defined(CONFIG_PARAVIRT) && defined(ASM_SUPPORTED)
|
||||
#define paravirt_dv_serialize_data() ia64_dv_serialize_data()
|
||||
#else
|
||||
#define paravirt_dv_serialize_data() /* nothing */
|
||||
#endif
|
||||
|
||||
/* these routines utilize privilege-sensitive or performance-sensitive
|
||||
* privileged instructions so the code must be replaced with
|
||||
* paravirtualized versions */
|
||||
#define ia64_switch_to IA64_PARAVIRT_ASM_FUNC(switch_to)
|
||||
#define ia64_leave_syscall IA64_PARAVIRT_ASM_FUNC(leave_syscall)
|
||||
#define ia64_work_processed_syscall \
|
||||
IA64_PARAVIRT_ASM_FUNC(work_processed_syscall)
|
||||
#define ia64_leave_kernel IA64_PARAVIRT_ASM_FUNC(leave_kernel)
|
||||
|
||||
|
||||
#if defined(CONFIG_PARAVIRT)
|
||||
/******************************************************************************
|
||||
* binary patching infrastructure
|
||||
*/
|
||||
#define PARAVIRT_PATCH_TYPE_FC 1
|
||||
#define PARAVIRT_PATCH_TYPE_THASH 2
|
||||
#define PARAVIRT_PATCH_TYPE_GET_CPUID 3
|
||||
#define PARAVIRT_PATCH_TYPE_GET_PMD 4
|
||||
#define PARAVIRT_PATCH_TYPE_PTCGA 5
|
||||
#define PARAVIRT_PATCH_TYPE_GET_RR 6
|
||||
#define PARAVIRT_PATCH_TYPE_SET_RR 7
|
||||
#define PARAVIRT_PATCH_TYPE_SET_RR0_TO_RR4 8
|
||||
#define PARAVIRT_PATCH_TYPE_SSM_I 9
|
||||
#define PARAVIRT_PATCH_TYPE_RSM_I 10
|
||||
#define PARAVIRT_PATCH_TYPE_GET_PSR_I 11
|
||||
#define PARAVIRT_PATCH_TYPE_INTRIN_LOCAL_IRQ_RESTORE 12
|
||||
|
||||
/* PARAVIRT_PATY_TYPE_[GS]ETREG + _IA64_REG_xxx */
|
||||
#define PARAVIRT_PATCH_TYPE_GETREG 0x10000000
|
||||
#define PARAVIRT_PATCH_TYPE_SETREG 0x20000000
|
||||
|
||||
/*
|
||||
* struct task_struct* (*ia64_switch_to)(void* next_task);
|
||||
* void *ia64_leave_syscall;
|
||||
* void *ia64_work_processed_syscall
|
||||
* void *ia64_leave_kernel;
|
||||
*/
|
||||
|
||||
#define PARAVIRT_PATCH_TYPE_BR_START 0x30000000
|
||||
#define PARAVIRT_PATCH_TYPE_BR_SWITCH_TO \
|
||||
(PARAVIRT_PATCH_TYPE_BR_START + 0)
|
||||
#define PARAVIRT_PATCH_TYPE_BR_LEAVE_SYSCALL \
|
||||
(PARAVIRT_PATCH_TYPE_BR_START + 1)
|
||||
#define PARAVIRT_PATCH_TYPE_BR_WORK_PROCESSED_SYSCALL \
|
||||
(PARAVIRT_PATCH_TYPE_BR_START + 2)
|
||||
#define PARAVIRT_PATCH_TYPE_BR_LEAVE_KERNEL \
|
||||
(PARAVIRT_PATCH_TYPE_BR_START + 3)
|
||||
|
||||
#ifdef ASM_SUPPORTED
|
||||
#include <asm/paravirt_patch.h>
|
||||
|
||||
/*
|
||||
* pv_cpu_ops calling stub.
|
||||
* normal function call convention can't be written by gcc
|
||||
* inline assembly.
|
||||
*
|
||||
* from the caller's point of view,
|
||||
* the following registers will be clobbered.
|
||||
* r2, r3
|
||||
* r8-r15
|
||||
* r16, r17
|
||||
* b6, b7
|
||||
* p6-p15
|
||||
* ar.ccv
|
||||
*
|
||||
* from the callee's point of view ,
|
||||
* the following registers can be used.
|
||||
* r2, r3: scratch
|
||||
* r8: scratch, input argument0 and return value
|
||||
* r0-r15: scratch, input argument1-5
|
||||
* b6: return pointer
|
||||
* b7: scratch
|
||||
* p6-p15: scratch
|
||||
* ar.ccv: scratch
|
||||
*
|
||||
* other registers must not be changed. especially
|
||||
* b0: rp: preserved. gcc ignores b0 in clobbered register.
|
||||
* r16: saved gp
|
||||
*/
|
||||
/* 5 bundles */
|
||||
#define __PARAVIRT_BR \
|
||||
";;\n" \
|
||||
"{ .mlx\n" \
|
||||
"nop 0\n" \
|
||||
"movl r2 = %[op_addr]\n"/* get function pointer address */ \
|
||||
";;\n" \
|
||||
"}\n" \
|
||||
"1:\n" \
|
||||
"{ .mii\n" \
|
||||
"ld8 r2 = [r2]\n" /* load function descriptor address */ \
|
||||
"mov r17 = ip\n" /* get ip to calc return address */ \
|
||||
"mov r16 = gp\n" /* save gp */ \
|
||||
";;\n" \
|
||||
"}\n" \
|
||||
"{ .mii\n" \
|
||||
"ld8 r3 = [r2], 8\n" /* load entry address */ \
|
||||
"adds r17 = 1f - 1b, r17\n" /* calculate return address */ \
|
||||
";;\n" \
|
||||
"mov b7 = r3\n" /* set entry address */ \
|
||||
"}\n" \
|
||||
"{ .mib\n" \
|
||||
"ld8 gp = [r2]\n" /* load gp value */ \
|
||||
"mov b6 = r17\n" /* set return address */ \
|
||||
"br.cond.sptk.few b7\n" /* intrinsics are very short isns */ \
|
||||
"}\n" \
|
||||
"1:\n" \
|
||||
"{ .mii\n" \
|
||||
"mov gp = r16\n" /* restore gp value */ \
|
||||
"nop 0\n" \
|
||||
"nop 0\n" \
|
||||
";;\n" \
|
||||
"}\n"
|
||||
|
||||
#define PARAVIRT_OP(op) \
|
||||
[op_addr] "i"(&pv_cpu_ops.op)
|
||||
|
||||
#define PARAVIRT_TYPE(type) \
|
||||
PARAVIRT_PATCH_TYPE_ ## type
|
||||
|
||||
#define PARAVIRT_REG_CLOBBERS0 \
|
||||
"r2", "r3", /*"r8",*/ "r9", "r10", "r11", "r14", \
|
||||
"r15", "r16", "r17"
|
||||
|
||||
#define PARAVIRT_REG_CLOBBERS1 \
|
||||
"r2","r3", /*"r8",*/ "r9", "r10", "r11", "r14", \
|
||||
"r15", "r16", "r17"
|
||||
|
||||
#define PARAVIRT_REG_CLOBBERS2 \
|
||||
"r2", "r3", /*"r8", "r9",*/ "r10", "r11", "r14", \
|
||||
"r15", "r16", "r17"
|
||||
|
||||
#define PARAVIRT_REG_CLOBBERS5 \
|
||||
"r2", "r3", /*"r8", "r9", "r10", "r11", "r14",*/ \
|
||||
"r15", "r16", "r17"
|
||||
|
||||
#define PARAVIRT_BR_CLOBBERS \
|
||||
"b6", "b7"
|
||||
|
||||
#define PARAVIRT_PR_CLOBBERS \
|
||||
"p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15"
|
||||
|
||||
#define PARAVIRT_AR_CLOBBERS \
|
||||
"ar.ccv"
|
||||
|
||||
#define PARAVIRT_CLOBBERS0 \
|
||||
PARAVIRT_REG_CLOBBERS0, \
|
||||
PARAVIRT_BR_CLOBBERS, \
|
||||
PARAVIRT_PR_CLOBBERS, \
|
||||
PARAVIRT_AR_CLOBBERS, \
|
||||
"memory"
|
||||
|
||||
#define PARAVIRT_CLOBBERS1 \
|
||||
PARAVIRT_REG_CLOBBERS1, \
|
||||
PARAVIRT_BR_CLOBBERS, \
|
||||
PARAVIRT_PR_CLOBBERS, \
|
||||
PARAVIRT_AR_CLOBBERS, \
|
||||
"memory"
|
||||
|
||||
#define PARAVIRT_CLOBBERS2 \
|
||||
PARAVIRT_REG_CLOBBERS2, \
|
||||
PARAVIRT_BR_CLOBBERS, \
|
||||
PARAVIRT_PR_CLOBBERS, \
|
||||
PARAVIRT_AR_CLOBBERS, \
|
||||
"memory"
|
||||
|
||||
#define PARAVIRT_CLOBBERS5 \
|
||||
PARAVIRT_REG_CLOBBERS5, \
|
||||
PARAVIRT_BR_CLOBBERS, \
|
||||
PARAVIRT_PR_CLOBBERS, \
|
||||
PARAVIRT_AR_CLOBBERS, \
|
||||
"memory"
|
||||
|
||||
#define PARAVIRT_BR0(op, type) \
|
||||
register unsigned long ia64_clobber asm ("r8"); \
|
||||
asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \
|
||||
PARAVIRT_TYPE(type)) \
|
||||
: "=r"(ia64_clobber) \
|
||||
: PARAVIRT_OP(op) \
|
||||
: PARAVIRT_CLOBBERS0)
|
||||
|
||||
#define PARAVIRT_BR0_RET(op, type) \
|
||||
register unsigned long ia64_intri_res asm ("r8"); \
|
||||
asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \
|
||||
PARAVIRT_TYPE(type)) \
|
||||
: "=r"(ia64_intri_res) \
|
||||
: PARAVIRT_OP(op) \
|
||||
: PARAVIRT_CLOBBERS0)
|
||||
|
||||
#define PARAVIRT_BR1(op, type, arg1) \
|
||||
register unsigned long __##arg1 asm ("r8") = arg1; \
|
||||
register unsigned long ia64_clobber asm ("r8"); \
|
||||
asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \
|
||||
PARAVIRT_TYPE(type)) \
|
||||
: "=r"(ia64_clobber) \
|
||||
: PARAVIRT_OP(op), "0"(__##arg1) \
|
||||
: PARAVIRT_CLOBBERS1)
|
||||
|
||||
#define PARAVIRT_BR1_RET(op, type, arg1) \
|
||||
register unsigned long ia64_intri_res asm ("r8"); \
|
||||
register unsigned long __##arg1 asm ("r8") = arg1; \
|
||||
asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \
|
||||
PARAVIRT_TYPE(type)) \
|
||||
: "=r"(ia64_intri_res) \
|
||||
: PARAVIRT_OP(op), "0"(__##arg1) \
|
||||
: PARAVIRT_CLOBBERS1)
|
||||
|
||||
#define PARAVIRT_BR1_VOID(op, type, arg1) \
|
||||
register void *__##arg1 asm ("r8") = arg1; \
|
||||
register unsigned long ia64_clobber asm ("r8"); \
|
||||
asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \
|
||||
PARAVIRT_TYPE(type)) \
|
||||
: "=r"(ia64_clobber) \
|
||||
: PARAVIRT_OP(op), "0"(__##arg1) \
|
||||
: PARAVIRT_CLOBBERS1)
|
||||
|
||||
#define PARAVIRT_BR2(op, type, arg1, arg2) \
|
||||
register unsigned long __##arg1 asm ("r8") = arg1; \
|
||||
register unsigned long __##arg2 asm ("r9") = arg2; \
|
||||
register unsigned long ia64_clobber1 asm ("r8"); \
|
||||
register unsigned long ia64_clobber2 asm ("r9"); \
|
||||
asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \
|
||||
PARAVIRT_TYPE(type)) \
|
||||
: "=r"(ia64_clobber1), "=r"(ia64_clobber2) \
|
||||
: PARAVIRT_OP(op), "0"(__##arg1), "1"(__##arg2) \
|
||||
: PARAVIRT_CLOBBERS2)
|
||||
|
||||
|
||||
#define PARAVIRT_DEFINE_CPU_OP0(op, type) \
|
||||
static inline void \
|
||||
paravirt_ ## op (void) \
|
||||
{ \
|
||||
PARAVIRT_BR0(op, type); \
|
||||
}
|
||||
|
||||
#define PARAVIRT_DEFINE_CPU_OP0_RET(op, type) \
|
||||
static inline unsigned long \
|
||||
paravirt_ ## op (void) \
|
||||
{ \
|
||||
PARAVIRT_BR0_RET(op, type); \
|
||||
return ia64_intri_res; \
|
||||
}
|
||||
|
||||
#define PARAVIRT_DEFINE_CPU_OP1_VOID(op, type) \
|
||||
static inline void \
|
||||
paravirt_ ## op (void *arg1) \
|
||||
{ \
|
||||
PARAVIRT_BR1_VOID(op, type, arg1); \
|
||||
}
|
||||
|
||||
#define PARAVIRT_DEFINE_CPU_OP1(op, type) \
|
||||
static inline void \
|
||||
paravirt_ ## op (unsigned long arg1) \
|
||||
{ \
|
||||
PARAVIRT_BR1(op, type, arg1); \
|
||||
}
|
||||
|
||||
#define PARAVIRT_DEFINE_CPU_OP1_RET(op, type) \
|
||||
static inline unsigned long \
|
||||
paravirt_ ## op (unsigned long arg1) \
|
||||
{ \
|
||||
PARAVIRT_BR1_RET(op, type, arg1); \
|
||||
return ia64_intri_res; \
|
||||
}
|
||||
|
||||
#define PARAVIRT_DEFINE_CPU_OP2(op, type) \
|
||||
static inline void \
|
||||
paravirt_ ## op (unsigned long arg1, \
|
||||
unsigned long arg2) \
|
||||
{ \
|
||||
PARAVIRT_BR2(op, type, arg1, arg2); \
|
||||
}
|
||||
|
||||
|
||||
PARAVIRT_DEFINE_CPU_OP1_VOID(fc, FC);
|
||||
PARAVIRT_DEFINE_CPU_OP1_RET(thash, THASH)
|
||||
PARAVIRT_DEFINE_CPU_OP1_RET(get_cpuid, GET_CPUID)
|
||||
PARAVIRT_DEFINE_CPU_OP1_RET(get_pmd, GET_PMD)
|
||||
PARAVIRT_DEFINE_CPU_OP2(ptcga, PTCGA)
|
||||
PARAVIRT_DEFINE_CPU_OP1_RET(get_rr, GET_RR)
|
||||
PARAVIRT_DEFINE_CPU_OP2(set_rr, SET_RR)
|
||||
PARAVIRT_DEFINE_CPU_OP0(ssm_i, SSM_I)
|
||||
PARAVIRT_DEFINE_CPU_OP0(rsm_i, RSM_I)
|
||||
PARAVIRT_DEFINE_CPU_OP0_RET(get_psr_i, GET_PSR_I)
|
||||
PARAVIRT_DEFINE_CPU_OP1(intrin_local_irq_restore, INTRIN_LOCAL_IRQ_RESTORE)
|
||||
|
||||
static inline void
|
||||
paravirt_set_rr0_to_rr4(unsigned long val0, unsigned long val1,
|
||||
unsigned long val2, unsigned long val3,
|
||||
unsigned long val4)
|
||||
{
|
||||
register unsigned long __val0 asm ("r8") = val0;
|
||||
register unsigned long __val1 asm ("r9") = val1;
|
||||
register unsigned long __val2 asm ("r10") = val2;
|
||||
register unsigned long __val3 asm ("r11") = val3;
|
||||
register unsigned long __val4 asm ("r14") = val4;
|
||||
|
||||
register unsigned long ia64_clobber0 asm ("r8");
|
||||
register unsigned long ia64_clobber1 asm ("r9");
|
||||
register unsigned long ia64_clobber2 asm ("r10");
|
||||
register unsigned long ia64_clobber3 asm ("r11");
|
||||
register unsigned long ia64_clobber4 asm ("r14");
|
||||
|
||||
asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,
|
||||
PARAVIRT_TYPE(SET_RR0_TO_RR4))
|
||||
: "=r"(ia64_clobber0),
|
||||
"=r"(ia64_clobber1),
|
||||
"=r"(ia64_clobber2),
|
||||
"=r"(ia64_clobber3),
|
||||
"=r"(ia64_clobber4)
|
||||
: PARAVIRT_OP(set_rr0_to_rr4),
|
||||
"0"(__val0), "1"(__val1), "2"(__val2),
|
||||
"3"(__val3), "4"(__val4)
|
||||
: PARAVIRT_CLOBBERS5);
|
||||
}
|
||||
|
||||
/* unsigned long paravirt_getreg(int reg) */
|
||||
#define __paravirt_getreg(reg) \
|
||||
({ \
|
||||
register unsigned long ia64_intri_res asm ("r8"); \
|
||||
register unsigned long __reg asm ("r8") = (reg); \
|
||||
\
|
||||
asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \
|
||||
PARAVIRT_TYPE(GETREG) \
|
||||
+ (reg)) \
|
||||
: "=r"(ia64_intri_res) \
|
||||
: PARAVIRT_OP(getreg), "0"(__reg) \
|
||||
: PARAVIRT_CLOBBERS1); \
|
||||
\
|
||||
ia64_intri_res; \
|
||||
})
|
||||
|
||||
/* void paravirt_setreg(int reg, unsigned long val) */
|
||||
#define paravirt_setreg(reg, val) \
|
||||
do { \
|
||||
register unsigned long __val asm ("r8") = val; \
|
||||
register unsigned long __reg asm ("r9") = reg; \
|
||||
register unsigned long ia64_clobber1 asm ("r8"); \
|
||||
register unsigned long ia64_clobber2 asm ("r9"); \
|
||||
\
|
||||
asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \
|
||||
PARAVIRT_TYPE(SETREG) \
|
||||
+ (reg)) \
|
||||
: "=r"(ia64_clobber1), \
|
||||
"=r"(ia64_clobber2) \
|
||||
: PARAVIRT_OP(setreg), \
|
||||
"1"(__reg), "0"(__val) \
|
||||
: PARAVIRT_CLOBBERS2); \
|
||||
} while (0)
|
||||
|
||||
#endif /* ASM_SUPPORTED */
|
||||
#endif /* CONFIG_PARAVIRT && ASM_SUPPORTED */
|
||||
|
||||
#endif /* _ASM_IA64_PARAVIRT_PRIVOP_H */
|
||||
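The PARAVIRT_DEFINE_CPU_OP* macros above stamp out one tiny stub per pv_cpu_ops hook; each stub body is just the five-bundle __PARAVIRT_BR branch sequence with the argument and result pinned to r8, which is what makes the call site patchable in place later. For readability, here is what PARAVIRT_DEFINE_CPU_OP1_RET(thash, THASH) expands to, reconstructed by hand from the macros above (ia64-only, and it needs the surrounding definitions to compile):

```c
static inline unsigned long
paravirt_thash(unsigned long arg1)
{
	register unsigned long ia64_intri_res asm ("r8");
	register unsigned long __arg1 asm ("r8") = arg1;

	/* The branch stub is tagged PARAVIRT_PATCH_TYPE_THASH so the boot-time
	 * patcher can later replace it with the hypervisor's inline sequence. */
	asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,
					  PARAVIRT_PATCH_TYPE_THASH)
		      : "=r"(ia64_intri_res)
		      : [op_addr] "i"(&pv_cpu_ops.thash), "0"(__arg1)
		      : PARAVIRT_CLOBBERS1);
	return ia64_intri_res;
}
```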
@@ -9,7 +9,7 @@ endif
 extra-y	:= head.o init_task.o vmlinux.lds
 
 obj-y := entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o	\
-	 irq_lsapic.o ivt.o machvec.o pal.o paravirt_patchlist.o patch.o process.o perfmon.o ptrace.o sal.o \
+	 irq_lsapic.o ivt.o machvec.o pal.o patch.o process.o perfmon.o ptrace.o sal.o \
 	 salinfo.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \
 	 unwind.o mca.o mca_asm.o topology.o dma-mapping.o
 
@@ -35,9 +35,6 @@ mca_recovery-y			+= mca_drv.o mca_drv_asm.o
 obj-$(CONFIG_IA64_MC_ERR_INJECT)+= err_inject.o
 obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
 
-obj-$(CONFIG_PARAVIRT)		+= paravirt.o paravirtentry.o \
-				   paravirt_patch.o
-
 obj-$(CONFIG_IA64_ESI)		+= esi.o
 ifneq ($(CONFIG_IA64_ESI),)
 obj-y				+= esi_stub.o	# must be in kernel proper
@@ -52,8 +49,6 @@ CFLAGS_traps.o += -mfixed-range=f2-f5,f16-f31
 
 # The gate DSO image is built using a special linker script.
 include $(src)/Makefile.gate
-# tell compiled for native
-CPPFLAGS_gate.lds += -D__IA64_GATE_PARAVIRTUALIZED_NATIVE
 
 # Calculate NR_IRQ = max(IA64_NATIVE_NR_IRQS, XEN_NR_IRQS, ...) based on config
 define sed-y
@@ -84,30 +79,3 @@ arch/$(SRCARCH)/kernel/nr-irqs.s: arch/$(SRCARCH)/kernel/nr-irqs.c
 include/generated/nr-irqs.h: arch/$(SRCARCH)/kernel/nr-irqs.s
 	$(Q)mkdir -p $(dir $@)
 	$(call cmd,nr_irqs)
-
-#
-# native ivt.S, entry.S and fsys.S
-#
-ASM_PARAVIRT_OBJS = ivt.o entry.o fsys.o
-define paravirtualized_native
-AFLAGS_$(1) += -D__IA64_ASM_PARAVIRTUALIZED_NATIVE
-AFLAGS_pvchk-sed-$(1) += -D__IA64_ASM_PARAVIRTUALIZED_PVCHECK
-extra-y += pvchk-$(1)
-endef
-$(foreach obj,$(ASM_PARAVIRT_OBJS),$(eval $(call paravirtualized_native,$(obj))))
-
-#
-# Checker for paravirtualizations of privileged operations.
-#
-quiet_cmd_pv_check_sed = PVCHK   $@
-define cmd_pv_check_sed
-	sed -f $(srctree)/arch/$(SRCARCH)/scripts/pvcheck.sed $< > $@
-endef
-
-$(obj)/pvchk-sed-%.s: $(src)/%.S $(srctree)/arch/$(SRCARCH)/scripts/pvcheck.sed FORCE
-	$(call if_changed_dep,as_s_S)
-$(obj)/pvchk-%.s: $(obj)/pvchk-sed-%.s FORCE
-	$(call if_changed,pv_check_sed)
-$(obj)/pvchk-%.o: $(obj)/pvchk-%.s FORCE
-	$(call if_changed,as_o_S)
-.PRECIOUS: $(obj)/pvchk-sed-%.s $(obj)/pvchk-%.s $(obj)/pvchk-%.o
@@ -464,7 +464,6 @@ efi_map_pal_code (void)
 		 GRANULEROUNDDOWN((unsigned long) pal_vaddr),
 		 pte_val(pfn_pte(__pa(pal_vaddr) >> PAGE_SHIFT, PAGE_KERNEL)),
 		 IA64_GRANULE_SHIFT);
-	paravirt_dv_serialize_data();
 	ia64_set_psr(psr);		/* restore psr */
 }
 
@@ -51,7 +51,6 @@
|
||||
|
||||
#include "minstate.h"
|
||||
|
||||
#ifdef __IA64_ASM_PARAVIRTUALIZED_NATIVE
|
||||
/*
|
||||
* execve() is special because in case of success, we need to
|
||||
* setup a null register window frame.
|
||||
@@ -161,7 +160,6 @@ GLOBAL_ENTRY(sys_clone)
|
||||
mov rp=loc0
|
||||
br.ret.sptk.many rp
|
||||
END(sys_clone)
|
||||
#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
|
||||
|
||||
/*
|
||||
* prev_task <- ia64_switch_to(struct task_struct *next)
|
||||
@@ -169,7 +167,7 @@ END(sys_clone)
|
||||
* called. The code starting at .map relies on this. The rest of the code
|
||||
* doesn't care about the interrupt masking status.
|
||||
*/
|
||||
GLOBAL_ENTRY(__paravirt_switch_to)
|
||||
GLOBAL_ENTRY(ia64_switch_to)
|
||||
.prologue
|
||||
alloc r16=ar.pfs,1,0,0,0
|
||||
DO_SAVE_SWITCH_STACK
|
||||
@@ -221,9 +219,8 @@ GLOBAL_ENTRY(__paravirt_switch_to)
|
||||
itr.d dtr[r25]=r23 // wire in new mapping...
|
||||
SSM_PSR_IC_AND_SRLZ_D(r8, r9) // reenable the psr.ic bit
|
||||
br.cond.sptk .done
|
||||
END(__paravirt_switch_to)
|
||||
END(ia64_switch_to)
|
||||
|
||||
#ifdef __IA64_ASM_PARAVIRTUALIZED_NATIVE
|
||||
/*
|
||||
* Note that interrupts are enabled during save_switch_stack and load_switch_stack. This
|
||||
* means that we may get an interrupt with "sp" pointing to the new kernel stack while
|
||||
@@ -639,16 +636,8 @@ GLOBAL_ENTRY(ia64_ret_from_syscall)
|
||||
adds r2=PT(R8)+16,sp // r2 = &pt_regs.r8
|
||||
mov r10=r0 // clear error indication in r10
|
||||
(p7) br.cond.spnt handle_syscall_error // handle potential syscall failure
|
||||
#ifdef CONFIG_PARAVIRT
|
||||
;;
|
||||
br.cond.sptk.few ia64_leave_syscall
|
||||
;;
|
||||
#endif /* CONFIG_PARAVIRT */
|
||||
END(ia64_ret_from_syscall)
|
||||
#ifndef CONFIG_PARAVIRT
|
||||
// fall through
|
||||
#endif
|
||||
#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
|
||||
|
||||
/*
|
||||
* ia64_leave_syscall(): Same as ia64_leave_kernel, except that it doesn't
|
||||
@@ -694,7 +683,7 @@ END(ia64_ret_from_syscall)
|
||||
* ar.csd: cleared
|
||||
* ar.ssd: cleared
|
||||
*/
|
||||
GLOBAL_ENTRY(__paravirt_leave_syscall)
|
||||
GLOBAL_ENTRY(ia64_leave_syscall)
|
||||
PT_REGS_UNWIND_INFO(0)
|
||||
/*
|
||||
* work.need_resched etc. mustn't get changed by this CPU before it returns to
|
||||
@@ -722,8 +711,8 @@ GLOBAL_ENTRY(__paravirt_leave_syscall)
|
||||
cmp.eq pLvSys,p0=r0,r0 // pLvSys=1: leave from syscall
|
||||
(pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk
|
||||
#endif
|
||||
.global __paravirt_work_processed_syscall;
|
||||
__paravirt_work_processed_syscall:
|
||||
.global ia64_work_processed_syscall;
|
||||
ia64_work_processed_syscall:
|
||||
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
|
||||
adds r2=PT(LOADRS)+16,r12
|
||||
MOV_FROM_ITC(pUStk, p9, r22, r19) // fetch time at leave
|
||||
@@ -836,9 +825,9 @@ __paravirt_work_processed_syscall:
|
||||
mov.m ar.ssd=r0 // M2 clear ar.ssd
|
||||
mov f11=f0 // F clear f11
|
||||
br.cond.sptk.many rbs_switch // B
|
||||
END(__paravirt_leave_syscall)
|
||||
END(ia64_leave_syscall)
|
||||
|
||||
GLOBAL_ENTRY(__paravirt_leave_kernel)
|
||||
GLOBAL_ENTRY(ia64_leave_kernel)
|
||||
PT_REGS_UNWIND_INFO(0)
|
||||
/*
|
||||
* work.need_resched etc. mustn't get changed by this CPU before it returns to
|
||||
@@ -1171,26 +1160,25 @@ skip_rbs_switch:
|
||||
(p6) br.cond.sptk.few .notify
|
||||
br.call.spnt.many rp=preempt_schedule_irq
|
||||
.ret9: cmp.eq p6,p0=r0,r0 // p6 <- 1 (re-check)
|
||||
(pLvSys)br.cond.sptk.few __paravirt_pending_syscall_end
|
||||
(pLvSys)br.cond.sptk.few ia64_work_pending_syscall_end
|
||||
br.cond.sptk.many .work_processed_kernel
|
||||
|
||||
.notify:
|
||||
(pUStk) br.call.spnt.many rp=notify_resume_user
|
||||
.ret10: cmp.ne p6,p0=r0,r0 // p6 <- 0 (don't re-check)
|
||||
(pLvSys)br.cond.sptk.few __paravirt_pending_syscall_end
|
||||
(pLvSys)br.cond.sptk.few ia64_work_pending_syscall_end
|
||||
br.cond.sptk.many .work_processed_kernel
|
||||
|
||||
.global __paravirt_pending_syscall_end;
|
||||
__paravirt_pending_syscall_end:
|
||||
.global ia64_work_pending_syscall_end;
|
||||
ia64_work_pending_syscall_end:
|
||||
adds r2=PT(R8)+16,r12
|
||||
adds r3=PT(R10)+16,r12
|
||||
;;
|
||||
ld8 r8=[r2]
|
||||
ld8 r10=[r3]
|
||||
br.cond.sptk.many __paravirt_work_processed_syscall_target
|
||||
END(__paravirt_leave_kernel)
|
||||
br.cond.sptk.many ia64_work_processed_syscall
|
||||
END(ia64_leave_kernel)
|
||||
|
||||
#ifdef __IA64_ASM_PARAVIRTUALIZED_NATIVE
|
||||
ENTRY(handle_syscall_error)
|
||||
/*
|
||||
* Some system calls (e.g., ptrace, mmap) can return arbitrary values which could
|
||||
@@ -1294,7 +1282,7 @@ ENTRY(sys_rt_sigreturn)
|
||||
adds sp=16,sp
|
||||
;;
|
||||
ld8 r9=[sp] // load new ar.unat
|
||||
mov.sptk b7=r8,ia64_native_leave_kernel
|
||||
mov.sptk b7=r8,ia64_leave_kernel
|
||||
;;
|
||||
mov ar.unat=r9
|
||||
br.many b7
|
||||
@@ -1782,4 +1770,3 @@ sys_call_table:
|
||||
data8 sys_execveat
|
||||
|
||||
.org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls
|
||||
#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
|
||||
|
||||
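The entry.S hunks above are almost entirely renames: the generic __paravirt_* entry points disappear and the ia64_* names become the only implementation, because entry.S is no longer assembled in multiple flavors (once with -D__IA64_ASM_PARAVIRTUALIZED_NATIVE for the ia64_native_* variant, once through the pvchk sed pass) as the deleted Makefile rules arranged. The multi-flavor trick in miniature, as a hedged C-preprocessor illustration:

```c
/*
 * One source file, two symbol flavors, selected by a -D flag on the
 * command line; BUILD_PARAVIRTUALIZED_NATIVE is an illustrative stand-in
 * for the removed __IA64_ASM_PARAVIRTUALIZED_NATIVE define.
 */
#ifdef BUILD_PARAVIRTUALIZED_NATIVE
#define FLAVOR(sym)	ia64_native_ ## sym
#else
#define FLAVOR(sym)	__paravirt_ ## sym
#endif

long FLAVOR(leave_syscall)(void)
{
	/* ... body shared by both flavors ... */
	return 0;
}
```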
@@ -24,7 +24,7 @@
#include <asm/unistd.h>

#include "entry.h"
#include "paravirt_inst.h"
#include <asm/native/inst.h>

/*
 * See Documentation/ia64/fsys.txt for details on fsyscalls.
@@ -402,7 +402,7 @@ ENTRY(fsys_fallback_syscall)
mov r26=ar.pfs
END(fsys_fallback_syscall)
/* FALL THROUGH */
GLOBAL_ENTRY(paravirt_fsys_bubble_down)
GLOBAL_ENTRY(fsys_bubble_down)
.prologue
.altrp b6
.body
@@ -440,7 +440,7 @@ GLOBAL_ENTRY(paravirt_fsys_bubble_down)
 *
 * PSR.BE : already is turned off in __kernel_syscall_via_epc()
 * PSR.AC : don't care (kernel normally turns PSR.AC on)
 * PSR.I : already turned off by the time paravirt_fsys_bubble_down gets
 * PSR.I : already turned off by the time fsys_bubble_down gets
 * invoked
 * PSR.DFL: always 0 (kernel never turns it on)
 * PSR.DFH: don't care --- kernel never touches f32-f127 on its own
@@ -450,7 +450,7 @@ GLOBAL_ENTRY(paravirt_fsys_bubble_down)
 * PSR.DB : don't care --- kernel never enables kernel-level
 * breakpoints
 * PSR.TB : must be 0 already; if it wasn't zero on entry to
 * __kernel_syscall_via_epc, the branch to paravirt_fsys_bubble_down
 * __kernel_syscall_via_epc, the branch to fsys_bubble_down
 * will trigger a taken branch; the taken-trap-handler then
 * converts the syscall into a break-based system-call.
 */
@@ -541,14 +541,14 @@ GLOBAL_ENTRY(paravirt_fsys_bubble_down)
nop.m 0
(p8) br.call.sptk.many b6=b6 // B (ignore return address)
br.cond.spnt ia64_trace_syscall // B
END(paravirt_fsys_bubble_down)
END(fsys_bubble_down)

.rodata
.align 8
.globl paravirt_fsyscall_table
.globl fsyscall_table

data8 paravirt_fsys_bubble_down
paravirt_fsyscall_table:
data8 fsys_bubble_down
fsyscall_table:
data8 fsys_ni_syscall
data8 0 // exit // 1025
data8 0 // read
@@ -833,4 +833,4 @@ paravirt_fsyscall_table:

// fill in zeros for the remaining entries
.zero:
.space paravirt_fsyscall_table + 8*NR_syscalls - .zero, 0
.space fsyscall_table + 8*NR_syscalls - .zero, 0

@@ -14,7 +14,7 @@
#include <asm/unistd.h>
#include <asm/kregs.h>
#include <asm/page.h>
#include "paravirt_inst.h"
#include <asm/native/inst.h>

/*
 * We can't easily refer to symbols inside the kernel. To avoid full runtime relocation,
@@ -376,11 +376,4 @@ GLOBAL_ENTRY(__kernel_syscall_via_epc)
(p9) mov r8=ENOSYS
FSYS_RETURN

#ifdef CONFIG_PARAVIRT
/*
 * padd to make the size of this symbol constant
 * independent of paravirtualization.
 */
.align PAGE_SIZE / 8
#endif
END(__kernel_syscall_via_epc)

@@ -6,7 +6,6 @@
 */

#include <asm/page.h>
#include "paravirt_patchlist.h"

SECTIONS
{
@@ -33,21 +32,21 @@ SECTIONS
. = GATE_ADDR + 0x600;

.data..patch : {
__paravirt_start_gate_mckinley_e9_patchlist = .;
__start_gate_mckinley_e9_patchlist = .;
*(.data..patch.mckinley_e9)
__paravirt_end_gate_mckinley_e9_patchlist = .;
__end_gate_mckinley_e9_patchlist = .;

__paravirt_start_gate_vtop_patchlist = .;
__start_gate_vtop_patchlist = .;
*(.data..patch.vtop)
__paravirt_end_gate_vtop_patchlist = .;
__end_gate_vtop_patchlist = .;

__paravirt_start_gate_fsyscall_patchlist = .;
__start_gate_fsyscall_patchlist = .;
*(.data..patch.fsyscall_table)
__paravirt_end_gate_fsyscall_patchlist = .;
__end_gate_fsyscall_patchlist = .;

__paravirt_start_gate_brl_fsys_bubble_down_patchlist = .;
__start_gate_brl_fsys_bubble_down_patchlist = .;
*(.data..patch.brl_fsys_bubble_down)
__paravirt_end_gate_brl_fsys_bubble_down_patchlist = .;
__end_gate_brl_fsys_bubble_down_patchlist = .;
} :readable

.IA_64.unwind_info : { *(.IA_64.unwind_info*) }

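The start/end symbol pairs defined by this linker script are the usual way such patch lists get consumed at runtime: code walks the region between the two markers and applies one fixup per entry. A minimal C sketch of that pattern follows, assuming a hypothetical one-word entry layout; it is illustrative only and is not the actual ia64 gate-patching code.

	/* Hypothetical entry layout; the real .data..patch.* entries are target-specific. */
	struct gate_patch_entry {
		unsigned long addr;	/* location inside the gate page to fix up */
	};

	/* Marker symbols provided by the linker script above. */
	extern struct gate_patch_entry __start_gate_fsyscall_patchlist[];
	extern struct gate_patch_entry __end_gate_fsyscall_patchlist[];

	static unsigned int count_gate_fsyscall_patches(void)
	{
		struct gate_patch_entry *p;
		unsigned int n = 0;

		/* The linker collects every .data..patch.fsyscall_table entry
		 * between the two markers, so iterating the span visits each
		 * patch site exactly once. */
		for (p = __start_gate_fsyscall_patchlist;
		     p < __end_gate_fsyscall_patchlist; p++)
			n++;	/* real code would apply a fixup at p->addr */

		return n;
	}
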
@@ -26,7 +26,6 @@
#include <asm/mmu_context.h>
#include <asm/asm-offsets.h>
#include <asm/pal.h>
#include <asm/paravirt.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
@@ -394,41 +393,6 @@ start_ap:
;;
(isBP) st8 [r2]=r28 // save the address of the boot param area passed by the bootloader

#ifdef CONFIG_PARAVIRT

movl r14=hypervisor_setup_hooks
movl r15=hypervisor_type
mov r16=num_hypervisor_hooks
;;
ld8 r2=[r15]
;;
cmp.ltu p7,p0=r2,r16 // array size check
shladd r8=r2,3,r14
;;
(p7) ld8 r9=[r8]
;;
(p7) mov b1=r9
(p7) cmp.ne.unc p7,p0=r9,r0 // no actual branch to NULL
;;
(p7) br.call.sptk.many rp=b1

__INITDATA

default_setup_hook = 0 // Currently nothing needs to be done.

.global hypervisor_type
hypervisor_type:
data8 PARAVIRT_HYPERVISOR_TYPE_DEFAULT

// must have the same order with PARAVIRT_HYPERVISOR_TYPE_xxx

hypervisor_setup_hooks:
data8 default_setup_hook
num_hypervisor_hooks = (. - hypervisor_setup_hooks) / 8
.previous

#endif

#ifdef CONFIG_SMP
(isAP) br.call.sptk.many rp=start_secondary
.ret0:
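For reference, the hypervisor setup-hook dispatch removed above amounts to the following logic, sketched in C purely for illustration: the identifiers mirror the assembly symbols, the real dispatch ran in assembly before any C environment existed, and num_hypervisor_hooks is an assembler constant rather than a variable.

	/* Illustrative C rendering of the removed hook dispatch, not real kernel code. */
	typedef void (*setup_hook_t)(void);

	extern setup_hook_t hypervisor_setup_hooks[];	/* data8 default_setup_hook, ... */
	extern unsigned long hypervisor_type;		/* PARAVIRT_HYPERVISOR_TYPE_DEFAULT by default */
	extern unsigned long num_hypervisor_hooks;	/* (. - hypervisor_setup_hooks) / 8 */

	static void run_hypervisor_setup_hook(void)
	{
		unsigned long type = hypervisor_type;

		/* cmp.ltu: array size check; cmp.ne: never branch to a NULL hook */
		if (type < num_hypervisor_hooks && hypervisor_setup_hooks[type])
			hypervisor_setup_hooks[type]();
	}
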
@@ -1063,12 +1027,6 @@ GLOBAL_ENTRY(ia64_native_sched_clock)
shrp r8=r9,r8,IA64_NSEC_PER_CYC_SHIFT
br.ret.sptk.many rp
END(ia64_native_sched_clock)
#ifndef CONFIG_PARAVIRT
//unsigned long long
//sched_clock(void) __attribute__((alias("ia64_native_sched_clock")));
.global sched_clock
sched_clock = ia64_native_sched_clock
#endif

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
GLOBAL_ENTRY(cycle_to_cputime)

@@ -937,7 +937,6 @@ END(interrupt)
 * - ar.fpsr: set to kernel settings
 * - b6: preserved (same as on entry)
 */
#ifdef __IA64_ASM_PARAVIRTUALIZED_NATIVE
GLOBAL_ENTRY(ia64_syscall_setup)
#if PT(B6) != 0
# error This code assumes that b6 is the first field in pt_regs.
@@ -1029,7 +1028,6 @@ GLOBAL_ENTRY(ia64_syscall_setup)
(p10) mov r8=-EINVAL
br.ret.sptk.many b7
END(ia64_syscall_setup)
#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */

.org ia64_ivt+0x3c00
/////////////////////////////////////////////////////////////////////////////////////////
@@ -1043,7 +1041,7 @@ END(ia64_syscall_setup)
DBG_FAULT(16)
FAULT(16)

#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) && defined(__IA64_ASM_PARAVIRTUALIZED_NATIVE)
#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE)
/*
 * There is no particular reason for this code to be here, other than
 * that there happens to be space here that would go unused otherwise.

@@ -2,7 +2,7 @@
#include <asm/cache.h>

#include "entry.h"
#include "paravirt_inst.h"
#include <asm/native/inst.h>

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
/* read ar.itc in advance, and use it before leaving bank 0 */

@@ -439,14 +439,6 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
mod->arch.opd = s;
else if (strcmp(".IA_64.unwind", secstrings + s->sh_name) == 0)
mod->arch.unwind = s;
#ifdef CONFIG_PARAVIRT
else if (strcmp(".paravirt_bundles",
secstrings + s->sh_name) == 0)
mod->arch.paravirt_bundles = s;
else if (strcmp(".paravirt_insts",
secstrings + s->sh_name) == 0)
mod->arch.paravirt_insts = s;
#endif

if (!mod->arch.core_plt || !mod->arch.init_plt || !mod->arch.got || !mod->arch.opd) {
printk(KERN_ERR "%s: sections missing\n", mod->name);
@@ -914,30 +906,6 @@ module_finalize (const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mo
DEBUGP("%s: init: entry=%p\n", __func__, mod->init);
if (mod->arch.unwind)
register_unwind_table(mod);
#ifdef CONFIG_PARAVIRT
if (mod->arch.paravirt_bundles) {
struct paravirt_patch_site_bundle *start =
(struct paravirt_patch_site_bundle *)
mod->arch.paravirt_bundles->sh_addr;
struct paravirt_patch_site_bundle *end =
(struct paravirt_patch_site_bundle *)
(mod->arch.paravirt_bundles->sh_addr +
mod->arch.paravirt_bundles->sh_size);

paravirt_patch_apply_bundle(start, end);
}
if (mod->arch.paravirt_insts) {
struct paravirt_patch_site_inst *start =
(struct paravirt_patch_site_inst *)
mod->arch.paravirt_insts->sh_addr;
struct paravirt_patch_site_inst *end =
(struct paravirt_patch_site_inst *)
(mod->arch.paravirt_insts->sh_addr +
mod->arch.paravirt_insts->sh_size);

paravirt_patch_apply_inst(start, end);
}
#endif
return 0;
}

Some files were not shown because too many files have changed in this diff.