x86: Fix common misspellings
They were generated by 'codespell' and then manually reviewed.

Signed-off-by: Lucas De Marchi <lucas.demarchi@profusion.mobi>
Cc: trivial@kernel.org
LKML-Reference: <1300389856-1099-3-git-send-email-lucas.demarchi@profusion.mobi>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit 0d2eb44f63 (parent a6c3270b04), committed by Ingo Molnar
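As the message notes, the fixes were generated with codespell and then reviewed by hand. For readers who want to reproduce a sweep like this, here is a hypothetical sketch — the commit records only that 'codespell' was used, so the flags and paths below are illustrative, not the author's exact command:

    # Apply suggested spelling fixes in place (-w / --write-changes),
    # limited to the x86 tree this patch touches.
    codespell -w arch/x86/
    # Inspect every change by hand; codespell has false positives,
    # especially around assembler mnemonics and identifiers.
    git diff
    # -s adds the Signed-off-by trailer seen in the message above.
    git commit -a -s -m 'x86: Fix common misspellings'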
@@ -326,7 +326,7 @@ config X86_PPRO_FENCE
 Old PentiumPro multiprocessor systems had errata that could cause
 memory operations to violate the x86 ordering standard in rare cases.
 Enabling this option will attempt to work around some (but not all)
-occurances of this problem, at the cost of much heavier spinlock and
+occurrences of this problem, at the cost of much heavier spinlock and
 memory barrier operations.

 If unsure, say n here. Even distro kernels should think twice before
@@ -1346,7 +1346,7 @@ _zero_cipher_left_decrypt:
 and $15, %r13 # %r13 = arg4 (mod 16)
 je _multiple_of_16_bytes_decrypt

-# Handle the last <16 byte block seperately
+# Handle the last <16 byte block separately

 paddd ONE(%rip), %xmm0 # increment CNT to get Yn
 movdqa SHUF_MASK(%rip), %xmm10
@@ -1355,7 +1355,7 @@ _zero_cipher_left_decrypt:
 ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # E(K, Yn)
 sub $16, %r11
 add %r13, %r11
-movdqu (%arg3,%r11,1), %xmm1 # recieve the last <16 byte block
+movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte block
 lea SHIFT_MASK+16(%rip), %r12
 sub %r13, %r12
 # adjust the shuffle mask pointer to be able to shift 16-%r13 bytes
@@ -1607,7 +1607,7 @@ _zero_cipher_left_encrypt:
 and $15, %r13 # %r13 = arg4 (mod 16)
 je _multiple_of_16_bytes_encrypt

-# Handle the last <16 Byte block seperately
+# Handle the last <16 Byte block separately
 paddd ONE(%rip), %xmm0 # INCR CNT to get Yn
 movdqa SHUF_MASK(%rip), %xmm10
 PSHUFB_XMM %xmm10, %xmm0
@@ -71,7 +71,7 @@ static inline void set_page_memtype(struct page *pg, unsigned long memtype) { }
 * Read/Write : ReadOnly, ReadWrite
 * Presence : NotPresent
 *
-* Within a catagory, the attributes are mutually exclusive.
+* Within a category, the attributes are mutually exclusive.
 *
 * The implementation of this API will take care of various aspects that
 * are associated with changing such attributes, such as:
@@ -29,8 +29,8 @@ void arch_trigger_all_cpu_backtrace(void);
 * external nmis, because the local ones are more frequent.
 *
 * Also setup some default high/normal/low settings for
-* subsystems to registers with. Using 4 bits to seperate
-* the priorities. This can go alot higher if needed be.
+* subsystems to registers with. Using 4 bits to separate
+* the priorities. This can go a lot higher if needed be.
 */

 #define NMI_LOCAL_SHIFT 16 /* randomly picked */
@@ -38,7 +38,7 @@
 #define K8_NOP8 K8_NOP4 K8_NOP4

 /* K7 nops
-uses eax dependencies (arbitary choice)
+uses eax dependencies (arbitrary choice)
 1: nop
 2: movl %eax,%eax
 3: leal (,%eax,1),%eax
@@ -20,7 +20,7 @@ extern struct olpc_platform_t olpc_platform_info;

 /*
 * OLPC board IDs contain the major build number within the mask 0x0ff0,
-* and the minor build number withing 0x000f. Pre-builds have a minor
+* and the minor build number within 0x000f. Pre-builds have a minor
 * number less than 8, and normal builds start at 8. For example, 0x0B10
 * is a PreB1, and 0x0C18 is a C1.
 */
@@ -1,5 +1,5 @@
 /*
-* Netburst Perfomance Events (P4, old Xeon)
+* Netburst Performance Events (P4, old Xeon)
 */

 #ifndef PERF_EVENT_P4_H
@@ -9,7 +9,7 @@
 #include <linux/bitops.h>

 /*
-* NetBurst has perfomance MSRs shared between
+* NetBurst has performance MSRs shared between
 * threads if HT is turned on, ie for both logical
 * processors (mem: in turn in Atom with HT support
 * perf-MSRs are not shared and every thread has its
@@ -7,7 +7,7 @@
 */
 #define X86_EFLAGS_CF 0x00000001 /* Carry Flag */
 #define X86_EFLAGS_PF 0x00000004 /* Parity Flag */
-#define X86_EFLAGS_AF 0x00000010 /* Auxillary carry Flag */
+#define X86_EFLAGS_AF 0x00000010 /* Auxiliary carry Flag */
 #define X86_EFLAGS_ZF 0x00000040 /* Zero Flag */
 #define X86_EFLAGS_SF 0x00000080 /* Sign Flag */
 #define X86_EFLAGS_TF 0x00000100 /* Trap Flag */
@@ -31,7 +31,7 @@
 #define R12 24
 #define RBP 32
 #define RBX 40
-/* arguments: interrupts/non tracing syscalls only save upto here*/
+/* arguments: interrupts/non tracing syscalls only save up to here*/
 #define R11 48
 #define R10 56
 #define R9 64
@@ -73,7 +73,7 @@ struct pt_regs {
 unsigned long r12;
 unsigned long rbp;
 unsigned long rbx;
-/* arguments: non interrupts/non tracing syscalls only save upto here*/
+/* arguments: non interrupts/non tracing syscalls only save up to here*/
 unsigned long r11;
 unsigned long r10;
 unsigned long r9;
@@ -103,7 +103,7 @@ struct pt_regs {
 unsigned long r12;
 unsigned long bp;
 unsigned long bx;
-/* arguments: non interrupts/non tracing syscalls only save upto here*/
+/* arguments: non interrupts/non tracing syscalls only save up to here*/
 unsigned long r11;
 unsigned long r10;
 unsigned long r9;
@@ -35,7 +35,7 @@ static inline cycles_t get_cycles(void)
 static __always_inline cycles_t vget_cycles(void)
 {
 /*
-* We only do VDSOs on TSC capable CPUs, so this shouldnt
+* We only do VDSOs on TSC capable CPUs, so this shouldn't
 * access boot_cpu_data (which is not VDSO-safe):
 */
 #ifndef CONFIG_X86_TSC
@@ -86,7 +86,7 @@ DEFINE_GUEST_HANDLE(void);
 * The privilege level specifies which modes may enter a trap via a software
 * interrupt. On x86/64, since rings 1 and 2 are unavailable, we allocate
 * privilege levels as follows:
-* Level == 0: Noone may enter
+* Level == 0: No one may enter
 * Level == 1: Kernel may enter
 * Level == 2: Kernel may enter
 * Level == 3: Everyone may enter
@@ -199,7 +199,7 @@ void *text_poke_early(void *addr, const void *opcode, size_t len);

 /* Replace instructions with better alternatives for this CPU type.
 This runs before SMP is initialized to avoid SMP problems with
-self modifying code. This implies that assymetric systems where
+self modifying code. This implies that asymmetric systems where
 APs have less capabilities than the boot processor are not handled.
 Tough. Make sure you disable such features by hand. */

@@ -73,7 +73,7 @@ static u32 __init allocate_aperture(void)
 /*
 * using 512M as goal, in case kexec will load kernel_big
 * that will do the on position decompress, and could overlap with
-* that positon with gart that is used.
+* that position with gart that is used.
 * sequende:
 * kernel_small
 * ==> kexec (with kdump trigger path or previous doesn't shutdown gart)
@@ -1886,7 +1886,7 @@ void disable_IO_APIC(void)
 *
 * With interrupt-remapping, for now we will use virtual wire A mode,
 * as virtual wire B is little complex (need to configure both
-* IOAPIC RTE aswell as interrupt-remapping table entry).
+* IOAPIC RTE as well as interrupt-remapping table entry).
 * As this gets called during crash dump, keep this simple for now.
 */
 if (ioapic_i8259.pin != -1 && !intr_remapping_enabled) {
@@ -2905,7 +2905,7 @@ void __init setup_IO_APIC(void)
 }

 /*
-* Called after all the initialization is done. If we didnt find any
+* Called after all the initialization is done. If we didn't find any
 * APIC bugs then we can allow the modify fast path
 */

@@ -66,7 +66,7 @@
 * 1.5: Fix segment register reloading (in case of bad segments saved
 * across BIOS call).
 * Stephen Rothwell
-* 1.6: Cope with complier/assembler differences.
+* 1.6: Cope with compiler/assembler differences.
 * Only try to turn off the first display device.
 * Fix OOPS at power off with no APM BIOS by Jan Echternach
 * <echter@informatik.uni-rostock.de>
@@ -444,7 +444,7 @@ static int __cpuinit longhaul_get_ranges(void)
 return -EINVAL;
 }
 /* Get max multiplier - as we always did.
-* Longhaul MSR is usefull only when voltage scaling is enabled.
+* Longhaul MSR is useful only when voltage scaling is enabled.
 * C3 is booting at max anyway. */
 maxmult = mult;
 /* Get min multiplier */
@@ -1011,7 +1011,7 @@ static void __exit longhaul_exit(void)
 * trigger frequency transition in some cases. */
 module_param(disable_acpi_c3, int, 0644);
 MODULE_PARM_DESC(disable_acpi_c3, "Don't use ACPI C3 support");
-/* Change CPU voltage with frequency. Very usefull to save
+/* Change CPU voltage with frequency. Very useful to save
 * power, but most VIA C3 processors aren't supporting it. */
 module_param(scale_voltage, int, 0644);
 MODULE_PARM_DESC(scale_voltage, "Scale voltage of processor");
@@ -1276,7 +1276,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)

 if (powernow_k8_cpu_init_acpi(data)) {
 /*
-* Use the PSB BIOS structure. This is only availabe on
+* Use the PSB BIOS structure. This is only available on
 * an UP version, and is deprecated by AMD.
 */
 if (num_online_cpus() != 1) {
@@ -292,7 +292,7 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy)

 result = speedstep_smi_ownership();
 if (result) {
-dprintk("fails in aquiring ownership of a SMI interface.\n");
+dprintk("fails in acquiring ownership of a SMI interface.\n");
 return -EINVAL;
 }

@@ -360,7 +360,7 @@ static int speedstep_resume(struct cpufreq_policy *policy)
 int result = speedstep_smi_ownership();

 if (result)
-dprintk("fails in re-aquiring ownership of a SMI interface.\n");
+dprintk("fails in re-acquiring ownership of a SMI interface.\n");

 return result;
 }
@@ -32,7 +32,7 @@ static void inject_mce(struct mce *m)
 {
 struct mce *i = &per_cpu(injectm, m->extcpu);

-/* Make sure noone reads partially written injectm */
+/* Make sure no one reads partially written injectm */
 i->finished = 0;
 mb();
 m->finished = 0;
Some files were not shown because too many files have changed in this diff.