mirror of https://github.com/armbian/linux.git
Merge branch 'linux-linaro-lsk' into linux-linaro-lsk-android
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 3
 PATCHLEVEL = 10
-SUBLEVEL = 30
+SUBLEVEL = 32
 EXTRAVERSION =
 NAME = TOSSUG Baby Fish
 
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -116,6 +116,7 @@ extern void flush_dcache_page(struct page *);
 static inline void __flush_icache_all(void)
 {
        asm("ic ialluis");
+       dsb();
 }
 
 #define flush_dcache_mmap_lock(mapping) \
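Note on the hunk above: "ic ialluis" only issues a broadcast I-cache invalidate; without a following barrier the core may keep executing stale instructions. A minimal sketch of the ordering idiom, assuming GCC-style inline asm on AArch64 (the kernel's dsb() macro hides the same instruction; the trailing isb is shown for completeness and is not part of this patch):

    /* Sketch: make newly written code visible to the instruction stream. */
    static inline void sync_new_code(void)
    {
            asm volatile("ic ialluis" ::: "memory"); /* invalidate all I-caches, Inner Shareable */
            asm volatile("dsb sy"     ::: "memory"); /* wait for the invalidate to complete */
            asm volatile("isb"        ::: "memory"); /* re-fetch the (now current) instructions */
    }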
--- a/arch/arm64/include/asm/topology.h
+++ b/arch/arm64/include/asm/topology.h
@@ -26,7 +26,6 @@ extern struct cputopo_arm cpu_topology[NR_CPUS];
 void init_cpu_topology(void);
 void store_cpu_topology(unsigned int cpuid);
 const struct cpumask *cpu_coregroup_mask(int cpu);
-int cluster_to_logical_mask(unsigned int socket_id, cpumask_t *cluster_mask);
 
 #ifdef CONFIG_DISABLE_CPU_SCHED_DOMAIN_BALANCE
 /* Common values for CPUs */
@@ -63,8 +62,6 @@ int cluster_to_logical_mask(unsigned int socket_id, cpumask_t *cluster_mask);
 
 static inline void init_cpu_topology(void) { }
 static inline void store_cpu_topology(unsigned int cpuid) { }
-static inline int cluster_to_logical_mask(unsigned int socket_id,
-       cpumask_t *cluster_mask) { return -EINVAL; }
 
 #endif
 
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -2,9 +2,9 @@
  * arch/arm64/kernel/topology.c
  *
  * Copyright (C) 2011,2013 Linaro Limited.
- * Written by: Vincent Guittot
  *
- * based on arch/sh/kernel/topology.c
+ * Based on the arm32 version written by Vincent Guittot in turn based on
+ * arch/sh/kernel/topology.c
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License. See the file "COPYING" in the main directory of this archive
@@ -13,7 +13,6 @@
 
 #include <linux/cpu.h>
 #include <linux/cpumask.h>
-#include <linux/export.h>
 #include <linux/init.h>
 #include <linux/percpu.h>
 #include <linux/node.h>
@@ -22,9 +21,10 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 
-#include <asm/topology.h>
+#include <asm/cputype.h>
 #include <asm/smp_plat.h>
+#include <asm/topology.h>
 
 /*
  * cpu power scale management
@@ -115,7 +115,7 @@ static void __init parse_core(struct device_node *core, int core_id)
                if (t) {
                        leaf = false;
                        cpu = get_cpu_for_node(t);
-                       if (cpu) {
+                       if (cpu >= 0) {
                                pr_info("CPU%d: socket %d core %d thread %d\n",
                                        cpu, cluster_id, core_id, i);
                                cpu_topology[cpu].socket_id = cluster_id;
@@ -146,7 +146,7 @@ static void __init parse_core(struct device_node *core, int core_id)
        }
 }
 
-static void __init parse_cluster(struct device_node *cluster)
+static void __init parse_cluster(struct device_node *cluster, int depth)
 {
        char name[10];
        bool leaf = true;
@@ -165,7 +165,7 @@ static void __init parse_cluster(struct device_node *cluster)
                snprintf(name, sizeof(name), "cluster%d", i);
                c = of_get_child_by_name(cluster, name);
                if (c) {
-                       parse_cluster(c);
+                       parse_cluster(c, depth + 1);
                        leaf = false;
                }
                i++;
@@ -179,6 +179,10 @@ static void __init parse_cluster(struct device_node *cluster)
                if (c) {
                        has_cores = true;
 
+                       if (depth == 0)
+                               pr_err("%s: cpu-map children should be clusters\n",
+                                      c->full_name);
+
                        if (leaf)
                                parse_core(c, core_id++);
                        else
@@ -228,7 +232,7 @@ static void __init parse_dt_topology(void)
        cn = of_find_node_by_name(cn, "cpu-map");
        if (!cn)
                return;
-       parse_cluster(cn);
+       parse_cluster(cn, 0);
 
        for_each_possible_cpu(cpu) {
                const u32 *rate;
@@ -354,9 +358,9 @@ void store_cpu_topology(unsigned int cpuid)
 {
        struct cputopo_arm *cpuid_topo = &cpu_topology[cpuid];
 
-       /* DT should have been parsed by the time we get here */
+       /* Something should have picked a topology by the time we get here */
        if (cpuid_topo->core_id == -1)
-               pr_info("CPU%u: No topology information configured\n", cpuid);
+               pr_warn("CPU%u: No topology information configured\n", cpuid);
        else
                update_siblings_masks(cpuid);
 
@@ -534,4 +538,17 @@ void __init init_cpu_topology(void)
        smp_wmb();
 
        parse_dt_topology();
+
+       /*
+        * Assign all remaining CPUs to a cluster so the scheduler
+        * doesn't get confused.
+        */
+       for_each_possible_cpu(cpu) {
+               struct cputopo_arm *cpu_topo = &cpu_topology[cpu];
+
+               if (cpu_topo->socket_id == -1) {
+                       cpu_topo->socket_id = INT_MAX;
+                       cpu_topo->core_id = cpu;
+               }
+       }
 }
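Note on the topology.c hunks: parse_cluster() now tracks its recursion depth so it can enforce the cpu-map device-tree rule that direct children of /cpus/cpu-map are clusters rather than cores, and init_cpu_topology() parks every CPU the DT did not place into a catch-all cluster (socket_id = INT_MAX) so the scheduler always sees a complete topology. A toy user-space model of the depth rule, with hypothetical node types of our own (not kernel code):

    #include <stdio.h>

    struct tnode {
            const char *name;
            int is_core;
            struct tnode **children;        /* NULL-terminated */
    };

    /* Cores are only legal below at least one cluster level. */
    static void parse(struct tnode *n, int depth)
    {
            struct tnode **c;

            for (c = n->children; c && *c; c++) {
                    if ((*c)->is_core) {
                            if (depth == 0)
                                    fprintf(stderr, "%s: cpu-map children should be clusters\n",
                                            (*c)->name);
                            continue;
                    }
                    parse(*c, depth + 1);   /* nested cluster */
            }
    }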
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -235,6 +235,8 @@ void update_vsyscall(struct timekeeper *tk)
        vdso_data->use_syscall = use_syscall;
        vdso_data->xtime_coarse_sec = xtime_coarse.tv_sec;
        vdso_data->xtime_coarse_nsec = xtime_coarse.tv_nsec;
+       vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec;
+       vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;
 
        if (!use_syscall) {
                vdso_data->cs_cycle_last = tk->clock->cycle_last;
@@ -242,8 +244,6 @@ void update_vsyscall(struct timekeeper *tk)
                vdso_data->xtime_clock_nsec = tk->xtime_nsec;
                vdso_data->cs_mult = tk->mult;
                vdso_data->cs_shift = tk->shift;
-               vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec;
-               vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;
        }
 
        smp_wmb();
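Note on the update_vsyscall() hunks: the wall-to-monotonic offset is now published unconditionally, because the CLOCK_MONOTONIC_COARSE path in the vdso needs it even when use_syscall forces the precise clocks through the fallback. A rough user-space model of the coarse computation (field names are ours, not the real vdso_data layout):

    #include <time.h>

    struct vdso_model {
            time_t xtime_coarse_sec;  long xtime_coarse_nsec;
            time_t wtm_sec;           long wtm_nsec;  /* wall -> monotonic offset */
    };

    /* CLOCK_MONOTONIC_COARSE = coarse wall time + wall->monotonic offset. */
    static void coarse_monotonic(const struct vdso_model *v, struct timespec *ts)
    {
            ts->tv_sec  = v->xtime_coarse_sec + v->wtm_sec;
            ts->tv_nsec = v->xtime_coarse_nsec + v->wtm_nsec;
            if (ts->tv_nsec >= 1000000000L) {       /* normalize the carry */
                    ts->tv_nsec -= 1000000000L;
                    ts->tv_sec++;
            }
    }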
--- a/arch/arm64/kernel/vdso/Makefile
+++ b/arch/arm64/kernel/vdso/Makefile
@@ -48,7 +48,7 @@ $(obj-vdso): %.o: %.S
 
 # Actual build commands
 quiet_cmd_vdsold = VDSOL $@
-      cmd_vdsold = $(CC) $(c_flags) -Wl,-T $^ -o $@
+      cmd_vdsold = $(CC) $(c_flags) -Wl,-n -Wl,-T $^ -o $@
 quiet_cmd_vdsoas = VDSOA $@
       cmd_vdsoas = $(CC) $(a_flags) -c -o $@ $<
 
--- a/arch/arm64/kernel/vdso/gettimeofday.S
+++ b/arch/arm64/kernel/vdso/gettimeofday.S
@@ -103,6 +103,8 @@ ENTRY(__kernel_clock_gettime)
        bl __do_get_tspec
        seqcnt_check w9, 1b
 
+       mov x30, x2
+
        cmp w0, #CLOCK_MONOTONIC
        b.ne 6f
 
@@ -118,6 +120,9 @@ ENTRY(__kernel_clock_gettime)
        ccmp w0, #CLOCK_MONOTONIC_COARSE, #0x4, ne
        b.ne 8f
 
+       /* xtime_coarse_nsec is already right-shifted */
+       mov x12, #0
+
        /* Get coarse timespec. */
        adr vdso_data, _vdso_data
 3:     seqcnt_acquire
@@ -156,7 +161,7 @@ ENTRY(__kernel_clock_gettime)
        lsr x11, x11, x12
        stp x10, x11, [x1, #TSPEC_TV_SEC]
        mov x0, xzr
-       ret x2
+       ret
 7:
        mov x30, x2
 8:     /* Syscall fallback. */
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -203,10 +203,18 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
        do {
                next = pmd_addr_end(addr, end);
                /* try section mapping first */
-               if (((addr | next | phys) & ~SECTION_MASK) == 0)
+               if (((addr | next | phys) & ~SECTION_MASK) == 0) {
+                       pmd_t old_pmd =*pmd;
                        set_pmd(pmd, __pmd(phys | prot_sect_kernel));
-               else
+                       /*
+                        * Check for previous table entries created during
+                        * boot (__create_page_tables) and flush them.
+                        */
+                       if (!pmd_none(old_pmd))
+                               flush_tlb_all();
+               } else {
                        alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys));
+               }
                phys += next - addr;
        } while (pmd++, addr = next, addr != end);
 }
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -25,6 +25,7 @@
 #include <linux/err.h>
 #include <linux/module.h>
 #include <linux/init.h>
+#include <linux/spinlock.h>
 #include "crypt_s390.h"
 
 #define AES_KEYLEN_128 1
@@ -32,6 +33,7 @@
 #define AES_KEYLEN_256 4
 
 static u8 *ctrblk;
+static DEFINE_SPINLOCK(ctrblk_lock);
 static char keylen_flag;
 
 struct s390_aes_ctx {
@@ -756,43 +758,67 @@ static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
        return aes_set_key(tfm, in_key, key_len);
 }
 
+static unsigned int __ctrblk_init(u8 *ctrptr, unsigned int nbytes)
+{
+       unsigned int i, n;
+
+       /* only use complete blocks, max. PAGE_SIZE */
+       n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
+       for (i = AES_BLOCK_SIZE; i < n; i += AES_BLOCK_SIZE) {
+               memcpy(ctrptr + i, ctrptr + i - AES_BLOCK_SIZE,
+                      AES_BLOCK_SIZE);
+               crypto_inc(ctrptr + i, AES_BLOCK_SIZE);
+       }
+       return n;
+}
+
 static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
                         struct s390_aes_ctx *sctx, struct blkcipher_walk *walk)
 {
        int ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
-       unsigned int i, n, nbytes;
-       u8 buf[AES_BLOCK_SIZE];
-       u8 *out, *in;
+       unsigned int n, nbytes;
+       u8 buf[AES_BLOCK_SIZE], ctrbuf[AES_BLOCK_SIZE];
+       u8 *out, *in, *ctrptr = ctrbuf;
 
        if (!walk->nbytes)
                return ret;
 
-       memcpy(ctrblk, walk->iv, AES_BLOCK_SIZE);
+       if (spin_trylock(&ctrblk_lock))
+               ctrptr = ctrblk;
+
+       memcpy(ctrptr, walk->iv, AES_BLOCK_SIZE);
        while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
                out = walk->dst.virt.addr;
                in = walk->src.virt.addr;
                while (nbytes >= AES_BLOCK_SIZE) {
-                       /* only use complete blocks, max. PAGE_SIZE */
-                       n = (nbytes > PAGE_SIZE) ? PAGE_SIZE :
-                                                nbytes & ~(AES_BLOCK_SIZE - 1);
-                       for (i = AES_BLOCK_SIZE; i < n; i += AES_BLOCK_SIZE) {
-                               memcpy(ctrblk + i, ctrblk + i - AES_BLOCK_SIZE,
-                                      AES_BLOCK_SIZE);
-                               crypto_inc(ctrblk + i, AES_BLOCK_SIZE);
-                       }
-                       ret = crypt_s390_kmctr(func, sctx->key, out, in, n, ctrblk);
-                       if (ret < 0 || ret != n)
+                       if (ctrptr == ctrblk)
+                               n = __ctrblk_init(ctrptr, nbytes);
+                       else
+                               n = AES_BLOCK_SIZE;
+                       ret = crypt_s390_kmctr(func, sctx->key, out, in,
+                                              n, ctrptr);
+                       if (ret < 0 || ret != n) {
+                               if (ctrptr == ctrblk)
+                                       spin_unlock(&ctrblk_lock);
                                return -EIO;
+                       }
                        if (n > AES_BLOCK_SIZE)
-                               memcpy(ctrblk, ctrblk + n - AES_BLOCK_SIZE,
+                               memcpy(ctrptr, ctrptr + n - AES_BLOCK_SIZE,
                                       AES_BLOCK_SIZE);
-                       crypto_inc(ctrblk, AES_BLOCK_SIZE);
+                       crypto_inc(ctrptr, AES_BLOCK_SIZE);
                        out += n;
                        in += n;
                        nbytes -= n;
                }
                ret = blkcipher_walk_done(desc, walk, nbytes);
        }
+       if (ctrptr == ctrblk) {
+               if (nbytes)
+                       memcpy(ctrbuf, ctrptr, AES_BLOCK_SIZE);
+               else
+                       memcpy(walk->iv, ctrptr, AES_BLOCK_SIZE);
+               spin_unlock(&ctrblk_lock);
+       }
        /*
         * final block may be < AES_BLOCK_SIZE, copy only nbytes
         */
@@ -800,14 +826,15 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
                out = walk->dst.virt.addr;
                in = walk->src.virt.addr;
                ret = crypt_s390_kmctr(func, sctx->key, buf, in,
-                                      AES_BLOCK_SIZE, ctrblk);
+                                      AES_BLOCK_SIZE, ctrbuf);
                if (ret < 0 || ret != AES_BLOCK_SIZE)
                        return -EIO;
                memcpy(out, buf, nbytes);
-               crypto_inc(ctrblk, AES_BLOCK_SIZE);
+               crypto_inc(ctrbuf, AES_BLOCK_SIZE);
                ret = blkcipher_walk_done(desc, walk, 0);
+               memcpy(walk->iv, ctrbuf, AES_BLOCK_SIZE);
        }
-       memcpy(walk->iv, ctrblk, AES_BLOCK_SIZE);
 
        return ret;
 }
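Note on the aes_s390.c rework: the driver used to build counter blocks in the single shared ctrblk page with no locking, so two concurrent CTR requests could scribble over each other's counters. The fix tries the new ctrblk_lock; the winner keeps batching up to PAGE_SIZE of counters, everyone else falls back to a one-block buffer on the stack. A user-space sketch of that trylock-or-local pattern (names and sizes are ours):

    #include <pthread.h>
    #include <string.h>

    #define BLK 16                  /* one AES block */
    #define BATCH 4096              /* shared batching buffer, PAGE_SIZE-like */

    static unsigned char shared_ctr[BATCH];
    static pthread_mutex_t shared_lock = PTHREAD_MUTEX_INITIALIZER;

    void ctr_crypt_model(const unsigned char iv[BLK])
    {
            unsigned char local_ctr[BLK];
            unsigned char *ctr = local_ctr;
            size_t avail = BLK;

            if (pthread_mutex_trylock(&shared_lock) == 0) {
                    ctr = shared_ctr;       /* uncontended: batch many blocks */
                    avail = BATCH;
            }
            memcpy(ctr, iv, BLK);
            /* ... generate and encrypt up to 'avail' bytes of counter blocks ... */
            (void)avail;
            if (ctr == shared_ctr)
                    pthread_mutex_unlock(&shared_lock);
    }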
--- a/arch/s390/crypto/des_s390.c
+++ b/arch/s390/crypto/des_s390.c
@@ -25,6 +25,7 @@
 #define DES3_KEY_SIZE (3 * DES_KEY_SIZE)
 
 static u8 *ctrblk;
+static DEFINE_SPINLOCK(ctrblk_lock);
 
 struct s390_des_ctx {
        u8 iv[DES_BLOCK_SIZE];
@@ -105,29 +106,35 @@ static int ecb_desall_crypt(struct blkcipher_desc *desc, long func,
 }
 
 static int cbc_desall_crypt(struct blkcipher_desc *desc, long func,
-                           u8 *iv, struct blkcipher_walk *walk)
+                           struct blkcipher_walk *walk)
 {
+       struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
        int ret = blkcipher_walk_virt(desc, walk);
        unsigned int nbytes = walk->nbytes;
+       struct {
+               u8 iv[DES_BLOCK_SIZE];
+               u8 key[DES3_KEY_SIZE];
+       } param;
 
        if (!nbytes)
                goto out;
 
-       memcpy(iv, walk->iv, DES_BLOCK_SIZE);
+       memcpy(param.iv, walk->iv, DES_BLOCK_SIZE);
+       memcpy(param.key, ctx->key, DES3_KEY_SIZE);
        do {
                /* only use complete blocks */
                unsigned int n = nbytes & ~(DES_BLOCK_SIZE - 1);
                u8 *out = walk->dst.virt.addr;
                u8 *in = walk->src.virt.addr;
 
-               ret = crypt_s390_kmc(func, iv, out, in, n);
+               ret = crypt_s390_kmc(func, &param, out, in, n);
                if (ret < 0 || ret != n)
                        return -EIO;
 
                nbytes &= DES_BLOCK_SIZE - 1;
                ret = blkcipher_walk_done(desc, walk, nbytes);
        } while ((nbytes = walk->nbytes));
-       memcpy(walk->iv, iv, DES_BLOCK_SIZE);
+       memcpy(walk->iv, param.iv, DES_BLOCK_SIZE);
 
 out:
        return ret;
@@ -179,22 +186,20 @@ static int cbc_des_encrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
 {
-       struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;
 
        blkcipher_walk_init(&walk, dst, src, nbytes);
-       return cbc_desall_crypt(desc, KMC_DEA_ENCRYPT, ctx->iv, &walk);
+       return cbc_desall_crypt(desc, KMC_DEA_ENCRYPT, &walk);
 }
 
 static int cbc_des_decrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
 {
-       struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;
 
        blkcipher_walk_init(&walk, dst, src, nbytes);
-       return cbc_desall_crypt(desc, KMC_DEA_DECRYPT, ctx->iv, &walk);
+       return cbc_desall_crypt(desc, KMC_DEA_DECRYPT, &walk);
 }
 
 static struct crypto_alg cbc_des_alg = {
@@ -327,22 +332,20 @@ static int cbc_des3_encrypt(struct blkcipher_desc *desc,
                            struct scatterlist *dst, struct scatterlist *src,
                            unsigned int nbytes)
 {
-       struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;
 
        blkcipher_walk_init(&walk, dst, src, nbytes);
-       return cbc_desall_crypt(desc, KMC_TDEA_192_ENCRYPT, ctx->iv, &walk);
+       return cbc_desall_crypt(desc, KMC_TDEA_192_ENCRYPT, &walk);
 }
 
 static int cbc_des3_decrypt(struct blkcipher_desc *desc,
                            struct scatterlist *dst, struct scatterlist *src,
                            unsigned int nbytes)
 {
-       struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;
 
        blkcipher_walk_init(&walk, dst, src, nbytes);
-       return cbc_desall_crypt(desc, KMC_TDEA_192_DECRYPT, ctx->iv, &walk);
+       return cbc_desall_crypt(desc, KMC_TDEA_192_DECRYPT, &walk);
 }
 
 static struct crypto_alg cbc_des3_alg = {
@@ -366,54 +369,80 @@ static struct crypto_alg cbc_des3_alg = {
        }
 };
 
+static unsigned int __ctrblk_init(u8 *ctrptr, unsigned int nbytes)
+{
+       unsigned int i, n;
+
+       /* align to block size, max. PAGE_SIZE */
+       n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(DES_BLOCK_SIZE - 1);
+       for (i = DES_BLOCK_SIZE; i < n; i += DES_BLOCK_SIZE) {
+               memcpy(ctrptr + i, ctrptr + i - DES_BLOCK_SIZE, DES_BLOCK_SIZE);
+               crypto_inc(ctrptr + i, DES_BLOCK_SIZE);
+       }
+       return n;
+}
+
 static int ctr_desall_crypt(struct blkcipher_desc *desc, long func,
-                           struct s390_des_ctx *ctx, struct blkcipher_walk *walk)
+                           struct s390_des_ctx *ctx,
+                           struct blkcipher_walk *walk)
 {
        int ret = blkcipher_walk_virt_block(desc, walk, DES_BLOCK_SIZE);
-       unsigned int i, n, nbytes;
-       u8 buf[DES_BLOCK_SIZE];
-       u8 *out, *in;
+       unsigned int n, nbytes;
+       u8 buf[DES_BLOCK_SIZE], ctrbuf[DES_BLOCK_SIZE];
+       u8 *out, *in, *ctrptr = ctrbuf;
 
-       memcpy(ctrblk, walk->iv, DES_BLOCK_SIZE);
+       if (!walk->nbytes)
+               return ret;
+
+       if (spin_trylock(&ctrblk_lock))
+               ctrptr = ctrblk;
+
+       memcpy(ctrptr, walk->iv, DES_BLOCK_SIZE);
        while ((nbytes = walk->nbytes) >= DES_BLOCK_SIZE) {
                out = walk->dst.virt.addr;
                in = walk->src.virt.addr;
                while (nbytes >= DES_BLOCK_SIZE) {
-                       /* align to block size, max. PAGE_SIZE */
-                       n = (nbytes > PAGE_SIZE) ? PAGE_SIZE :
-                               nbytes & ~(DES_BLOCK_SIZE - 1);
-                       for (i = DES_BLOCK_SIZE; i < n; i += DES_BLOCK_SIZE) {
-                               memcpy(ctrblk + i, ctrblk + i - DES_BLOCK_SIZE,
-                                      DES_BLOCK_SIZE);
-                               crypto_inc(ctrblk + i, DES_BLOCK_SIZE);
-                       }
-                       ret = crypt_s390_kmctr(func, ctx->key, out, in, n, ctrblk);
-                       if (ret < 0 || ret != n)
+                       if (ctrptr == ctrblk)
+                               n = __ctrblk_init(ctrptr, nbytes);
+                       else
+                               n = DES_BLOCK_SIZE;
+                       ret = crypt_s390_kmctr(func, ctx->key, out, in,
+                                              n, ctrptr);
+                       if (ret < 0 || ret != n) {
+                               if (ctrptr == ctrblk)
+                                       spin_unlock(&ctrblk_lock);
                                return -EIO;
+                       }
                        if (n > DES_BLOCK_SIZE)
-                               memcpy(ctrblk, ctrblk + n - DES_BLOCK_SIZE,
+                               memcpy(ctrptr, ctrptr + n - DES_BLOCK_SIZE,
                                       DES_BLOCK_SIZE);
-                       crypto_inc(ctrblk, DES_BLOCK_SIZE);
+                       crypto_inc(ctrptr, DES_BLOCK_SIZE);
                        out += n;
                        in += n;
                        nbytes -= n;
                }
                ret = blkcipher_walk_done(desc, walk, nbytes);
        }
 
+       if (ctrptr == ctrblk) {
+               if (nbytes)
+                       memcpy(ctrbuf, ctrptr, DES_BLOCK_SIZE);
+               else
+                       memcpy(walk->iv, ctrptr, DES_BLOCK_SIZE);
+               spin_unlock(&ctrblk_lock);
+       }
        /* final block may be < DES_BLOCK_SIZE, copy only nbytes */
        if (nbytes) {
                out = walk->dst.virt.addr;
                in = walk->src.virt.addr;
                ret = crypt_s390_kmctr(func, ctx->key, buf, in,
-                                      DES_BLOCK_SIZE, ctrblk);
+                                      DES_BLOCK_SIZE, ctrbuf);
                if (ret < 0 || ret != DES_BLOCK_SIZE)
                        return -EIO;
                memcpy(out, buf, nbytes);
-               crypto_inc(ctrblk, DES_BLOCK_SIZE);
+               crypto_inc(ctrbuf, DES_BLOCK_SIZE);
                ret = blkcipher_walk_done(desc, walk, 0);
+               memcpy(walk->iv, ctrbuf, DES_BLOCK_SIZE);
        }
-       memcpy(walk->iv, ctrblk, DES_BLOCK_SIZE);
        return ret;
 }
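Note on the des_s390.c hunks: the CTR change mirrors the AES fix above (shared ctrblk now guarded by ctrblk_lock, with a per-request fallback buffer), and the CBC change drops the race-prone IV stored in the shared tfm context in favour of a per-request parameter block passed to the KMC instruction. A sketch of that parameter-block idea, with our own names (the operand layout, IV followed by key, is taken from the diff's anonymous struct):

    #include <string.h>

    #define DES_BLOCK_SIZE  8
    #define DES3_KEY_SIZE   24

    struct kmc_param {
            unsigned char iv[DES_BLOCK_SIZE];
            unsigned char key[DES3_KEY_SIZE];
    };

    /* Per-request copy: no shared, concurrently mutated IV state. */
    static void build_param(struct kmc_param *p,
                            const unsigned char *iv, const unsigned char *key)
    {
            memcpy(p->iv, iv, DES_BLOCK_SIZE);
            memcpy(p->key, key, DES3_KEY_SIZE);
    }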
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -59,7 +59,7 @@ ENTRY(startup_continue)
        .quad 0                 # cr12: tracing off
        .quad 0                 # cr13: home space segment table
        .quad 0xc0000000        # cr14: machine check handling off
-       .quad 0                 # cr15: linkage stack operations
+       .quad .Llinkage_stack   # cr15: linkage stack operations
 .Lpcmsk:.quad 0x0000000180000000
 .L4malign:.quad 0xffffffffffc00000
 .Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8
@@ -67,12 +67,15 @@ ENTRY(startup_continue)
 .Lparmaddr:
        .quad PARMAREA
        .align 64
-.Lduct: .long 0,0,0,0,.Lduald,0,0,0
+.Lduct: .long 0,.Laste,.Laste,0,.Lduald,0,0,0
        .long 0,0,0,0,0,0,0,0
+.Laste: .quad 0,0xffffffffffffffff,0,0,0,0,0,0
        .align 128
 .Lduald:.rept 8
        .long 0x80000000,0,0,0  # invalid access-list entries
        .endr
+.Llinkage_stack:
+       .long 0,0,0x89000000,0,0,0,0x8a000000,0
 
 ENTRY(_ehead)
--- a/arch/s390/mm/page-states.c
+++ b/arch/s390/mm/page-states.c
@@ -12,6 +12,8 @@
 #include <linux/mm.h>
 #include <linux/gfp.h>
 #include <linux/init.h>
+#include <asm/setup.h>
+#include <asm/ipl.h>
 
 #define ESSA_SET_STABLE 1
 #define ESSA_SET_UNUSED 2
@@ -41,6 +43,14 @@ void __init cmma_init(void)
 
        if (!cmma_flag)
                return;
+       /*
+        * Disable CMM for dump, otherwise the tprot based memory
+        * detection can fail because of unstable pages.
+        */
+       if (OLDMEM_BASE || ipl_info.type == IPL_TYPE_FCP_DUMP) {
+               cmma_flag = 0;
+               return;
+       }
        asm volatile(
                "       .insn rrf,0xb9ab0000,%1,%1,0,0\n"
                "0:     la      %0,0\n"
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -79,30 +79,38 @@ static inline int phys_to_machine_mapping_valid(unsigned long pfn)
        return get_phys_to_machine(pfn) != INVALID_P2M_ENTRY;
 }
 
-static inline unsigned long mfn_to_pfn(unsigned long mfn)
+static inline unsigned long mfn_to_pfn_no_overrides(unsigned long mfn)
 {
        unsigned long pfn;
-       int ret = 0;
+       int ret;
 
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return mfn;
 
-       if (unlikely(mfn >= machine_to_phys_nr)) {
-               pfn = ~0;
-               goto try_override;
-       }
-       pfn = 0;
+       if (unlikely(mfn >= machine_to_phys_nr))
+               return ~0;
+
        /*
         * The array access can fail (e.g., device space beyond end of RAM).
         * In such cases it doesn't matter what we return (we return garbage),
         * but we must handle the fault without crashing!
         */
        ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
-try_override:
-       /* ret might be < 0 if there are no entries in the m2p for mfn */
        if (ret < 0)
-               pfn = ~0;
-       else if (get_phys_to_machine(pfn) != mfn)
+               return ~0;
+
+       return pfn;
+}
+
+static inline unsigned long mfn_to_pfn(unsigned long mfn)
+{
+       unsigned long pfn;
+
+       if (xen_feature(XENFEAT_auto_translated_physmap))
+               return mfn;
+
+       pfn = mfn_to_pfn_no_overrides(mfn);
+       if (get_phys_to_machine(pfn) != mfn) {
                /*
                 * If this appears to be a foreign mfn (because the pfn
                 * doesn't map back to the mfn), then check the local override
@@ -111,6 +119,7 @@ try_override:
                 * m2p_find_override_pfn returns ~0 if it doesn't find anything.
                 */
                pfn = m2p_find_override_pfn(mfn, ~0);
+       }
 
        /*
        * pfn is ~0 if there are no entries in the m2p for mfn or if the
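Note on the page.h split: mfn_to_pfn_no_overrides() is now the bare machine-to-physical lookup (range check plus fault-safe table access), and mfn_to_pfn() layers the foreign-page override lookup on top; the p2m.c hunks further down switch to the new helper. A compilable toy model of the two layers, with stand-in tables of our own:

    #define TBL     16
    #define INVALID (~0UL)

    /* Stand-ins for the Xen m2p/p2m mappings and the local override table. */
    static unsigned long m2p[TBL], p2m[TBL], override_pfn[TBL];

    static unsigned long mfn_to_pfn_no_overrides_model(unsigned long mfn)
    {
            if (mfn >= TBL)                 /* beyond the m2p table: no entry */
                    return INVALID;
            return m2p[mfn];                /* the kernel uses __get_user() here */
    }

    static unsigned long mfn_to_pfn_model(unsigned long mfn)
    {
            unsigned long pfn = mfn_to_pfn_no_overrides_model(mfn);

            /* No round trip back to mfn => foreign page: ask the override table. */
            if (pfn >= TBL || p2m[pfn] != mfn)
                    pfn = (mfn < TBL) ? override_pfn[mfn] : INVALID;
            return pfn;
    }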
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -284,8 +284,13 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
        raw_local_save_flags(eflags);
        BUG_ON(eflags & X86_EFLAGS_AC);
 
-       if (cpu_has(c, X86_FEATURE_SMAP))
+       if (cpu_has(c, X86_FEATURE_SMAP)) {
+#ifdef CONFIG_X86_SMAP
                set_in_cr4(X86_CR4_SMAP);
+#else
+               clear_in_cr4(X86_CR4_SMAP);
+#endif
+       }
 }
 
 /*
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -628,7 +628,7 @@ static void __cpuinit intel_tlb_flushall_shift_set(struct cpuinfo_x86 *c)
                tlb_flushall_shift = 5;
                break;
        case 0x63a: /* Ivybridge */
-               tlb_flushall_shift = 1;
+               tlb_flushall_shift = 2;
                break;
        default:
                tlb_flushall_shift = 6;
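Note: the IvyBridge change only retunes a heuristic. Assuming the 3.10-era flush_tlb_mm_range() logic (full flush once the range exceeds tlb_entries >> tlb_flushall_shift), here is a simplified model of how the shift is consumed; raising the shift from 1 to 2 halves the threshold at which a full TLB flush is preferred over page-by-page flushing:

    /* Simplified model of the tlb_flushall_shift heuristic (our names). */
    static int should_flush_all(unsigned long nr_pages,
                                unsigned long tlb_entries, int shift)
    {
            if (shift < 0)          /* negative shift: ranged flushes disabled */
                    return 1;
            return nr_pages > (tlb_entries >> shift);
    }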
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -77,8 +77,7 @@ within(unsigned long addr, unsigned long start, unsigned long end)
        return addr >= start && addr < end;
 }
 
-static int
-do_ftrace_mod_code(unsigned long ip, const void *new_code)
+static unsigned long text_ip_addr(unsigned long ip)
 {
        /*
         * On x86_64, kernel text mappings are mapped read-only with
@@ -91,7 +90,7 @@ do_ftrace_mod_code(unsigned long ip, const void *new_code)
        if (within(ip, (unsigned long)_text, (unsigned long)_etext))
                ip = (unsigned long)__va(__pa_symbol(ip));
 
-       return probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE);
+       return ip;
 }
 
 static const unsigned char *ftrace_nop_replace(void)
@@ -123,8 +122,10 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
        if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
                return -EINVAL;
 
+       ip = text_ip_addr(ip);
+
        /* replace the text with the new text */
-       if (do_ftrace_mod_code(ip, new_code))
+       if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE))
                return -EPERM;
 
        sync_core();
@@ -221,37 +222,51 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
        return -EINVAL;
 }
 
-int ftrace_update_ftrace_func(ftrace_func_t func)
+static unsigned long ftrace_update_func;
+
+static int update_ftrace_func(unsigned long ip, void *new)
 {
-       unsigned long ip = (unsigned long)(&ftrace_call);
-       unsigned char old[MCOUNT_INSN_SIZE], *new;
+       unsigned char old[MCOUNT_INSN_SIZE];
        int ret;
 
-       memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
-       new = ftrace_call_replace(ip, (unsigned long)func);
+       memcpy(old, (void *)ip, MCOUNT_INSN_SIZE);
+
+       ftrace_update_func = ip;
+       /* Make sure the breakpoints see the ftrace_update_func update */
+       smp_wmb();
 
        /* See comment above by declaration of modifying_ftrace_code */
        atomic_inc(&modifying_ftrace_code);
 
        ret = ftrace_modify_code(ip, old, new);
 
+       atomic_dec(&modifying_ftrace_code);
+
+       return ret;
+}
+
+int ftrace_update_ftrace_func(ftrace_func_t func)
+{
+       unsigned long ip = (unsigned long)(&ftrace_call);
+       unsigned char *new;
+       int ret;
+
+       new = ftrace_call_replace(ip, (unsigned long)func);
+       ret = update_ftrace_func(ip, new);
+
        /* Also update the regs callback function */
        if (!ret) {
                ip = (unsigned long)(&ftrace_regs_call);
-               memcpy(old, &ftrace_regs_call, MCOUNT_INSN_SIZE);
                new = ftrace_call_replace(ip, (unsigned long)func);
-               ret = ftrace_modify_code(ip, old, new);
+               ret = update_ftrace_func(ip, new);
        }
 
-       atomic_dec(&modifying_ftrace_code);
-
        return ret;
 }
 
 static int is_ftrace_caller(unsigned long ip)
 {
-       if (ip == (unsigned long)(&ftrace_call) ||
-           ip == (unsigned long)(&ftrace_regs_call))
+       if (ip == ftrace_update_func)
                return 1;
 
        return 0;
@@ -677,45 +692,41 @@ int __init ftrace_dyn_arch_init(void *data)
 #ifdef CONFIG_DYNAMIC_FTRACE
 extern void ftrace_graph_call(void);
 
-static int ftrace_mod_jmp(unsigned long ip,
-                         int old_offset, int new_offset)
+static unsigned char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
 {
-       unsigned char code[MCOUNT_INSN_SIZE];
+       static union ftrace_code_union calc;
 
-       if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
-               return -EFAULT;
+       /* Jmp not a call (ignore the .e8) */
+       calc.e8 = 0xe9;
+       calc.offset = ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);
 
-       if (code[0] != 0xe9 || old_offset != *(int *)(&code[1]))
-               return -EINVAL;
+       /*
+        * ftrace external locks synchronize the access to the static variable.
+        */
+       return calc.code;
+}
 
-       *(int *)(&code[1]) = new_offset;
+static int ftrace_mod_jmp(unsigned long ip, void *func)
+{
+       unsigned char *new;
 
-       if (do_ftrace_mod_code(ip, &code))
-               return -EPERM;
+       new = ftrace_jmp_replace(ip, (unsigned long)func);
 
-       return 0;
+       return update_ftrace_func(ip, new);
 }
 
 int ftrace_enable_ftrace_graph_caller(void)
 {
        unsigned long ip = (unsigned long)(&ftrace_graph_call);
-       int old_offset, new_offset;
 
-       old_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE);
-       new_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE);
-
-       return ftrace_mod_jmp(ip, old_offset, new_offset);
+       return ftrace_mod_jmp(ip, &ftrace_graph_caller);
 }
 
 int ftrace_disable_ftrace_graph_caller(void)
 {
        unsigned long ip = (unsigned long)(&ftrace_graph_call);
-       int old_offset, new_offset;
 
-       old_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE);
-       new_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE);
-
-       return ftrace_mod_jmp(ip, old_offset, new_offset);
+       return ftrace_mod_jmp(ip, &ftrace_stub);
 }
 
 #endif /* !CONFIG_DYNAMIC_FTRACE */
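Note on the ftrace.c refactor: every live patch of a call/jmp site now funnels through update_ftrace_func(), which records the address under modification in ftrace_update_func before the breakpoint-based patching starts; is_ftrace_caller() then compares trap addresses against that one variable instead of a hard-wired pair of sites. A heavily simplified user-space model of the idea (do_patch() is a hypothetical stand-in for the real int3 dance):

    static volatile unsigned long patching_ip;  /* 0: nothing being patched */

    static int do_patch(unsigned long ip, const void *new_insn)
    {
            (void)ip; (void)new_insn;           /* stub for the real patcher */
            return 0;
    }

    static int patch_site(unsigned long ip, const void *new_insn)
    {
            int ret;

            patching_ip = ip;                   /* publish before patching */
            __sync_synchronize();               /* stand-in for smp_wmb() */
            ret = do_patch(ip, new_insn);
            patching_ip = 0;
            return ret;
    }

    /* The trap handler's question: did this breakpoint come from us? */
    static int trap_is_ours(unsigned long trap_ip)
    {
            return trap_ip == patching_ip;
    }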
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -989,6 +989,12 @@ static int fault_in_kernel_space(unsigned long address)
 
 static inline bool smap_violation(int error_code, struct pt_regs *regs)
 {
+       if (!IS_ENABLED(CONFIG_X86_SMAP))
+               return false;
+
+       if (!static_cpu_has(X86_FEATURE_SMAP))
+               return false;
+
        if (error_code & PF_USER)
                return false;
 
@@ -1091,11 +1097,9 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
        if (unlikely(error_code & PF_RSVD))
                pgtable_bad(regs, error_code, address);
 
-       if (static_cpu_has(X86_FEATURE_SMAP)) {
-               if (unlikely(smap_violation(error_code, regs))) {
-                       bad_area_nosemaphore(regs, error_code, address);
-                       return;
-               }
+       if (unlikely(smap_violation(error_code, regs))) {
+               bad_area_nosemaphore(regs, error_code, address);
+               return;
        }
 
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
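Note on the fault.c hunks: the open-coded static_cpu_has() test at the call site is gone; smap_violation() itself now bails out early, and IS_ENABLED() turns the CONFIG_X86_SMAP check into a compile-time constant so the whole body folds away when the feature is compiled out. A user-space model of the idiom (IS_ENABLED and the names below are redefined here for illustration; in the kernel they come from Kconfig):

    #define IS_ENABLED(cfg) (cfg)       /* kernel: 1/0 from the Kconfig symbol */
    #define CONFIG_MODEL_SMAP 1
    #define PF_USER_MODEL 0x4

    static int smap_violation_model(int error_code, int cpu_has_smap)
    {
            if (!IS_ENABLED(CONFIG_MODEL_SMAP)) /* constant-folds away */
                    return 0;
            if (!cpu_has_smap)
                    return 0;
            if (error_code & PF_USER_MODEL)     /* user-mode fault: not SMAP */
                    return 0;
            return 1;                           /* simplified; the real check
                                                   also inspects EFLAGS.AC */
    }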
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -878,7 +878,6 @@ int m2p_add_override(unsigned long mfn, struct page *page,
        unsigned long uninitialized_var(address);
        unsigned level;
        pte_t *ptep = NULL;
-       int ret = 0;
 
        pfn = page_to_pfn(page);
        if (!PageHighMem(page)) {
@@ -925,8 +924,8 @@ int m2p_add_override(unsigned long mfn, struct page *page,
         * frontend pages while they are being shared with the backend,
         * because mfn_to_pfn (that ends up being called by GUPF) will
         * return the backend pfn rather than the frontend pfn. */
-       ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
-       if (ret == 0 && get_phys_to_machine(pfn) == mfn)
+       pfn = mfn_to_pfn_no_overrides(mfn);
+       if (get_phys_to_machine(pfn) == mfn)
                set_phys_to_machine(pfn, FOREIGN_FRAME(mfn));
 
        return 0;
@@ -941,7 +940,6 @@ int m2p_remove_override(struct page *page,
        unsigned long uninitialized_var(address);
        unsigned level;
        pte_t *ptep = NULL;
-       int ret = 0;
 
        pfn = page_to_pfn(page);
        mfn = get_phys_to_machine(pfn);
@@ -1019,8 +1017,8 @@ int m2p_remove_override(struct page *page,
         * the original pfn causes mfn_to_pfn(mfn) to return the frontend
         * pfn again. */
        mfn &= ~FOREIGN_FRAME_BIT;
-       ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
-       if (ret == 0 && get_phys_to_machine(pfn) == FOREIGN_FRAME(mfn) &&
+       pfn = mfn_to_pfn_no_overrides(mfn);
+       if (get_phys_to_machine(pfn) == FOREIGN_FRAME(mfn) &&
            m2p_find_override(mfn) == NULL)
                set_phys_to_machine(pfn, mfn);
 
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -245,6 +245,15 @@ static void __init xen_smp_prepare_boot_cpu(void)
                   old memory can be recycled */
                make_lowmem_page_readwrite(xen_initial_gdt);
 
+#ifdef CONFIG_X86_32
+               /*
+                * Xen starts us with XEN_FLAT_RING1_DS, but linux code
+                * expects __USER_DS
+                */
+               loadsegment(ds, __USER_DS);
+               loadsegment(es, __USER_DS);
+#endif
+
                xen_filter_cpu_maps();
                xen_setup_vcpu_info_placement();
        }
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -121,6 +121,14 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 
                atomic_inc(&bb.done);
                submit_bio(type, bio);
+
+               /*
+                * We can loop for a long time in here, if someone does
+                * full device discards (like mkfs). Be nice and allow
+                * us to schedule out to avoid softlocking if preempt
+                * is disabled.
+                */
+               cond_resched();
        }
        blk_finish_plug(&plug);
 
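Note on the blk-lib.c hunk: blkdev_issue_discard() can submit an enormous number of bios for a full-device discard, all from one kernel thread; the added cond_resched() keeps that loop from soft-locking a non-preemptible kernel. A user-space analogue of the pattern (sched_yield() standing in for cond_resched()):

    #include <sched.h>

    /* Long-running submission loop: periodically give other tasks a turn. */
    void submit_all(long nchunks)
    {
            for (long i = 0; i < nchunks; i++) {
                    /* ... submit one discard chunk ... */
                    sched_yield();
            }
    }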
Some files were not shown because too many files have changed in this diff.