Merge tag 'v3.10.64' into linux-linaro-lsk

This is the 3.10.64 stable release
Mark Brown committed 2015-01-08 18:54:04 +00:00
37 changed files with 598 additions and 143 deletions


@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 10
-SUBLEVEL = 63
+SUBLEVEL = 64
EXTRAVERSION =
NAME = TOSSUG Baby Fish


@@ -248,7 +248,7 @@ asmlinkage long sys32_setgroups16(int gidsetsize, u16 __user *grouplist)
struct group_info *group_info;
int retval;
-if (!capable(CAP_SETGID))
+if (!may_setgroups())
return -EPERM;
if ((unsigned)gidsetsize > NGROUPS_MAX)
return -EINVAL;

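For context, may_setgroups() tightens the old check because CAP_SETGID alone is too weak inside user namespaces: an unprivileged process can gain CAP_SETGID over a namespace it creates, then call setgroups() to shed a supplementary group that some file uses to deny access. A minimal userspace probe of the new behaviour, as a sketch assuming glibc and unprivileged user namespaces; on a fixed kernel the setgroups() call fails with EPERM until a GID map is written and /proc/[pid]/setgroups still allows it:

    #define _GNU_SOURCE
    #include <grp.h>
    #include <sched.h>
    #include <stdio.h>

    int main(void)
    {
        if (unshare(CLONE_NEWUSER) != 0) {  /* gives us CAP_SETGID in the new ns */
            perror("unshare");
            return 1;
        }
        /* Pre-3.10.64 this succeeded, shedding groups used for "deny by group". */
        if (setgroups(0, NULL) != 0)
            perror("setgroups");            /* expected: EPERM after the fix */
        else
            puts("dropped supplementary groups");
        return 0;
    }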

@@ -28,6 +28,13 @@ struct user_desc {
unsigned int seg_not_present:1;
unsigned int useable:1;
#ifdef __x86_64__
+/*
+ * Because this bit is not present in 32-bit user code, user
+ * programs can pass uninitialized values here. Therefore, in
+ * any context in which a user_desc comes from a 32-bit program,
+ * the kernel must act as though lm == 0, regardless of the
+ * actual value.
+ */
unsigned int lm:1;
#endif
};

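The rule in that comment can be made concrete: the bit only exists in the 64-bit layout of the struct, so whatever a 32-bit caller leaves at that position is garbage, and any compat path has to force it to zero before honoring the descriptor. A standalone sketch of the idea (simplified struct and a hypothetical sanitize helper, not the kernel's actual code):

    #include <stdio.h>

    struct user_desc_mock {
        unsigned int entry_number;
        unsigned int base_addr;
        unsigned int seg_32bit:1;
        unsigned int lm:1;  /* invisible to 32-bit userspace */
    };

    /* hypothetical: what a compat entry path must do with lm */
    static void sanitize_from_compat(struct user_desc_mock *d)
    {
        d->lm = 0;  /* act as though lm == 0, per the comment */
    }

    int main(void)
    {
        struct user_desc_mock d = { .lm = 1 };  /* stand-in for stack garbage */

        sanitize_from_compat(&d);
        printf("lm after sanitize: %u\n", (unsigned int)d.lm);
        return 0;
    }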

@@ -279,7 +279,14 @@ do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
static void __init paravirt_ops_setup(void)
{
pv_info.name = "KVM";
-pv_info.paravirt_enabled = 1;
+/*
+ * KVM isn't paravirt in the sense of paravirt_enabled. A KVM
+ * guest kernel works like a bare metal kernel with additional
+ * features, and paravirt_enabled is about features that are
+ * missing.
+ */
+pv_info.paravirt_enabled = 0;
if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
pv_cpu_ops.io_delay = kvm_io_delay;

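Reduced to a standalone mock, the semantic being fixed is that paravirt_enabled means "legacy segmentation features may be absent", so advertising it made KVM guests skip workarounds, notably espfix32, that they need precisely because they behave like bare metal (pv_info_mock is a simplified stand-in and need_espfix32() a hypothetical gate, not the kernel's exact code):

    #include <stdbool.h>
    #include <stdio.h>

    struct pv_info_mock { int paravirt_enabled; };

    /* hypothetical gate: the 16-bit stack fixup is skipped on "paravirt" */
    static bool need_espfix32(const struct pv_info_mock *pv)
    {
        return !pv->paravirt_enabled;
    }

    int main(void)
    {
        struct pv_info_mock kvm_guest = { .paravirt_enabled = 0 }; /* after the fix */

        printf("espfix32 active on KVM guest: %d\n", need_espfix32(&kvm_guest));
        return 0;
    }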

@@ -265,7 +265,6 @@ void __init kvmclock_init(void)
#endif
kvm_get_preset_lpj();
clocksource_register_hz(&kvm_clock, NSEC_PER_SEC);
-pv_info.paravirt_enabled = 1;
pv_info.name = "KVM";
if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE_STABLE_BIT))


@@ -279,24 +279,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
fpu = switch_fpu_prepare(prev_p, next_p, cpu);
-/*
- * Reload esp0, LDT and the page table pointer:
- */
+/* Reload esp0 and ss1. */
load_sp0(tss, next);
-/*
- * Switch DS and ES.
- * This won't pick up thread selector changes, but I guess that is ok.
- */
-savesegment(es, prev->es);
-if (unlikely(next->es | prev->es))
-loadsegment(es, next->es);
-savesegment(ds, prev->ds);
-if (unlikely(next->ds | prev->ds))
-loadsegment(ds, next->ds);
/* We must save %fs and %gs before load_TLS() because
* %fs and %gs may be cleared by load_TLS().
*
@@ -305,41 +290,101 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
savesegment(fs, fsindex);
savesegment(gs, gsindex);
+/*
+ * Load TLS before restoring any segments so that segment loads
+ * reference the correct GDT entries.
+ */
load_TLS(next, cpu);
/*
-* Leave lazy mode, flushing any hypercalls made here.
-* This must be done before restoring TLS segments so
-* the GDT and LDT are properly updated, and must be
-* done before math_state_restore, so the TS bit is up
-* to date.
+* Leave lazy mode, flushing any hypercalls made here. This
+* must be done after loading TLS entries in the GDT but before
+* loading segments that might reference them, and it must
+* be done before math_state_restore, so the TS bit is up to
+* date.
*/
arch_end_context_switch(next_p);
+/* Switch DS and ES.
+ *
+ * Reading them only returns the selectors, but writing them (if
+ * nonzero) loads the full descriptor from the GDT or LDT. The
+ * LDT for next is loaded in switch_mm, and the GDT is loaded
+ * above.
+ *
+ * We therefore need to write new values to the segment
+ * registers on every context switch unless both the new and old
+ * values are zero.
+ *
+ * Note that we don't need to do anything for CS and SS, as
+ * those are saved and restored as part of pt_regs.
+ */
+savesegment(es, prev->es);
+if (unlikely(next->es | prev->es))
+loadsegment(es, next->es);
+savesegment(ds, prev->ds);
+if (unlikely(next->ds | prev->ds))
+loadsegment(ds, next->ds);
/*
* Switch FS and GS.
*
-* Segment register != 0 always requires a reload. Also
-* reload when it has changed. When prev process used 64bit
-* base always reload to avoid an information leak.
+* These are even more complicated than DS and ES: they have
+* 64-bit bases that are controlled by arch_prctl. Those bases
+* only differ from the values in the GDT or LDT if the selector
+* is 0.
+*
+* Loading the segment register resets the hidden base part of
+* the register to 0 or the value from the GDT / LDT. If the
+* next base address is zero, writing 0 to the segment register is
+* much faster than using wrmsr to explicitly zero the base.
+*
+* The thread_struct.fs and thread_struct.gs values are 0
+* if the fs and gs bases respectively are not overridden
+* from the values implied by fsindex and gsindex. They
+* are nonzero, and store the nonzero base addresses, if
+* the bases are overridden.
+*
+* (fs != 0 && fsindex != 0) || (gs != 0 && gsindex != 0) should
+* be impossible.
+*
+* Therefore we need to reload the segment registers if either
+* the old or new selector is nonzero, and we need to override
+* the base address if next thread expects it to be overridden.
+*
+* This code is unnecessarily slow in the case where the old and
+* new indexes are zero and the new base is nonzero -- it will
+* unnecessarily write 0 to the selector before writing the new
+* base address.
+*
+* Note: This all depends on arch_prctl being the only way that
+* user code can override the segment base. Once wrfsbase and
+* wrgsbase are enabled, most of this code will need to change.
*/
if (unlikely(fsindex | next->fsindex | prev->fs)) {
loadsegment(fs, next->fsindex);
/*
-* Check if the user used a selector != 0; if yes
-* clear 64bit base, since overloaded base is always
-* mapped to the Null selector
+* If user code wrote a nonzero value to FS, then it also
+* cleared the overridden base address.
+*
+* XXX: if user code wrote 0 to FS and cleared the base
+* address itself, we won't notice and we'll incorrectly
+* restore the prior base address next time we reschedule
+* the process.
*/
if (fsindex)
prev->fs = 0;
}
-/* when next process has a 64bit base use it */
if (next->fs)
wrmsrl(MSR_FS_BASE, next->fs);
prev->fsindex = fsindex;
if (unlikely(gsindex | next->gsindex | prev->gs)) {
load_gs_index(next->gsindex);
+/* This works (and fails) the same way as fsindex above. */
if (gsindex)
prev->gs = 0;
}

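The reordering matters because loading a segment register latches its descriptor at load time: with the old order, DS/ES could be reloaded while the GDT still held the previous task's TLS entries. A rough x86-64 sketch in the spirit of the regression tests for this change; it installs a TLS descriptor and keeps its selector in %es across a likely context switch. Note that 64-bit tasks ignore the DS/ES base, so this demonstrates the mechanism rather than a visible failure, and the sleep is an arbitrary way to invite a reschedule:

    #define _GNU_SOURCE
    #include <asm/ldt.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
        struct user_desc desc;
        unsigned short sel, got;

        memset(&desc, 0, sizeof(desc));
        desc.entry_number = -1;          /* let the kernel pick a GDT slot */
        desc.limit = 0xfffff;
        desc.seg_32bit = 1;
        desc.limit_in_pages = 1;
        desc.useable = 1;
        if (syscall(SYS_set_thread_area, &desc) != 0) {
            perror("set_thread_area");
            return 1;
        }
        sel = (desc.entry_number << 3) | 3;  /* GDT selector, RPL 3 */
        asm volatile ("mov %0, %%es" : : "rm" (sel));
        usleep(100000);                      /* invite a context switch */
        asm volatile ("mov %%es, %0" : "=rm" (got));
        printf("es loaded as %#hx, read back as %#hx\n", sel, got);
        return 0;
    }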

@@ -27,6 +27,37 @@ static int get_free_idx(void)
return -ESRCH;
}
+static bool tls_desc_okay(const struct user_desc *info)
+{
+if (LDT_empty(info))
+return true;
+/*
+ * espfix is required for 16-bit data segments, but espfix
+ * only works for LDT segments.
+ */
+if (!info->seg_32bit)
+return false;
+/* Only allow data segments in the TLS array. */
+if (info->contents > 1)
+return false;
+/*
+ * Non-present segments with DPL 3 present an interesting attack
+ * surface. The kernel should handle such segments correctly,
+ * but TLS is very difficult to protect in a sandbox, so prevent
+ * such segments from being created.
+ *
+ * If userspace needs to remove a TLS entry, it can still delete
+ * it outright.
+ */
+if (info->seg_not_present)
+return false;
+return true;
+}
static void set_tls_desc(struct task_struct *p, int idx,
const struct user_desc *info, int n)
{
@@ -66,6 +97,9 @@ int do_set_thread_area(struct task_struct *p, int idx,
if (copy_from_user(&info, u_info, sizeof(info)))
return -EFAULT;
+if (!tls_desc_okay(&info))
+return -EINVAL;
if (idx == -1)
idx = info.entry_number;
@@ -192,6 +226,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
{
struct user_desc infobuf[GDT_ENTRY_TLS_ENTRIES];
const struct user_desc *info;
+int i;
if (pos >= GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) ||
(pos % sizeof(struct user_desc)) != 0 ||
@@ -205,6 +240,10 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
else
info = infobuf;
+for (i = 0; i < count / sizeof(struct user_desc); i++)
+if (!tls_desc_okay(info + i))
+return -EINVAL;
set_tls_desc(target,
GDT_ENTRY_TLS_MIN + (pos / sizeof(struct user_desc)),
info, count / sizeof(struct user_desc));

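The practical effect of tls_desc_okay() is visible from userspace: a 16-bit TLS data segment, which espfix cannot protect in the GDT, is now refused with EINVAL at the system-call boundary. A hedged probe (x86 with glibc; assumes <asm/ldt.h> provides struct user_desc):

    #define _GNU_SOURCE
    #include <asm/ldt.h>
    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
        struct user_desc desc;

        memset(&desc, 0, sizeof(desc));
        desc.entry_number = -1;
        desc.limit = 0xffff;
        desc.seg_32bit = 0;   /* 16-bit data segment: now rejected */
        desc.useable = 1;

        if (syscall(SYS_set_thread_area, &desc) != 0)
            printf("rejected as expected: %s\n", strerror(errno));
        else
            printf("accepted (pre-3.10.64 behaviour)\n");
        return 0;
    }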

@@ -449,6 +449,9 @@ void af_alg_complete(struct crypto_async_request *req, int err)
{
struct af_alg_completion *completion = req->data;
+if (err == -EINPROGRESS)
+return;
completion->err = err;
complete(&completion->completion);
}

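-EINPROGRESS from an async crypto driver is a progress notification, not the final report; completing on it would wake the waiter while the request is still in flight, after which its state could be freed under the driver. A standalone mock of the contract (the completion type is a simplified stand-in for the kernel API):

    #include <errno.h>
    #include <stdio.h>

    struct completion_mock { int done; int err; };

    static void complete_mock(struct completion_mock *c, int err)
    {
        c->done = 1;
        c->err = err;
    }

    /* mirrors the fixed af_alg_complete(): ignore progress reports */
    static void af_alg_complete_mock(struct completion_mock *c, int err)
    {
        if (err == -EINPROGRESS)
            return;
        complete_mock(c, err);
    }

    int main(void)
    {
        struct completion_mock c = { 0, 0 };

        af_alg_complete_mock(&c, -EINPROGRESS);
        printf("done after -EINPROGRESS: %d\n", c.done);  /* still 0 */
        af_alg_complete_mock(&c, 0);
        printf("done after final status: %d\n", c.done);  /* now 1 */
        return 0;
    }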

@@ -883,7 +883,6 @@ void bitmap_unplug(struct bitmap *bitmap)
{
unsigned long i;
int dirty, need_write;
-int wait = 0;
if (!bitmap || !bitmap->storage.filemap ||
test_bit(BITMAP_STALE, &bitmap->flags))
@@ -901,16 +900,13 @@ void bitmap_unplug(struct bitmap *bitmap)
clear_page_attr(bitmap, i, BITMAP_PAGE_PENDING);
write_page(bitmap, bitmap->storage.filemap[i], 0);
}
-if (dirty)
-wait = 1;
}
-if (wait) { /* if any writes were performed, we need to wait on them */
-if (bitmap->storage.file)
-wait_event(bitmap->write_wait,
-atomic_read(&bitmap->pending_writes)==0);
-else
-md_super_wait(bitmap->mddev);
-}
+if (bitmap->storage.file)
+wait_event(bitmap->write_wait,
+atomic_read(&bitmap->pending_writes)==0);
+else
+md_super_wait(bitmap->mddev);
if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
bitmap_file_kick(bitmap);
}

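Tracking whether this caller saw a dirty page is racy: if two threads unplug concurrently, one can submit all the writes while the other sees nothing dirty and skips the wait it actually needs. Waiting unconditionally is also free when nothing is pending, since the predicate is already true. A standalone mock of that, with the sleeping of wait_event() reduced to a predicate check:

    #include <stdio.h>

    static int pending_writes;   /* would be bumped per submitted write */

    /* stand-in for wait_event(write_wait, pending_writes == 0) */
    static void wait_for_writes(void)
    {
        while (pending_writes != 0)
            ;   /* the kernel sleeps here; nothing to do when 0 */
    }

    int main(void)
    {
        wait_for_writes();   /* nothing pending: returns immediately */
        puts("unconditional wait cost nothing");
        return 0;
    }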

@@ -529,6 +529,19 @@ static void use_dmio(struct dm_buffer *b, int rw, sector_t block,
end_io(&b->bio, r);
}
+static void inline_endio(struct bio *bio, int error)
+{
+bio_end_io_t *end_fn = bio->bi_private;
+/*
+ * Reset the bio to free any attached resources
+ * (e.g. bio integrity profiles).
+ */
+bio_reset(bio);
+end_fn(bio, error);
+}
static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
bio_end_io_t *end_io)
{
@@ -540,7 +553,12 @@ static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS;
b->bio.bi_sector = block << b->c->sectors_per_block_bits;
b->bio.bi_bdev = b->c->bdev;
-b->bio.bi_end_io = end_io;
+b->bio.bi_end_io = inline_endio;
+/*
+ * Use of .bi_private isn't a problem here because
+ * the dm_buffer's inline bio is local to bufio.
+ */
+b->bio.bi_private = end_io;
/*
* We assume that if len >= PAGE_SIZE ptr is page-aligned.

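inline_endio() is a trampoline: release the bio's attached resources first (bio_reset() also wipes the bio's fields, which is why the real completion is parked in bi_private) and only then invoke it. A standalone mock of the pattern with fake types; bio_reset_mock() stands in for bio_reset():

    #include <stdio.h>

    struct fake_bio {
        void *bi_private;    /* parks the caller's completion fn */
        int has_integrity;   /* stands in for attached resources */
    };

    typedef void end_io_fn(struct fake_bio *, int);

    static void bio_reset_mock(struct fake_bio *bio)
    {
        bio->has_integrity = 0;   /* bio_reset() frees these for real */
        bio->bi_private = NULL;   /* ...and clears the bio's fields */
    }

    static void caller_end_io(struct fake_bio *bio, int error)
    {
        printf("io done: error=%d leaked=%d\n", error, bio->has_integrity);
    }

    /* mirrors inline_endio(): release first, then complete */
    static void inline_endio_mock(struct fake_bio *bio, int error)
    {
        end_io_fn *end_fn = (end_io_fn *)bio->bi_private;

        bio_reset_mock(bio);
        end_fn(bio, error);
    }

    int main(void)
    {
        struct fake_bio bio = { (void *)caller_end_io, 1 };

        inline_endio_mock(&bio, 0);   /* prints leaked=0 */
        return 0;
    }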

@@ -493,7 +493,9 @@ static int sm_bootstrap_get_nr_blocks(struct dm_space_map *sm, dm_block_t *count
{
struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
-return smm->ll.nr_blocks;
+*count = smm->ll.nr_blocks;
+return 0;
}
static int sm_bootstrap_get_nr_free(struct dm_space_map *sm, dm_block_t *count)

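The one-liner fixes a classic out-parameter slip: in an errno-style "int f(..., T *out)" API the return value is a status, so returning the datum both loses it and reads as a failure code to the caller. A standalone illustration:

    #include <stdio.h>

    typedef unsigned long long dm_block_t;

    static dm_block_t nr_blocks = 1024;

    /* buggy: *count is never written and 1024 is no status code */
    static int get_nr_blocks_buggy(dm_block_t *count)
    {
        (void)count;
        return nr_blocks;
    }

    /* fixed: store through the out-parameter, return 0 for success */
    static int get_nr_blocks_fixed(dm_block_t *count)
    {
        *count = nr_blocks;
        return 0;
    }

    int main(void)
    {
        dm_block_t n = 0;
        int r = get_nr_blocks_buggy(&n);

        printf("buggy: r=%d (reads as error), n=%llu (never set)\n", r, n);
        if (get_nr_blocks_fixed(&n) == 0)
            printf("fixed: n=%llu\n", n);
        return 0;
    }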

@@ -263,6 +263,17 @@ static int tc6393xb_ohci_disable(struct platform_device *dev)
return 0;
}
+static int tc6393xb_ohci_suspend(struct platform_device *dev)
+{
+struct tc6393xb_platform_data *tcpd = dev_get_platdata(dev->dev.parent);
+/* We can't properly store/restore OHCI state, so fail here */
+if (tcpd->resume_restore)
+return -EBUSY;
+return tc6393xb_ohci_disable(dev);
+}
static int tc6393xb_fb_enable(struct platform_device *dev)
{
struct tc6393xb *tc6393xb = dev_get_drvdata(dev->dev.parent);
@@ -403,7 +414,7 @@ static struct mfd_cell tc6393xb_cells[] = {
.num_resources = ARRAY_SIZE(tc6393xb_ohci_resources),
.resources = tc6393xb_ohci_resources,
.enable = tc6393xb_ohci_enable,
-.suspend = tc6393xb_ohci_disable,
+.suspend = tc6393xb_ohci_suspend,
.resume = tc6393xb_ohci_enable,
.disable = tc6393xb_ohci_disable,
},


@@ -257,7 +257,7 @@ static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
int ret;
struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
-ret = snprintf(buf, PAGE_SIZE, "%d",
+ret = snprintf(buf, PAGE_SIZE, "%d\n",
get_disk_ro(dev_to_disk(dev)) ^
md->read_only);
mmc_blk_put(md);


@@ -933,7 +933,7 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
abort_fr->abort_mfi_phys_addr_hi = 0;
cmd->sync_cmd = 1;
-cmd->cmd_status = 0xFF;
+cmd->cmd_status = ENODATA;
instance->instancet->issue_dcmd(instance, cmd);

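As the patch subject suggests, the waiter in this function sleeps on a predicate of the form cmd->cmd_status != ENODATA (not shown in this excerpt), so initializing the status to 0xFF made the predicate true immediately and wait_event() could return before the firmware had completed the abort. A standalone mock of the handshake, with names simplified:

    #include <errno.h>
    #include <stdio.h>

    #define PENDING ENODATA   /* sentinel shared by waiter and completer */

    static volatile int cmd_status = PENDING;

    /* stand-in for wait_event(abort_cmd_wait_q, cmd_status != PENDING) */
    static int wait_would_return(void)
    {
        return cmd_status != PENDING;
    }

    int main(void)
    {
        printf("returns before completion? %d\n", wait_would_return()); /* 0 */
        cmd_status = 0;   /* firmware reports a real status */
        printf("returns after completion?  %d\n", wait_would_return()); /* 1 */
        return 0;
    }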

@@ -3857,12 +3857,6 @@ again:
if (ret)
break;
-/* opt_discard */
-if (btrfs_test_opt(root, DISCARD))
-ret = btrfs_error_discard_extent(root, start,
-end + 1 - start,
-NULL);
clear_extent_dirty(unpin, start, end, GFP_NOFS);
btrfs_error_unpin_extent_range(root, start, end);
cond_resched();


@@ -5277,7 +5277,8 @@ void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
update_global_block_rsv(fs_info);
}
-static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
+static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end,
+const bool return_free_space)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_block_group_cache *cache = NULL;
@@ -5301,7 +5302,8 @@ static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
if (start < cache->last_byte_to_unpin) {
len = min(len, cache->last_byte_to_unpin - start);
-btrfs_add_free_space(cache, start, len);
+if (return_free_space)
+btrfs_add_free_space(cache, start, len);
}
start += len;
@@ -5364,7 +5366,7 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
end + 1 - start, NULL);
clear_extent_dirty(unpin, start, end, GFP_NOFS);
-unpin_extent_range(root, start, end);
+unpin_extent_range(root, start, end, true);
cond_resched();
}
@@ -8564,7 +8566,7 @@ out:
int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
{
-return unpin_extent_range(root, start, end);
+return unpin_extent_range(root, start, end, false);
}
int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,

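The new flag lets one walker serve two callers with different invariants: the normal commit path returns unpinned ranges to the in-memory free-space cache, while the error/abort path (btrfs_error_unpin_extent_range(), plus the discard call deleted in the previous hunk) must only clear accounting on an already-failed filesystem. A standalone mock of that shape:

    #include <stdbool.h>
    #include <stdio.h>

    static void add_free_space(unsigned long long start, unsigned long long len)
    {
        printf("free-space cache: +[%llu, +%llu)\n", start, len);
    }

    static void unpin_range(unsigned long long start, unsigned long long end,
                            bool return_free_space)
    {
        if (return_free_space)
            add_free_space(start, end - start);
        /* bookkeeping shared by both paths would go here */
    }

    int main(void)
    {
        unpin_range(4096, 8192, true);    /* normal transaction commit */
        unpin_range(4096, 8192, false);   /* cleanup after an aborted transaction */
        return 0;
    }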

@@ -263,8 +263,6 @@ int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len,
if (!em)
goto out;
-if (!test_bit(EXTENT_FLAG_LOGGING, &em->flags))
-list_move(&em->list, &tree->modified_extents);
em->generation = gen;
clear_bit(EXTENT_FLAG_PINNED, &em->flags);
em->mod_start = em->start;


@@ -2102,7 +2102,6 @@ ecryptfs_decode_from_filename(unsigned char *dst, size_t *dst_size,
break;
case 2:
dst[dst_byte_offset++] |= (src_byte);
-dst[dst_byte_offset] = 0;
current_bit_offset = 0;
break;
}

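The deleted store targets dst[dst_byte_offset] after the increment, i.e. one slot past the byte just completed; when the output buffer is sized exactly, that write lands out of bounds, and it is unnecessary anyway because the final state completes the current byte. A standalone base64-style repacking mock, not eCryptfs's exact state machine, showing that the last state needs no write-ahead:

    #include <stdio.h>

    int main(void)
    {
        unsigned char src[4] = { 0x10, 0x05, 0x3f, 0x2a };   /* 6-bit codes */
        unsigned char dst[3] = { 0 };   /* exactly sized output */
        int dst_off = 0, bit_off = 0, i;

        for (i = 0; i < 4; i++) {
            unsigned char c = src[i];

            switch (bit_off) {
            case 0:
                dst[dst_off] = c << 2;
                bit_off = 6;
                break;
            case 6:
                dst[dst_off++] |= c >> 4;
                dst[dst_off] = c << 4;
                bit_off = 4;
                break;
            case 4:
                dst[dst_off++] |= c >> 2;
                dst[dst_off] = c << 6;
                bit_off = 2;
                break;
            case 2:
                /* byte complete; a write to dst[dst_off] after the
                 * increment would touch one slot past the output */
                dst[dst_off++] |= c;
                bit_off = 0;
                break;
            }
        }
        printf("%02x %02x %02x\n", dst[0], dst[1], dst[2]);
        return 0;
    }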

@@ -196,23 +196,11 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
{
int rc = 0;
struct ecryptfs_crypt_stat *crypt_stat = NULL;
-struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
struct dentry *ecryptfs_dentry = file->f_path.dentry;
/* Private value of ecryptfs_dentry allocated in
* ecryptfs_lookup() */
struct ecryptfs_file_info *file_info;
-mount_crypt_stat = &ecryptfs_superblock_to_private(
-ecryptfs_dentry->d_sb)->mount_crypt_stat;
-if ((mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED)
-&& ((file->f_flags & O_WRONLY) || (file->f_flags & O_RDWR)
-|| (file->f_flags & O_CREAT) || (file->f_flags & O_TRUNC)
-|| (file->f_flags & O_APPEND))) {
-printk(KERN_WARNING "Mount has encrypted view enabled; "
-"files may only be read\n");
-rc = -EPERM;
-goto out;
-}
/* Released in ecryptfs_release or end of function if failure */
file_info = kmem_cache_zalloc(ecryptfs_file_info_cache, GFP_KERNEL);
ecryptfs_set_file_private(file, file_info);


@@ -494,6 +494,7 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
{
struct super_block *s;
struct ecryptfs_sb_info *sbi;
+struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
struct ecryptfs_dentry_info *root_info;
const char *err = "Getting sb failed";
struct inode *inode;
@@ -512,6 +513,7 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
err = "Error parsing options";
goto out;
}
+mount_crypt_stat = &sbi->mount_crypt_stat;
s = sget(fs_type, NULL, set_anon_super, flags, NULL);
if (IS_ERR(s)) {
@@ -558,11 +560,19 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
/**
* Set the POSIX ACL flag based on whether they're enabled in the lower
-* mount. Force a read-only eCryptfs mount if the lower mount is ro.
-* Allow a ro eCryptfs mount even when the lower mount is rw.
+* mount.
*/
s->s_flags = flags & ~MS_POSIXACL;
-s->s_flags |= path.dentry->d_sb->s_flags & (MS_RDONLY | MS_POSIXACL);
+s->s_flags |= path.dentry->d_sb->s_flags & MS_POSIXACL;
+/**
+ * Force a read-only eCryptfs mount when:
+ * 1) The lower mount is ro
+ * 2) The ecryptfs_encrypted_view mount option is specified
+ */
+if (path.dentry->d_sb->s_flags & MS_RDONLY ||
+mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED)
+s->s_flags |= MS_RDONLY;
s->s_maxbytes = path.dentry->d_sb->s_maxbytes;
s->s_blocksize = path.dentry->d_sb->s_blocksize;

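Moving the policy from ecryptfs_open() to mount time means MS_RDONLY covers every write path, not just the open flags the old check enumerated. A hedged usage sketch: the paths are placeholders, the option string is truncated to the one option named in the diff (a real eCryptfs mount also needs key and cipher options), and the call requires privilege:

    #include <stdio.h>
    #include <sys/mount.h>

    int main(void)
    {
        /* On >= 3.10.64 the resulting mount is read-only even though
         * MS_RDONLY was not passed, because encrypted view is enabled. */
        if (mount("/lower", "/mnt/secret", "ecryptfs", 0,
                  "ecryptfs_encrypted_view") != 0)
            perror("mount");
        return 0;
    }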
Some files were not shown because too many files have changed in this diff.