linux-apfs (mirror of https://github.com/linux-apfs/linux-apfs.git)
Merge branch 'akpm' (patches from Andrew Morton)
Merge fixes from Andrew Morton:
"Bunch of fixes.
And a reversion of mhocko's "Soft limit rework" patch series. This is
actually your fault for opening the merge window when I was off racing ;)
I didn't read the email thread before sending everything off.
Johannes Weiner raised significant issues:
http://www.spinics.net/lists/cgroups/msg08813.html
and we agreed to back it all out"
I clearly need to be more aware of Andrew's racing schedule.
* akpm:
MAINTAINERS: update mach-bcm related email address
checkpatch: make extern in .h prototypes quieter
cciss: fix info leak in cciss_ioctl32_passthru()
cpqarray: fix info leak in ida_locked_ioctl()
kernel/reboot.c: re-enable the function of variable reboot_default
audit: fix endless wait in audit_log_start()
revert "memcg, vmscan: integrate soft reclaim tighter with zone shrinking code"
revert "memcg: get rid of soft-limit tree infrastructure"
revert "vmscan, memcg: do softlimit reclaim also for targeted reclaim"
revert "memcg: enhance memcg iterator to support predicates"
revert "memcg: track children in soft limit excess to improve soft limit"
revert "memcg, vmscan: do not attempt soft limit reclaim if it would not scan anything"
revert "memcg: track all children over limit in the root"
revert "memcg, vmscan: do not fall into reclaim-all pass too quickly"
fs/ocfs2/super.c: use a bigger nodestr in ocfs2_dismount_volume
watchdog: update watchdog_thresh properly
watchdog: update watchdog attributes atomically
MAINTAINERS +2 -1
@@ -1812,7 +1812,8 @@ S: Supported
 F:	drivers/net/ethernet/broadcom/bnx2x/
 
 BROADCOM BCM281XX/BCM11XXX ARM ARCHITECTURE
-M:	Christian Daudt <csd@broadcom.com>
+M:	Christian Daudt <bcm@fixthebug.org>
+L:	bcm-kernel-feedback-list@broadcom.com
 T:	git git://git.github.com/broadcom/bcm11351
 S:	Maintained
 F:	arch/arm/mach-bcm/
drivers/block/cciss.c
@@ -1189,6 +1189,7 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
 	int err;
 	u32 cp;
 
+	memset(&arg64, 0, sizeof(arg64));
 	err = 0;
 	err |=
 	    copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
drivers/block/cpqarray.c
@@ -1193,6 +1193,7 @@ out_passthru:
 		ida_pci_info_struct pciinfo;
 
 		if (!arg) return -EINVAL;
+		memset(&pciinfo, 0, sizeof(pciinfo));
 		pciinfo.bus = host->pci_dev->bus->number;
 		pciinfo.dev_fn = host->pci_dev->devfn;
 		pciinfo.board_id = host->board_id;
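Both info-leak fixes above follow the same pattern: a struct assembled on the kernel stack is copied out to user space, but only some of its bytes are ever written, so compiler-inserted padding and any unwritten fields still carry stale stack contents. Zeroing the whole struct before filling it closes the leak. A minimal stand-alone sketch of the idiom (the struct layout and helper are hypothetical, not the drivers' real types):

	#include <string.h>

	struct info {
		int bus;	/* written below */
		int dev_fn;	/* written below */
		/* the compiler may insert padding here; without the memset
		 * these bytes are never written and would leak out */
		long board_id;
	};

	static void fill_info(struct info *out, int bus, int dev_fn, long board_id)
	{
		memset(out, 0, sizeof(*out));	/* the actual fix */
		out->bus = bus;
		out->dev_fn = dev_fn;
		out->board_id = board_id;
	}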
fs/ocfs2/super.c +1 -1
@@ -1924,7 +1924,7 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err)
 {
 	int tmp, hangup_needed = 0;
 	struct ocfs2_super *osb = NULL;
-	char nodestr[8];
+	char nodestr[12];
 
 	trace_ocfs2_dismount_volume(sb);
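This is a plain buffer-size fix: ocfs2_dismount_volume() formats the node number into nodestr with snprintf(..., "%u", ...), and a 32-bit value printed as %u can need up to 10 digits plus the terminating NUL, which cannot fit in char[8]; char[12] covers the worst case. A stand-alone illustration of the bound (user-space C, not the kernel code):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		char nodestr[12];
		/* worst case for %u on a 32-bit value: 4294967295, 10 digits */
		int n = snprintf(nodestr, sizeof(nodestr), "%u", UINT32_MAX);
		printf("%s needs %d chars plus a NUL\n", nodestr, n);
		return 0;
	}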
include/linux/memcontrol.h +10 -45
@@ -53,23 +53,6 @@ struct mem_cgroup_reclaim_cookie {
 	unsigned int generation;
 };
 
-enum mem_cgroup_filter_t {
-	VISIT,		/* visit current node */
-	SKIP,		/* skip the current node and continue traversal */
-	SKIP_TREE,	/* skip the whole subtree and continue traversal */
-};
-
-/*
- * mem_cgroup_filter_t predicate might instruct mem_cgroup_iter_cond how to
- * iterate through the hierarchy tree. Each tree element is checked by the
- * predicate before it is returned by the iterator. If a filter returns
- * SKIP or SKIP_TREE then the iterator code continues traversal (with the
- * next node down the hierarchy or the next node that doesn't belong under the
- * memcg's subtree).
- */
-typedef enum mem_cgroup_filter_t
-(*mem_cgroup_iter_filter)(struct mem_cgroup *memcg, struct mem_cgroup *root);
-
 #ifdef CONFIG_MEMCG
 /*
  * All "charge" functions with gfp_mask should use GFP_KERNEL or
@@ -137,18 +120,9 @@ mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
 extern void mem_cgroup_end_migration(struct mem_cgroup *memcg,
 	struct page *oldpage, struct page *newpage, bool migration_ok);
 
-struct mem_cgroup *mem_cgroup_iter_cond(struct mem_cgroup *root,
-				   struct mem_cgroup *prev,
-				   struct mem_cgroup_reclaim_cookie *reclaim,
-				   mem_cgroup_iter_filter cond);
-
-static inline struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
-				   struct mem_cgroup *prev,
-				   struct mem_cgroup_reclaim_cookie *reclaim)
-{
-	return mem_cgroup_iter_cond(root, prev, reclaim, NULL);
-}
-
+struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
+				   struct mem_cgroup *,
+				   struct mem_cgroup_reclaim_cookie *);
 void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
 
 /*
@@ -260,9 +234,9 @@ static inline void mem_cgroup_dec_page_stat(struct page *page,
 	mem_cgroup_update_page_stat(page, idx, -1);
 }
 
-enum mem_cgroup_filter_t
-mem_cgroup_soft_reclaim_eligible(struct mem_cgroup *memcg,
-		struct mem_cgroup *root);
+unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
+						gfp_t gfp_mask,
+						unsigned long *total_scanned);
 
 void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
 static inline void mem_cgroup_count_vm_event(struct mm_struct *mm,
@@ -376,15 +350,6 @@ static inline void mem_cgroup_end_migration(struct mem_cgroup *memcg,
 	struct page *oldpage, struct page *newpage, bool migration_ok)
 {
 }
-static inline struct mem_cgroup *
-mem_cgroup_iter_cond(struct mem_cgroup *root,
-		struct mem_cgroup *prev,
-		struct mem_cgroup_reclaim_cookie *reclaim,
-		mem_cgroup_iter_filter cond)
-{
-	/* first call must return non-NULL, second return NULL */
-	return (struct mem_cgroup *)(unsigned long)!prev;
-}
 
 static inline struct mem_cgroup *
 mem_cgroup_iter(struct mem_cgroup *root,
@@ -471,11 +436,11 @@ static inline void mem_cgroup_dec_page_stat(struct page *page,
 }
 
 static inline
-enum mem_cgroup_filter_t
-mem_cgroup_soft_reclaim_eligible(struct mem_cgroup *memcg,
-		struct mem_cgroup *root)
+unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
+					    gfp_t gfp_mask,
+					    unsigned long *total_scanned)
 {
-	return VISIT;
+	return 0;
 }
 
 static inline void mem_cgroup_split_huge_fixup(struct page *head)

include/linux/smp.h
@@ -155,6 +155,12 @@ smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
 
 static inline void kick_all_cpus_sync(void) { }
 
+static inline void __smp_call_function_single(int cpuid,
+		struct call_single_data *data, int wait)
+{
+	on_each_cpu(data->func, data->info, wait);
+}
+
 #endif /* !SMP */
 
 /*
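For context on the API being restored: the reverts drop the predicate-driven mem_cgroup_iter_cond()/mem_cgroup_iter_filter machinery and go back to the plain hierarchy walk. As the mm/vmscan.c diff below also shows, the calling convention for mem_cgroup_iter() is a resumable loop that must either run until NULL or bail out through mem_cgroup_iter_break(); a sketch of the pattern (the per-group helpers are hypothetical):

	struct mem_cgroup *memcg;

	memcg = mem_cgroup_iter(root, NULL, &reclaim);
	do {
		reclaim_one_group(memcg);	/* hypothetical per-group work */
		if (enough_reclaimed()) {	/* hypothetical early-exit test */
			/* leaving early: let the iterator drop its reference */
			mem_cgroup_iter_break(root, memcg);
			break;
		}
		memcg = mem_cgroup_iter(root, memcg, &reclaim);
	} while (memcg);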
kernel/audit.c +3 -2
@@ -1117,9 +1117,10 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
 
 			sleep_time = timeout_start + audit_backlog_wait_time -
 					jiffies;
-			if ((long)sleep_time > 0)
+			if ((long)sleep_time > 0) {
 				wait_for_auditd(sleep_time);
-			continue;
+				continue;
+			}
 		}
 		if (audit_rate_check() && printk_ratelimit())
 			printk(KERN_WARNING
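The audit fix repairs a classic unbraced-statement bug: the continue was indented as if it belonged to the if, but without braces it ran on every iteration, so once sleep_time reached zero or below the loop could never fall through to the rate-limited warning and waited endlessly. A reduced sketch of the corrected control flow (helper names hypothetical):

	#include <stdbool.h>

	extern bool backlog_full(void);
	extern long deadline_remaining(void);
	extern void wait_a_bit(long t);
	extern void warn_and_give_up(void);

	void drain(bool can_wait)
	{
		while (backlog_full()) {
			if (can_wait) {
				long sleep_time = deadline_remaining();
				/* pre-fix, 'continue;' sat below this 'if'
				 * unbraced and ran even when sleep_time <= 0 */
				if (sleep_time > 0) {
					wait_a_bit(sleep_time);
					continue;
				}
				/* deadline passed: fall through */
			}
			warn_and_give_up();
			break;
		}
	}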
kernel/reboot.c +8 -1
@@ -32,7 +32,14 @@ EXPORT_SYMBOL(cad_pid);
 #endif
 enum reboot_mode reboot_mode DEFAULT_REBOOT_MODE;
 
-int reboot_default;
+/*
+ * This variable is used privately to keep track of whether or not
+ * reboot_type is still set to its default value (i.e., reboot= hasn't
+ * been set on the command line). This is needed so that we can
+ * suppress DMI scanning for reboot quirks. Without it, it's
+ * impossible to override a faulty reboot quirk without recompiling.
+ */
+int reboot_default = 1;
 int reboot_cpu;
 enum reboot_type reboot_type = BOOT_ACPI;
 int reboot_force;
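The restored comment spells out the contract: reboot_default must start as 1 ("still the compiled-in default") and be cleared when the user passes reboot= on the command line; defining it without the initializer left it at 0, so the kernel behaved as if the user had always overridden the reboot method. A simplified sketch of how the flag is consumed (reboot_setup() is the real parser in kernel/reboot.c; the quirk helper is hypothetical and the x86 quirk plumbing is omitted):

	static int __init reboot_setup(char *str)
	{
		reboot_default = 0;	/* explicit reboot=..., stop guessing */
		/* ... parse str into reboot_mode / reboot_type / reboot_cpu ... */
		return 1;
	}
	__setup("reboot=", reboot_setup);

	static void apply_dmi_reboot_quirk(enum reboot_type quirk)
	{
		if (reboot_default)	/* honor quirks only if not overridden */
			reboot_type = quirk;
	}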
kernel/watchdog.c +55 -5
@@ -486,7 +486,52 @@ static struct smp_hotplug_thread watchdog_threads = {
 	.unpark			= watchdog_enable,
 };
 
-static int watchdog_enable_all_cpus(void)
+static void restart_watchdog_hrtimer(void *info)
+{
+	struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
+	int ret;
+
+	/*
+	 * No need to cancel and restart hrtimer if it is currently executing
+	 * because it will reprogram itself with the new period now.
+	 * We should never see it unqueued here because we are running per-cpu
+	 * with interrupts disabled.
+	 */
+	ret = hrtimer_try_to_cancel(hrtimer);
+	if (ret == 1)
+		hrtimer_start(hrtimer, ns_to_ktime(sample_period),
+				HRTIMER_MODE_REL_PINNED);
+}
+
+static void update_timers(int cpu)
+{
+	struct call_single_data data = {.func = restart_watchdog_hrtimer};
+	/*
+	 * Make sure that perf event counter will adopt to a new
+	 * sampling period. Updating the sampling period directly would
+	 * be much nicer but we do not have an API for that now so
+	 * let's use a big hammer.
+	 * Hrtimer will adopt the new period on the next tick but this
+	 * might be late already so we have to restart the timer as well.
+	 */
+	watchdog_nmi_disable(cpu);
+	__smp_call_function_single(cpu, &data, 1);
+	watchdog_nmi_enable(cpu);
+}
+
+static void update_timers_all_cpus(void)
+{
+	int cpu;
+
+	get_online_cpus();
+	preempt_disable();
+	for_each_online_cpu(cpu)
+		update_timers(cpu);
+	preempt_enable();
+	put_online_cpus();
+}
+
+static int watchdog_enable_all_cpus(bool sample_period_changed)
 {
 	int err = 0;
 
@@ -496,6 +541,8 @@ static int watchdog_enable_all_cpus(void)
 			pr_err("Failed to create watchdog threads, disabled\n");
 		else
 			watchdog_running = 1;
+	} else if (sample_period_changed) {
+		update_timers_all_cpus();
 	}
 
 	return err;
@@ -520,13 +567,15 @@ int proc_dowatchdog(struct ctl_table *table, int write,
 		    void __user *buffer, size_t *lenp, loff_t *ppos)
 {
 	int err, old_thresh, old_enabled;
+	static DEFINE_MUTEX(watchdog_proc_mutex);
 
+	mutex_lock(&watchdog_proc_mutex);
 	old_thresh = ACCESS_ONCE(watchdog_thresh);
 	old_enabled = ACCESS_ONCE(watchdog_user_enabled);
 
 	err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
 	if (err || !write)
-		return err;
+		goto out;
 
 	set_sample_period();
 	/*
@@ -535,7 +584,7 @@ int proc_dowatchdog(struct ctl_table *table, int write,
 	 * watchdog_*_all_cpus() function takes care of this.
 	 */
 	if (watchdog_user_enabled && watchdog_thresh)
-		err = watchdog_enable_all_cpus();
+		err = watchdog_enable_all_cpus(old_thresh != watchdog_thresh);
 	else
 		watchdog_disable_all_cpus();
 
@@ -544,7 +593,8 @@ int proc_dowatchdog(struct ctl_table *table, int write,
 		watchdog_thresh = old_thresh;
 		watchdog_user_enabled = old_enabled;
 	}
-
+out:
+	mutex_unlock(&watchdog_proc_mutex);
 	return err;
 }
 #endif /* CONFIG_SYSCTL */
@@ -554,5 +604,5 @@ void __init lockup_detector_init(void)
 	set_sample_period();
 
 	if (watchdog_user_enabled)
-		watchdog_enable_all_cpus();
+		watchdog_enable_all_cpus(false);
 }
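A note on the hrtimer calls above: hrtimer_try_to_cancel() returns 1 when the timer was queued and is now cancelled (only then does restart_watchdog_hrtimer() re-arm it with the new sample_period), 0 when the timer was not queued at all, and -1 when its callback is currently running, in which case the callback itself picks up the new period. Because update_timers() runs the function on the target CPU via __smp_call_function_single() (whose uniprocessor stub is the include/linux/smp.h hunk above) with interrupts disabled, the not-queued case is not expected, which is what the comment in the hunk asserts. The proc_dowatchdog() changes then serialize concurrent writers with watchdog_proc_mutex and only kick the per-cpu timers when the threshold actually changed.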
mm/memcontrol.c +404 -150
(file diff suppressed because it is too large)
mm/vmscan.c +31 -52
@@ -139,23 +139,11 @@ static bool global_reclaim(struct scan_control *sc)
 {
 	return !sc->target_mem_cgroup;
 }
-
-static bool mem_cgroup_should_soft_reclaim(struct scan_control *sc)
-{
-	struct mem_cgroup *root = sc->target_mem_cgroup;
-	return !mem_cgroup_disabled() &&
-		mem_cgroup_soft_reclaim_eligible(root, root) != SKIP_TREE;
-}
 #else
 static bool global_reclaim(struct scan_control *sc)
 {
 	return true;
 }
-
-static bool mem_cgroup_should_soft_reclaim(struct scan_control *sc)
-{
-	return false;
-}
 #endif
 
 unsigned long zone_reclaimable_pages(struct zone *zone)
@@ -2176,11 +2164,9 @@ static inline bool should_continue_reclaim(struct zone *zone,
 	}
 }
 
-static int
-__shrink_zone(struct zone *zone, struct scan_control *sc, bool soft_reclaim)
+static void shrink_zone(struct zone *zone, struct scan_control *sc)
 {
 	unsigned long nr_reclaimed, nr_scanned;
-	int groups_scanned = 0;
 
 	do {
 		struct mem_cgroup *root = sc->target_mem_cgroup;
@@ -2188,17 +2174,15 @@ __shrink_zone(struct zone *zone, struct scan_control *sc, bool soft_reclaim)
 			.zone = zone,
 			.priority = sc->priority,
 		};
-		struct mem_cgroup *memcg = NULL;
-		mem_cgroup_iter_filter filter = (soft_reclaim) ?
-			mem_cgroup_soft_reclaim_eligible : NULL;
+		struct mem_cgroup *memcg;
 
 		nr_reclaimed = sc->nr_reclaimed;
 		nr_scanned = sc->nr_scanned;
 
-		while ((memcg = mem_cgroup_iter_cond(root, memcg, &reclaim, filter))) {
+		memcg = mem_cgroup_iter(root, NULL, &reclaim);
+		do {
 			struct lruvec *lruvec;
 
-			groups_scanned++;
 			lruvec = mem_cgroup_zone_lruvec(zone, memcg);
 
 			shrink_lruvec(lruvec, sc);
@@ -2218,7 +2202,8 @@ __shrink_zone(struct zone *zone, struct scan_control *sc, bool soft_reclaim)
 				mem_cgroup_iter_break(root, memcg);
 				break;
 			}
-		}
+			memcg = mem_cgroup_iter(root, memcg, &reclaim);
+		} while (memcg);
 
 		vmpressure(sc->gfp_mask, sc->target_mem_cgroup,
 			   sc->nr_scanned - nr_scanned,
@@ -2226,37 +2211,6 @@ __shrink_zone(struct zone *zone, struct scan_control *sc, bool soft_reclaim)
 
 	} while (should_continue_reclaim(zone, sc->nr_reclaimed - nr_reclaimed,
 					 sc->nr_scanned - nr_scanned, sc));
-
-	return groups_scanned;
-}
-
-static void shrink_zone(struct zone *zone, struct scan_control *sc)
-{
-	bool do_soft_reclaim = mem_cgroup_should_soft_reclaim(sc);
-	unsigned long nr_scanned = sc->nr_scanned;
-	int scanned_groups;
-
-	scanned_groups = __shrink_zone(zone, sc, do_soft_reclaim);
-	/*
-	 * memcg iterator might race with other reclaimer or start from
-	 * a incomplete tree walk so the tree walk in __shrink_zone
-	 * might have missed groups that are above the soft limit. Try
-	 * another loop to catch up with others. Do it just once to
-	 * prevent from reclaim latencies when other reclaimers always
-	 * preempt this one.
-	 */
-	if (do_soft_reclaim && !scanned_groups)
-		__shrink_zone(zone, sc, do_soft_reclaim);
-
-	/*
-	 * No group is over the soft limit or those that are do not have
-	 * pages in the zone we are reclaiming so we have to reclaim everybody
-	 */
-	if (do_soft_reclaim && (sc->nr_scanned == nr_scanned)) {
-		__shrink_zone(zone, sc, false);
-		return;
-	}
 }
 
 /* Returns true if compaction should go ahead for a high-order request */
@@ -2320,6 +2274,8 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
 {
 	struct zoneref *z;
 	struct zone *zone;
+	unsigned long nr_soft_reclaimed;
+	unsigned long nr_soft_scanned;
 	bool aborted_reclaim = false;
 
 	/*
@@ -2359,6 +2315,18 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
 					continue;
 				}
 			}
+			/*
+			 * This steals pages from memory cgroups over softlimit
+			 * and returns the number of reclaimed pages and
+			 * scanned pages. This works for global memory pressure
+			 * and balancing, not for a memcg's limit.
+			 */
+			nr_soft_scanned = 0;
+			nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
+						sc->order, sc->gfp_mask,
+						&nr_soft_scanned);
+			sc->nr_reclaimed += nr_soft_reclaimed;
+			sc->nr_scanned += nr_soft_scanned;
+			/* need some check for avoid more shrink_zone() */
 		}
@@ -2952,6 +2920,8 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
 {
 	int i;
 	int end_zone = 0;	/* Inclusive.  0 = ZONE_DMA */
+	unsigned long nr_soft_reclaimed;
+	unsigned long nr_soft_scanned;
 	struct scan_control sc = {
 		.gfp_mask = GFP_KERNEL,
 		.priority = DEF_PRIORITY,
@@ -3066,6 +3036,15 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
 
 			sc.nr_scanned = 0;
 
+			nr_soft_scanned = 0;
+			/*
+			 * Call soft limit reclaim before calling shrink_zone.
+			 */
+			nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
+							order, sc.gfp_mask,
+							&nr_soft_scanned);
+			sc.nr_reclaimed += nr_soft_reclaimed;
+
 			/*
 			 * There should be no need to raise the scanning
 			 * priority if enough pages are already being scanned
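Net effect of the mm/vmscan.c revert: soft limit reclaim is no longer folded into the per-memcg walk inside shrink_zone(); instead the global reclaim paths, shrink_zones() and balance_pgdat(), once again call mem_cgroup_soft_limit_reclaim() up front, which picks the worst offenders from the soft-limit tree rebuilt by the large mm/memcontrol.c diff suppressed above, before falling back to regular zone shrinking.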
scripts/checkpatch.pl
@@ -3975,8 +3975,8 @@ sub string_find_replace {
 # check for new externs in .h files.
 		if ($realfile =~ /\.h$/ &&
 		    $line =~ /^\+\s*(extern\s+)$Type\s*$Ident\s*\(/s) {
-			if (WARN("AVOID_EXTERNS",
-				 "extern prototypes should be avoided in .h files\n" . $herecurr) &&
+			if (CHK("AVOID_EXTERNS",
+				"extern prototypes should be avoided in .h files\n" . $herecurr) &&
 			    $fix) {
 				$fixed[$linenr - 1] =~ s/(.*)\bextern\b\s*(.*)/$1$2/;
 			}
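In checkpatch terms this is a severity downgrade: CHK ("CHECK") messages are printed only when the script runs with --strict, whereas WARN messages appear on every run, so extern prototypes in headers are still flagged for those who ask but no longer clutter ordinary patch review.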