linux-apfs (mirror of https://github.com/linux-apfs/linux-apfs.git)

Merge branch 'sh/stable-updates'
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 31
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc6
 NAME = Man-Eating Seals of Antiquity
 
 # *DOCUMENTATION*
@@ -246,7 +246,7 @@ static struct platform_device ceu1_device = {
 	},
 };
 
-/* KEYSC */
+/* KEYSC in SoC (Needs SW33-2 set to ON) */
 static struct sh_keysc_info keysc_info = {
 	.mode = SH_KEYSC_MODE_1,
 	.scan_timing = 10,
@@ -263,12 +263,13 @@ static struct sh_keysc_info keysc_info = {
 
 static struct resource keysc_resources[] = {
 	[0] = {
-		.start  = 0x1a204000,
-		.end    = 0x1a20400f,
+		.name   = "KEYSC",
+		.start  = 0x044b0000,
+		.end    = 0x044b000f,
 		.flags  = IORESOURCE_MEM,
 	},
 	[1] = {
-		.start  = IRQ0_KEY,
+		.start  = 79,
 		.flags  = IORESOURCE_IRQ,
 	},
 };
@@ -26,8 +26,30 @@ ENTRY(sh_mobile_standby)
 
 	tst	#SUSP_SH_SF, r0
 	bt	skip_set_sf
+#ifdef CONFIG_CPU_SUBTYPE_SH7724
+	/* DBSC: put memory in self-refresh mode */
 
-	/* SDRAM: disable power down and put in self-refresh mode */
+	mov.l	dben_reg, r4
+	mov.l	dben_data0, r1
+	mov.l	r1, @r4
+
+	mov.l	dbrfpdn0_reg, r4
+	mov.l	dbrfpdn0_data0, r1
+	mov.l	r1, @r4
+
+	mov.l	dbcmdcnt_reg, r4
+	mov.l	dbcmdcnt_data0, r1
+	mov.l	r1, @r4
+
+	mov.l	dbcmdcnt_reg, r4
+	mov.l	dbcmdcnt_data1, r1
+	mov.l	r1, @r4
+
+	mov.l	dbrfpdn0_reg, r4
+	mov.l	dbrfpdn0_data1, r1
+	mov.l	r1, @r4
+#else
+	/* SBSC: disable power down and put in self-refresh mode */
 	mov.l	1f, r4
 	mov.l	2f, r1
 	mov.l	@r4, r2
@@ -35,6 +57,7 @@ ENTRY(sh_mobile_standby)
 	mov.l	3f, r3
 	and	r3, r2
 	mov.l	r2, @r4
+#endif
 
 skip_set_sf:
 	tst	#SUSP_SH_SLEEP, r0
@@ -84,7 +107,36 @@ done_sleep:
 	tst	#SUSP_SH_SF, r0
 	bt	skip_restore_sf
 
-	/* SDRAM: set auto-refresh mode */
+#ifdef CONFIG_CPU_SUBTYPE_SH7724
+	/* DBSC: put memory in auto-refresh mode */
+
+	mov.l	dbrfpdn0_reg, r4
+	mov.l	dbrfpdn0_data0, r1
+	mov.l	r1, @r4
+
+	/* sleep 140 ns */
+	nop
+	nop
+	nop
+	nop
+
+	mov.l	dbcmdcnt_reg, r4
+	mov.l	dbcmdcnt_data0, r1
+	mov.l	r1, @r4
+
+	mov.l	dbcmdcnt_reg, r4
+	mov.l	dbcmdcnt_data1, r1
+	mov.l	r1, @r4
+
+	mov.l	dben_reg, r4
+	mov.l	dben_data1, r1
+	mov.l	r1, @r4
+
+	mov.l	dbrfpdn0_reg, r4
+	mov.l	dbrfpdn0_data2, r1
+	mov.l	r1, @r4
+#else
+	/* SBSC: set auto-refresh mode */
 	mov.l	1f, r4
 	mov.l	@r4, r2
 	mov.l	4f, r3
@@ -98,15 +150,29 @@ done_sleep:
 	add	r4, r3
 	or	r2, r3
 	mov.l	r3, @r1
+#endif
 skip_restore_sf:
 	rts
 	 nop
 
 	.balign 4
+#ifdef CONFIG_CPU_SUBTYPE_SH7724
+dben_reg:	.long	0xfd000010 /* DBEN */
+dben_data0:	.long	0
+dben_data1:	.long	1
+dbrfpdn0_reg:	.long	0xfd000040 /* DBRFPDN0 */
+dbrfpdn0_data0:	.long	0
+dbrfpdn0_data1:	.long	1
+dbrfpdn0_data2:	.long	0x00010000
+dbcmdcnt_reg:	.long	0xfd000014 /* DBCMDCNT */
+dbcmdcnt_data0:	.long	2
+dbcmdcnt_data1:	.long	4
+#else
1:	.long	0xfe400008 /* SDCR0 */
2:	.long	0x00000400
3:	.long	0xffff7fff
4:	.long	0xfffffbff
+#endif
5:	.long	0xa4150020 /* STBCR */
6:	.long	0xfe40001c /* RTCOR */
7:	.long	0xfe400018 /* RTCNT */
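On SH7724 the standby path programs the DBSC memory controller rather than the SBSC, using the DBEN, DBRFPDN0 and DBCMDCNT registers named in the literal pool above. The same sequence re-expressed as C-style MMIO writes, purely as a reading aid: the addresses and values are copied from the constants above, the register semantics are an assumption, and the real code has to stay in the assembly because it runs while SDRAM is unavailable.

#include <stdint.h>

#define DBEN      ((volatile uint32_t *)0xfd000010)
#define DBRFPDN0  ((volatile uint32_t *)0xfd000040)
#define DBCMDCNT  ((volatile uint32_t *)0xfd000014)

static void dbsc_enter_self_refresh(void)
{
	*DBEN     = 0;          /* dben_data0 */
	*DBRFPDN0 = 0;          /* dbrfpdn0_data0 */
	*DBCMDCNT = 2;          /* dbcmdcnt_data0 */
	*DBCMDCNT = 4;          /* dbcmdcnt_data1 */
	*DBRFPDN0 = 1;          /* dbrfpdn0_data1 */
}

static void dbsc_exit_self_refresh(void)
{
	*DBRFPDN0 = 0;          /* dbrfpdn0_data0 */
	/* the assembly waits ~140 ns (four nops) here */
	*DBCMDCNT = 2;          /* dbcmdcnt_data0 */
	*DBCMDCNT = 4;          /* dbcmdcnt_data1 */
	*DBEN     = 1;          /* dben_data1 */
	*DBRFPDN0 = 0x00010000; /* dbrfpdn0_data2 */
}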
@@ -24,6 +24,7 @@ config X86
 	select HAVE_UNSTABLE_SCHED_CLOCK
 	select HAVE_IDE
 	select HAVE_OPROFILE
+	select HAVE_PERF_COUNTERS if (!M386 && !M486)
 	select HAVE_IOREMAP_PROT
 	select HAVE_KPROBES
 	select ARCH_WANT_OPTIONAL_GPIOLIB
@@ -742,7 +743,6 @@ config X86_UP_IOAPIC
 config X86_LOCAL_APIC
 	def_bool y
 	depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_APIC
-	select HAVE_PERF_COUNTERS if (!M386 && !M486)
 
 config X86_IO_APIC
 	def_bool y
@@ -400,6 +400,13 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 		level = cpuid_eax(1);
 		if((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
 			set_cpu_cap(c, X86_FEATURE_REP_GOOD);
+
+		/*
+		 * Some BIOSes incorrectly force this feature, but only K8
+		 * revision D (model = 0x14) and later actually support it.
+		 */
+		if (c->x86_model < 0x14)
+			clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
 	}
 	if (c->x86 == 0x10 || c->x86 == 0x11)
 		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
@@ -59,7 +59,30 @@ void __init setup_cpu_local_masks(void)
 	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
 }
 
-static const struct cpu_dev *this_cpu __cpuinitdata;
+static void __cpuinit default_init(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_X86_64
+	display_cacheinfo(c);
+#else
+	/* Not much we can do here... */
+	/* Check if at least it has cpuid */
+	if (c->cpuid_level == -1) {
+		/* No cpuid. It must be an ancient CPU */
+		if (c->x86 == 4)
+			strcpy(c->x86_model_id, "486");
+		else if (c->x86 == 3)
+			strcpy(c->x86_model_id, "386");
+	}
+#endif
+}
+
+static const struct cpu_dev __cpuinitconst default_cpu = {
+	.c_init	= default_init,
+	.c_vendor = "Unknown",
+	.c_x86_vendor = X86_VENDOR_UNKNOWN,
+};
+
+static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
 
 DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
 #ifdef CONFIG_X86_64
@@ -332,29 +355,6 @@ void switch_to_new_gdt(int cpu)
 
 static const struct cpu_dev *__cpuinitdata cpu_devs[X86_VENDOR_NUM] = {};
 
-static void __cpuinit default_init(struct cpuinfo_x86 *c)
-{
-#ifdef CONFIG_X86_64
-	display_cacheinfo(c);
-#else
-	/* Not much we can do here... */
-	/* Check if at least it has cpuid */
-	if (c->cpuid_level == -1) {
-		/* No cpuid. It must be an ancient CPU */
-		if (c->x86 == 4)
-			strcpy(c->x86_model_id, "486");
-		else if (c->x86 == 3)
-			strcpy(c->x86_model_id, "386");
-	}
-#endif
-}
-
-static const struct cpu_dev __cpuinitconst default_cpu = {
-	.c_init	= default_init,
-	.c_vendor = "Unknown",
-	.c_x86_vendor = X86_VENDOR_UNKNOWN,
-};
-
 static void __cpuinit get_model_name(struct cpuinfo_x86 *c)
 {
 	unsigned int *v;
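The two common.c hunks are an ordering fix: default_init() and default_cpu move ahead of their first use, and this_cpu now starts out pointing at &default_cpu instead of being NULL until vendor identification runs. A minimal standalone sketch of that default-object pattern, with hypothetical names:

/* Default-object pattern: the ops pointer is never NULL, so code that
 * runs before identification needs no NULL check.  Hypothetical names. */
struct cpu_ops {
	const char *vendor;
	void (*init)(void);
};

static void default_init_op(void)
{
	/* nothing vendor-specific to do */
}

static const struct cpu_ops default_ops = {
	.vendor = "Unknown",
	.init   = default_init_op,
};

/* Identification may later retarget this at a real vendor's ops. */
static const struct cpu_ops *this_ops = &default_ops;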
@@ -36,6 +36,7 @@
 
 static DEFINE_PER_CPU(__u64, next_check) = INITIAL_JIFFIES;
 static DEFINE_PER_CPU(unsigned long, thermal_throttle_count);
+static DEFINE_PER_CPU(bool, thermal_throttle_active);
 
 static atomic_t therm_throt_en = ATOMIC_INIT(0);
 
@@ -96,24 +97,27 @@ static int therm_throt_process(int curr)
 {
 	unsigned int cpu = smp_processor_id();
 	__u64 tmp_jiffs = get_jiffies_64();
+	bool was_throttled = __get_cpu_var(thermal_throttle_active);
+	bool is_throttled = __get_cpu_var(thermal_throttle_active) = curr;
 
-	if (curr)
+	if (is_throttled)
 		__get_cpu_var(thermal_throttle_count)++;
 
-	if (time_before64(tmp_jiffs, __get_cpu_var(next_check)))
+	if (!(was_throttled ^ is_throttled) &&
+	    time_before64(tmp_jiffs, __get_cpu_var(next_check)))
 		return 0;
 
 	__get_cpu_var(next_check) = tmp_jiffs + CHECK_INTERVAL;
 
 	/* if we just entered the thermal event */
-	if (curr) {
+	if (is_throttled) {
 		printk(KERN_CRIT "CPU%d: Temperature above threshold, "
-		       "cpu clock throttled (total events = %lu)\n", cpu,
-		       __get_cpu_var(thermal_throttle_count));
+		       "cpu clock throttled (total events = %lu)\n",
+		       cpu, __get_cpu_var(thermal_throttle_count));
 
 		add_taint(TAINT_MACHINE_CHECK);
-	} else {
-		printk(KERN_CRIT "CPU%d: Temperature/speed normal\n", cpu);
+	} else if (was_throttled) {
+		printk(KERN_INFO "CPU%d: Temperature/speed normal\n", cpu);
 	}
 
 	return 1;
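The therm_throt change keeps per-CPU state (thermal_throttle_active) so the handler can distinguish entering, staying in, and leaving the throttled state: unchanged state is rate-limited by next_check, and the now-KERN_INFO "normal" message is printed only if the CPU really was throttled before. A standalone C model of that state machine; the variable names follow the patch, but CHECK_INTERVAL and the printf calls are illustrative:

#include <stdbool.h>
#include <stdio.h>

static bool active;             /* per-CPU in the kernel code */
static unsigned long events;
static unsigned long next_check;
#define CHECK_INTERVAL 1000

static int throttle_process(bool curr, unsigned long now)
{
	bool was_throttled = active;
	bool is_throttled = active = curr;

	if (is_throttled)
		events++;

	/* Unchanged state inside the rate-limit window: stay quiet. */
	if (!(was_throttled ^ is_throttled) && now < next_check)
		return 0;

	next_check = now + CHECK_INTERVAL;
	if (is_throttled)
		printf("throttled (total events = %lu)\n", events);
	else if (was_throttled)
		printf("back to normal\n");
	return 1;
}

int main(void)
{
	throttle_process(true, 0);    /* logs: throttled */
	throttle_process(true, 10);   /* silent: unchanged, inside window */
	throttle_process(false, 20);  /* logs: back to normal */
	return 0;
}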
@@ -55,6 +55,7 @@ struct x86_pmu {
 	int		num_counters_fixed;
 	int		counter_bits;
 	u64		counter_mask;
+	int		apic;
 	u64		max_period;
 	u64		intel_ctrl;
 };
@@ -72,8 +73,8 @@ static const u64 p6_perfmon_event_map[] =
 {
   [PERF_COUNT_HW_CPU_CYCLES]		= 0x0079,
   [PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
-  [PERF_COUNT_HW_CACHE_REFERENCES]	= 0x0000,
-  [PERF_COUNT_HW_CACHE_MISSES]		= 0x0000,
+  [PERF_COUNT_HW_CACHE_REFERENCES]	= 0x0f2e,
+  [PERF_COUNT_HW_CACHE_MISSES]		= 0x012e,
   [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
   [PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
   [PERF_COUNT_HW_BUS_CYCLES]		= 0x0062,
@@ -613,6 +614,7 @@ static DEFINE_MUTEX(pmc_reserve_mutex);
 
 static bool reserve_pmc_hardware(void)
 {
+#ifdef CONFIG_X86_LOCAL_APIC
 	int i;
 
 	if (nmi_watchdog == NMI_LOCAL_APIC)
@@ -627,9 +629,11 @@ static bool reserve_pmc_hardware(void)
 		if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
 			goto eventsel_fail;
 	}
+#endif
 
 	return true;
 
+#ifdef CONFIG_X86_LOCAL_APIC
 eventsel_fail:
 	for (i--; i >= 0; i--)
 		release_evntsel_nmi(x86_pmu.eventsel + i);
@@ -644,10 +648,12 @@ perfctr_fail:
 		enable_lapic_nmi_watchdog();
 
 	return false;
+#endif
 }
 
 static void release_pmc_hardware(void)
 {
+#ifdef CONFIG_X86_LOCAL_APIC
 	int i;
 
 	for (i = 0; i < x86_pmu.num_counters; i++) {
@@ -657,6 +663,7 @@ static void release_pmc_hardware(void)
 
 	if (nmi_watchdog == NMI_LOCAL_APIC)
 		enable_lapic_nmi_watchdog();
+#endif
 }
 
 static void hw_perf_counter_destroy(struct perf_counter *counter)
@@ -748,6 +755,15 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
 		hwc->sample_period = x86_pmu.max_period;
 		hwc->last_period = hwc->sample_period;
 		atomic64_set(&hwc->period_left, hwc->sample_period);
+	} else {
+		/*
+		 * If we have a PMU initialized but no APIC
+		 * interrupts, we cannot sample hardware
+		 * counters (user-space has to fall back and
+		 * sample via a hrtimer based software counter):
+		 */
+		if (!x86_pmu.apic)
+			return -EOPNOTSUPP;
 	}
 
 	counter->destroy = hw_perf_counter_destroy;
@@ -1449,18 +1465,22 @@ void smp_perf_pending_interrupt(struct pt_regs *regs)
 
 void set_perf_counter_pending(void)
 {
+#ifdef CONFIG_X86_LOCAL_APIC
 	apic->send_IPI_self(LOCAL_PENDING_VECTOR);
+#endif
 }
 
 void perf_counters_lapic_init(void)
 {
-	if (!x86_pmu_initialized())
+#ifdef CONFIG_X86_LOCAL_APIC
+	if (!x86_pmu.apic || !x86_pmu_initialized())
 		return;
 
 	/*
 	 * Always use NMI for PMU
 	 */
 	apic_write(APIC_LVTPC, APIC_DM_NMI);
+#endif
 }
 
 static int __kprobes
@@ -1484,7 +1504,9 @@ perf_counter_nmi_handler(struct notifier_block *self,
 
 	regs = args->regs;
 
+#ifdef CONFIG_X86_LOCAL_APIC
 	apic_write(APIC_LVTPC, APIC_DM_NMI);
+#endif
 	/*
 	 * Can't rely on the handled return value to say it was our NMI, two
 	 * counters could trigger 'simultaneously' raising two back-to-back NMIs.
@@ -1515,6 +1537,7 @@ static struct x86_pmu p6_pmu = {
 	.event_map		= p6_pmu_event_map,
 	.raw_event		= p6_pmu_raw_event,
 	.max_events		= ARRAY_SIZE(p6_perfmon_event_map),
+	.apic			= 1,
 	.max_period		= (1ULL << 31) - 1,
 	.version		= 0,
 	.num_counters		= 2,
@@ -1541,6 +1564,7 @@ static struct x86_pmu intel_pmu = {
 	.event_map		= intel_pmu_event_map,
 	.raw_event		= intel_pmu_raw_event,
 	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
+	.apic			= 1,
 	/*
 	 * Intel PMCs cannot be accessed sanely above 32 bit width,
 	 * so we install an artificial 1<<31 period regardless of
@@ -1564,6 +1588,7 @@ static struct x86_pmu amd_pmu = {
 	.num_counters		= 4,
 	.counter_bits		= 48,
 	.counter_mask		= (1ULL << 48) - 1,
+	.apic			= 1,
 	/* use highest bit to detect overflow */
 	.max_period		= (1ULL << 47) - 1,
 };
@@ -1589,12 +1614,13 @@ static int p6_pmu_init(void)
 		return -ENODEV;
 	}
 
-	if (!cpu_has_apic) {
-		pr_info("no Local APIC, try rebooting with lapic");
-		return -ENODEV;
-	}
+	x86_pmu = p6_pmu;
 
-	x86_pmu = p6_pmu;
+	if (!cpu_has_apic) {
+		pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
+		pr_info("no hardware sampling interrupt available.\n");
+		x86_pmu.apic = 0;
+	}
 
 	return 0;
 }
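All the perf_counter hunks implement one idea: detect at init time whether a local APIC is available, remember the answer in x86_pmu.apic, compile out APIC-only code under CONFIG_X86_LOCAL_APIC, and degrade instead of failing. Counting still works; only interrupt-driven sampling is refused with -EOPNOTSUPP. A compact sketch of that capability-gating shape, with simplified, hypothetical types:

#include <errno.h>
#include <stdbool.h>

struct pmu {
	bool apic;              /* can we take sampling interrupts? */
};

static int counter_init(const struct pmu *pmu, bool wants_sampling)
{
	/* Plain counting always works; sampling needs the interrupt. */
	if (wants_sampling && !pmu->apic)
		return -EOPNOTSUPP;
	return 0;
}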
@@ -418,20 +418,20 @@ static int __init set_pci_reboot(const struct dmi_system_id *d)
 }
 
 static struct dmi_system_id __initdata pci_reboot_dmi_table[] = {
-	{	/* Handle problems with rebooting on Apple MacBook5,2 */
+	{	/* Handle problems with rebooting on Apple MacBook5 */
 		.callback = set_pci_reboot,
-		.ident = "Apple MacBook",
+		.ident = "Apple MacBook5",
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5,2"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5"),
 		},
 	},
-	{	/* Handle problems with rebooting on Apple MacBookPro5,1 */
+	{	/* Handle problems with rebooting on Apple MacBookPro5 */
 		.callback = set_pci_reboot,
-		.ident = "Apple MacBookPro5,1",
+		.ident = "Apple MacBookPro5",
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5,1"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5"),
 		},
 	},
 	{ }
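DMI_MATCH patterns are compared as substrings of the firmware-provided field, so shortening "MacBook5,2" to "MacBook5" makes the PCI-reboot quirk cover the whole MacBook5,x family instead of a single submodel. A minimal standalone illustration of the substring semantics (dmi_field_matches is a stand-in, not the kernel helper):

#include <stdio.h>
#include <string.h>

static int dmi_field_matches(const char *field, const char *pattern)
{
	return strstr(field, pattern) != NULL;
}

int main(void)
{
	printf("%d\n", dmi_field_matches("MacBook5,1", "MacBook5"));  /* 1 */
	printf("%d\n", dmi_field_matches("MacBook5,2", "MacBook5"));  /* 1 */
	printf("%d\n", dmi_field_matches("MacBook4,1", "MacBook5"));  /* 0 */
	return 0;
}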
@@ -40,6 +40,7 @@ struct sh_cmt_priv {
 	struct platform_device *pdev;
 
 	unsigned long flags;
+	unsigned long flags_suspend;
 	unsigned long match_value;
 	unsigned long next_match_value;
 	unsigned long max_match_value;
@@ -667,11 +668,38 @@ static int __devexit sh_cmt_remove(struct platform_device *pdev)
 	return -EBUSY; /* cannot unregister clockevent and clocksource */
 }
 
+static int sh_cmt_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct sh_cmt_priv *p = platform_get_drvdata(pdev);
+
+	/* save flag state and stop CMT channel */
+	p->flags_suspend = p->flags;
+	sh_cmt_stop(p, p->flags);
+	return 0;
+}
+
+static int sh_cmt_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct sh_cmt_priv *p = platform_get_drvdata(pdev);
+
+	/* start CMT channel from saved state */
+	sh_cmt_start(p, p->flags_suspend);
+	return 0;
+}
+
+static struct dev_pm_ops sh_cmt_dev_pm_ops = {
+	.suspend = sh_cmt_suspend,
+	.resume = sh_cmt_resume,
+};
+
 static struct platform_driver sh_cmt_device_driver = {
 	.probe		= sh_cmt_probe,
 	.remove		= __devexit_p(sh_cmt_remove),
 	.driver		= {
 		.name	= "sh_cmt",
+		.pm	= &sh_cmt_dev_pm_ops,
 	}
 };
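The new sh_cmt suspend/resume callbacks follow a snapshot-and-replay pattern: record which features were active in flags_suspend, quiesce the channel, and on resume restart exactly the saved set. A generic, self-contained sketch of the pattern; the names model the patch but are not the sh_cmt API:

struct chan {
	unsigned long flags;          /* currently enabled features */
	unsigned long flags_suspend;  /* snapshot taken at suspend */
};

static void chan_stop(struct chan *c, unsigned long f)  { c->flags &= ~f; }
static void chan_start(struct chan *c, unsigned long f) { c->flags |= f; }

static int chan_suspend(struct chan *c)
{
	c->flags_suspend = c->flags;      /* remember what was running */
	chan_stop(c, c->flags);
	return 0;
}

static int chan_resume(struct chan *c)
{
	chan_start(c, c->flags_suspend);  /* restart exactly that set */
	return 0;
}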
@@ -359,6 +359,7 @@ static mddev_t * mddev_find(dev_t unit)
 	else
 		new->md_minor = MINOR(unit) >> MdpMinorShift;
 
+	mutex_init(&new->open_mutex);
 	mutex_init(&new->reconfig_mutex);
 	INIT_LIST_HEAD(&new->disks);
 	INIT_LIST_HEAD(&new->all_mddevs);
@@ -1974,17 +1975,14 @@ repeat:
 		/* otherwise we have to go forward and ... */
 		mddev->events ++;
 		if (!mddev->in_sync || mddev->recovery_cp != MaxSector) { /* not clean */
-			/* .. if the array isn't clean, insist on an odd 'events' */
-			if ((mddev->events&1)==0) {
-				mddev->events++;
+			/* .. if the array isn't clean, an 'even' event
+			 * must also go to spares. */
+			if ((mddev->events&1)==0)
 				nospares = 0;
-			}
 		} else {
-			/* otherwise insist on an even 'events' (for clean states) */
-			if ((mddev->events&1)) {
-				mddev->events++;
+			/* otherwise an 'odd' event must go to spares */
+			if ((mddev->events&1))
 				nospares = 0;
-			}
 		}
 	}
@@ -3601,6 +3599,7 @@ max_sync_store(mddev_t *mddev, const char *buf, size_t len)
 	if (max < mddev->resync_min)
 		return -EINVAL;
 	if (max < mddev->resync_max &&
+	    mddev->ro == 0 &&
 	    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
 		return -EBUSY;
 
@@ -4304,12 +4303,11 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
 	struct gendisk *disk = mddev->gendisk;
 	mdk_rdev_t *rdev;
 
+	mutex_lock(&mddev->open_mutex);
 	if (atomic_read(&mddev->openers) > is_open) {
 		printk("md: %s still in use.\n",mdname(mddev));
-		return -EBUSY;
-	}
-
-	if (mddev->pers) {
+		err = -EBUSY;
+	} else if (mddev->pers) {
 
 		if (mddev->sync_thread) {
 			set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
@@ -4367,7 +4365,10 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
 			set_disk_ro(disk, 1);
 		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
 	}
-
+out:
+	mutex_unlock(&mddev->open_mutex);
+	if (err)
+		return err;
 	/*
 	 * Free resources if final stop
 	 */
@@ -4433,7 +4434,6 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
 	blk_integrity_unregister(disk);
 	md_new_event(mddev);
 	sysfs_notify_dirent(mddev->sysfs_state);
-out:
 	return err;
 }
 
@@ -5518,12 +5518,12 @@ static int md_open(struct block_device *bdev, fmode_t mode)
 	}
 	BUG_ON(mddev != bdev->bd_disk->private_data);
 
-	if ((err = mutex_lock_interruptible_nested(&mddev->reconfig_mutex, 1)))
+	if ((err = mutex_lock_interruptible(&mddev->open_mutex)))
 		goto out;
 
 	err = 0;
 	atomic_inc(&mddev->openers);
-	mddev_unlock(mddev);
+	mutex_unlock(&mddev->open_mutex);
 
 	check_disk_change(bdev);
 out:
@@ -223,6 +223,16 @@ struct mddev_s
 							* so we don't loop trying */
 
 	int				in_sync;	/* know to not need resync */
+	/* 'open_mutex' avoids races between 'md_open' and 'do_md_stop', so
+	 * that we are never stopping an array while it is open.
+	 * 'reconfig_mutex' protects all other reconfiguration.
+	 * These locks are separate due to conflicting interactions
+	 * with bdev->bd_mutex.
+	 * Lock ordering is:
+	 *  reconfig_mutex -> bd_mutex : e.g. do_md_run -> revalidate_disk
+	 *  bd_mutex -> open_mutex:  e.g. __blkdev_get -> md_open
+	 */
+	struct mutex			open_mutex;
 	struct mutex			reconfig_mutex;
 	atomic_t			active;		/* general refcount */
 	atomic_t			openers;	/* number of active opens */
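open_mutex exists so md_open() and do_md_stop() can exclude each other without md_open() taking reconfig_mutex under bd_mutex. The comment added to struct mddev_s fixes a global order, reconfig_mutex -> bd_mutex -> open_mutex; since both paths acquire locks consistently with that order, no cycle (and hence no deadlock) is possible. A toy pthread model of the two paths, illustrative only:

#include <pthread.h>

static pthread_mutex_t reconfig_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t bd_mutex       = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t open_mutex     = PTHREAD_MUTEX_INITIALIZER;

/* do_md_run-like path: reconfig_mutex -> bd_mutex */
static void md_run_path(void)
{
	pthread_mutex_lock(&reconfig_mutex);
	pthread_mutex_lock(&bd_mutex);       /* e.g. revalidate_disk */
	pthread_mutex_unlock(&bd_mutex);
	pthread_mutex_unlock(&reconfig_mutex);
}

/* md_open-like path: bd_mutex -> open_mutex, never reconfig_mutex inside */
static void md_open_path(void)
{
	pthread_mutex_lock(&bd_mutex);       /* e.g. __blkdev_get */
	pthread_mutex_lock(&open_mutex);
	pthread_mutex_unlock(&open_mutex);
	pthread_mutex_unlock(&bd_mutex);
}

int main(void)
{
	md_run_path();
	md_open_path();
	return 0;
}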
@@ -3785,7 +3785,7 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
 	    conf->reshape_progress < raid5_size(mddev, 0, 0)) {
 		sector_nr = raid5_size(mddev, 0, 0)
 			- conf->reshape_progress;
-	} else if (mddev->delta_disks > 0 &&
+	} else if (mddev->delta_disks >= 0 &&
 		   conf->reshape_progress > 0)
 		sector_nr = conf->reshape_progress;
 	sector_div(sector_nr, new_data_disks);
@@ -4509,7 +4509,26 @@ static int run(mddev_t *mddev)
 			(old_disks-max_degraded));
 		/* here_old is the first stripe that we might need to read
 		 * from */
-		if (here_new >= here_old) {
+		if (mddev->delta_disks == 0) {
+			/* We cannot be sure it is safe to start an in-place
+			 * reshape.  It is only safe if user-space if monitoring
+			 * and taking constant backups.
+			 * mdadm always starts a situation like this in
+			 * readonly mode so it can take control before
+			 * allowing any writes.  So just check for that.
+			 */
+			if ((here_new * mddev->new_chunk_sectors !=
+			     here_old * mddev->chunk_sectors) ||
+			    mddev->ro == 0) {
+				printk(KERN_ERR "raid5: in-place reshape must be started"
+				       " in read-only mode - aborting\n");
+				return -EINVAL;
+			}
+		} else if (mddev->delta_disks < 0
+			   ? (here_new * mddev->new_chunk_sectors <=
+			      here_old * mddev->chunk_sectors)
+			   : (here_new * mddev->new_chunk_sectors >=
+			      here_old * mddev->chunk_sectors)) {
 			/* Reading from the same stripe as writing to - bad */
 			printk(KERN_ERR "raid5: reshape_position too early for "
 			       "auto-recovery - aborting.\n");
@@ -5078,8 +5097,15 @@ static void raid5_finish_reshape(mddev_t *mddev)
 			mddev->degraded--;
 			for (d = conf->raid_disks ;
 			     d < conf->raid_disks - mddev->delta_disks;
-			     d++)
-				raid5_remove_disk(mddev, d);
+			     d++) {
+				mdk_rdev_t *rdev = conf->disks[d].rdev;
+				if (rdev && raid5_remove_disk(mddev, d) == 0) {
+					char nm[20];
+					sprintf(nm, "rd%d", rdev->raid_disk);
+					sysfs_remove_link(&mddev->kobj, nm);
+					rdev->raid_disk = -1;
+				}
+			}
 		}
 		mddev->layout = conf->algorithm;
 		mddev->chunk_sectors = conf->chunk_sectors;
@@ -477,6 +477,9 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv)
 	/* tell the board code to enable the panel */
 	for (k = 0; k < ARRAY_SIZE(priv->ch); k++) {
 		ch = &priv->ch[k];
+		if (!ch->enabled)
+			continue;
+
 		board_cfg = &ch->cfg.board_cfg;
 		if (board_cfg->display_on)
 			board_cfg->display_on(board_cfg->board_data);
@@ -494,6 +497,8 @@ static void sh_mobile_lcdc_stop(struct sh_mobile_lcdc_priv *priv)
 	/* clean up deferred io and ask board code to disable panel */
 	for (k = 0; k < ARRAY_SIZE(priv->ch); k++) {
 		ch = &priv->ch[k];
+		if (!ch->enabled)
+			continue;
 
 		/* deferred io mode:
 		 * flush frame, and wait for frame end interrupt
@@ -1914,7 +1914,8 @@ static void ocfs2_adjust_adjacent_records(struct ocfs2_extent_rec *left_rec,
 	 * immediately to their right.
 	 */
 	left_clusters = le32_to_cpu(right_child_el->l_recs[0].e_cpos);
-	if (ocfs2_is_empty_extent(&right_child_el->l_recs[0])) {
+	if (!ocfs2_rec_clusters(right_child_el, &right_child_el->l_recs[0])) {
+		BUG_ON(right_child_el->l_tree_depth);
 		BUG_ON(le16_to_cpu(right_child_el->l_next_free_rec) <= 1);
 		left_clusters = le32_to_cpu(right_child_el->l_recs[1].e_cpos);
 	}
@@ -2476,15 +2477,37 @@ out_ret_path:
 	return ret;
 }
 
-static void ocfs2_update_edge_lengths(struct inode *inode, handle_t *handle,
-				      struct ocfs2_path *path)
+static int ocfs2_update_edge_lengths(struct inode *inode, handle_t *handle,
+				     int subtree_index, struct ocfs2_path *path)
 {
-	int i, idx;
+	int i, idx, ret;
 	struct ocfs2_extent_rec *rec;
 	struct ocfs2_extent_list *el;
 	struct ocfs2_extent_block *eb;
 	u32 range;
 
+	/*
+	 * In normal tree rotation process, we will never touch the
+	 * tree branch above subtree_index and ocfs2_extend_rotate_transaction
+	 * doesn't reserve the credits for them either.
+	 *
+	 * But we do have a special case here which will update the rightmost
+	 * records for all the bh in the path.
+	 * So we have to allocate extra credits and access them.
+	 */
+	ret = ocfs2_extend_trans(handle,
+				 handle->h_buffer_credits + subtree_index);
+	if (ret) {
+		mlog_errno(ret);
+		goto out;
+	}
+
+	ret = ocfs2_journal_access_path(inode, handle, path);
+	if (ret) {
+		mlog_errno(ret);
+		goto out;
+	}
+
 	/* Path should always be rightmost. */
 	eb = (struct ocfs2_extent_block *)path_leaf_bh(path)->b_data;
 	BUG_ON(eb->h_next_leaf_blk != 0ULL);
@@ -2505,6 +2528,8 @@ static void ocfs2_update_edge_lengths(struct inode *inode, handle_t *handle,
 
 		ocfs2_journal_dirty(handle, path->p_node[i].bh);
 	}
+out:
+	return ret;
 }
 
 static void ocfs2_unlink_path(struct inode *inode, handle_t *handle,
@@ -2717,7 +2742,12 @@ static int ocfs2_rotate_subtree_left(struct inode *inode, handle_t *handle,
 	if (del_right_subtree) {
 		ocfs2_unlink_subtree(inode, handle, left_path, right_path,
 				     subtree_index, dealloc);
-		ocfs2_update_edge_lengths(inode, handle, left_path);
+		ret = ocfs2_update_edge_lengths(inode, handle, subtree_index,
+						left_path);
+		if (ret) {
+			mlog_errno(ret);
+			goto out;
+		}
 
 		eb = (struct ocfs2_extent_block *)path_leaf_bh(left_path)->b_data;
 		ocfs2_et_set_last_eb_blk(et, le64_to_cpu(eb->h_blkno));
@@ -3034,7 +3064,12 @@ static int ocfs2_remove_rightmost_path(struct inode *inode, handle_t *handle,
 
 		ocfs2_unlink_subtree(inode, handle, left_path, path,
 				     subtree_index, dealloc);
-		ocfs2_update_edge_lengths(inode, handle, left_path);
+		ret = ocfs2_update_edge_lengths(inode, handle, subtree_index,
+						left_path);
+		if (ret) {
+			mlog_errno(ret);
+			goto out;
+		}
 
 		eb = (struct ocfs2_extent_block *)path_leaf_bh(left_path)->b_data;
 		ocfs2_et_set_last_eb_blk(et, le64_to_cpu(eb->h_blkno));
@@ -193,6 +193,7 @@ static int ocfs2_get_block(struct inode *inode, sector_t iblock,
 		     (unsigned long long)OCFS2_I(inode)->ip_blkno);
 		mlog(ML_ERROR, "Size %llu, clusters %u\n", (unsigned long long)i_size_read(inode), OCFS2_I(inode)->ip_clusters);
 		dump_stack();
+		goto bail;
 	}
 
 	past_eof = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode));
@@ -894,18 +895,17 @@ struct ocfs2_write_cluster_desc {
 	 */
 	unsigned	c_new;
 	unsigned	c_unwritten;
+	unsigned	c_needs_zero;
 };
 
-static inline int ocfs2_should_zero_cluster(struct ocfs2_write_cluster_desc *d)
-{
-	return d->c_new || d->c_unwritten;
-}
-
 struct ocfs2_write_ctxt {
 	/* Logical cluster position / len of write */
 	u32				w_cpos;
 	u32				w_clen;
 
+	/* First cluster allocated in a nonsparse extend */
+	u32				w_first_new_cpos;
+
 	struct ocfs2_write_cluster_desc	w_desc[OCFS2_MAX_CLUSTERS_PER_PAGE];
 
 	/*
@@ -983,6 +983,7 @@ static int ocfs2_alloc_write_ctxt(struct ocfs2_write_ctxt **wcp,
 		return -ENOMEM;
 
 	wc->w_cpos = pos >> osb->s_clustersize_bits;
+	wc->w_first_new_cpos = UINT_MAX;
 	cend = (pos + len - 1) >> osb->s_clustersize_bits;
 	wc->w_clen = cend - wc->w_cpos + 1;
 	get_bh(di_bh);
@@ -1217,20 +1218,18 @@ out:
 */
 static int ocfs2_write_cluster(struct address_space *mapping,
 			       u32 phys, unsigned int unwritten,
+			       unsigned int should_zero,
 			       struct ocfs2_alloc_context *data_ac,
 			       struct ocfs2_alloc_context *meta_ac,
 			       struct ocfs2_write_ctxt *wc, u32 cpos,
 			       loff_t user_pos, unsigned user_len)
 {
-	int ret, i, new, should_zero = 0;
+	int ret, i, new;
 	u64 v_blkno, p_blkno;
 	struct inode *inode = mapping->host;
 	struct ocfs2_extent_tree et;
 
 	new = phys == 0 ? 1 : 0;
-	if (new || unwritten)
-		should_zero = 1;
 
 	if (new) {
 		u32 tmp_pos;
@@ -1301,7 +1300,7 @@ static int ocfs2_write_cluster(struct address_space *mapping,
 		if (tmpret) {
 			mlog_errno(tmpret);
 			if (ret == 0)
-				tmpret = ret;
+				ret = tmpret;
 		}
 	}
@@ -1341,7 +1340,9 @@ static int ocfs2_write_cluster_by_desc(struct address_space *mapping,
 			local_len = osb->s_clustersize - cluster_off;
 
 		ret = ocfs2_write_cluster(mapping, desc->c_phys,
-					  desc->c_unwritten, data_ac, meta_ac,
+					  desc->c_unwritten,
+					  desc->c_needs_zero,
+					  data_ac, meta_ac,
 					  wc, desc->c_cpos, pos, local_len);
 		if (ret) {
 			mlog_errno(ret);
@@ -1391,14 +1392,14 @@ static void ocfs2_set_target_boundaries(struct ocfs2_super *osb,
 	 * newly allocated cluster.
 	 */
 	desc = &wc->w_desc[0];
-	if (ocfs2_should_zero_cluster(desc))
+	if (desc->c_needs_zero)
 		ocfs2_figure_cluster_boundaries(osb,
 						desc->c_cpos,
 						&wc->w_target_from,
 						NULL);
 
 	desc = &wc->w_desc[wc->w_clen - 1];
-	if (ocfs2_should_zero_cluster(desc))
+	if (desc->c_needs_zero)
 		ocfs2_figure_cluster_boundaries(osb,
 						desc->c_cpos,
 						NULL,
@@ -1466,13 +1467,28 @@ static int ocfs2_populate_write_desc(struct inode *inode,
 			phys++;
 		}
 
+		/*
+		 * If w_first_new_cpos is < UINT_MAX, we have a non-sparse
+		 * file that got extended.  w_first_new_cpos tells us
+		 * where the newly allocated clusters are so we can
+		 * zero them.
+		 */
+		if (desc->c_cpos >= wc->w_first_new_cpos) {
+			BUG_ON(phys == 0);
+			desc->c_needs_zero = 1;
+		}
+
 		desc->c_phys = phys;
 		if (phys == 0) {
 			desc->c_new = 1;
+			desc->c_needs_zero = 1;
 			*clusters_to_alloc = *clusters_to_alloc + 1;
 		}
-		if (ext_flags & OCFS2_EXT_UNWRITTEN)
+
+		if (ext_flags & OCFS2_EXT_UNWRITTEN) {
 			desc->c_unwritten = 1;
+			desc->c_needs_zero = 1;
+		}
 
 		num_clusters--;
 	}
@@ -1632,10 +1648,13 @@ static int ocfs2_expand_nonsparse_inode(struct inode *inode, loff_t pos,
 	if (newsize <= i_size_read(inode))
 		return 0;
 
-	ret = ocfs2_extend_no_holes(inode, newsize, newsize - len);
+	ret = ocfs2_extend_no_holes(inode, newsize, pos);
 	if (ret)
 		mlog_errno(ret);
 
+	wc->w_first_new_cpos =
+		ocfs2_clusters_for_bytes(inode->i_sb, i_size_read(inode));
+
 	return ret;
 }
@@ -1644,7 +1663,7 @@ int ocfs2_write_begin_nolock(struct address_space *mapping,
 			     struct page **pagep, void **fsdata,
 			     struct buffer_head *di_bh, struct page *mmap_page)
 {
-	int ret, credits = OCFS2_INODE_UPDATE_CREDITS;
+	int ret, cluster_of_pages, credits = OCFS2_INODE_UPDATE_CREDITS;
 	unsigned int clusters_to_alloc, extents_to_split;
 	struct ocfs2_write_ctxt *wc;
 	struct inode *inode = mapping->host;
@@ -1722,8 +1741,19 @@ int ocfs2_write_begin_nolock(struct address_space *mapping,
 
 	}
 
-	ocfs2_set_target_boundaries(osb, wc, pos, len,
-				    clusters_to_alloc + extents_to_split);
+	/*
+	 * We have to zero sparse allocated clusters, unwritten extent clusters,
+	 * and non-sparse clusters we just extended.  For non-sparse writes,
+	 * we know zeros will only be needed in the first and/or last cluster.
+	 */
+	if (clusters_to_alloc || extents_to_split ||
+	    wc->w_desc[0].c_needs_zero ||
+	    wc->w_desc[wc->w_clen - 1].c_needs_zero)
+		cluster_of_pages = 1;
+	else
+		cluster_of_pages = 0;
+
+	ocfs2_set_target_boundaries(osb, wc, pos, len, cluster_of_pages);
 
 	handle = ocfs2_start_trans(osb, credits);
 	if (IS_ERR(handle)) {
@@ -1756,8 +1786,7 @@ int ocfs2_write_begin_nolock(struct address_space *mapping,
 	 * extent.
 	 */
 	ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos,
-					 clusters_to_alloc + extents_to_split,
-					 mmap_page);
+					 cluster_of_pages, mmap_page);
 	if (ret) {
 		mlog_errno(ret);
 		goto out_quota;
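The aops.c rework replaces the derived test ocfs2_should_zero_cluster() with an explicit c_needs_zero flag that any of three causes can set: a freshly allocated (sparse) cluster, an unwritten extent, or a cluster past the old i_size of a non-sparse file that this write just extended (tracked via w_first_new_cpos). A condensed model of the flag computation, not the ocfs2 API:

struct write_desc {
	unsigned c_new;         /* freshly allocated (sparse hole fill) */
	unsigned c_unwritten;   /* part of an unwritten extent */
	unsigned c_needs_zero;  /* union of all "must zero" causes */
};

static void mark_desc(struct write_desc *d, int phys_is_zero,
		      int unwritten, int past_old_isize)
{
	if (past_old_isize)     /* non-sparse file just extended */
		d->c_needs_zero = 1;
	if (phys_is_zero) {
		d->c_new = 1;
		d->c_needs_zero = 1;
	}
	if (unwritten) {
		d->c_unwritten = 1;
		d->c_needs_zero = 1;
	}
}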
@@ -310,22 +310,19 @@ out_attach:
 	return ret;
 }
 
-static DEFINE_SPINLOCK(dentry_list_lock);
+DEFINE_SPINLOCK(dentry_list_lock);
 
 /* We limit the number of dentry locks to drop in one go. We have
  * this limit so that we don't starve other users of ocfs2_wq. */
 #define DL_INODE_DROP_COUNT 64
 
 /* Drop inode references from dentry locks */
-void ocfs2_drop_dl_inodes(struct work_struct *work)
+static void __ocfs2_drop_dl_inodes(struct ocfs2_super *osb, int drop_count)
 {
-	struct ocfs2_super *osb = container_of(work, struct ocfs2_super,
-					       dentry_lock_work);
 	struct ocfs2_dentry_lock *dl;
-	int drop_count = DL_INODE_DROP_COUNT;
 
 	spin_lock(&dentry_list_lock);
-	while (osb->dentry_lock_list && drop_count--) {
+	while (osb->dentry_lock_list && (drop_count < 0 || drop_count--)) {
 		dl = osb->dentry_lock_list;
 		osb->dentry_lock_list = dl->dl_next;
 		spin_unlock(&dentry_list_lock);
@@ -333,11 +330,32 @@ void ocfs2_drop_dl_inodes(struct work_struct *work)
 		kfree(dl);
 		spin_lock(&dentry_list_lock);
 	}
-	if (osb->dentry_lock_list)
-		queue_work(ocfs2_wq, &osb->dentry_lock_work);
 	spin_unlock(&dentry_list_lock);
 }
 
+void ocfs2_drop_dl_inodes(struct work_struct *work)
+{
+	struct ocfs2_super *osb = container_of(work, struct ocfs2_super,
+					       dentry_lock_work);
+
+	__ocfs2_drop_dl_inodes(osb, DL_INODE_DROP_COUNT);
+	/*
+	 * Don't queue dropping if umount is in progress. We flush the
+	 * list in ocfs2_dismount_volume
+	 */
+	spin_lock(&dentry_list_lock);
+	if (osb->dentry_lock_list &&
+	    !ocfs2_test_osb_flag(osb, OCFS2_OSB_DROP_DENTRY_LOCK_IMMED))
+		queue_work(ocfs2_wq, &osb->dentry_lock_work);
+	spin_unlock(&dentry_list_lock);
+}
+
+/* Flush the whole work queue */
+void ocfs2_drop_all_dl_inodes(struct ocfs2_super *osb)
+{
+	__ocfs2_drop_dl_inodes(osb, -1);
+}
+
 /*
  * ocfs2_dentry_iput() and friends.
  *
@@ -368,7 +386,8 @@ static void ocfs2_drop_dentry_lock(struct ocfs2_super *osb,
 	/* We leave dropping of inode reference to ocfs2_wq as that can
 	 * possibly lead to inode deletion which gets tricky */
 	spin_lock(&dentry_list_lock);
-	if (!osb->dentry_lock_list)
+	if (!osb->dentry_lock_list &&
+	    !ocfs2_test_osb_flag(osb, OCFS2_OSB_DROP_DENTRY_LOCK_IMMED))
 		queue_work(ocfs2_wq, &osb->dentry_lock_work);
 	dl->dl_next = osb->dentry_lock_list;
 	osb->dentry_lock_list = dl;
@@ -49,10 +49,13 @@ struct ocfs2_dentry_lock {
 int ocfs2_dentry_attach_lock(struct dentry *dentry, struct inode *inode,
			     u64 parent_blkno);
 
+extern spinlock_t dentry_list_lock;
+
 void ocfs2_dentry_lock_put(struct ocfs2_super *osb,
			   struct ocfs2_dentry_lock *dl);
 
 void ocfs2_drop_dl_inodes(struct work_struct *work);
+void ocfs2_drop_all_dl_inodes(struct ocfs2_super *osb);
 
 struct dentry *ocfs2_find_local_alias(struct inode *inode, u64 parent_blkno,
				      int skip_unhashed);
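The dcache.c refactor funnels both the work-queue handler and the new umount-time flush through __ocfs2_drop_dl_inodes(osb, drop_count), where a negative drop_count means drain everything and a positive one bounds a batch so other ocfs2_wq users are not starved. The counting convention in isolation, as a generic sketch:

struct node { struct node *next; };

static struct node *list_head;

static void drain(int drop_count)
{
	/* A negative count disables the bound, exactly as in
	 * (drop_count < 0 || drop_count--) above. */
	while (list_head && (drop_count < 0 || drop_count--))
		list_head = list_head->next;  /* kfree + iput in the real code */
}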
@@ -103,7 +103,6 @@ static void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
 		     lock->ast_pending, lock->ml.type);
 		BUG();
 	}
-	BUG_ON(!list_empty(&lock->ast_list));
 	if (lock->ast_pending)
 		mlog(0, "lock has an ast getting flushed right now\n");
 
@@ -1118,7 +1118,7 @@ static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
 
 	mlog(0, "%s:%.*s: sending mig lockres (%s) to %u\n",
 	     dlm->name, res->lockname.len, res->lockname.name,
-	     orig_flags & DLM_MRES_MIGRATION ? "migrate" : "recovery",
+	     orig_flags & DLM_MRES_MIGRATION ? "migration" : "recovery",
 	     send_to);
 
 	/* send it */
Some files were not shown because too many files have changed in this diff.