mirror of https://github.com/armbian/linux-rockchip.git
Merge branch 'android12-5.10' into android12-5.10-lts
Sync up with android12-5.10 for the following commits:

1419b69403 Merge tag 'android12-5.10.101_r00' into android12-5.10
3eec441822 UPSTREAM: usb: gadget: Fix use-after-free bug by not setting udc->dev.driver
821f3e53d9 UPSTREAM: usb: gadget: rndis: prevent integer overflow in rndis_set_response()
39aca15979 FROMGIT: mm/migrate: fix race between lock page and clear PG_Isolated
de0334216b UPSTREAM: arm64: proton-pack: Include unprivileged eBPF status in Spectre v2 mitigation reporting
d236f7b4cb UPSTREAM: arm64: Use the clearbhb instruction in mitigations
98b16e808f UPSTREAM: KVM: arm64: Allow SMCCC_ARCH_WORKAROUND_3 to be discovered and migrated
0f76dfc55d UPSTREAM: arm64: Mitigate spectre style branch history side channels
5411474f65 UPSTREAM: arm64: Do not include __READ_ONCE() block in assembly files
e9a39a642c UPSTREAM: KVM: arm64: Allow indirect vectors to be used without SPECTRE_V3A
fee1ae7c7c UPSTREAM: arm64: proton-pack: Report Spectre-BHB vulnerabilities as part of Spectre-v2
d95b0b4e5d UPSTREAM: arm64: Add percpu vectors for EL1
30180ef431 Revert "BACKPORT: FROMLIST: scsi: core: Reserve one tag for the UFS driver"
28837e415d UPSTREAM: arm64: entry: Add macro for reading symbol addresses from the trampoline
e322fe26a1 UPSTREAM: arm64: entry: Add vectors that have the bhb mitigation sequences
2a90cf9af2 UPSTREAM: arm64: entry: Add non-kpti __bp_harden_el1_vectors for mitigations
0db372ec4b UPSTREAM: arm64: entry: Allow the trampoline text to occupy multiple pages
158c87e50a UPSTREAM: arm64: entry: Make the kpti trampoline's kpti sequence optional
e6408b96a0 UPSTREAM: arm64: entry: Move trampoline macros out of ifdef'd section
00d8bb6b90 UPSTREAM: arm64: entry: Don't assume tramp_vectors is the start of the vectors
0defb52ce6 UPSTREAM: arm64: entry: Allow tramp_alias to access symbols after the 4K boundary
6e48449c91 UPSTREAM: arm64: entry: Move the trampoline data page before the text page
bb8baaf3af UPSTREAM: arm64: entry: Free up another register on kpti's tramp_exit path
32ba6d5d61 UPSTREAM: arm64: entry: Make the trampoline cleanup optional
c8b567d888 UPSTREAM: arm64: spectre: Rename spectre_v4_patch_fw_mitigation_conduit
17867c11a2 UPSTREAM: arm64: entry.S: Add ventry overflow sanity checks
81ec26aafe UPSTREAM: arm64: cpufeature: add HWCAP for FEAT_RPRES
18c4e4fa56 UPSTREAM: arm64: cpufeature: add HWCAP for FEAT_AFP
68bc555a23 UPSTREAM: arm64: add ID_AA64ISAR2_EL1 sys register
2e2eef400b UPSTREAM: arm64: Add HWCAP for self-synchronising virtual counter
6d1f2678e2 UPSTREAM: arm64: Add Cortex-X2 CPU part definition
51eded5d1b UPSTREAM: arm64: cputype: Add CPU implementor & types for the Apple M1 cores
803ff1161c UPSTREAM: binder: Add invalid handle info in user error log
a40cd23755 UPSTREAM: ARM: fix Thumb2 regression with Spectre BHB
56186c7e4a UPSTREAM: ARM: Spectre-BHB: provide empty stub for non-config
1ea0d91588 UPSTREAM: ARM: fix build warning in proc-v7-bugs.c
fdf3cb8a00 UPSTREAM: ARM: Do not use NOCROSSREFS directive with ld.lld
57bc1e13f0 UPSTREAM: ARM: fix co-processor register typo
a4e68d43f5 UPSTREAM: ARM: fix build error when BPF_SYSCALL is disabled
bd2376838d UPSTREAM: ARM: include unprivileged BPF status in Spectre V2 reporting
afbbe4048f UPSTREAM: ARM: Spectre-BHB workaround
5a41f364e7 UPSTREAM: ARM: use LOADADDR() to get load address of sections
3bfcb356df UPSTREAM: ARM: early traps initialisation
5a64a66802 UPSTREAM: ARM: report Spectre v2 status through sysfs
9362cd2b47 UPSTREAM: x86/speculation: Warn about eIBRS + LFENCE + Unprivileged eBPF + SMT
54a2bd029f UPSTREAM: x86/speculation: Warn about Spectre v2 LFENCE mitigation
f1b1f893b4 UPSTREAM: x86/speculation: Update link to AMD speculation whitepaper
c4188388a3 UPSTREAM: x86/speculation: Use generic retpoline by default on AMD
bd02dc4329 UPSTREAM: x86/speculation: Include unprivileged eBPF status in Spectre v2 mitigation reporting
3883503747 UPSTREAM: Documentation/hw-vuln: Update spectre doc
1c3e98581b UPSTREAM: x86/speculation: Add eIBRS + Retpoline options
cc9e9aa4e0 UPSTREAM: x86/speculation: Rename RETPOLINE_AMD to RETPOLINE_LFENCE
414a6076ac UPSTREAM: x86,bugs: Unconditionally allow spectre_v2=retpoline,amd
f27f62fecd UPSTREAM: bpf: Add kconfig knob for disabling unpriv bpf by default
f3ca80cced ANDROID: dm-bow: Protect Ranges fetched and erased from the RB tree
4ebb639f0d ANDROID: mm: page_pinner: fix build warning
fe75d58387 ANDROID: fault: Add vendor hook for TLB conflict
8248a3e758 BACKPORT: sched: Fix yet more sched_fork() races
cd6e5d5d7d ANDROID: mm/slub: Fix Kasan issue with for_each_object_track
8dbcaf63b2 ANDROID: dm kcopyd: Use reserved memory for the copy buffer
7b5fea2f46 ANDROID: GKI: add allowed list file for xiaomi
ae38f9954b ANDROID: GKI: Update symbols to symbol list
786bcb1109 FROMGIT: f2fs: quota: fix loop condition at f2fs_quota_sync()
91fef75d48 FROMGIT: f2fs: Restore rwsem lockdep support
4cc8ec84be ANDROID: ABI: update allowed list for galaxy
fcaaaaae6d UPSTREAM: mac80211_hwsim: initialize ieee80211_tx_info at hw_scan_work
91be4236fb ANDROID: GKI: remove vfs-only namespace from 2 symbols
a817d6ed87 ANDROID: mm: Fix page table lookup in speculative fault path
e53b1b9ad4 UPSTREAM: xhci: re-initialize the HC during resume if HCE was set
767f384155 FROMGIT: xhci: make xhci_handshake timeout for xhci_reset() adjustable
ebbf267fc0 ANDROID: vendor_hooks: Add hooks for __alloc_pages_direct_reclaim
135406cecb ANDROID: dma-direct: Document disable_dma32
bf96382fb9 ANDROID: dma-direct: Make DMA32 disablement work for CONFIG_NUMA
8f66dc1a78 UPSTREAM: mmc: block: fix read single on recovery logic
cf221db753 UPSTREAM: fget: check that the fd still exists after getting a ref to it
43754d8b7f ANDROID: GKI: Update symbols to symbol list
f2d0c30576 ANDROID: vendor_hooks: Add hooks for shrink_active_list
62412e5b8c FROMGIT: mm: count time in drain_all_pages during direct reclaim as memory pressure
3b9fe10e46 ANDROID: incremental-fs: remove spurious kfree()
acefa91e51 ANDROID: vendor_hooks: Add hooks for binder
c3ac7418e6 ANDROID: qcom: Add sysfs related symbol

Change-Id: Icbe5fb26e3cef602e3bbc01745a755a95d72a1a0
@@ -939,6 +939,10 @@
	can be useful when debugging issues that require an SLB
	miss to occur.

	disable_dma32=	[KNL]
	Dynamically disable ZONE_DMA32 on kernels compiled with
	CONFIG_ZONE_DMA32=y.

	stress_slb	[PPC]
	Limits the number of kernel SLB entries, and flushes
	them frequently to increase the rate of SLB faults

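For context, boot parameters documented this way are normally wired up with early_param(); the sketch below is only an illustration of that pattern, assuming a handler named parse_disable_dma32 -- the actual dma-direct change lives in a file that is not part of this excerpt.

#include <linux/init.h>
#include <linux/string.h>
#include <linux/types.h>

/* Illustrative only: not the actual dma-direct implementation. */
static bool dma32_disabled __ro_after_init;

static int __init parse_disable_dma32(char *arg)
{
	/* e.g. "disable_dma32=on" on the kernel command line */
	if (arg && !strcmp(arg, "on"))
		dma32_disabled = true;
	return 0;
}
early_param("disable_dma32", parse_disable_dma32);
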
File diff suppressed because it is too large
@@ -343,6 +343,7 @@
  __traceiter_dwc3_readl
  __traceiter_dwc3_writel
  __traceiter_gpu_mem_total
  __traceiter_kfree_skb
  __traceiter_sched_util_est_se_tp
  __traceiter_xdp_exception
  __tracepoint_android_rvh_account_irq
@@ -494,6 +495,7 @@
  __tracepoint_ipi_raise
  __tracepoint_irq_handler_entry
  __tracepoint_irq_handler_exit
  __tracepoint_kfree_skb
  __tracepoint_pelt_cfs_tp
  __tracepoint_pelt_dl_tp
  __tracepoint_pelt_irq_tp
@@ -4278,6 +4280,7 @@
  usb_hcd_start_port_resume
  usb_hcd_unlink_urb_from_ep
  usb_hcds_loaded
  usb_hid_driver
  usb_hub_clear_tt_buffer
  usb_hub_find_child
  usb_ifnum_to_if

@@ -2692,6 +2692,8 @@
  __traceiter_android_vh_tune_inactive_ratio
  __traceiter_android_vh_tune_scan_type
  __traceiter_android_vh_tune_swappiness
  __traceiter_android_vh_page_referenced_check_bypass
  __traceiter_android_vh_drain_all_pages_bypass
  __traceiter_android_vh_ufs_compl_command
  __traceiter_android_vh_ufs_send_command
  __traceiter_android_vh_ufs_send_tm_command
@@ -2894,6 +2896,8 @@
  __tracepoint_android_vh_tune_inactive_ratio
  __tracepoint_android_vh_tune_scan_type
  __tracepoint_android_vh_tune_swappiness
  __tracepoint_android_vh_page_referenced_check_bypass
  __tracepoint_android_vh_drain_all_pages_bypass
  __tracepoint_android_vh_ufs_compl_command
  __tracepoint_android_vh_ufs_send_command
  __tracepoint_android_vh_ufs_send_tm_command

@@ -2430,6 +2430,7 @@
  sysfs_create_groups
  sysfs_create_link
  sysfs_emit
  sysfs_group_change_owner
  __sysfs_match_string
  sysfs_notify
  sysfs_remove_bin_file
@@ -2687,6 +2688,7 @@
  __tracepoint_android_vh_ftrace_size_check
  __tracepoint_android_vh_gic_resume
  __tracepoint_android_vh_gpio_block_read
  __tracepoint_android_vh_handle_tlb_conf
  __tracepoint_android_vh_iommu_setup_dma_ops
  __tracepoint_android_vh_ipi_stop
  __tracepoint_android_vh_jiffies_update

@@ -194,3 +194,9 @@

#extend_reclaim.ko
  try_to_free_mem_cgroup_pages

##required by xm_power_debug.ko module
  wakeup_sources_read_lock
  wakeup_sources_read_unlock
  wakeup_sources_walk_start
  wakeup_sources_walk_next

@@ -112,6 +112,7 @@
#define ESR_ELx_FSC_ACCESS	(0x08)
#define ESR_ELx_FSC_FAULT	(0x04)
#define ESR_ELx_FSC_PERM	(0x0C)
#define ESR_ELx_FSC_TLBCONF	(0x30)

/* ISS field definitions for Data Aborts */
#define ESR_ELx_ISV_SHIFT	(24)

@@ -711,7 +711,11 @@ static int do_alignment_fault(unsigned long far, unsigned int esr,

static int do_bad(unsigned long far, unsigned int esr, struct pt_regs *regs)
{
	return 1; /* "fault" */
	unsigned long addr = untagged_addr(far);
	int ret = 1;

	trace_android_vh_handle_tlb_conf(addr, esr, &ret);
	return ret;
}

static int do_sea(unsigned long far, unsigned int esr, struct pt_regs *regs)

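These two hunks come from "ANDROID: fault: Add vendor hook for TLB conflict": the esr.h change adds the 0x30 fault-status code for a TLB conflict abort, and do_bad() now gives a vendor hook a chance to resolve the fault (a probe that sets *ret to 0 marks the abort as handled, so the faulting access is simply retried; the default 1 still reports a fault). A vendor module would attach a probe through the register_trace_* helper generated for the hook. The module below is a hedged sketch: only the hook name and its argument list come from the diff above, while the header path, probe body and module scaffolding are assumptions.

#include <linux/module.h>
#include <asm/esr.h>
#include <trace/hooks/fault.h>	/* assumed location of the hook declaration */

/* Hypothetical probe: claim only TLB conflict aborts (ESR_ELx_FSC_TLBCONF). */
static void vendor_handle_tlb_conf(void *unused, unsigned long addr,
				   unsigned int esr, int *ret)
{
	if ((esr & ESR_ELx_FSC) == ESR_ELx_FSC_TLBCONF) {
		/* vendor-specific recovery (e.g. local TLB invalidation) here */
		*ret = 0;	/* tell do_bad() the fault was handled */
	}
}

static int __init vendor_tlb_conf_init(void)
{
	return register_trace_android_vh_handle_tlb_conf(vendor_handle_tlb_conf, NULL);
}
module_init(vendor_tlb_conf_init);
MODULE_LICENSE("GPL");
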
@@ -2490,6 +2490,7 @@ static int binder_proc_transaction(struct binder_transaction *t,
	struct binder_priority node_prio;
	bool oneway = !!(t->flags & TF_ONE_WAY);
	bool pending_async = false;
	bool skip = false;

	BUG_ON(!node);
	binder_node_lock(node);
@@ -2517,7 +2518,10 @@ static int binder_proc_transaction(struct binder_transaction *t,
		return proc->is_frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY;
	}

	if (!thread && !pending_async)
	trace_android_vh_binder_proc_transaction_entry(proc, t,
		&thread, node->debug_id, pending_async, !oneway, &skip);

	if (!thread && !pending_async && !skip)
		thread = binder_select_thread_ilocked(proc);

	trace_android_vh_binder_proc_transaction(current, proc->tsk,
@@ -2701,8 +2705,8 @@ static void binder_transaction(struct binder_proc *proc,
						 ref->node, &target_proc,
						 &return_error);
		} else {
			binder_user_error("%d:%d got transaction to invalid handle\n",
					  proc->pid, thread->pid);
			binder_user_error("%d:%d got transaction to invalid handle, %u\n",
					  proc->pid, thread->pid, tr->target.handle);
			return_error = BR_FAILED_REPLY;
		}
		binder_proc_unlock(proc);
@@ -4032,6 +4036,10 @@ retry:
		size_t trsize = sizeof(*trd);

		binder_inner_proc_lock(proc);
		trace_android_vh_binder_select_worklist_ilocked(&list, thread,
					proc, wait_for_proc_work);
		if (list)
			goto skip;
		if (!binder_worklist_empty_ilocked(&thread->todo))
			list = &thread->todo;
		else if (!binder_worklist_empty_ilocked(&proc->todo) &&
@@ -4045,7 +4053,7 @@ retry:
				goto retry;
			break;
		}

skip:
		if (end - ptr < sizeof(tr) + 4) {
			binder_inner_proc_unlock(proc);
			break;

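The binder changes add two vendor hooks: android_vh_binder_proc_transaction_entry lets a vendor module pick the destination thread itself (or set *skip so the default binder_select_thread_ilocked() choice is suppressed), and android_vh_binder_select_worklist_ilocked lets it hand binder_thread_read() a work list of its own. The probe below is a hedged sketch for the first hook only; its parameter list is inferred from the call site above, and the header path and module scaffolding are assumptions, not code from this series.

#include <linux/module.h>
#include <trace/hooks/binder.h>	/* assumed location of the hook declarations */

/* Hypothetical probe: called from binder's locked fast path, so keep it short. */
static void vendor_pick_binder_thread(void *unused, struct binder_proc *proc,
				      struct binder_transaction *t,
				      struct binder_thread **thread,
				      int node_debug_id, bool pending_async,
				      bool sync, bool *skip)
{
	/*
	 * A real policy would set *thread to a preferred binder thread, or
	 * set *skip = true to keep the default selection from running.
	 * Intentionally left empty in this sketch.
	 */
}

static int __init vendor_binder_init(void)
{
	return register_trace_android_vh_binder_proc_transaction_entry(
			vendor_pick_binder_thread, NULL);
}
module_init(vendor_binder_init);
MODULE_LICENSE("GPL");
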
@@ -277,8 +277,10 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_reply);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_trans);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_binder_transaction);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_preset);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_proc_transaction_entry);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_proc_transaction);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_proc_transaction_end);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_select_worklist_ilocked);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_new_ref);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_del_ref);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_post_init_entity_util_avg);
@@ -289,6 +291,8 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_logbuf_pr_cont);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_tune_scan_type);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_tune_swappiness);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_shrink_slab_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_page_referenced_check_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_drain_all_pages_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_psi_event);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_psi_group);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_cpuset_fork);
@@ -388,3 +392,4 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_free_proc);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_thread_release);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_has_work_ilocked);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_read_done);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_handle_tlb_conf);

@@ -236,6 +236,7 @@ static void set_type(struct bow_context *bc, struct bow_range **br, int type)

	(*br)->type = type;

	mutex_lock(&bc->ranges_lock);
	if (next->type == type) {
		if (type == TRIMMED)
			list_del(&next->trimmed_list);
@@ -249,6 +250,7 @@ static void set_type(struct bow_context *bc, struct bow_range **br, int type)
		rb_erase(&(*br)->node, &bc->ranges);
		kfree(*br);
	}
	mutex_unlock(&bc->ranges_lock);

	*br = NULL;
}
@@ -599,6 +601,7 @@ static void dm_bow_dtr(struct dm_target *ti)
	struct bow_context *bc = (struct bow_context *) ti->private;
	struct kobject *kobj;

	mutex_lock(&bc->ranges_lock);
	while (rb_first(&bc->ranges)) {
		struct bow_range *br = container_of(rb_first(&bc->ranges),
						    struct bow_range, node);
@@ -606,6 +609,8 @@ static void dm_bow_dtr(struct dm_target *ti)
		rb_erase(&br->node, &bc->ranges);
		kfree(br);
	}
	mutex_unlock(&bc->ranges_lock);

	if (bc->workqueue)
		destroy_workqueue(bc->workqueue);
	if (bc->bufio)
@@ -1181,6 +1186,7 @@ static void dm_bow_tablestatus(struct dm_target *ti, char *result,
		return;
	}

	mutex_lock(&bc->ranges_lock);
	for (i = rb_first(&bc->ranges); i; i = rb_next(i)) {
		struct bow_range *br = container_of(i, struct bow_range, node);

@@ -1188,11 +1194,11 @@ static void dm_bow_tablestatus(struct dm_target *ti, char *result,
				   readable_type[br->type],
				   (unsigned long long)br->sector);
		if (result >= end)
			return;
			goto unlock;

		result += scnprintf(result, end - result, "\n");
		if (result >= end)
			return;
			goto unlock;

		if (br->type == TRIMMED)
			++trimmed_range_count;
@@ -1214,19 +1220,22 @@ static void dm_bow_tablestatus(struct dm_target *ti, char *result,
		if (!rb_next(i)) {
			scnprintf(result, end - result,
				  "\nERROR: Last range not of type TOP");
			return;
			goto unlock;
		}

		if (br->sector > range_top(br)) {
			scnprintf(result, end - result,
				  "\nERROR: sectors out of order");
			return;
			goto unlock;
		}
	}

	if (trimmed_range_count != trimmed_list_length)
		scnprintf(result, end - result,
			  "\nERROR: not all trimmed ranges in trimmed list");

unlock:
	mutex_unlock(&bc->ranges_lock);
}

static void dm_bow_status(struct dm_target *ti, status_type_t type,

@@ -17,6 +17,8 @@
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/of_reserved_mem.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
@@ -39,6 +41,105 @@ static unsigned kcopyd_subjob_size_kb = DEFAULT_SUB_JOB_SIZE_KB;
module_param(kcopyd_subjob_size_kb, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(kcopyd_subjob_size_kb, "Sub-job size for dm-kcopyd clients");

static bool rsm_enabled;
static phys_addr_t rsm_mem_base, rsm_mem_size;

#ifndef MODULE
static DEFINE_SPINLOCK(rsm_lock);
static int *rsm_mem;
static int rsm_page_cnt;
static int rsm_tbl_idx;
static struct reserved_mem *rmem;

static void __init kcopyd_rsm_init(void)
{
	static struct device_node *rsm_node;
	int ret = 0;

	if (!rsm_enabled)
		return;

	rsm_node = of_find_compatible_node(NULL, NULL, "mediatek,dm_ota");
	if (!rsm_node) {
		ret = -ENODEV;
		goto out;
	}

	rmem = of_reserved_mem_lookup(rsm_node);
	if (!rmem) {
		ret = -EINVAL;
		goto out_put_node;
	}

	rsm_mem_base = rmem->base;
	rsm_mem_size = rmem->size;
	rsm_page_cnt = rsm_mem_size / PAGE_SIZE;
	rsm_mem = kcalloc(rsm_page_cnt, sizeof(int), GFP_KERNEL);
	if (!rsm_mem)
		ret = -ENOMEM;

out_put_node:
	of_node_put(rsm_node);
out:
	if (ret)
		pr_warn("kcopyd: failed to init rsm: %d", ret);
}

static int __init kcopyd_rsm_enable(char *str)
{
	rsm_enabled = true;

	return 0;
}
early_param("mtk_kcopyd_quirk", kcopyd_rsm_enable);

static void kcopyd_rsm_get_page(struct page **p)
{
	int i;
	unsigned long flags;

	*p = NULL;
	spin_lock_irqsave(&rsm_lock, flags);
	for (i = 0 ; i < rsm_page_cnt ; i++) {
		rsm_tbl_idx = (rsm_tbl_idx + 1 == rsm_page_cnt) ? 0 : rsm_tbl_idx + 1;

		if (rsm_mem[rsm_tbl_idx] == 0) {
			rsm_mem[rsm_tbl_idx] = 1;
			*p = virt_to_page(phys_to_virt(rsm_mem_base + PAGE_SIZE
						       * rsm_tbl_idx));
			break;
		}
	}
	spin_unlock_irqrestore(&rsm_lock, flags);
}

static void kcopyd_rsm_drop_page(struct page **p)
{
	u64 off;
	unsigned long flags;

	if (*p) {
		off = page_to_phys(*p) - rsm_mem_base;
		spin_lock_irqsave(&rsm_lock, flags);
		rsm_mem[off >> PAGE_SHIFT] = 0;
		spin_unlock_irqrestore(&rsm_lock, flags);
		*p = NULL;
	}
}

static void kcopyd_rsm_destroy(void)
{
	if (rsm_enabled)
		kfree(rsm_mem);
}

#else
#define kcopyd_rsm_destroy(...)
#define kcopyd_rsm_drop_page(...)
#define kcopyd_rsm_get_page(...)
#define kcopyd_rsm_init(...)
#endif

static unsigned dm_get_kcopyd_subjob_size(void)
{
	unsigned sub_job_size_kb;
@@ -211,7 +312,7 @@ static void wake(struct dm_kcopyd_client *kc)
/*
 * Obtain one page for the use of kcopyd.
 */
static struct page_list *alloc_pl(gfp_t gfp)
static struct page_list *alloc_pl(gfp_t gfp, unsigned long job_flags)
{
	struct page_list *pl;

@@ -219,7 +320,12 @@ static struct page_list *alloc_pl(gfp_t gfp)
	if (!pl)
		return NULL;

	pl->page = alloc_page(gfp);
	if (rsm_enabled && test_bit(DM_KCOPYD_SNAP_MERGE, &job_flags)) {
		kcopyd_rsm_get_page(&pl->page);
	} else {
		pl->page = alloc_page(gfp);
	}

	if (!pl->page) {
		kfree(pl);
		return NULL;
@@ -230,7 +336,14 @@ static struct page_list *alloc_pl(gfp_t gfp)

static void free_pl(struct page_list *pl)
{
	__free_page(pl->page);
	struct page *p = pl->page;
	phys_addr_t pa = page_to_phys(p);

	if (rsm_enabled && pa >= rsm_mem_base && pa < rsm_mem_base + rsm_mem_size)
		kcopyd_rsm_drop_page(&pl->page);
	else
		__free_page(pl->page);

	kfree(pl);
}

@@ -258,14 +371,15 @@ static void kcopyd_put_pages(struct dm_kcopyd_client *kc, struct page_list *pl)
}

static int kcopyd_get_pages(struct dm_kcopyd_client *kc,
			    unsigned int nr, struct page_list **pages)
			    unsigned int nr, struct page_list **pages,
			    unsigned long job_flags)
{
	struct page_list *pl;

	*pages = NULL;

	do {
		pl = alloc_pl(__GFP_NOWARN | __GFP_NORETRY | __GFP_KSWAPD_RECLAIM);
		pl = alloc_pl(__GFP_NOWARN | __GFP_NORETRY | __GFP_KSWAPD_RECLAIM, job_flags);
		if (unlikely(!pl)) {
			/* Use reserved pages */
			pl = kc->pages;
@@ -309,7 +423,7 @@ static int client_reserve_pages(struct dm_kcopyd_client *kc, unsigned nr_pages)
	struct page_list *pl = NULL, *next;

	for (i = 0; i < nr_pages; i++) {
		next = alloc_pl(GFP_KERNEL);
		next = alloc_pl(GFP_KERNEL, 0);
		if (!next) {
			if (pl)
				drop_pages(pl);
@@ -395,6 +509,8 @@ int __init dm_kcopyd_init(void)
	zero_page_list.next = &zero_page_list;
	zero_page_list.page = ZERO_PAGE(0);

	kcopyd_rsm_init();

	return 0;
}

@@ -402,6 +518,7 @@ void dm_kcopyd_exit(void)
{
	kmem_cache_destroy(_job_cache);
	_job_cache = NULL;
	kcopyd_rsm_destroy();
}

/*
@@ -586,7 +703,7 @@ static int run_pages_job(struct kcopyd_job *job)
	int r;
	unsigned nr_pages = dm_div_up(job->dests[0].count, PAGE_SIZE >> 9);

	r = kcopyd_get_pages(job->kc, nr_pages, &job->pages);
	r = kcopyd_get_pages(job->kc, nr_pages, &job->pages, job->flags);
	if (!r) {
		/* this job is ready for io */
		push(&job->kc->io_jobs, job);

@@ -1117,7 +1117,8 @@ static void snapshot_merge_next_chunks(struct dm_snapshot *s)
	for (i = 0; i < linear_chunks; i++)
		__check_for_conflicting_io(s, old_chunk + i);

	dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, merge_callback, s);
	dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 1 << DM_KCOPYD_SNAP_MERGE,
		       merge_callback, s);
	return;

shut:

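The dm-snap hunk above is the in-tree user of the new DM_KCOPYD_SNAP_MERGE flag: for jobs flagged this way, alloc_pl() takes pages from the reserved pool instead of the page allocator, so snapshot merging keeps working when system memory is exhausted during an update. For reference, a minimal sketch of how another kcopyd client could opt in; the client, regions and callback below are placeholders, not code from this series.

#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>

/* Hypothetical caller: copy one region using the reserved-memory pool. */
static void start_merge_style_copy(struct dm_kcopyd_client *kc,
				   struct dm_io_region *src,
				   struct dm_io_region *dest,
				   dm_kcopyd_notify_fn done, void *context)
{
	unsigned long flags = 1UL << DM_KCOPYD_SNAP_MERGE;

	/* Same call shape as snapshot_merge_next_chunks() above. */
	dm_kcopyd_copy(kc, src, 1, dest, flags, done, context);
}
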
@@ -220,6 +220,10 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
		goto fail;
	}

	/* Use min_t(int, ...) in case shost->can_queue exceeds SHRT_MAX */
	shost->cmd_per_lun = min_t(int, shost->cmd_per_lun,
				   shost->can_queue);

	error = scsi_init_sense_cache(shost);
	if (error)
		goto fail;
@@ -228,12 +232,6 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
	if (error)
		goto fail;

	shost->can_queue = shost->tag_set.queue_depth;

	/* Use min_t(int, ...) in case shost->can_queue exceeds SHRT_MAX */
	shost->cmd_per_lun = min_t(int, shost->cmd_per_lun,
				   shost->can_queue);

	if (!shost->shost_gendev.parent)
		shost->shost_gendev.parent = dev ? dev : &platform_bus;
	if (!dma_dev)

@@ -1907,10 +1907,6 @@ int scsi_mq_setup_tags(struct Scsi_Host *shost)
		tag_set->ops = &scsi_mq_ops_no_commit;
	tag_set->nr_hw_queues = shost->nr_hw_queues ? : 1;
	tag_set->queue_depth = shost->can_queue;
	if (shost->hostt->name && strcmp(shost->hostt->name, "ufshcd") == 0) {
		tag_set->queue_depth--;
		tag_set->reserved_tags++;
	}
	tag_set->cmd_size = cmd_size;
	tag_set->numa_node = NUMA_NO_NODE;
	tag_set->flags = BLK_MQ_F_SHOULD_MERGE;

@@ -642,6 +642,7 @@ static int rndis_set_response(struct rndis_params *params,
	BufLength = le32_to_cpu(buf->InformationBufferLength);
	BufOffset = le32_to_cpu(buf->InformationBufferOffset);
	if ((BufLength > RNDIS_MAX_TOTAL_SIZE) ||
	    (BufOffset > RNDIS_MAX_TOTAL_SIZE) ||
	    (BufOffset + 8 >= RNDIS_MAX_TOTAL_SIZE))
		return -EINVAL;

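The point of the added bounds check: BufLength and BufOffset come straight from the host-controlled RNDIS command, and the set path reads BufLength bytes starting 8 bytes past BufOffset inside the message buffer, so both values must be rejected before any arithmetic on them -- "BufOffset + 8" on its own can wrap around 32 bits and look small. A stand-alone illustration of the wrap (ordinary user-space C, not kernel code; the limit value is a stand-in):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_TOTAL UINT32_C(1558)	/* stand-in for RNDIS_MAX_TOTAL_SIZE */

int main(void)
{
	uint32_t buf_offset = UINT32_C(0xfffffff9);	/* attacker-chosen */

	/* The sum wraps: a naive "offset + 8 < limit" check would pass. */
	printf("offset + 8 = %" PRIu32 "\n", buf_offset + 8);	/* prints 1 */

	/* Checking the offset itself first, as the patch does, closes the hole. */
	if (buf_offset > MAX_TOTAL || buf_offset + 8 >= MAX_TOTAL)
		puts("rejected");
	return 0;
}
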
@@ -1437,7 +1437,6 @@ static void usb_gadget_remove_driver(struct usb_udc *udc)
	usb_gadget_udc_stop(udc);

	udc->driver = NULL;
	udc->dev.driver = NULL;
	udc->gadget->dev.driver = NULL;
}

@@ -1499,7 +1498,6 @@ static int udc_bind_to_driver(struct usb_udc *udc, struct usb_gadget_driver *dri
			driver->function);

	udc->driver = driver;
	udc->dev.driver = &driver->driver;
	udc->gadget->dev.driver = &driver->driver;

	usb_gadget_udc_set_speed(udc, driver->max_speed);
@@ -1522,7 +1520,6 @@ err1:
	dev_err(&udc->dev, "failed to start %s: %d\n",
		udc->driver->function, ret);
	udc->driver = NULL;
	udc->dev.driver = NULL;
	udc->gadget->dev.driver = NULL;
	return ret;
}

@@ -681,7 +681,7 @@ static int xhci_exit_test_mode(struct xhci_hcd *xhci)
	}
	pm_runtime_allow(xhci_to_hcd(xhci)->self.controller);
	xhci->test_mode = 0;
	return xhci_reset(xhci);
	return xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
}

void xhci_set_link_state(struct xhci_hcd *xhci, struct xhci_port *port,

@@ -2695,7 +2695,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)

fail:
	xhci_halt(xhci);
	xhci_reset(xhci);
	xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
	xhci_mem_cleanup(xhci);
	return -ENOMEM;
}

@@ -65,7 +65,7 @@ static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring)
 * handshake done). There are two failure modes: "usec" have passed (major
 * hardware flakeout), or the register reads as all-ones (hardware removed).
 */
int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec)
int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, u64 timeout_us)
{
	u32 result;
	int ret;
@@ -73,7 +73,7 @@ int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec)
	ret = readl_poll_timeout_atomic(ptr, result,
					(result & mask) == done ||
					result == U32_MAX,
					1, usec);
					1, timeout_us);
	if (result == U32_MAX)		/* card removed */
		return -ENODEV;

@@ -162,7 +162,7 @@ int xhci_start(struct xhci_hcd *xhci)
 * Transactions will be terminated immediately, and operational registers
 * will be set to their defaults.
 */
int xhci_reset(struct xhci_hcd *xhci)
int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us)
{
	u32 command;
	u32 state;
@@ -195,8 +195,7 @@ int xhci_reset(struct xhci_hcd *xhci)
	if (xhci->quirks & XHCI_INTEL_HOST)
		udelay(1000);

	ret = xhci_handshake(&xhci->op_regs->command,
			CMD_RESET, 0, 10 * 1000 * 1000);
	ret = xhci_handshake(&xhci->op_regs->command, CMD_RESET, 0, timeout_us);
	if (ret)
		return ret;

@@ -209,8 +208,7 @@ int xhci_reset(struct xhci_hcd *xhci)
	 * xHCI cannot write to any doorbells or operational registers other
	 * than status until the "Controller Not Ready" flag is cleared.
	 */
	ret = xhci_handshake(&xhci->op_regs->status,
			STS_CNR, 0, 10 * 1000 * 1000);
	ret = xhci_handshake(&xhci->op_regs->status, STS_CNR, 0, timeout_us);

	xhci->usb2_rhub.bus_state.port_c_suspend = 0;
	xhci->usb2_rhub.bus_state.suspended_ports = 0;
@@ -731,7 +729,7 @@ static void xhci_stop(struct usb_hcd *hcd)
	xhci->xhc_state |= XHCI_STATE_HALTED;
	xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
	xhci_halt(xhci);
	xhci_reset(xhci);
	xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
	spin_unlock_irq(&xhci->lock);

	xhci_cleanup_msix(xhci);
@@ -784,7 +782,7 @@ void xhci_shutdown(struct usb_hcd *hcd)
	xhci_halt(xhci);
	/* Workaround for spurious wakeups at shutdown with HSW */
	if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
		xhci_reset(xhci);
		xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
	spin_unlock_irq(&xhci->lock);

	xhci_cleanup_msix(xhci);
@@ -1170,7 +1168,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
		xhci_dbg(xhci, "Stop HCD\n");
		xhci_halt(xhci);
		xhci_zero_64b_regs(xhci);
		retval = xhci_reset(xhci);
		retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC);
		spin_unlock_irq(&xhci->lock);
		if (retval)
			return retval;
@@ -5282,7 +5280,7 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)

	xhci_dbg(xhci, "Resetting HCD\n");
	/* Reset the internal HC memory state and registers. */
	retval = xhci_reset(xhci);
	retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC);
	if (retval)
		return retval;
	xhci_dbg(xhci, "Reset complete\n");

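The two timeout constants used above are defined in the xhci.h part of the same FROMGIT patch, which this excerpt does not include; per the upstream change they are roughly as below (shown for reference only -- verify against the tree). xhci_handshake() now feeds the value straight into readl_poll_timeout_atomic(), so the stop/shutdown paths give up after a quarter of a second instead of polling a dead controller for the full ten seconds that init and resume still allow.

/* From the xhci.h hunk of the same patch (not shown here); values per upstream. */
#define XHCI_RESET_LONG_USEC	(10 * 1000 * 1000)	/* 10 s: init and resume */
#define XHCI_RESET_SHORT_USEC	(250 * 1000)		/* 250 ms: stop/shutdown/cleanup */
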
Some files were not shown because too many files have changed in this diff