Merge eded3ad80a ("ia64: fix an addr to taddr in huge_pte_offset()") into android12-5.10-lts
Steps on the way to 5.10.180

Change-Id: Ied555d8ab53844823d31e3221bd0fb155f0baeae
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
@@ -58,7 +58,7 @@ huge_pte_offset (struct mm_struct *mm, unsigned long addr, unsigned long sz)
 	pgd = pgd_offset(mm, taddr);
 	if (pgd_present(*pgd)) {
-		p4d = p4d_offset(pgd, addr);
+		p4d = p4d_offset(pgd, taddr);
 		if (p4d_present(*p4d)) {
 			pud = pud_offset(p4d, taddr);
 			if (pud_present(*pud)) {
@@ -173,7 +173,6 @@ handler: ;\
 	l.sw    PT_GPR28(r1),r28 ;\
 	l.sw    PT_GPR29(r1),r29 ;\
 	/* r30 already save */ ;\
-/*	l.sw    PT_GPR30(r1),r30*/ ;\
 	l.sw    PT_GPR31(r1),r31 ;\
 	TRACE_IRQS_OFF_ENTRY ;\
 	/* Store -1 in orig_gpr11 for non-syscall exceptions */ ;\
@@ -211,9 +210,8 @@ handler: ;\
 	l.sw    PT_GPR27(r1),r27 ;\
 	l.sw    PT_GPR28(r1),r28 ;\
 	l.sw    PT_GPR29(r1),r29 ;\
-	/* r31 already saved */ ;\
-	l.sw    PT_GPR30(r1),r30 ;\
-/*	l.sw    PT_GPR31(r1),r31 */ ;\
+	/* r30 already saved */ ;\
+	l.sw    PT_GPR31(r1),r31 ;\
 	/* Store -1 in orig_gpr11 for non-syscall exceptions */ ;\
 	l.addi  r30,r0,-1 ;\
 	l.sw    PT_ORIG_GPR11(r1),r30 ;\
@@ -33,9 +33,12 @@ static int __set_clk_parents(struct device_node *node, bool clk_supplier)
 			else
 				return rc;
 		}
-		if (clkspec.np == node && !clk_supplier)
+		if (clkspec.np == node && !clk_supplier) {
+			of_node_put(clkspec.np);
 			return 0;
+		}
 		pclk = of_clk_get_from_provider(&clkspec);
+		of_node_put(clkspec.np);
 		if (IS_ERR(pclk)) {
 			if (PTR_ERR(pclk) != -EPROBE_DEFER)
 				pr_warn("clk: couldn't get parent clock %d for %pOF\n",
@@ -48,10 +51,12 @@ static int __set_clk_parents(struct device_node *node, bool clk_supplier)
 			if (rc < 0)
 				goto err;
 		if (clkspec.np == node && !clk_supplier) {
+			of_node_put(clkspec.np);
 			rc = 0;
 			goto err;
 		}
 		clk = of_clk_get_from_provider(&clkspec);
+		of_node_put(clkspec.np);
 		if (IS_ERR(clk)) {
 			if (PTR_ERR(clk) != -EPROBE_DEFER)
 				pr_warn("clk: couldn't get assigned clock %d for %pOF\n",
@@ -93,10 +98,13 @@ static int __set_clk_rates(struct device_node *node, bool clk_supplier)
 			else
 				return rc;
 		}
-		if (clkspec.np == node && !clk_supplier)
+		if (clkspec.np == node && !clk_supplier) {
+			of_node_put(clkspec.np);
 			return 0;
+		}
 
 		clk = of_clk_get_from_provider(&clkspec);
+		of_node_put(clkspec.np);
 		if (IS_ERR(clk)) {
 			if (PTR_ERR(clk) != -EPROBE_DEFER)
 				pr_warn("clk: couldn't get clock %d for %pOF\n",
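All of the clk hunks above share one shape: the phandle parse hands back clkspec.np with its reference count raised, and the early "return 0" paths leaked that reference. What follows is a minimal standalone C model of the refcount discipline the fix restores; every name in it is invented for illustration and it is not the kernel API.

#include <stdio.h>

struct node { int refcount; };

/* models of_parse_phandle_with_args(): hands back a referenced node */
static void get_node(struct node *n) { n->refcount++; }
/* models of_node_put() */
static void put_node(struct node *n) { n->refcount--; }

static int parse_one(struct node *np, int bail_early)
{
	get_node(np);
	if (bail_early) {
		put_node(np);	/* the added of_node_put() on the early return */
		return 0;
	}
	/* ... look up the clock from the provider ... */
	put_node(np);		/* the added of_node_put() after the lookup */
	return 0;
}

int main(void)
{
	struct node np = { .refcount = 1 };

	parse_one(&np, 1);
	parse_one(&np, 0);
	printf("refcount balanced: %s\n", np.refcount == 1 ? "yes" : "no");
	return 0;
}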
@@ -258,21 +258,25 @@ int __init davinci_timer_register(struct clk *clk,
 			    resource_size(&timer_cfg->reg),
 			    "davinci-timer")) {
 		pr_err("Unable to request memory region\n");
-		return -EBUSY;
+		rv = -EBUSY;
+		goto exit_clk_disable;
 	}
 
 	base = ioremap(timer_cfg->reg.start, resource_size(&timer_cfg->reg));
 	if (!base) {
 		pr_err("Unable to map the register range\n");
-		return -ENOMEM;
+		rv = -ENOMEM;
+		goto exit_mem_region;
 	}
 
 	davinci_timer_init(base);
 	tick_rate = clk_get_rate(clk);
 
 	clockevent = kzalloc(sizeof(*clockevent), GFP_KERNEL);
-	if (!clockevent)
-		return -ENOMEM;
+	if (!clockevent) {
+		rv = -ENOMEM;
+		goto exit_iounmap_base;
+	}
 
 	clockevent->dev.name = "tim12";
 	clockevent->dev.features = CLOCK_EVT_FEAT_ONESHOT;
@@ -297,7 +301,7 @@ int __init davinci_timer_register(struct clk *clk,
 			 "clockevent/tim12", clockevent);
 	if (rv) {
 		pr_err("Unable to request the clockevent interrupt\n");
-		return rv;
+		goto exit_free_clockevent;
 	}
 
 	davinci_clocksource.dev.rating = 300;
@@ -324,13 +328,27 @@ int __init davinci_timer_register(struct clk *clk,
 	rv = clocksource_register_hz(&davinci_clocksource.dev, tick_rate);
 	if (rv) {
 		pr_err("Unable to register clocksource\n");
-		return rv;
+		goto exit_free_irq;
 	}
 
 	sched_clock_register(davinci_timer_read_sched_clock,
 			     DAVINCI_TIMER_CLKSRC_BITS, tick_rate);
 
 	return 0;
+
+exit_free_irq:
+	free_irq(timer_cfg->irq[DAVINCI_TIMER_CLOCKEVENT_IRQ].start,
+		 clockevent);
+exit_free_clockevent:
+	kfree(clockevent);
+exit_iounmap_base:
+	iounmap(base);
+exit_mem_region:
+	release_mem_region(timer_cfg->reg.start,
+			   resource_size(&timer_cfg->reg));
+exit_clk_disable:
+	clk_disable_unprepare(clk);
+	return rv;
 }
 
 static int __init of_davinci_timer_register(struct device_node *np)
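The davinci hunks convert every mid-function return into a jump to a label that unwinds exactly the resources acquired so far, in reverse order. Here is a standalone sketch of that pattern with invented names, where malloc/free stand in for request_mem_region/ioremap/kzalloc and their release calls:

#include <stdlib.h>

static int timer_setup_model(void)
{
	int rv;
	char *region, *mapping, *event;

	region = malloc(16);		/* stands in for request_mem_region() */
	if (!region)
		return -1;

	mapping = malloc(16);		/* stands in for ioremap() */
	if (!mapping) {
		rv = -1;
		goto exit_free_region;
	}

	event = malloc(16);		/* stands in for kzalloc() */
	if (!event) {
		rv = -1;
		goto exit_free_mapping;	/* unwinds everything acquired so far */
	}

	/* success: normal teardown, also in reverse order */
	free(event);
	free(mapping);
	free(region);
	return 0;

exit_free_mapping:
	free(mapping);
exit_free_region:
	free(region);
	return rv;
}

int main(void)
{
	return timer_setup_model() ? EXIT_FAILURE : EXIT_SUCCESS;
}

The ordering of the labels is the whole point: falling through them releases resources in exactly the reverse order of acquisition, so each failure site only needs to name the first thing that must be undone.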
@@ -212,6 +212,7 @@ struct at_xdmac {
 	int			irq;
 	struct clk		*clk;
 	u32			save_gim;
+	u32			save_gs;
 	struct dma_pool		*at_xdmac_desc_pool;
 	struct at_xdmac_chan	chan[];
 };
@@ -1910,6 +1911,7 @@ static int atmel_xdmac_suspend(struct device *dev)
 		}
 	}
 	atxdmac->save_gim = at_xdmac_read(atxdmac, AT_XDMAC_GIM);
+	atxdmac->save_gs = at_xdmac_read(atxdmac, AT_XDMAC_GS);
 
 	at_xdmac_off(atxdmac);
 	clk_disable_unprepare(atxdmac->clk);
@@ -1946,7 +1948,8 @@ static int atmel_xdmac_resume(struct device *dev)
 			at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, atchan->save_cndc);
 			at_xdmac_chan_write(atchan, AT_XDMAC_CIE, atchan->save_cim);
 			wmb();
-			at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);
+			if (atxdmac->save_gs & atchan->mask)
+				at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);
 		}
 	}
 	return 0;
@@ -166,7 +166,7 @@ static void vchan_free_desc(struct virt_dma_desc *vdesc)
 	dw_edma_free_desc(vd2dw_edma_desc(vdesc));
 }
 
-static void dw_edma_start_transfer(struct dw_edma_chan *chan)
+static int dw_edma_start_transfer(struct dw_edma_chan *chan)
 {
 	struct dw_edma_chunk *child;
 	struct dw_edma_desc *desc;
@@ -174,16 +174,16 @@ static void dw_edma_start_transfer(struct dw_edma_chan *chan)
 
 	vd = vchan_next_desc(&chan->vc);
 	if (!vd)
-		return;
+		return 0;
 
 	desc = vd2dw_edma_desc(vd);
 	if (!desc)
-		return;
+		return 0;
 
 	child = list_first_entry_or_null(&desc->chunk->list,
 					 struct dw_edma_chunk, list);
 	if (!child)
-		return;
+		return 0;
 
 	dw_edma_v0_core_start(child, !desc->xfer_sz);
 	desc->xfer_sz += child->ll_region.sz;
@@ -191,6 +191,8 @@ static void dw_edma_start_transfer(struct dw_edma_chan *chan)
 	list_del(&child->list);
 	kfree(child);
 	desc->chunks_alloc--;
+
+	return 1;
 }
 
 static int dw_edma_device_config(struct dma_chan *dchan,
@@ -274,9 +276,12 @@ static void dw_edma_device_issue_pending(struct dma_chan *dchan)
 	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
 	unsigned long flags;
 
+	if (!chan->configured)
+		return;
+
 	spin_lock_irqsave(&chan->vc.lock, flags);
-	if (chan->configured && chan->request == EDMA_REQ_NONE &&
-	    chan->status == EDMA_ST_IDLE && vchan_issue_pending(&chan->vc)) {
+	if (vchan_issue_pending(&chan->vc) && chan->request == EDMA_REQ_NONE &&
+	    chan->status == EDMA_ST_IDLE) {
 		chan->status = EDMA_ST_BUSY;
 		dw_edma_start_transfer(chan);
 	}
@@ -497,14 +502,14 @@ static void dw_edma_done_interrupt(struct dw_edma_chan *chan)
 		switch (chan->request) {
 		case EDMA_REQ_NONE:
 			desc = vd2dw_edma_desc(vd);
-			if (desc->chunks_alloc) {
-				chan->status = EDMA_ST_BUSY;
-				dw_edma_start_transfer(chan);
-			} else {
+			if (!desc->chunks_alloc) {
 				list_del(&vd->node);
 				vchan_cookie_complete(vd);
-				chan->status = EDMA_ST_IDLE;
 			}
+
+			/* Continue transferring if there are remaining chunks or issued requests.
+			 */
+			chan->status = dw_edma_start_transfer(chan) ? EDMA_ST_BUSY : EDMA_ST_IDLE;
 			break;
 
 		case EDMA_REQ_STOP:
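Taken together, the dw-edma hunks make dw_edma_start_transfer() report whether it actually launched a chunk, and the completion handler then derives the channel state from that answer instead of duplicating the bookkeeping. A compressed standalone C model of that control flow, with invented names:

#include <stdio.h>

enum state { ST_IDLE, ST_BUSY };

/* models the new int-returning dw_edma_start_transfer():
 * 1 = a chunk was started, 0 = nothing left to start */
static int start_transfer(int *chunks_remaining)
{
	if (*chunks_remaining == 0)
		return 0;
	(*chunks_remaining)--;
	return 1;
}

int main(void)
{
	int chunks = 2;
	enum state st = ST_BUSY;

	/* models dw_edma_done_interrupt(): the state simply follows
	 * the return value, so queued work keeps the channel busy */
	while (st == ST_BUSY)
		st = start_transfer(&chunks) ? ST_BUSY : ST_IDLE;
	printf("idle after draining, chunks=%d\n", chunks);
	return 0;
}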
@@ -756,7 +756,7 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
 
 	xor_dev->clk = devm_clk_get(&pdev->dev, NULL);
 	if (PTR_ERR(xor_dev->clk) == -EPROBE_DEFER) {
-		ret = EPROBE_DEFER;
+		ret = -EPROBE_DEFER;
 		goto disable_reg_clk;
 	}
 	if (!IS_ERR(xor_dev->clk)) {
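A one-character fix, but worth spelling out: kernel error codes travel as negative values, so a bare EPROBE_DEFER (517) looks like a non-error to any "ret < 0" check. A tiny standalone illustration of the difference:

#include <stdio.h>

#define EPROBE_DEFER 517	/* kernel value; errors are returned negated */

int main(void)
{
	int wrong = EPROBE_DEFER;	/* what the old code returned */
	int right = -EPROBE_DEFER;	/* what the fix returns */

	printf("wrong treated as error: %s\n", wrong < 0 ? "yes" : "no"); /* no  */
	printf("right treated as error: %s\n", right < 0 ? "yes" : "no"); /* yes */
	return 0;
}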
@@ -243,6 +243,13 @@ void rpi_firmware_put(struct rpi_firmware *fw)
 }
 EXPORT_SYMBOL_GPL(rpi_firmware_put);
 
+static void devm_rpi_firmware_put(void *data)
+{
+	struct rpi_firmware *fw = data;
+
+	rpi_firmware_put(fw);
+}
+
 static int rpi_firmware_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
@@ -338,6 +345,28 @@ err_put_device:
 }
 EXPORT_SYMBOL_GPL(rpi_firmware_get);
 
+/**
+ * devm_rpi_firmware_get - Get pointer to rpi_firmware structure.
+ * @firmware_node: Pointer to the firmware Device Tree node.
+ *
+ * Returns NULL is the firmware device is not ready.
+ */
+struct rpi_firmware *devm_rpi_firmware_get(struct device *dev,
+					   struct device_node *firmware_node)
+{
+	struct rpi_firmware *fw;
+
+	fw = rpi_firmware_get(firmware_node);
+	if (!fw)
+		return NULL;
+
+	if (devm_add_action_or_reset(dev, devm_rpi_firmware_put, fw))
+		return NULL;
+
+	return fw;
+}
+EXPORT_SYMBOL_GPL(devm_rpi_firmware_get);
+
 static const struct of_device_id rpi_firmware_of_match[] = {
 	{ .compatible = "raspberrypi,bcm2835-firmware", },
 	{},
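The new devm_rpi_firmware_get() ties the put to the consumer device's lifetime via devm_add_action_or_reset(), so an unbound driver can never leak the firmware reference. Below is a userspace model of that mechanism with invented names; the real devres machinery lives in drivers/base/devres.c and behaves the same way in outline:

#include <stdio.h>
#include <stdlib.h>

struct action { void (*fn)(void *); void *data; struct action *next; };
struct owner { struct action *actions; };

static int add_action_or_reset(struct owner *o, void (*fn)(void *), void *data)
{
	struct action *a = malloc(sizeof(*a));

	if (!a) {		/* on failure, run the action immediately, */
		fn(data);	/* mirroring devm_add_action_or_reset() */
		return -1;
	}
	a->fn = fn;
	a->data = data;
	a->next = o->actions;
	o->actions = a;
	return 0;
}

static void release_owner(struct owner *o)
{
	while (o->actions) {	/* LIFO, like devres teardown on unbind */
		struct action *a = o->actions;

		o->actions = a->next;
		a->fn(a->data);
		free(a);
	}
}

static void put_firmware(void *data)
{
	printf("rpi_firmware_put(%s)\n", (char *)data);
}

int main(void)
{
	struct owner dev = { 0 };

	add_action_or_reset(&dev, put_firmware, "fw");
	release_owner(&dev);	/* prints: rpi_firmware_put(fw) */
	return 0;
}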
@@ -2924,6 +2924,8 @@ static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
 	    (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
 		return -EINVAL;
 
+	trace_icm_send_rej(&cm_id_priv->id, reason);
+
 	switch (state) {
 	case IB_CM_REQ_SENT:
 	case IB_CM_MRA_REQ_RCVD:
@@ -2954,7 +2956,6 @@ static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
 		return -EINVAL;
 	}
 
-	trace_icm_send_rej(&cm_id_priv->id, reason);
 	ret = ib_post_send_mad(msg, NULL);
 	if (ret) {
 		cm_free_msg(msg);
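These two hunks only move the tracepoint: it now fires before the switch statement that resets the connection state, so the emitted event still reflects the state being rejected. A toy standalone model of why the ordering matters (values invented):

#include <stdio.h>

static int cm_state = 2;	/* models the cm_id state the trace reports */

static void trace_send_rej(void)
{
	printf("icm_send_rej: state=%d\n", cm_state);
}

int main(void)
{
	trace_send_rej();	/* new position: state is still meaningful */
	cm_state = 0;		/* models the reset done inside the switch */
	/* the trace at its old position would have reported state=0 here */
	return 0;
}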
@@ -15,6 +15,7 @@
 #include "verbs.h"
 #include "trace_ibhdrs.h"
 #include "ipoib.h"
+#include "trace_tx.h"
 
 /* Add a convenience helper */
 #define CIRC_ADD(val, add, size) (((val) + (add)) & ((size) - 1))
@@ -63,12 +64,14 @@ static u64 hfi1_ipoib_used(struct hfi1_ipoib_txq *txq)
 
 static void hfi1_ipoib_stop_txq(struct hfi1_ipoib_txq *txq)
 {
+	trace_hfi1_txq_stop(txq);
 	if (atomic_inc_return(&txq->stops) == 1)
 		netif_stop_subqueue(txq->priv->netdev, txq->q_idx);
 }
 
 static void hfi1_ipoib_wake_txq(struct hfi1_ipoib_txq *txq)
 {
+	trace_hfi1_txq_wake(txq);
 	if (atomic_dec_and_test(&txq->stops))
 		netif_wake_subqueue(txq->priv->netdev, txq->q_idx);
 }
@@ -89,8 +92,10 @@ static void hfi1_ipoib_check_queue_depth(struct hfi1_ipoib_txq *txq)
 {
 	++txq->sent_txreqs;
 	if (hfi1_ipoib_used(txq) >= hfi1_ipoib_ring_hwat(txq) &&
-	    !atomic_xchg(&txq->ring_full, 1))
+	    !atomic_xchg(&txq->ring_full, 1)) {
+		trace_hfi1_txq_full(txq);
 		hfi1_ipoib_stop_txq(txq);
+	}
 }
 
 static void hfi1_ipoib_check_queue_stopped(struct hfi1_ipoib_txq *txq)
@@ -112,8 +117,10 @@ static void hfi1_ipoib_check_queue_stopped(struct hfi1_ipoib_txq *txq)
 	 * to protect against ring overflow.
 	 */
 	if (hfi1_ipoib_used(txq) < hfi1_ipoib_ring_lwat(txq) &&
-	    atomic_xchg(&txq->ring_full, 0))
+	    atomic_xchg(&txq->ring_full, 0)) {
+		trace_hfi1_txq_xmit_unstopped(txq);
 		hfi1_ipoib_wake_txq(txq);
+	}
 }
 
 static void hfi1_ipoib_free_tx(struct ipoib_txreq *tx, int budget)
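Both hunks hang the new tracepoints off an existing atomic_xchg() gate: only the thread that actually flips ring_full sees the old value come back, so the stop/wake actions and their traces fire exactly once per transition even under concurrency. A standalone C11 model of that gate (names invented):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int ring_full;

static void check_queue_depth(unsigned int used, unsigned int high_watermark)
{
	if (used >= high_watermark &&
	    !atomic_exchange(&ring_full, 1)) {	/* first to set wins */
		printf("trace: txq_full\n");
		printf("stop queue\n");
	}
}

static void check_queue_stopped(unsigned int used, unsigned int low_watermark)
{
	if (used < low_watermark &&
	    atomic_exchange(&ring_full, 0)) {	/* only if it was set */
		printf("trace: txq_xmit_unstopped\n");
		printf("wake queue\n");
	}
}

int main(void)
{
	check_queue_depth(10, 8);	/* stops and traces once */
	check_queue_depth(11, 8);	/* no double stop, no double trace */
	check_queue_stopped(2, 4);	/* wakes and traces once */
	return 0;
}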
@@ -244,6 +251,7 @@ static int hfi1_ipoib_build_ulp_payload(struct ipoib_txreq *tx,
 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
 		ret = sdma_txadd_page(dd,
+				      NULL,
 				      txreq,
 				      skb_frag_page(frag),
 				      frag->bv_offset,
@@ -405,6 +413,7 @@ static struct ipoib_txreq *hfi1_ipoib_send_dma_common(struct net_device *dev,
 			sdma_select_engine_sc(priv->dd,
 					      txp->flow.tx_queue,
 					      txp->flow.sc5);
+		trace_hfi1_flow_switch(txp->txq);
 	}
 
 	return tx;
@@ -525,6 +534,7 @@ static int hfi1_ipoib_send_dma_list(struct net_device *dev,
 	if (txq->flow.as_int != txp->flow.as_int) {
 		int ret;
 
+		trace_hfi1_flow_flush(txq);
 		ret = hfi1_ipoib_flush_tx_list(dev, txq);
 		if (unlikely(ret)) {
 			if (ret == -EBUSY)
@@ -635,8 +645,10 @@ static int hfi1_ipoib_sdma_sleep(struct sdma_engine *sde,
 			/* came from non-list submit */
 			list_add_tail(&txreq->list, &txq->tx_list);
 		if (list_empty(&txq->wait.list)) {
-			if (!atomic_xchg(&txq->no_desc, 1))
+			if (!atomic_xchg(&txq->no_desc, 1)) {
+				trace_hfi1_txq_queued(txq);
 				hfi1_ipoib_stop_txq(txq);
+			}
 			iowait_queue(pkts_sent, wait->iow, &sde->dmawait);
 		}
@@ -659,6 +671,7 @@ static void hfi1_ipoib_sdma_wakeup(struct iowait *wait, int reason)
 	struct hfi1_ipoib_txq *txq =
 		container_of(wait, struct hfi1_ipoib_txq, wait);
 
+	trace_hfi1_txq_wakeup(txq);
 	if (likely(txq->priv->netdev->reg_state == NETREG_REGISTERED))
 		iowait_schedule(wait, system_highpri_wq, WORK_CPU_UNBOUND);
 }
@@ -167,11 +167,11 @@ int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
 	spin_lock_irqsave(&handler->lock, flags);
 	node = __mmu_rb_search(handler, mnode->addr, mnode->len);
 	if (node) {
-		ret = -EINVAL;
+		ret = -EEXIST;
 		goto unlock;
 	}
 	__mmu_int_rb_insert(mnode, &handler->root);
-	list_add(&mnode->list, &handler->lru_list);
+	list_add_tail(&mnode->list, &handler->lru_list);
 
 	ret = handler->ops->insert(handler->ops_arg, mnode);
 	if (ret) {
@@ -184,6 +184,19 @@ unlock:
 	return ret;
 }
 
+/* Caller must hold handler lock */
+struct mmu_rb_node *hfi1_mmu_rb_get_first(struct mmu_rb_handler *handler,
+					  unsigned long addr, unsigned long len)
+{
+	struct mmu_rb_node *node;
+
+	trace_hfi1_mmu_rb_search(addr, len);
+	node = __mmu_int_rb_iter_first(&handler->root, addr, (addr + len) - 1);
+	if (node)
+		list_move_tail(&node->list, &handler->lru_list);
+	return node;
+}
+
 /* Caller must hold handler lock */
 static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler,
 					   unsigned long addr,
@@ -208,32 +221,6 @@ static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler,
 	return node;
 }
 
-bool hfi1_mmu_rb_remove_unless_exact(struct mmu_rb_handler *handler,
-				     unsigned long addr, unsigned long len,
-				     struct mmu_rb_node **rb_node)
-{
-	struct mmu_rb_node *node;
-	unsigned long flags;
-	bool ret = false;
-
-	if (current->mm != handler->mn.mm)
-		return ret;
-
-	spin_lock_irqsave(&handler->lock, flags);
-	node = __mmu_rb_search(handler, addr, len);
-	if (node) {
-		if (node->addr == addr && node->len == len)
-			goto unlock;
-		__mmu_int_rb_remove(node, &handler->root);
-		list_del(&node->list); /* remove from LRU list */
-		ret = true;
-	}
-unlock:
-	spin_unlock_irqrestore(&handler->lock, flags);
-	*rb_node = node;
-	return ret;
-}
-
 void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
 {
 	struct mmu_rb_node *rbnode, *ptr;
@@ -247,8 +234,7 @@ void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
 	INIT_LIST_HEAD(&del_list);
 
 	spin_lock_irqsave(&handler->lock, flags);
-	list_for_each_entry_safe_reverse(rbnode, ptr, &handler->lru_list,
-					 list) {
+	list_for_each_entry_safe(rbnode, ptr, &handler->lru_list, list) {
 		if (handler->ops->evict(handler->ops_arg, rbnode, evict_arg,
 					&stop)) {
 			__mmu_int_rb_remove(rbnode, &handler->root);
@@ -260,36 +246,11 @@ void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
 	}
 	spin_unlock_irqrestore(&handler->lock, flags);
 
-	while (!list_empty(&del_list)) {
-		rbnode = list_first_entry(&del_list, struct mmu_rb_node, list);
-		list_del(&rbnode->list);
+	list_for_each_entry_safe(rbnode, ptr, &del_list, list) {
 		handler->ops->remove(handler->ops_arg, rbnode);
 	}
 }
 
-/*
- * It is up to the caller to ensure that this function does not race with the
- * mmu invalidate notifier which may be calling the users remove callback on
- * 'node'.
- */
-void hfi1_mmu_rb_remove(struct mmu_rb_handler *handler,
-			struct mmu_rb_node *node)
-{
-	unsigned long flags;
-
-	if (current->mm != handler->mn.mm)
-		return;
-
-	/* Validity of handler and node pointers has been checked by caller. */
-	trace_hfi1_mmu_rb_remove(node->addr, node->len);
-	spin_lock_irqsave(&handler->lock, flags);
-	__mmu_int_rb_remove(node, &handler->root);
-	list_del(&node->list); /* remove from LRU list */
-	spin_unlock_irqrestore(&handler->lock, flags);
-
-	handler->ops->remove(handler->ops_arg, node);
-}
-
 static int mmu_notifier_range_start(struct mmu_notifier *mn,
 				    const struct mmu_notifier_range *range)
 {
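The insert/search hunks switch the rb-tree cache to a strict LRU discipline: new nodes go to the tail with list_add_tail(), the new hfi1_mmu_rb_get_first() promotes a hit to the tail with list_move_tail(), and eviction now walks forward from the head, the coldest end. Here is a standalone model of that list discipline, mirroring the kernel's circular struct list_head (all names invented):

#include <stdio.h>

struct lnode { int id; struct lnode *prev, *next; };
struct list { struct lnode head; };	/* circular, like struct list_head */

static void list_init(struct list *l)
{
	l->head.prev = l->head.next = &l->head;
}

static void list_del(struct lnode *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

static void list_add_tail(struct list *l, struct lnode *n)
{
	n->prev = l->head.prev;
	n->next = &l->head;
	l->head.prev->next = n;
	l->head.prev = n;
}

static void list_move_tail(struct list *l, struct lnode *n)
{
	list_del(n);		/* what hfi1_mmu_rb_get_first() does */
	list_add_tail(l, n);	/* on every lookup hit */
}

int main(void)
{
	struct list lru;
	struct lnode a = {1}, b = {2}, c = {3};

	list_init(&lru);
	list_add_tail(&lru, &a);
	list_add_tail(&lru, &b);
	list_add_tail(&lru, &c);
	list_move_tail(&lru, &a);	/* a becomes most recently used */
	printf("evict from the cold end first: %d\n", lru.head.next->id); /* 2 */
	return 0;
}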
@@ -93,10 +93,8 @@ void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler);
 int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
 		       struct mmu_rb_node *mnode);
 void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg);
-void hfi1_mmu_rb_remove(struct mmu_rb_handler *handler,
-			struct mmu_rb_node *mnode);
-bool hfi1_mmu_rb_remove_unless_exact(struct mmu_rb_handler *handler,
-				     unsigned long addr, unsigned long len,
-				     struct mmu_rb_node **rb_node);
+struct mmu_rb_node *hfi1_mmu_rb_get_first(struct mmu_rb_handler *handler,
+					  unsigned long addr,
+					  unsigned long len);
 
 #endif /* _HFI1_MMU_RB_H */
@@ -1635,22 +1635,7 @@ static inline void sdma_unmap_desc(
 	struct hfi1_devdata *dd,
 	struct sdma_desc *descp)
 {
-	switch (sdma_mapping_type(descp)) {
-	case SDMA_MAP_SINGLE:
-		dma_unmap_single(
-			&dd->pcidev->dev,
-			sdma_mapping_addr(descp),
-			sdma_mapping_len(descp),
-			DMA_TO_DEVICE);
-		break;
-	case SDMA_MAP_PAGE:
-		dma_unmap_page(
-			&dd->pcidev->dev,
-			sdma_mapping_addr(descp),
-			sdma_mapping_len(descp),
-			DMA_TO_DEVICE);
-		break;
-	}
+	system_descriptor_complete(dd, descp);
 }
 
 /*
@@ -3170,7 +3155,7 @@ int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
 
 		/* Add descriptor for coalesce buffer */
 		tx->desc_limit = MAX_DESC;
-		return _sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, tx,
+		return _sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, NULL, tx,
 					 addr, tx->tlen);
 	}
@@ -3210,10 +3195,12 @@ int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
 			return rval;
 		}
 	}
+
 	/* finish the one just added */
 	make_tx_sdma_desc(
 		tx,
 		SDMA_MAP_NONE,
+		NULL,
 		dd->sdma_pad_phys,
 		sizeof(u32) - (tx->packet_len & (sizeof(u32) - 1)));
 	_sdma_close_tx(dd, tx);
@@ -635,6 +635,7 @@ static inline dma_addr_t sdma_mapping_addr(struct sdma_desc *d)
 static inline void make_tx_sdma_desc(
 	struct sdma_txreq *tx,
 	int type,
+	void *pinning_ctx,
 	dma_addr_t addr,
 	size_t len)
 {
@@ -653,6 +654,7 @@ static inline void make_tx_sdma_desc(
 			<< SDMA_DESC0_PHY_ADDR_SHIFT) |
 			(((u64)len & SDMA_DESC0_BYTE_COUNT_MASK)
 			<< SDMA_DESC0_BYTE_COUNT_SHIFT);
+	desc->pinning_ctx = pinning_ctx;
 }
 
 /* helper to extend txreq */
@@ -685,6 +687,7 @@ static inline void _sdma_close_tx(struct hfi1_devdata *dd,
 static inline int _sdma_txadd_daddr(
 	struct hfi1_devdata *dd,
 	int type,
+	void *pinning_ctx,
 	struct sdma_txreq *tx,
 	dma_addr_t addr,
 	u16 len)
@@ -694,6 +697,7 @@ static inline int _sdma_txadd_daddr(
 	make_tx_sdma_desc(
 		tx,
 		type,
+		pinning_ctx,
 		addr, len);
 	WARN_ON(len > tx->tlen);
 	tx->tlen -= len;
@@ -714,6 +718,7 @@ static inline int _sdma_txadd_daddr(
 /**
  * sdma_txadd_page() - add a page to the sdma_txreq
  * @dd: the device to use for mapping
+ * @pinning_ctx: context to be released at descriptor retirement
  * @tx: tx request to which the page is added
  * @page: page to map
  * @offset: offset within the page
@@ -729,6 +734,7 @@ static inline int _sdma_txadd_daddr(
  */
 static inline int sdma_txadd_page(
 	struct hfi1_devdata *dd,
+	void *pinning_ctx,
 	struct sdma_txreq *tx,
 	struct page *page,
 	unsigned long offset,
@@ -756,8 +762,7 @@ static inline int sdma_txadd_page(
 		return -ENOSPC;
 	}
 
-	return _sdma_txadd_daddr(
-			dd, SDMA_MAP_PAGE, tx, addr, len);
+	return _sdma_txadd_daddr(dd, SDMA_MAP_PAGE, pinning_ctx, tx, addr, len);
 }
 
 /**
@@ -791,7 +796,8 @@ static inline int sdma_txadd_daddr(
 		return rval;
 	}
 
-	return _sdma_txadd_daddr(dd, SDMA_MAP_NONE, tx, addr, len);
+	return _sdma_txadd_daddr(dd, SDMA_MAP_NONE, NULL, tx,
+				 addr, len);
 }
 
 /**
@@ -837,8 +843,7 @@ static inline int sdma_txadd_kvaddr(
 		return -ENOSPC;
 	}
 
-	return _sdma_txadd_daddr(
-			dd, SDMA_MAP_SINGLE, tx, addr, len);
+	return _sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, NULL, tx, addr, len);
 }
 
 struct iowait_work;
@@ -1090,4 +1095,5 @@ extern uint mod_num_sdma;
 
 void sdma_update_lmc(struct hfi1_devdata *dd, u64 mask, u32 lid);
 
+void system_descriptor_complete(struct hfi1_devdata *dd, struct sdma_desc *descp);
 #endif
@@ -61,6 +61,7 @@
 struct sdma_desc {
 	/* private:  don't use directly */
 	u64 qw[2];
	void *pinning_ctx;
 };
 
 /**
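The thread running through the sdma hunks is a new opaque pinning_ctx pointer carried in each descriptor: callers that pin user pages pass a context through sdma_txadd_page(), and system_descriptor_complete() hands it back when the descriptor retires, so unpinning happens exactly when the hardware is done. A standalone model of that plumbing (all names invented):

#include <stdio.h>
#include <stddef.h>

struct desc { const char *buf; void *pinning_ctx; };

static void make_desc(struct desc *d, const char *buf, void *pinning_ctx)
{
	d->buf = buf;
	d->pinning_ctx = pinning_ctx;	/* mirrors make_tx_sdma_desc() */
}

/* models system_descriptor_complete(): release the pin, if any */
static void descriptor_complete(struct desc *d)
{
	if (d->pinning_ctx)
		printf("unpin %s\n", (char *)d->pinning_ctx);
}

int main(void)
{
	struct desc kernel_buf, user_page;

	make_desc(&kernel_buf, "pad", NULL);		/* kvaddr/daddr: no pin */
	make_desc(&user_page, "frag", "user-pin");	/* txadd_page: pinned */
	descriptor_complete(&kernel_buf);
	descriptor_complete(&user_page);		/* prints: unpin user-pin */
	return 0;
}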
@@ -78,10 +78,6 @@ DEFINE_EVENT(hfi1_mmu_rb_template, hfi1_mmu_rb_search,
 	     TP_PROTO(unsigned long addr, unsigned long len),
 	     TP_ARGS(addr, len));
 
-DEFINE_EVENT(hfi1_mmu_rb_template, hfi1_mmu_rb_remove,
-	     TP_PROTO(unsigned long addr, unsigned long len),
-	     TP_ARGS(addr, len));
-
 DEFINE_EVENT(hfi1_mmu_rb_template, hfi1_mmu_mem_invalidate,
 	     TP_PROTO(unsigned long addr, unsigned long len),
 	     TP_ARGS(addr, len));
@@ -53,6 +53,8 @@
 #include "hfi.h"
 #include "mad.h"
 #include "sdma.h"
+#include "ipoib.h"
+#include "user_sdma.h"
 
 const char *parse_sdma_flags(struct trace_seq *p, u64 desc0, u64 desc1);
@@ -653,6 +655,80 @@ TRACE_EVENT(hfi1_sdma_user_completion,
 		      __entry->code)
 );
 
+TRACE_EVENT(hfi1_usdma_defer,
+	    TP_PROTO(struct hfi1_user_sdma_pkt_q *pq,
+		     struct sdma_engine *sde,
+		     struct iowait *wait),
+	    TP_ARGS(pq, sde, wait),
+	    TP_STRUCT__entry(DD_DEV_ENTRY(pq->dd)
+			     __field(struct hfi1_user_sdma_pkt_q *, pq)
+			     __field(struct sdma_engine *, sde)
+			     __field(struct iowait *, wait)
+			     __field(int, engine)
+			     __field(int, empty)
+			     ),
+	    TP_fast_assign(DD_DEV_ASSIGN(pq->dd);
+			   __entry->pq = pq;
+			   __entry->sde = sde;
+			   __entry->wait = wait;
+			   __entry->engine = sde->this_idx;
+			   __entry->empty = list_empty(&__entry->wait->list);
+			   ),
+	    TP_printk("[%s] pq %llx sde %llx wait %llx engine %d empty %d",
+		      __get_str(dev),
+		      (unsigned long long)__entry->pq,
+		      (unsigned long long)__entry->sde,
+		      (unsigned long long)__entry->wait,
+		      __entry->engine,
+		      __entry->empty
+		      )
+);
+
+TRACE_EVENT(hfi1_usdma_activate,
+	    TP_PROTO(struct hfi1_user_sdma_pkt_q *pq,
+		     struct iowait *wait,
+		     int reason),
+	    TP_ARGS(pq, wait, reason),
+	    TP_STRUCT__entry(DD_DEV_ENTRY(pq->dd)
+			     __field(struct hfi1_user_sdma_pkt_q *, pq)
+			     __field(struct iowait *, wait)
+			     __field(int, reason)
+			     ),
+	    TP_fast_assign(DD_DEV_ASSIGN(pq->dd);
+			   __entry->pq = pq;
+			   __entry->wait = wait;
+			   __entry->reason = reason;
+			   ),
+	    TP_printk("[%s] pq %llx wait %llx reason %d",
+		      __get_str(dev),
+		      (unsigned long long)__entry->pq,
+		      (unsigned long long)__entry->wait,
+		      __entry->reason
+		      )
+);
+
+TRACE_EVENT(hfi1_usdma_we,
+	    TP_PROTO(struct hfi1_user_sdma_pkt_q *pq,
+		     int we_ret),
+	    TP_ARGS(pq, we_ret),
+	    TP_STRUCT__entry(DD_DEV_ENTRY(pq->dd)
+			     __field(struct hfi1_user_sdma_pkt_q *, pq)
+			     __field(int, state)
+			     __field(int, we_ret)
+			     ),
+	    TP_fast_assign(DD_DEV_ASSIGN(pq->dd);
+			   __entry->pq = pq;
+			   __entry->state = pq->state;
+			   __entry->we_ret = we_ret;
+			   ),
+	    TP_printk("[%s] pq %llx state %d we_ret %d",
+		      __get_str(dev),
+		      (unsigned long long)__entry->pq,
+		      __entry->state,
+		      __entry->we_ret
+		      )
+);
+
 const char *print_u32_array(struct trace_seq *, u32 *, int);
 #define __print_u32_hex(arr, len) print_u32_array(p, arr, len)
 
@@ -858,6 +934,109 @@ DEFINE_EVENT(
 	TP_ARGS(qp, flag)
 );
 
+DECLARE_EVENT_CLASS(/* AIP  */
+	hfi1_ipoib_txq_template,
+	TP_PROTO(struct hfi1_ipoib_txq *txq),
+	TP_ARGS(txq),
+	TP_STRUCT__entry(/* entry */
+		DD_DEV_ENTRY(txq->priv->dd)
+		__field(struct hfi1_ipoib_txq *, txq)
+		__field(struct sdma_engine *, sde)
+		__field(ulong, head)
+		__field(ulong, tail)
+		__field(uint, used)
+		__field(uint, flow)
+		__field(int, stops)
+		__field(int, no_desc)
+		__field(u8, idx)
+		__field(u8, stopped)
+	),
+	TP_fast_assign(/* assign */
+		DD_DEV_ASSIGN(txq->priv->dd)
+		__entry->txq = txq;
+		__entry->sde = txq->sde;
+		__entry->head = txq->tx_ring.head;
+		__entry->tail = txq->tx_ring.tail;
+		__entry->idx = txq->q_idx;
+		__entry->used =
+			txq->sent_txreqs -
+			atomic64_read(&txq->complete_txreqs);
+		__entry->flow = txq->flow.as_int;
+		__entry->stops = atomic_read(&txq->stops);
+		__entry->no_desc = atomic_read(&txq->no_desc);
+		__entry->stopped =
+			__netif_subqueue_stopped(txq->priv->netdev, txq->q_idx);
+	),
+	TP_printk(/* print */
+		"[%s] txq %llx idx %u sde %llx head %lx tail %lx flow %x used %u stops %d no_desc %d stopped %u",
+		__get_str(dev),
+		(unsigned long long)__entry->txq,
+		__entry->idx,
+		(unsigned long long)__entry->sde,
+		__entry->head,
+		__entry->tail,
+		__entry->flow,
+		__entry->used,
+		__entry->stops,
+		__entry->no_desc,
+		__entry->stopped
+	)
+);
+
+DEFINE_EVENT(/* queue stop */
+	hfi1_ipoib_txq_template, hfi1_txq_stop,
+	TP_PROTO(struct hfi1_ipoib_txq *txq),
+	TP_ARGS(txq)
+);
+
+DEFINE_EVENT(/* queue wake */
+	hfi1_ipoib_txq_template, hfi1_txq_wake,
+	TP_PROTO(struct hfi1_ipoib_txq *txq),
+	TP_ARGS(txq)
+);
+
+DEFINE_EVENT(/* flow flush */
+	hfi1_ipoib_txq_template, hfi1_flow_flush,
+	TP_PROTO(struct hfi1_ipoib_txq *txq),
+	TP_ARGS(txq)
+);
+
+DEFINE_EVENT(/* flow switch */
+	hfi1_ipoib_txq_template, hfi1_flow_switch,
+	TP_PROTO(struct hfi1_ipoib_txq *txq),
+	TP_ARGS(txq)
+);
+
+DEFINE_EVENT(/* wakeup */
+	hfi1_ipoib_txq_template, hfi1_txq_wakeup,
+	TP_PROTO(struct hfi1_ipoib_txq *txq),
+	TP_ARGS(txq)
+);
+
+DEFINE_EVENT(/* full */
+	hfi1_ipoib_txq_template, hfi1_txq_full,
+	TP_PROTO(struct hfi1_ipoib_txq *txq),
+	TP_ARGS(txq)
+);
+
+DEFINE_EVENT(/* queued */
+	hfi1_ipoib_txq_template, hfi1_txq_queued,
+	TP_PROTO(struct hfi1_ipoib_txq *txq),
+	TP_ARGS(txq)
+);
+
+DEFINE_EVENT(/* xmit_stopped */
+	hfi1_ipoib_txq_template, hfi1_txq_xmit_stopped,
+	TP_PROTO(struct hfi1_ipoib_txq *txq),
+	TP_ARGS(txq)
+);
+
+DEFINE_EVENT(/* xmit_unstopped */
+	hfi1_ipoib_txq_template, hfi1_txq_xmit_unstopped,
+	TP_PROTO(struct hfi1_ipoib_txq *txq),
+	TP_ARGS(txq)
+);
+
 #endif /* __HFI1_TRACE_TX_H */
 
 #undef TRACE_INCLUDE_PATH
File diff suppressed because it is too large.
@@ -53,6 +53,7 @@
 #include "common.h"
 #include "iowait.h"
 #include "user_exp_rcv.h"
+#include "mmu_rb.h"
 
 /* The maximum number of Data io vectors per message/request */
 #define MAX_VECTORS_PER_REQ 8
@@ -152,16 +153,11 @@ struct sdma_mmu_node {
 struct user_sdma_iovec {
 	struct list_head list;
 	struct iovec iov;
-	/* number of pages in this vector */
-	unsigned int npages;
-	/* array of pinned pages for this vector */
-	struct page **pages;
 	/*
 	 * offset into the virtual address space of the vector at
 	 * which we last left off.
 	 */
 	u64 offset;
-	struct sdma_mmu_node *node;
 };
 
 /* evict operation argument */
@@ -820,8 +820,8 @@ static int build_verbs_tx_desc(
 
 	/* add icrc, lt byte, and padding to flit */
 	if (extra_bytes)
-		ret = sdma_txadd_daddr(sde->dd, &tx->txreq,
-				       sde->dd->sdma_pad_phys, extra_bytes);
+		ret = sdma_txadd_daddr(sde->dd, &tx->txreq, sde->dd->sdma_pad_phys,
+				       extra_bytes);
 
 bail_txadd:
 	return ret;
Some files were not shown because too many files have changed in this diff.