IB/hns: Add driver files for hns RoCE driver

These are the new source code files for the Hisilicon
RoCE driver for the ARM architecture.

Signed-off-by: Wei Hu <xavier.huwei@huawei.com>
Signed-off-by: Nenglong Zhao <zhaonenglong@hisilicon.com>
Signed-off-by: Lijun Ou <oulijun@huawei.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
Author: oulijun
Date: 2016-07-21 19:06:38 +08:00
Committed by: Doug Ledford
Parent: 69bafce807
Commit: 9a4435375c
18 changed files with 10324 additions and 0 deletions


@@ -0,0 +1,128 @@
/*
* Copyright (c) 2016 Hisilicon Limited.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/platform_device.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include "hns_roce_device.h"
#define HNS_ROCE_PORT_NUM_SHIFT 24
#define HNS_ROCE_VLAN_SL_BIT_MASK 7
#define HNS_ROCE_VLAN_SL_SHIFT 13
struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *ah_attr)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibpd->device);
struct device *dev = &hr_dev->pdev->dev;
struct ib_gid_attr gid_attr;
struct hns_roce_ah *ah;
u16 vlan_tag = 0xffff;
struct in6_addr in6;
union ib_gid sgid;
int ret;
ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
if (!ah)
return ERR_PTR(-ENOMEM);
/* Get mac address */
memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(ah_attr->grh.dgid.raw));
if (rdma_is_multicast_addr(&in6))
rdma_get_mcast_mac(&in6, ah->av.mac);
else
memcpy(ah->av.mac, ah_attr->dmac, sizeof(ah_attr->dmac));
/* Get source gid */
ret = ib_get_cached_gid(ibpd->device, ah_attr->port_num,
ah_attr->grh.sgid_index, &sgid, &gid_attr);
if (ret) {
dev_err(dev, "get sgid failed! ret = %d\n", ret);
kfree(ah);
return ERR_PTR(ret);
}
if (gid_attr.ndev) {
if (is_vlan_dev(gid_attr.ndev))
vlan_tag = vlan_dev_vlan_id(gid_attr.ndev);
dev_put(gid_attr.ndev);
}
if (vlan_tag < 0x1000)
vlan_tag |= (ah_attr->sl & HNS_ROCE_VLAN_SL_BIT_MASK) <<
HNS_ROCE_VLAN_SL_SHIFT;
ah->av.port_pd = cpu_to_be32(to_hr_pd(ibpd)->pdn | (ah_attr->port_num <<
HNS_ROCE_PORT_NUM_SHIFT));
ah->av.gid_index = ah_attr->grh.sgid_index;
ah->av.vlan = cpu_to_le16(vlan_tag);
dev_dbg(dev, "gid_index = 0x%x, vlan = 0x%x\n", ah->av.gid_index,
ah->av.vlan);
if (ah_attr->static_rate)
ah->av.stat_rate = IB_RATE_10_GBPS;
memcpy(ah->av.dgid, ah_attr->grh.dgid.raw, HNS_ROCE_GID_SIZE);
ah->av.sl_tclass_flowlabel = cpu_to_le32(ah_attr->sl <<
HNS_ROCE_SL_SHIFT);
return &ah->ibah;
}
int hns_roce_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
{
struct hns_roce_ah *ah = to_hr_ah(ibah);
memset(ah_attr, 0, sizeof(*ah_attr));
ah_attr->sl = le32_to_cpu(ah->av.sl_tclass_flowlabel) >>
HNS_ROCE_SL_SHIFT;
ah_attr->port_num = le32_to_cpu(ah->av.port_pd) >>
HNS_ROCE_PORT_NUM_SHIFT;
ah_attr->static_rate = ah->av.stat_rate;
ah_attr->ah_flags = IB_AH_GRH;
ah_attr->grh.traffic_class = le32_to_cpu(ah->av.sl_tclass_flowlabel) >>
HNS_ROCE_TCLASS_SHIFT;
ah_attr->grh.flow_label = le32_to_cpu(ah->av.sl_tclass_flowlabel) &
HNS_ROCE_FLOW_LABLE_MASK;
ah_attr->grh.hop_limit = ah->av.hop_limit;
ah_attr->grh.sgid_index = ah->av.gid_index;
memcpy(ah_attr->grh.dgid.raw, ah->av.dgid, HNS_ROCE_GID_SIZE);
return 0;
}
int hns_roce_destroy_ah(struct ib_ah *ah)
{
kfree(to_hr_ah(ah));
return 0;
}
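
A note on the packing used above: create_ah stores SL (and, when set, traffic class and flow label) in the single 32-bit av.sl_tclass_flowlabel word, and query_ah recovers the fields with the matching shifts and masks. The standalone sketch below models that round trip in plain C; the shift and mask values are illustrative assumptions, not the driver's HNS_ROCE_* constants.

#include <assert.h>
#include <stdint.h>

/* Assumed layout (placeholders, not the driver's values):
 * SL in bits 31:28, traffic class in bits 27:20, flow label in 19:0. */
#define SL_SHIFT	28
#define TCLASS_SHIFT	20
#define FLOW_MASK	0xfffff

static uint32_t pack(uint32_t sl, uint32_t tclass, uint32_t flow)
{
	/* One packed 32-bit word, like av.sl_tclass_flowlabel. */
	return (sl << SL_SHIFT) | (tclass << TCLASS_SHIFT) |
	       (flow & FLOW_MASK);
}

int main(void)
{
	uint32_t w = pack(3, 0x5, 0x12345);

	/* query_ah-style unpacking recovers each field. */
	assert((w >> SL_SHIFT) == 3);
	assert(((w >> TCLASS_SHIFT) & 0xff) == 0x5);
	assert((w & FLOW_MASK) == 0x12345);
	return 0;
}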


@@ -0,0 +1,257 @@
/*
* Copyright (c) 2016 Hisilicon Limited.
* Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/platform_device.h>
#include "hns_roce_device.h"
int hns_roce_bitmap_alloc(struct hns_roce_bitmap *bitmap, unsigned long *obj)
{
int ret = 0;
spin_lock(&bitmap->lock);
*obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->last);
if (*obj >= bitmap->max) {
bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
& bitmap->mask;
*obj = find_first_zero_bit(bitmap->table, bitmap->max);
}
if (*obj < bitmap->max) {
set_bit(*obj, bitmap->table);
bitmap->last = (*obj + 1);
if (bitmap->last == bitmap->max)
bitmap->last = 0;
*obj |= bitmap->top;
} else {
ret = -1;
}
spin_unlock(&bitmap->lock);
return ret;
}
void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj)
{
hns_roce_bitmap_free_range(bitmap, obj, 1);
}
int hns_roce_bitmap_alloc_range(struct hns_roce_bitmap *bitmap, int cnt,
int align, unsigned long *obj)
{
int ret = 0;
int i;
if (likely(cnt == 1 && align == 1))
return hns_roce_bitmap_alloc(bitmap, obj);
spin_lock(&bitmap->lock);
*obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max,
bitmap->last, cnt, align - 1);
if (*obj >= bitmap->max) {
bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
& bitmap->mask;
*obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max, 0,
cnt, align - 1);
}
if (*obj < bitmap->max) {
for (i = 0; i < cnt; i++)
set_bit(*obj + i, bitmap->table);
if (*obj == bitmap->last) {
bitmap->last = (*obj + cnt);
if (bitmap->last >= bitmap->max)
bitmap->last = 0;
}
*obj |= bitmap->top;
} else {
ret = -1;
}
spin_unlock(&bitmap->lock);
return ret;
}
void hns_roce_bitmap_free_range(struct hns_roce_bitmap *bitmap,
unsigned long obj, int cnt)
{
int i;
obj &= bitmap->max + bitmap->reserved_top - 1;
spin_lock(&bitmap->lock);
for (i = 0; i < cnt; i++)
clear_bit(obj + i, bitmap->table);
bitmap->last = min(bitmap->last, obj);
bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
& bitmap->mask;
spin_unlock(&bitmap->lock);
}
int hns_roce_bitmap_init(struct hns_roce_bitmap *bitmap, u32 num, u32 mask,
u32 reserved_bot, u32 reserved_top)
{
u32 i;
if (num != roundup_pow_of_two(num))
return -EINVAL;
bitmap->last = 0;
bitmap->top = 0;
bitmap->max = num - reserved_top;
bitmap->mask = mask;
bitmap->reserved_top = reserved_top;
spin_lock_init(&bitmap->lock);
bitmap->table = kcalloc(BITS_TO_LONGS(bitmap->max), sizeof(long),
GFP_KERNEL);
if (!bitmap->table)
return -ENOMEM;
for (i = 0; i < reserved_bot; ++i)
set_bit(i, bitmap->table);
return 0;
}
void hns_roce_bitmap_cleanup(struct hns_roce_bitmap *bitmap)
{
kfree(bitmap->table);
}
void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size,
struct hns_roce_buf *buf)
{
int i;
struct device *dev = &hr_dev->pdev->dev;
u32 bits_per_long = BITS_PER_LONG;
if (buf->nbufs == 1) {
dma_free_coherent(dev, size, buf->direct.buf, buf->direct.map);
} else {
if (bits_per_long == 64)
vunmap(buf->direct.buf);
for (i = 0; i < buf->nbufs; ++i)
if (buf->page_list[i].buf)
dma_free_coherent(&hr_dev->pdev->dev, PAGE_SIZE,
buf->page_list[i].buf,
buf->page_list[i].map);
kfree(buf->page_list);
}
}
int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
struct hns_roce_buf *buf)
{
int i = 0;
dma_addr_t t;
struct page **pages;
struct device *dev = &hr_dev->pdev->dev;
u32 bits_per_long = BITS_PER_LONG;
/* SQ/RQ buf is less than one page, SQ + RQ = 8K */
if (size <= max_direct) {
buf->nbufs = 1;
/* npages is calculated from the page size */
buf->npages = 1 << get_order(size);
buf->page_shift = PAGE_SHIFT;
/* MTT PA must be recorded in 4k alignment, t is 4k aligned */
buf->direct.buf = dma_alloc_coherent(dev, size, &t, GFP_KERNEL);
if (!buf->direct.buf)
return -ENOMEM;
buf->direct.map = t;
while (t & ((1 << buf->page_shift) - 1)) {
--buf->page_shift;
buf->npages *= 2;
}
memset(buf->direct.buf, 0, size);
} else {
buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE;
buf->npages = buf->nbufs;
buf->page_shift = PAGE_SHIFT;
buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list),
GFP_KERNEL);
if (!buf->page_list)
return -ENOMEM;
for (i = 0; i < buf->nbufs; ++i) {
buf->page_list[i].buf = dma_alloc_coherent(dev,
PAGE_SIZE, &t,
GFP_KERNEL);
if (!buf->page_list[i].buf)
goto err_free;
buf->page_list[i].map = t;
memset(buf->page_list[i].buf, 0, PAGE_SIZE);
}
if (bits_per_long == 64) {
pages = kmalloc_array(buf->nbufs, sizeof(*pages),
GFP_KERNEL);
if (!pages)
goto err_free;
for (i = 0; i < buf->nbufs; ++i)
pages[i] = virt_to_page(buf->page_list[i].buf);
buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP,
PAGE_KERNEL);
kfree(pages);
if (!buf->direct.buf)
goto err_free;
}
}
return 0;
err_free:
hns_roce_buf_free(hr_dev, size, buf);
return -ENOMEM;
}
void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev)
{
hns_roce_cleanup_qp_table(hr_dev);
hns_roce_cleanup_cq_table(hr_dev);
hns_roce_cleanup_mr_table(hr_dev);
hns_roce_cleanup_pd_table(hr_dev);
hns_roce_cleanup_uar_table(hr_dev);
}
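
The allocator above searches from bitmap->last toward the end and, on a miss, wraps around to the front while stepping the top/mask generation counter so that reused indices come back with a different high part. The standalone model below sketches just the wraparound search in plain C (the generation counter is omitted and the kernel bitmap helpers are replaced by a byte array; this is a toy, not driver code).

#include <stdio.h>

#define MAX_OBJ 8

static unsigned char table[MAX_OBJ];	/* 1 = in use */
static unsigned int last;		/* next search position */

/* Return a free index in [0, MAX_OBJ), or -1 when full. */
static int toy_alloc(void)
{
	unsigned int i, idx;

	for (i = 0; i < MAX_OBJ; i++) {
		idx = (last + i) % MAX_OBJ;	/* wrap, like the retry from bit 0 */
		if (!table[idx]) {
			table[idx] = 1;
			last = (idx + 1) % MAX_OBJ;
			return idx;
		}
	}
	return -1;
}

static void toy_free(int idx)
{
	table[idx] = 0;
	if ((unsigned int)idx < last)
		last = idx;	/* mirrors bitmap->last = min(last, obj) */
}

int main(void)
{
	int a = toy_alloc(), b = toy_alloc();

	toy_free(a);
	printf("a=%d b=%d next=%d\n", a, b, toy_alloc());
	return 0;
}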


@@ -0,0 +1,368 @@
/*
* Copyright (c) 2016 Hisilicon Limited.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/dmapool.h>
#include <linux/platform_device.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#define CMD_POLL_TOKEN 0xffff
#define CMD_MAX_NUM 32
#define STATUS_MASK 0xff
#define CMD_TOKEN_MASK 0x1f
#define GO_BIT_TIMEOUT_MSECS 10000
enum {
HCR_TOKEN_OFFSET = 0x14,
HCR_STATUS_OFFSET = 0x18,
HCR_GO_BIT = 15,
};
static int cmd_pending(struct hns_roce_dev *hr_dev)
{
u32 status = readl(hr_dev->cmd.hcr + HCR_TOKEN_OFFSET);
return (!!(status & (1 << HCR_GO_BIT)));
}
/* this function should be serialized with "hcr_mutex" */
static int __hns_roce_cmd_mbox_post_hw(struct hns_roce_dev *hr_dev,
u64 in_param, u64 out_param,
u32 in_modifier, u8 op_modifier, u16 op,
u16 token, int event)
{
struct hns_roce_cmdq *cmd = &hr_dev->cmd;
struct device *dev = &hr_dev->pdev->dev;
u32 __iomem *hcr = (u32 *)cmd->hcr;
int ret = -EAGAIN;
unsigned long end;
u32 val = 0;
end = msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS) + jiffies;
while (cmd_pending(hr_dev)) {
if (time_after(jiffies, end)) {
dev_dbg(dev, "jiffies=%d end=%d\n", (int)jiffies,
(int)end);
goto out;
}
cond_resched();
}
roce_set_field(val, ROCEE_MB6_ROCEE_MB_CMD_M, ROCEE_MB6_ROCEE_MB_CMD_S,
op);
roce_set_field(val, ROCEE_MB6_ROCEE_MB_CMD_MDF_M,
ROCEE_MB6_ROCEE_MB_CMD_MDF_S, op_modifier);
roce_set_bit(val, ROCEE_MB6_ROCEE_MB_EVENT_S, event);
roce_set_bit(val, ROCEE_MB6_ROCEE_MB_HW_RUN_S, 1);
roce_set_field(val, ROCEE_MB6_ROCEE_MB_TOKEN_M,
ROCEE_MB6_ROCEE_MB_TOKEN_S, token);
__raw_writeq(cpu_to_le64(in_param), hcr + 0);
__raw_writeq(cpu_to_le64(out_param), hcr + 2);
__raw_writel(cpu_to_le32(in_modifier), hcr + 4);
/* Memory barrier */
wmb();
__raw_writel(cpu_to_le32(val), hcr + 5);
mmiowb();
ret = 0;
out:
return ret;
}
static int hns_roce_cmd_mbox_post_hw(struct hns_roce_dev *hr_dev, u64 in_param,
u64 out_param, u32 in_modifier,
u8 op_modifier, u16 op, u16 token,
int event)
{
struct hns_roce_cmdq *cmd = &hr_dev->cmd;
int ret = -EAGAIN;
mutex_lock(&cmd->hcr_mutex);
ret = __hns_roce_cmd_mbox_post_hw(hr_dev, in_param, out_param,
in_modifier, op_modifier, op, token,
event);
mutex_unlock(&cmd->hcr_mutex);
return ret;
}
/* This should be called with "poll_sem" held. */
static int __hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param,
u64 out_param, unsigned long in_modifier,
u8 op_modifier, u16 op,
unsigned long timeout)
{
struct device *dev = &hr_dev->pdev->dev;
u8 __iomem *hcr = hr_dev->cmd.hcr;
unsigned long end = 0;
u32 status = 0;
int ret;
ret = hns_roce_cmd_mbox_post_hw(hr_dev, in_param, out_param,
in_modifier, op_modifier, op,
CMD_POLL_TOKEN, 0);
if (ret) {
dev_err(dev, "[cmd_poll]hns_roce_cmd_mbox_post_hw failed\n");
goto out;
}
end = msecs_to_jiffies(timeout) + jiffies;
while (cmd_pending(hr_dev) && time_before(jiffies, end))
cond_resched();
if (cmd_pending(hr_dev)) {
dev_err(dev, "[cmd_poll]hw run cmd TIMEDOUT!\n");
ret = -ETIMEDOUT;
goto out;
}
status = le32_to_cpu((__force __be32)
__raw_readl(hcr + HCR_STATUS_OFFSET));
if ((status & STATUS_MASK) != 0x1) {
dev_err(dev, "mailbox status 0x%x!\n", status);
ret = -EBUSY;
goto out;
}
out:
return ret;
}
static int hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param,
u64 out_param, unsigned long in_modifier,
u8 op_modifier, u16 op, unsigned long timeout)
{
int ret;
down(&hr_dev->cmd.poll_sem);
ret = __hns_roce_cmd_mbox_poll(hr_dev, in_param, out_param, in_modifier,
op_modifier, op, timeout);
up(&hr_dev->cmd.poll_sem);
return ret;
}
void hns_roce_cmd_event(struct hns_roce_dev *hr_dev, u16 token, u8 status,
u64 out_param)
{
struct hns_roce_cmd_context
*context = &hr_dev->cmd.context[token & hr_dev->cmd.token_mask];
if (token != context->token)
return;
context->result = (status == HNS_ROCE_CMD_SUCCESS) ? 0 : (-EIO);
context->out_param = out_param;
complete(&context->done);
}
/* This should be called with "use_events" enabled. */
static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param,
u64 out_param, unsigned long in_modifier,
u8 op_modifier, u16 op,
unsigned long timeout)
{
struct hns_roce_cmdq *cmd = &hr_dev->cmd;
struct device *dev = &hr_dev->pdev->dev;
struct hns_roce_cmd_context *context;
int ret = 0;
spin_lock(&cmd->context_lock);
WARN_ON(cmd->free_head < 0);
context = &cmd->context[cmd->free_head];
context->token += cmd->token_mask + 1;
cmd->free_head = context->next;
spin_unlock(&cmd->context_lock);
init_completion(&context->done);
ret = hns_roce_cmd_mbox_post_hw(hr_dev, in_param, out_param,
in_modifier, op_modifier, op,
context->token, 1);
if (ret)
goto out;
/*
 * wait_for_completion_timeout() returns 0 on timeout; otherwise
 * it returns the number of jiffies left of the timeout that was
 * set in advance.
 */
if (!wait_for_completion_timeout(&context->done,
msecs_to_jiffies(timeout))) {
dev_err(dev, "[cmd]wait_for_completion_timeout timeout\n");
ret = -EBUSY;
goto out;
}
ret = context->result;
if (ret) {
dev_err(dev, "[cmd]event mod cmd process error!err=%d\n", ret);
goto out;
}
out:
spin_lock(&cmd->context_lock);
context->next = cmd->free_head;
cmd->free_head = context - cmd->context;
spin_unlock(&cmd->context_lock);
return ret;
}
static int hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param,
u64 out_param, unsigned long in_modifier,
u8 op_modifier, u16 op, unsigned long timeout)
{
int ret = 0;
down(&hr_dev->cmd.event_sem);
ret = __hns_roce_cmd_mbox_wait(hr_dev, in_param, out_param,
in_modifier, op_modifier, op, timeout);
up(&hr_dev->cmd.event_sem);
return ret;
}
int hns_roce_cmd_mbox(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param,
unsigned long in_modifier, u8 op_modifier, u16 op,
unsigned long timeout)
{
if (hr_dev->cmd.use_events)
return hns_roce_cmd_mbox_wait(hr_dev, in_param, out_param,
in_modifier, op_modifier, op,
timeout);
else
return hns_roce_cmd_mbox_poll(hr_dev, in_param, out_param,
in_modifier, op_modifier, op,
timeout);
}
int hns_roce_cmd_init(struct hns_roce_dev *hr_dev)
{
struct device *dev = &hr_dev->pdev->dev;
mutex_init(&hr_dev->cmd.hcr_mutex);
sema_init(&hr_dev->cmd.poll_sem, 1);
hr_dev->cmd.use_events = 0;
hr_dev->cmd.toggle = 1;
hr_dev->cmd.max_cmds = CMD_MAX_NUM;
hr_dev->cmd.hcr = hr_dev->reg_base + ROCEE_MB1_REG;
hr_dev->cmd.pool = dma_pool_create("hns_roce_cmd", dev,
HNS_ROCE_MAILBOX_SIZE,
HNS_ROCE_MAILBOX_SIZE, 0);
if (!hr_dev->cmd.pool)
return -ENOMEM;
return 0;
}
void hns_roce_cmd_cleanup(struct hns_roce_dev *hr_dev)
{
dma_pool_destroy(hr_dev->cmd.pool);
}
int hns_roce_cmd_use_events(struct hns_roce_dev *hr_dev)
{
struct hns_roce_cmdq *hr_cmd = &hr_dev->cmd;
int i;
hr_cmd->context = kmalloc(hr_cmd->max_cmds *
sizeof(struct hns_roce_cmd_context),
GFP_KERNEL);
if (!hr_cmd->context)
return -ENOMEM;
for (i = 0; i < hr_cmd->max_cmds; ++i) {
hr_cmd->context[i].token = i;
hr_cmd->context[i].next = i + 1;
}
hr_cmd->context[hr_cmd->max_cmds - 1].next = -1;
hr_cmd->free_head = 0;
sema_init(&hr_cmd->event_sem, hr_cmd->max_cmds);
spin_lock_init(&hr_cmd->context_lock);
hr_cmd->token_mask = CMD_TOKEN_MASK;
hr_cmd->use_events = 1;
down(&hr_cmd->poll_sem);
return 0;
}
void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev)
{
struct hns_roce_cmdq *hr_cmd = &hr_dev->cmd;
int i;
hr_cmd->use_events = 0;
for (i = 0; i < hr_cmd->max_cmds; ++i)
down(&hr_cmd->event_sem);
kfree(hr_cmd->context);
up(&hr_cmd->poll_sem);
}
struct hns_roce_cmd_mailbox
*hns_roce_alloc_cmd_mailbox(struct hns_roce_dev *hr_dev)
{
struct hns_roce_cmd_mailbox *mailbox;
mailbox = kmalloc(sizeof(*mailbox), GFP_KERNEL);
if (!mailbox)
return ERR_PTR(-ENOMEM);
mailbox->buf = dma_pool_alloc(hr_dev->cmd.pool, GFP_KERNEL,
&mailbox->dma);
if (!mailbox->buf) {
kfree(mailbox);
return ERR_PTR(-ENOMEM);
}
return mailbox;
}
void hns_roce_free_cmd_mailbox(struct hns_roce_dev *hr_dev,
struct hns_roce_cmd_mailbox *mailbox)
{
if (!mailbox)
return;
dma_pool_free(hr_dev->cmd.pool, mailbox->buf, mailbox->dma);
kfree(mailbox);
}
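
In event mode, each mailbox command takes a context from a free list, and the context's token is advanced by token_mask + 1 on every reuse. Because hns_roce_cmd_event() indexes the array with token & token_mask and then compares the full token, a late completion carrying a stale token is simply dropped. A scaled-down standalone model of that scheme (4 contexts instead of the driver's 32; a toy, not driver code):

#include <assert.h>
#include <stdint.h>

#define NCTX		4
#define TOKEN_MASK	0x3	/* low bits index the context array */

struct ctx {
	uint16_t token;
	int next;		/* free-list link; -1 terminates */
};

static struct ctx ctxs[NCTX];
static int free_head;

static void ctx_init(void)
{
	int i;

	for (i = 0; i < NCTX; i++) {
		ctxs[i].token = i;
		ctxs[i].next = i + 1;
	}
	ctxs[NCTX - 1].next = -1;
	free_head = 0;
}

/* Take a context; bumping the token invalidates stale completions. */
static struct ctx *ctx_get(void)
{
	struct ctx *c = &ctxs[free_head];

	c->token += TOKEN_MASK + 1;	/* stays congruent to its index */
	free_head = c->next;
	return c;
}

int main(void)
{
	struct ctx *c;

	ctx_init();
	c = ctx_get();
	/* Lookup by token & TOKEN_MASK still lands on this context,
	 * while an old token value would fail the equality check. */
	assert(&ctxs[c->token & TOKEN_MASK] == c);
	assert(c->token != (c->token & TOKEN_MASK));	/* bumped past init */
	return 0;
}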


@@ -0,0 +1,80 @@
/*
* Copyright (c) 2016 Hisilicon Limited.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _HNS_ROCE_CMD_H
#define _HNS_ROCE_CMD_H
#define HNS_ROCE_MAILBOX_SIZE 4096
enum {
/* TPT commands */
HNS_ROCE_CMD_SW2HW_MPT = 0xd,
HNS_ROCE_CMD_HW2SW_MPT = 0xf,
/* CQ commands */
HNS_ROCE_CMD_SW2HW_CQ = 0x16,
HNS_ROCE_CMD_HW2SW_CQ = 0x17,
/* QP/EE commands */
HNS_ROCE_CMD_RST2INIT_QP = 0x19,
HNS_ROCE_CMD_INIT2RTR_QP = 0x1a,
HNS_ROCE_CMD_RTR2RTS_QP = 0x1b,
HNS_ROCE_CMD_RTS2RTS_QP = 0x1c,
HNS_ROCE_CMD_2ERR_QP = 0x1e,
HNS_ROCE_CMD_RTS2SQD_QP = 0x1f,
HNS_ROCE_CMD_SQD2SQD_QP = 0x38,
HNS_ROCE_CMD_SQD2RTS_QP = 0x20,
HNS_ROCE_CMD_2RST_QP = 0x21,
HNS_ROCE_CMD_QUERY_QP = 0x22,
};
enum {
HNS_ROCE_CMD_TIME_CLASS_A = 10000,
HNS_ROCE_CMD_TIME_CLASS_B = 10000,
HNS_ROCE_CMD_TIME_CLASS_C = 10000,
};
struct hns_roce_cmd_mailbox {
void *buf;
dma_addr_t dma;
};
int hns_roce_cmd_mbox(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param,
unsigned long in_modifier, u8 op_modifier, u16 op,
unsigned long timeout);
struct hns_roce_cmd_mailbox
*hns_roce_alloc_cmd_mailbox(struct hns_roce_dev *hr_dev);
void hns_roce_free_cmd_mailbox(struct hns_roce_dev *hr_dev,
struct hns_roce_cmd_mailbox *mailbox);
#endif /* _HNS_ROCE_CMD_H */


@@ -0,0 +1,325 @@
/*
* Copyright (c) 2016 Hisilicon Limited.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _HNS_ROCE_COMMON_H
#define _HNS_ROCE_COMMON_H
#ifndef assert
#define assert(cond)
#endif
#define roce_write(dev, reg, val) writel((val), (dev)->reg_base + (reg))
#define roce_read(dev, reg) readl((dev)->reg_base + (reg))
#define roce_raw_write(value, addr) \
__raw_writel((__force u32)cpu_to_le32(value), (addr))
#define roce_get_field(origin, mask, shift) \
(((origin) & (mask)) >> (shift))
#define roce_get_bit(origin, shift) \
roce_get_field((origin), (1ul << (shift)), (shift))
#define roce_set_field(origin, mask, shift, val) \
do { \
(origin) &= (~(mask)); \
(origin) |= (((u32)(val) << (shift)) & (mask)); \
} while (0)
#define roce_set_bit(origin, shift, val) \
roce_set_field((origin), (1ul << (shift)), (shift), (val))
#define ROCEE_GLB_CFG_ROCEE_DB_SQ_MODE_S 3
#define ROCEE_GLB_CFG_ROCEE_DB_OTH_MODE_S 4
#define ROCEE_GLB_CFG_SQ_EXT_DB_MODE_S 5
#define ROCEE_GLB_CFG_OTH_EXT_DB_MODE_S 6
#define ROCEE_GLB_CFG_ROCEE_PORT_ST_S 10
#define ROCEE_GLB_CFG_ROCEE_PORT_ST_M \
(((1UL << 6) - 1) << ROCEE_GLB_CFG_ROCEE_PORT_ST_S)
#define ROCEE_GLB_CFG_TRP_RAQ_DROP_EN_S 16
#define ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_S 0
#define ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_M \
(((1UL << 24) - 1) << ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_S)
#define ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_S 24
#define ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_M \
(((1UL << 4) - 1) << ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_S)
#define ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_S 0
#define ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_M \
(((1UL << 24) - 1) << ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_S)
#define ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_S 24
#define ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_M \
(((1UL << 4) - 1) << ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_S)
#define ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_S 0
#define ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_M \
(((1UL << 16) - 1) << ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_S)
#define ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_S 16
#define ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_M \
(((1UL << 16) - 1) << ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_S)
#define ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_S 0
#define ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_M \
(((1UL << 16) - 1) << ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_S)
#define ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_S 16
#define ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_M \
(((1UL << 16) - 1) << ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_S)
#define ROCEE_RAQ_WL_ROCEE_RAQ_WL_S 0
#define ROCEE_RAQ_WL_ROCEE_RAQ_WL_M \
(((1UL << 8) - 1) << ROCEE_RAQ_WL_ROCEE_RAQ_WL_S)
#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_S 0
#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_M \
(((1UL << 15) - 1) << \
ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_S)
#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_S 16
#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_M \
(((1UL << 4) - 1) << \
ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_S)
#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_EN_S 20
#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_EXT_RAQ_MODE 21
#define ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_S 0
#define ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_M \
(((1UL << 5) - 1) << ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_S)
#define ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_S 5
#define ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_M \
(((1UL << 5) - 1) << ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_S)
#define ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_S 0
#define ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_M \
(((1UL << 5) - 1) << ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_S)
#define ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_S 5
#define ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_M \
(((1UL << 5) - 1) << ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_S)
#define ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_S 0
#define ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_M \
(((1UL << 5) - 1) << ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_S)
#define ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_S 8
#define ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_M \
(((1UL << 5) - 1) << ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_S)
#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S 0
#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M \
(((1UL << 19) - 1) << ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S)
#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_S 19
#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S 20
#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M \
(((1UL << 2) - 1) << ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S)
#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S 22
#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M \
(((1UL << 5) - 1) << ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S)
#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S 31
#define ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_S 0
#define ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_M \
(((1UL << 3) - 1) << ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_S)
#define ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S 0
#define ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_M \
(((1UL << 15) - 1) << ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S)
#define ROCEE_MB6_ROCEE_MB_CMD_S 0
#define ROCEE_MB6_ROCEE_MB_CMD_M \
(((1UL << 8) - 1) << ROCEE_MB6_ROCEE_MB_CMD_S)
#define ROCEE_MB6_ROCEE_MB_CMD_MDF_S 8
#define ROCEE_MB6_ROCEE_MB_CMD_MDF_M \
(((1UL << 4) - 1) << ROCEE_MB6_ROCEE_MB_CMD_MDF_S)
#define ROCEE_MB6_ROCEE_MB_EVENT_S 14
#define ROCEE_MB6_ROCEE_MB_HW_RUN_S 15
#define ROCEE_MB6_ROCEE_MB_TOKEN_S 16
#define ROCEE_MB6_ROCEE_MB_TOKEN_M \
(((1UL << 16) - 1) << ROCEE_MB6_ROCEE_MB_TOKEN_S)
#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S 0
#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_M \
(((1UL << 24) - 1) << ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S)
#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S 24
#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_M \
(((1UL << 4) - 1) << ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S)
#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S 28
#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M \
(((1UL << 3) - 1) << ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S)
#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S 31
#define ROCEE_SMAC_H_ROCEE_SMAC_H_S 0
#define ROCEE_SMAC_H_ROCEE_SMAC_H_M \
(((1UL << 16) - 1) << ROCEE_SMAC_H_ROCEE_SMAC_H_S)
#define ROCEE_SMAC_H_ROCEE_PORT_MTU_S 16
#define ROCEE_SMAC_H_ROCEE_PORT_MTU_M \
(((1UL << 4) - 1) << ROCEE_SMAC_H_ROCEE_PORT_MTU_S)
#define ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S 0
#define ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M \
(((1UL << 2) - 1) << ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S)
#define ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_S 8
#define ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_M \
(((1UL << 4) - 1) << ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_S)
#define ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQ_ALM_OVF_INT_ST_S 17
#define ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S 0
#define ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_M \
(((1UL << 5) - 1) << ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S)
#define ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S 16
#define ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_M \
(((1UL << 16) - 1) << ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S)
#define ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S 0
#define ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_M \
(((1UL << 16) - 1) << ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S)
#define ROCEE_CAEP_CEQC_SHIFT_CAEP_CEQ_ALM_OVF_INT_ST_S 16
#define ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S 1
#define ROCEE_CAEP_CEQ_ALM_OVF_CAEP_CEQ_ALM_OVF_S 0
#define ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S 0
#define ROCEE_CAEP_AE_MASK_CAEP_AE_IRQ_MASK_S 1
#define ROCEE_CAEP_AE_ST_CAEP_AEQ_ALM_OVF_S 0
#define ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_S 0
#define ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_M \
(((1UL << 28) - 1) << ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_S)
#define ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S 0
#define ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M \
(((1UL << 28) - 1) << ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S)
#define ROCEE_SDB_INV_CNT_SDB_INV_CNT_S 0
#define ROCEE_SDB_INV_CNT_SDB_INV_CNT_M \
(((1UL << 16) - 1) << ROCEE_SDB_INV_CNT_SDB_INV_CNT_S)
/*************ROCEE_REG DEFINITION****************/
#define ROCEE_VENDOR_ID_REG 0x0
#define ROCEE_VENDOR_PART_ID_REG 0x4
#define ROCEE_HW_VERSION_REG 0x8
#define ROCEE_SYS_IMAGE_GUID_L_REG 0xC
#define ROCEE_SYS_IMAGE_GUID_H_REG 0x10
#define ROCEE_PORT_GID_L_0_REG 0x50
#define ROCEE_PORT_GID_ML_0_REG 0x54
#define ROCEE_PORT_GID_MH_0_REG 0x58
#define ROCEE_PORT_GID_H_0_REG 0x5C
#define ROCEE_BT_CMD_H_REG 0x204
#define ROCEE_SMAC_L_0_REG 0x240
#define ROCEE_SMAC_H_0_REG 0x244
#define ROCEE_QP1C_CFG3_0_REG 0x27C
#define ROCEE_CAEP_AEQE_CONS_IDX_REG 0x3AC
#define ROCEE_CAEP_CEQC_CONS_IDX_0_REG 0x3BC
#define ROCEE_ECC_UCERR_ALM1_REG 0xB38
#define ROCEE_ECC_UCERR_ALM2_REG 0xB3C
#define ROCEE_ECC_CERR_ALM1_REG 0xB44
#define ROCEE_ECC_CERR_ALM2_REG 0xB48
#define ROCEE_ACK_DELAY_REG 0x14
#define ROCEE_GLB_CFG_REG 0x18
#define ROCEE_DMAE_USER_CFG1_REG 0x40
#define ROCEE_DMAE_USER_CFG2_REG 0x44
#define ROCEE_DB_SQ_WL_REG 0x154
#define ROCEE_DB_OTHERS_WL_REG 0x158
#define ROCEE_RAQ_WL_REG 0x15C
#define ROCEE_WRMS_POL_TIME_INTERVAL_REG 0x160
#define ROCEE_EXT_DB_SQ_REG 0x164
#define ROCEE_EXT_DB_SQ_H_REG 0x168
#define ROCEE_EXT_DB_OTH_REG 0x16C
#define ROCEE_EXT_DB_OTH_H_REG 0x170
#define ROCEE_EXT_DB_SQ_WL_EMPTY_REG 0x174
#define ROCEE_EXT_DB_SQ_WL_REG 0x178
#define ROCEE_EXT_DB_OTHERS_WL_EMPTY_REG 0x17C
#define ROCEE_EXT_DB_OTHERS_WL_REG 0x180
#define ROCEE_EXT_RAQ_REG 0x184
#define ROCEE_EXT_RAQ_H_REG 0x188
#define ROCEE_CAEP_CE_INTERVAL_CFG_REG 0x190
#define ROCEE_CAEP_CE_BURST_NUM_CFG_REG 0x194
#define ROCEE_BT_CMD_L_REG 0x200
#define ROCEE_MB1_REG 0x210
#define ROCEE_DB_SQ_L_0_REG 0x230
#define ROCEE_DB_OTHERS_L_0_REG 0x238
#define ROCEE_QP1C_CFG0_0_REG 0x270
#define ROCEE_CAEP_AEQC_AEQE_SHIFT_REG 0x3A0
#define ROCEE_CAEP_CEQC_SHIFT_0_REG 0x3B0
#define ROCEE_CAEP_CE_IRQ_MASK_0_REG 0x3C0
#define ROCEE_CAEP_CEQ_ALM_OVF_0_REG 0x3C4
#define ROCEE_CAEP_AE_MASK_REG 0x6C8
#define ROCEE_CAEP_AE_ST_REG 0x6CC
#define ROCEE_SDB_ISSUE_PTR_REG 0x758
#define ROCEE_SDB_SEND_PTR_REG 0x75C
#define ROCEE_SDB_INV_CNT_REG 0x9A4
#define ROCEE_ECC_UCERR_ALM0_REG 0xB34
#define ROCEE_ECC_CERR_ALM0_REG 0xB40
#endif /* _HNS_ROCE_COMMON_H */
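
The roce_set_field()/roce_get_field() pair is a plain read-modify-write over (mask, shift) descriptors. As a standalone illustration, the sketch below assembles a mailbox command word with user-space copies of the macros, using the ROCEE_MB6 layout defined above (command in bits 7:0, token in bits 31:16) and the SW2HW_CQ opcode and poll token values from the surrounding files.

#include <stdio.h>
#include <stdint.h>

#define roce_get_field(origin, mask, shift) \
	(((origin) & (mask)) >> (shift))
#define roce_set_field(origin, mask, shift, val) \
	do { \
		(origin) &= ~(mask); \
		(origin) |= (((uint32_t)(val) << (shift)) & (mask)); \
	} while (0)

#define CMD_S	0
#define CMD_M	(((1UL << 8) - 1) << CMD_S)
#define TOKEN_S	16
#define TOKEN_M	(((1UL << 16) - 1) << TOKEN_S)

int main(void)
{
	uint32_t val = 0;

	roce_set_field(val, CMD_M, CMD_S, 0x16);	/* HNS_ROCE_CMD_SW2HW_CQ */
	roce_set_field(val, TOKEN_M, TOKEN_S, 0xffff);	/* CMD_POLL_TOKEN */
	printf("word=0x%08x cmd=0x%lx\n", val,
	       roce_get_field(val, CMD_M, CMD_S));
	return 0;
}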


@@ -0,0 +1,446 @@
/*
* Copyright (c) 2016 Hisilicon Limited.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/platform_device.h>
#include <rdma/ib_umem.h>
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
#include "hns_roce_user.h"
#include "hns_roce_common.h"
static void hns_roce_ib_cq_comp(struct hns_roce_cq *hr_cq)
{
struct ib_cq *ibcq = &hr_cq->ib_cq;
ibcq->comp_handler(ibcq, ibcq->cq_context);
}
static void hns_roce_ib_cq_event(struct hns_roce_cq *hr_cq,
enum hns_roce_event event_type)
{
struct hns_roce_dev *hr_dev;
struct ib_event event;
struct ib_cq *ibcq;
ibcq = &hr_cq->ib_cq;
hr_dev = to_hr_dev(ibcq->device);
if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID &&
event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR &&
event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) {
dev_err(&hr_dev->pdev->dev,
"hns_roce_ib: Unexpected event type 0x%x on CQ %06lx\n",
event_type, hr_cq->cqn);
return;
}
if (ibcq->event_handler) {
event.device = ibcq->device;
event.event = IB_EVENT_CQ_ERR;
event.element.cq = ibcq;
ibcq->event_handler(&event, ibcq->cq_context);
}
}
static int hns_roce_sw2hw_cq(struct hns_roce_dev *dev,
struct hns_roce_cmd_mailbox *mailbox,
unsigned long cq_num)
{
return hns_roce_cmd_mbox(dev, mailbox->dma, 0, cq_num, 0,
HNS_ROCE_CMD_SW2HW_CQ, HNS_ROCE_CMD_TIME_CLASS_A);
}
static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
struct hns_roce_mtt *hr_mtt,
struct hns_roce_uar *hr_uar,
struct hns_roce_cq *hr_cq, int vector,
int collapsed)
{
struct hns_roce_cmd_mailbox *mailbox = NULL;
struct hns_roce_cq_table *cq_table = NULL;
struct device *dev = &hr_dev->pdev->dev;
dma_addr_t dma_handle;
u64 *mtts = NULL;
int ret = 0;
cq_table = &hr_dev->cq_table;
/* Get the physical address of cq buf */
mtts = hns_roce_table_find(&hr_dev->mr_table.mtt_table,
hr_mtt->first_seg, &dma_handle);
if (!mtts) {
dev_err(dev, "CQ alloc.Failed to find cq buf addr.\n");
return -EINVAL;
}
if (vector >= hr_dev->caps.num_comp_vectors) {
dev_err(dev, "CQ alloc.Invalid vector.\n");
return -EINVAL;
}
hr_cq->vector = vector;
ret = hns_roce_bitmap_alloc(&cq_table->bitmap, &hr_cq->cqn);
if (ret == -1) {
dev_err(dev, "CQ alloc.Failed to alloc index.\n");
return -ENOMEM;
}
/* Get CQC memory HEM (Hardware Entry Memory) table */
ret = hns_roce_table_get(hr_dev, &cq_table->table, hr_cq->cqn);
if (ret) {
dev_err(dev, "CQ alloc.Failed to get context mem.\n");
goto err_out;
}
/* Insert the cq into the radix tree */
spin_lock_irq(&cq_table->lock);
/* The radix tree associates the long integer key (cqn) with the cq pointer */
ret = radix_tree_insert(&cq_table->tree, hr_cq->cqn, hr_cq);
spin_unlock_irq(&cq_table->lock);
if (ret) {
dev_err(dev, "CQ alloc.Failed to radix_tree_insert.\n");
goto err_put;
}
/* Allocate mailbox memory */
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
if (IS_ERR(mailbox)) {
ret = PTR_ERR(mailbox);
goto err_radix;
}
hr_dev->hw->write_cqc(hr_dev, hr_cq, mailbox->buf, mtts, dma_handle,
nent, vector);
/* Send mailbox to hw */
ret = hns_roce_sw2hw_cq(hr_dev, mailbox, hr_cq->cqn);
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
if (ret) {
dev_err(dev, "CQ alloc.Failed to cmd mailbox.\n");
goto err_radix;
}
hr_cq->cons_index = 0;
hr_cq->uar = hr_uar;
return 0;
err_radix:
spin_lock_irq(&cq_table->lock);
radix_tree_delete(&cq_table->tree, hr_cq->cqn);
spin_unlock_irq(&cq_table->lock);
err_put:
hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
err_out:
hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn);
return ret;
}
static int hns_roce_hw2sw_cq(struct hns_roce_dev *dev,
struct hns_roce_cmd_mailbox *mailbox,
unsigned long cq_num)
{
return hns_roce_cmd_mbox(dev, 0, mailbox ? mailbox->dma : 0, cq_num,
mailbox ? 0 : 1, HNS_ROCE_CMD_HW2SW_CQ,
HNS_ROCE_CMD_TIME_CLASS_A);
}
static void hns_roce_free_cq(struct hns_roce_dev *hr_dev,
struct hns_roce_cq *hr_cq)
{
struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
struct device *dev = &hr_dev->pdev->dev;
int ret;
ret = hns_roce_hw2sw_cq(hr_dev, NULL, hr_cq->cqn);
if (ret)
dev_err(dev, "HW2SW_CQ failed (%d) for CQN %06lx\n", ret,
hr_cq->cqn);
/* Wait until any interrupt handling in progress has completed */
synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq);
spin_lock_irq(&cq_table->lock);
radix_tree_delete(&cq_table->tree, hr_cq->cqn);
spin_unlock_irq(&cq_table->lock);
hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn);
}
static int hns_roce_ib_get_cq_umem(struct hns_roce_dev *hr_dev,
struct ib_ucontext *context,
struct hns_roce_cq_buf *buf,
struct ib_umem **umem, u64 buf_addr, int cqe)
{
int ret;
*umem = ib_umem_get(context, buf_addr, cqe * hr_dev->caps.cq_entry_sz,
IB_ACCESS_LOCAL_WRITE, 1);
if (IS_ERR(*umem))
return PTR_ERR(*umem);
ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(*umem),
ilog2((unsigned int)(*umem)->page_size),
&buf->hr_mtt);
if (ret)
goto err_buf;
ret = hns_roce_ib_umem_write_mtt(hr_dev, &buf->hr_mtt, *umem);
if (ret)
goto err_mtt;
return 0;
err_mtt:
hns_roce_mtt_cleanup(hr_dev, &buf->hr_mtt);
err_buf:
ib_umem_release(*umem);
return ret;
}
static int hns_roce_ib_alloc_cq_buf(struct hns_roce_dev *hr_dev,
struct hns_roce_cq_buf *buf, u32 nent)
{
int ret;
ret = hns_roce_buf_alloc(hr_dev, nent * hr_dev->caps.cq_entry_sz,
PAGE_SIZE * 2, &buf->hr_buf);
if (ret)
goto out;
ret = hns_roce_mtt_init(hr_dev, buf->hr_buf.npages,
buf->hr_buf.page_shift, &buf->hr_mtt);
if (ret)
goto err_buf;
ret = hns_roce_buf_write_mtt(hr_dev, &buf->hr_mtt, &buf->hr_buf);
if (ret)
goto err_mtt;
return 0;
err_mtt:
hns_roce_mtt_cleanup(hr_dev, &buf->hr_mtt);
err_buf:
hns_roce_buf_free(hr_dev, nent * hr_dev->caps.cq_entry_sz,
&buf->hr_buf);
out:
return ret;
}
static void hns_roce_ib_free_cq_buf(struct hns_roce_dev *hr_dev,
struct hns_roce_cq_buf *buf, int cqe)
{
hns_roce_buf_free(hr_dev, (cqe + 1) * hr_dev->caps.cq_entry_sz,
&buf->hr_buf);
}
struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
const struct ib_cq_init_attr *attr,
struct ib_ucontext *context,
struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
struct device *dev = &hr_dev->pdev->dev;
struct hns_roce_ib_create_cq ucmd;
struct hns_roce_cq *hr_cq = NULL;
struct hns_roce_uar *uar = NULL;
int vector = attr->comp_vector;
int cq_entries = attr->cqe;
int ret = 0;
if (cq_entries < 1 || cq_entries > hr_dev->caps.max_cqes) {
dev_err(dev, "Creat CQ failed. entries=%d, max=%d\n",
cq_entries, hr_dev->caps.max_cqes);
return ERR_PTR(-EINVAL);
}
hr_cq = kmalloc(sizeof(*hr_cq), GFP_KERNEL);
if (!hr_cq)
return ERR_PTR(-ENOMEM);
/* In the v1 engine, verify the parameters */
if (cq_entries < HNS_ROCE_MIN_CQE_NUM)
cq_entries = HNS_ROCE_MIN_CQE_NUM;
cq_entries = roundup_pow_of_two((unsigned int)cq_entries);
hr_cq->ib_cq.cqe = cq_entries - 1;
mutex_init(&hr_cq->resize_mutex);
spin_lock_init(&hr_cq->lock);
hr_cq->hr_resize_buf = NULL;
hr_cq->resize_umem = NULL;
if (context) {
if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
dev_err(dev, "Failed to copy_from_udata.\n");
ret = -EFAULT;
goto err_cq;
}
/* Get user space address, write it into mtt table */
ret = hns_roce_ib_get_cq_umem(hr_dev, context, &hr_cq->hr_buf,
&hr_cq->umem, ucmd.buf_addr,
cq_entries);
if (ret) {
dev_err(dev, "Failed to get_cq_umem.\n");
goto err_cq;
}
/* Get user space parameters */
uar = &to_hr_ucontext(context)->uar;
} else {
/* Init mtt table and write buf address to mtt table */
ret = hns_roce_ib_alloc_cq_buf(hr_dev, &hr_cq->hr_buf,
cq_entries);
if (ret) {
dev_err(dev, "Failed to alloc_cq_buf.\n");
goto err_cq;
}
uar = &hr_dev->priv_uar;
hr_cq->cq_db_l = hr_dev->reg_base + ROCEE_DB_OTHERS_L_0_REG +
0x1000 * uar->index;
}
/* Allocate cq index, fill cq_context */
ret = hns_roce_cq_alloc(hr_dev, cq_entries, &hr_cq->hr_buf.hr_mtt,
uar, hr_cq, vector, 0);
if (ret) {
dev_err(dev, "Creat CQ .Failed to cq_alloc.\n");
goto err_mtt;
}
/* Set the handlers that deliver completion and async events for this cq */
hr_cq->comp = hns_roce_ib_cq_comp;
hr_cq->event = hns_roce_ib_cq_event;
hr_cq->cq_depth = cq_entries;
if (context) {
if (ib_copy_to_udata(udata, &hr_cq->cqn, sizeof(u64))) {
ret = -EFAULT;
goto err_mtt;
}
}
return &hr_cq->ib_cq;
err_mtt:
hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
if (context)
ib_umem_release(hr_cq->umem);
else
hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf,
hr_cq->ib_cq.cqe);
err_cq:
kfree(hr_cq);
return ERR_PTR(ret);
}
int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
hns_roce_free_cq(hr_dev, hr_cq);
hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
if (ib_cq->uobject)
ib_umem_release(hr_cq->umem);
else
/* Free the buffer backing the stored cq */
hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf, ib_cq->cqe);
kfree(hr_cq);
return 0;
}
void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
{
struct device *dev = &hr_dev->pdev->dev;
struct hns_roce_cq *cq;
cq = radix_tree_lookup(&hr_dev->cq_table.tree,
cqn & (hr_dev->caps.num_cqs - 1));
if (!cq) {
dev_warn(dev, "Completion event for bogus CQ 0x%08x\n", cqn);
return;
}
cq->comp(cq);
}
void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
{
struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
struct device *dev = &hr_dev->pdev->dev;
struct hns_roce_cq *cq;
cq = radix_tree_lookup(&cq_table->tree,
cqn & (hr_dev->caps.num_cqs - 1));
if (cq)
atomic_inc(&cq->refcount);
if (!cq) {
dev_warn(dev, "Async event for bogus CQ %08x\n", cqn);
return;
}
cq->event(cq, (enum hns_roce_event)event_type);
if (atomic_dec_and_test(&cq->refcount))
complete(&cq->free);
}
int hns_roce_init_cq_table(struct hns_roce_dev *hr_dev)
{
struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
spin_lock_init(&cq_table->lock);
INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);
return hns_roce_bitmap_init(&cq_table->bitmap, hr_dev->caps.num_cqs,
hr_dev->caps.num_cqs - 1,
hr_dev->caps.reserved_cqs, 0);
}
void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev)
{
hns_roce_bitmap_cleanup(&hr_dev->cq_table.bitmap);
}
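
Two small conventions recur in this file: the requested CQ depth is rounded up to a power of two (with ib_cq.cqe storing depth - 1), and the event paths look CQs up with cqn & (num_cqs - 1), which only works because num_cqs is a power of two. A toy illustration of both, with made-up numbers (roundup_pow2() is a userspace stand-in for the kernel's roundup_pow_of_two()):

#include <assert.h>
#include <stdint.h>

static uint32_t roundup_pow2(uint32_t n)
{
	uint32_t p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

int main(void)
{
	uint32_t entries = 100;			/* requested depth (made up) */
	uint32_t depth = roundup_pow2(entries);

	assert(depth == 128);
	/* depth - 1 doubles as an index mask, exactly like
	 * "cqn & (hr_dev->caps.num_cqs - 1)" in the lookups above. */
	assert((130u & (depth - 1)) == 2);
	return 0;
}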

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@@ -0,0 +1,130 @@
/*
* Copyright (c) 2016 Hisilicon Limited.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _HNS_ROCE_EQ_H
#define _HNS_ROCE_EQ_H
#define HNS_ROCE_CEQ 1
#define HNS_ROCE_AEQ 2
#define HNS_ROCE_CEQ_ENTRY_SIZE 0x4
#define HNS_ROCE_AEQ_ENTRY_SIZE 0x10
#define HNS_ROCE_CEQC_REG_OFFSET 0x18
#define HNS_ROCE_CEQ_DEFAULT_INTERVAL 0x10
#define HNS_ROCE_CEQ_DEFAULT_BURST_NUM 0x10
#define HNS_ROCE_INT_MASK_DISABLE 0
#define HNS_ROCE_INT_MASK_ENABLE 1
#define EQ_ENABLE 1
#define EQ_DISABLE 0
#define CONS_INDEX_MASK 0xffff
#define CEQ_REG_OFFSET 0x18
enum {
HNS_ROCE_EQ_STAT_INVALID = 0,
HNS_ROCE_EQ_STAT_VALID = 2,
};
struct hns_roce_aeqe {
u32 asyn;
union {
struct {
u32 qp;
u32 rsv0;
u32 rsv1;
} qp_event;
struct {
u32 cq;
u32 rsv0;
u32 rsv1;
} cq_event;
struct {
u32 port;
u32 rsv0;
u32 rsv1;
} port_event;
struct {
u32 ceqe;
u32 rsv0;
u32 rsv1;
} ce_event;
struct {
__le64 out_param;
__le16 token;
u8 status;
u8 rsv0;
} __packed cmd;
} event;
};
#define HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S 16
#define HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M \
(((1UL << 8) - 1) << HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S)
#define HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S 24
#define HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M \
(((1UL << 7) - 1) << HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)
#define HNS_ROCE_AEQE_U32_4_OWNER_S 31
#define HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S 0
#define HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M \
(((1UL << 24) - 1) << HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S)
#define HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S 0
#define HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M \
(((1UL << 16) - 1) << HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S)
#define HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S 0
#define HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_M \
(((1UL << 5) - 1) << HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S)
struct hns_roce_ceqe {
union {
int comp;
} ceqe;
};
#define HNS_ROCE_CEQE_CEQE_COMP_OWNER_S 0
#define HNS_ROCE_CEQE_CEQE_COMP_CQN_S 16
#define HNS_ROCE_CEQE_CEQE_COMP_CQN_M \
(((1UL << 16) - 1) << HNS_ROCE_CEQE_CEQE_COMP_CQN_S)
#endif /* _HNS_ROCE_EQ_H */
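
The AEQE masks above carve the first 32-bit word into an event type (bits 23:16), a sub-type (bits 30:24), and an owner bit (bit 31). A standalone sketch decoding a fabricated first word with those shifts (the word value itself is made up for illustration):

#include <stdio.h>
#include <stdint.h>

#define EVENT_TYPE_S	16
#define EVENT_TYPE_M	(((1UL << 8) - 1) << EVENT_TYPE_S)
#define OWNER_S		31

int main(void)
{
	/* Fabricated AEQE first word: owner bit set, event type 0x10. */
	uint32_t asyn = (1UL << OWNER_S) | (0x10UL << EVENT_TYPE_S);

	printf("owner=%lu type=0x%lx\n",
	       (asyn >> OWNER_S) & 1UL,
	       (asyn & EVENT_TYPE_M) >> EVENT_TYPE_S);
	return 0;
}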


@@ -0,0 +1,476 @@
/*
* Copyright (c) 2016 Hisilicon Limited.
* Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/platform_device.h>
#include "hns_roce_device.h"
#include "hns_roce_hem.h"
#include "hns_roce_common.h"
#define HW_SYNC_TIMEOUT_MSECS 500
#define HW_SYNC_SLEEP_TIME_INTERVAL 20
#define HNS_ROCE_HEM_ALLOC_SIZE (1 << 17)
#define HNS_ROCE_TABLE_CHUNK_SIZE (1 << 17)
#define DMA_ADDR_T_SHIFT 12
#define BT_CMD_SYNC_SHIFT 31
#define BT_BA_SHIFT 32
struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev, int npages,
gfp_t gfp_mask)
{
struct hns_roce_hem_chunk *chunk = NULL;
struct hns_roce_hem *hem;
struct scatterlist *mem;
int order;
void *buf;
WARN_ON(gfp_mask & __GFP_HIGHMEM);
hem = kmalloc(sizeof(*hem),
gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
if (!hem)
return NULL;
hem->refcount = 0;
INIT_LIST_HEAD(&hem->chunk_list);
order = get_order(HNS_ROCE_HEM_ALLOC_SIZE);
while (npages > 0) {
if (!chunk) {
chunk = kmalloc(sizeof(*chunk),
gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
if (!chunk)
goto fail;
sg_init_table(chunk->mem, HNS_ROCE_HEM_CHUNK_LEN);
chunk->npages = 0;
chunk->nsg = 0;
list_add_tail(&chunk->list, &hem->chunk_list);
}
while (1 << order > npages)
--order;
/*
 * Allocate the memory in one shot. If that fails, do not fall
 * back to smaller allocations; return failure directly.
 */
mem = &chunk->mem[chunk->npages];
buf = dma_alloc_coherent(&hr_dev->pdev->dev, PAGE_SIZE << order,
&sg_dma_address(mem), gfp_mask);
if (!buf)
goto fail;
sg_set_buf(mem, buf, PAGE_SIZE << order);
WARN_ON(mem->offset);
sg_dma_len(mem) = PAGE_SIZE << order;
++chunk->npages;
++chunk->nsg;
npages -= 1 << order;
}
return hem;
fail:
hns_roce_free_hem(hr_dev, hem);
return NULL;
}
void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem)
{
struct hns_roce_hem_chunk *chunk, *tmp;
int i;
if (!hem)
return;
list_for_each_entry_safe(chunk, tmp, &hem->chunk_list, list) {
for (i = 0; i < chunk->npages; ++i)
dma_free_coherent(&hr_dev->pdev->dev,
chunk->mem[i].length,
lowmem_page_address(sg_page(&chunk->mem[i])),
sg_dma_address(&chunk->mem[i]));
kfree(chunk);
}
kfree(hem);
}
static int hns_roce_set_hem(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, unsigned long obj)
{
struct device *dev = &hr_dev->pdev->dev;
spinlock_t *lock = &hr_dev->bt_cmd_lock;
unsigned long end = 0;
unsigned long flags;
struct hns_roce_hem_iter iter;
void __iomem *bt_cmd;
u32 bt_cmd_h_val = 0;
u32 bt_cmd_val[2];
u32 bt_cmd_l = 0;
u64 bt_ba = 0;
int ret = 0;
/* Find the HEM (Hardware Entry Memory) entry */
unsigned long i = (obj & (table->num_obj - 1)) /
(HNS_ROCE_TABLE_CHUNK_SIZE / table->obj_size);
switch (table->type) {
case HEM_TYPE_QPC:
roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_QPC);
break;
case HEM_TYPE_MTPT:
roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S,
HEM_TYPE_MTPT);
break;
case HEM_TYPE_CQC:
roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_CQC);
break;
case HEM_TYPE_SRQC:
roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S,
HEM_TYPE_SRQC);
break;
default:
return ret;
}
roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M,
ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj);
roce_set_bit(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0);
roce_set_bit(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1);
/* Currently, iterate over only one chunk */
for (hns_roce_hem_first(table->hem[i], &iter);
!hns_roce_hem_last(&iter); hns_roce_hem_next(&iter)) {
bt_ba = hns_roce_hem_addr(&iter) >> DMA_ADDR_T_SHIFT;
spin_lock_irqsave(lock, flags);
bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG;
end = msecs_to_jiffies(HW_SYNC_TIMEOUT_MSECS) + jiffies;
while (1) {
if (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) {
if (!(time_before(jiffies, end))) {
dev_err(dev, "Write bt_cmd err,hw_sync is not zero.\n");
spin_unlock_irqrestore(lock, flags);
return -EBUSY;
}
} else {
break;
}
msleep(HW_SYNC_SLEEP_TIME_INTERVAL);
}
bt_cmd_l = (u32)bt_ba;
roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M,
ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S,
bt_ba >> BT_BA_SHIFT);
bt_cmd_val[0] = bt_cmd_l;
bt_cmd_val[1] = bt_cmd_h_val;
hns_roce_write64_k(bt_cmd_val,
hr_dev->reg_base + ROCEE_BT_CMD_L_REG);
spin_unlock_irqrestore(lock, flags);
}
return ret;
}
static int hns_roce_clear_hem(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table,
unsigned long obj)
{
struct device *dev = &hr_dev->pdev->dev;
unsigned long end = 0;
unsigned long flags;
void __iomem *bt_cmd;
uint32_t bt_cmd_val[2];
u32 bt_cmd_h_val = 0;
int ret = 0;
switch (table->type) {
case HEM_TYPE_QPC:
roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_QPC);
break;
case HEM_TYPE_MTPT:
roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S,
HEM_TYPE_MTPT);
break;
case HEM_TYPE_CQC:
roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_CQC);
break;
case HEM_TYPE_SRQC:
roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S,
HEM_TYPE_SRQC);
break;
default:
return ret;
}
roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M,
ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj);
roce_set_bit(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0);
roce_set_bit(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1);
roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M,
ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S, 0);
spin_lock_irqsave(&hr_dev->bt_cmd_lock, flags);
bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG;
end = msecs_to_jiffies(HW_SYNC_TIMEOUT_MSECS) + jiffies;
while (1) {
if (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) {
if (!(time_before(jiffies, end))) {
dev_err(dev, "Write bt_cmd err,hw_sync is not zero.\n");
spin_unlock_irqrestore(&hr_dev->bt_cmd_lock,
flags);
return -EBUSY;
}
} else {
break;
}
msleep(HW_SYNC_SLEEP_TIME_INTERVAL);
}
bt_cmd_val[0] = 0;
bt_cmd_val[1] = bt_cmd_h_val;
hns_roce_write64_k(bt_cmd_val, hr_dev->reg_base + ROCEE_BT_CMD_L_REG);
spin_unlock_irqrestore(&hr_dev->bt_cmd_lock, flags);
return ret;
}
int hns_roce_table_get(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, unsigned long obj)
{
struct device *dev = &hr_dev->pdev->dev;
int ret = 0;
unsigned long i;
i = (obj & (table->num_obj - 1)) / (HNS_ROCE_TABLE_CHUNK_SIZE /
table->obj_size);
mutex_lock(&table->mutex);
if (table->hem[i]) {
++table->hem[i]->refcount;
goto out;
}
table->hem[i] = hns_roce_alloc_hem(hr_dev,
HNS_ROCE_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
(table->lowmem ? GFP_KERNEL :
GFP_HIGHUSER) | __GFP_NOWARN);
if (!table->hem[i]) {
ret = -ENOMEM;
goto out;
}
/* Set HEM base address (128K/page, pa) to hardware */
if (hns_roce_set_hem(hr_dev, table, obj)) {
ret = -ENODEV;
dev_err(dev, "set HEM base address to HW failed.\n");
goto out;
}
++table->hem[i]->refcount;
out:
mutex_unlock(&table->mutex);
return ret;
}
void hns_roce_table_put(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, unsigned long obj)
{
struct device *dev = &hr_dev->pdev->dev;
unsigned long i;
i = (obj & (table->num_obj - 1)) /
(HNS_ROCE_TABLE_CHUNK_SIZE / table->obj_size);
mutex_lock(&table->mutex);
if (--table->hem[i]->refcount == 0) {
/* Clear HEM base address */
if (hns_roce_clear_hem(hr_dev, table, obj))
dev_warn(dev, "Clear HEM base address failed.\n");
hns_roce_free_hem(hr_dev, table->hem[i]);
table->hem[i] = NULL;
}
mutex_unlock(&table->mutex);
}
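/*
 * hns_roce_table_find() resolves an object index to a kernel virtual
 * address: it picks the chunk, then walks the chunk's scatterlist,
 * subtracting each segment's length from the byte offset until it
 * reaches the segment containing the object (tracking the DMA offset
 * in parallel). Only lowmem tables can be resolved this way, because
 * the result must be directly addressable.
 */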
void *hns_roce_table_find(struct hns_roce_hem_table *table, unsigned long obj,
dma_addr_t *dma_handle)
{
struct hns_roce_hem_chunk *chunk;
unsigned long idx;
int i;
int offset, dma_offset;
struct hns_roce_hem *hem;
struct page *page = NULL;
if (!table->lowmem)
return NULL;
mutex_lock(&table->mutex);
idx = (obj & (table->num_obj - 1)) * table->obj_size;
hem = table->hem[idx / HNS_ROCE_TABLE_CHUNK_SIZE];
dma_offset = offset = idx % HNS_ROCE_TABLE_CHUNK_SIZE;
if (!hem)
goto out;
list_for_each_entry(chunk, &hem->chunk_list, list) {
for (i = 0; i < chunk->npages; ++i) {
if (dma_handle && dma_offset >= 0) {
if (sg_dma_len(&chunk->mem[i]) >
(u32)dma_offset)
*dma_handle = sg_dma_address(
&chunk->mem[i]) + dma_offset;
dma_offset -= sg_dma_len(&chunk->mem[i]);
}
if (chunk->mem[i].length > (u32)offset) {
page = sg_page(&chunk->mem[i]);
goto out;
}
offset -= chunk->mem[i].length;
}
}
out:
mutex_unlock(&table->mutex);
return page ? lowmem_page_address(page) + offset : NULL;
}
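/*
 * The range helpers step through [start, end] one chunk at a time. If
 * an allocation fails partway through, everything acquired so far is
 * released in reverse order so the chunk refcounts stay balanced.
 */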
int hns_roce_table_get_range(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table,
unsigned long start, unsigned long end)
{
unsigned long inc = HNS_ROCE_TABLE_CHUNK_SIZE / table->obj_size;
unsigned long i = 0;
int ret = 0;
	/* Allocate table memory chunk by chunk (128KB per chunk) */
for (i = start; i <= end; i += inc) {
ret = hns_roce_table_get(hr_dev, table, i);
if (ret)
goto fail;
}
return 0;
fail:
while (i > start) {
i -= inc;
hns_roce_table_put(hr_dev, table, i);
}
return ret;
}
void hns_roce_table_put_range(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table,
unsigned long start, unsigned long end)
{
unsigned long i;
for (i = start; i <= end;
i += HNS_ROCE_TABLE_CHUNK_SIZE / table->obj_size)
hns_roce_table_put(hr_dev, table, i);
}
int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, u32 type,
unsigned long obj_size, unsigned long nobj,
int use_lowmem)
{
unsigned long obj_per_chunk;
unsigned long num_hem;
obj_per_chunk = HNS_ROCE_TABLE_CHUNK_SIZE / obj_size;
num_hem = (nobj + obj_per_chunk - 1) / obj_per_chunk;
table->hem = kcalloc(num_hem, sizeof(*table->hem), GFP_KERNEL);
if (!table->hem)
return -ENOMEM;
table->type = type;
table->num_hem = num_hem;
table->num_obj = nobj;
table->obj_size = obj_size;
table->lowmem = use_lowmem;
mutex_init(&table->mutex);
return 0;
}
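/*
 * Illustrative init call (MTPT_ENTRY_SIZE and num_mtpts are stand-ins
 * for the real capability fields):
 *
 *	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table,
 *				      HEM_TYPE_MTPT, MTPT_ENTRY_SIZE,
 *				      hr_dev->caps.num_mtpts, 1);
 *
 * This only sizes the hem[] pointer array; the chunks themselves are
 * allocated lazily by hns_roce_table_get().
 */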
void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table)
{
struct device *dev = &hr_dev->pdev->dev;
unsigned long i;
for (i = 0; i < table->num_hem; ++i)
if (table->hem[i]) {
if (hns_roce_clear_hem(hr_dev, table,
i * HNS_ROCE_TABLE_CHUNK_SIZE / table->obj_size))
dev_err(dev, "Clear HEM base address failed.\n");
hns_roce_free_hem(hr_dev, table->hem[i]);
}
kfree(table->hem);
}
void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev)
{
hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table);
hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.irrl_table);
hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.qp_table);
hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table);
hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtt_table);
}

View File

@@ -0,0 +1,131 @@
/*
* Copyright (c) 2016 Hisilicon Limited.
* Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _HNS_ROCE_HEM_H
#define _HNS_ROCE_HEM_H
enum {
/* MAP HEM (Hardware Entry Memory) */
HEM_TYPE_QPC = 0,
HEM_TYPE_MTPT,
HEM_TYPE_CQC,
HEM_TYPE_SRQC,
/* UNMAP HEM */
HEM_TYPE_MTT,
HEM_TYPE_IRRL,
};
#define HNS_ROCE_HEM_CHUNK_LEN \
((256 - sizeof(struct list_head) - 2 * sizeof(int)) / \
(sizeof(struct scatterlist)))
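/*
 * Sizing note: the chunk descriptor is kept near 256 bytes, so the
 * scatterlist array gets whatever remains after the list_head and the
 * two ints. On a typical 64-bit build with a 32-byte struct
 * scatterlist that is (256 - 16 - 8) / 32 = 7 entries per chunk
 * (exact sizes vary with architecture and config options).
 */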
enum {
HNS_ROCE_HEM_PAGE_SHIFT = 12,
HNS_ROCE_HEM_PAGE_SIZE = 1 << HNS_ROCE_HEM_PAGE_SHIFT,
};
struct hns_roce_hem_chunk {
struct list_head list;
int npages;
int nsg;
struct scatterlist mem[HNS_ROCE_HEM_CHUNK_LEN];
};
struct hns_roce_hem {
struct list_head chunk_list;
int refcount;
};
struct hns_roce_hem_iter {
struct hns_roce_hem *hem;
struct hns_roce_hem_chunk *chunk;
int page_idx;
};
void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem);
int hns_roce_table_get(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, unsigned long obj);
void hns_roce_table_put(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, unsigned long obj);
void *hns_roce_table_find(struct hns_roce_hem_table *table, unsigned long obj,
dma_addr_t *dma_handle);
int hns_roce_table_get_range(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table,
unsigned long start, unsigned long end);
void hns_roce_table_put_range(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table,
unsigned long start, unsigned long end);
int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, u32 type,
unsigned long obj_size, unsigned long nobj,
int use_lowmem);
void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table);
void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev);
static inline void hns_roce_hem_first(struct hns_roce_hem *hem,
struct hns_roce_hem_iter *iter)
{
iter->hem = hem;
iter->chunk = list_empty(&hem->chunk_list) ? NULL :
list_entry(hem->chunk_list.next,
struct hns_roce_hem_chunk, list);
iter->page_idx = 0;
}
static inline int hns_roce_hem_last(struct hns_roce_hem_iter *iter)
{
return !iter->chunk;
}
static inline void hns_roce_hem_next(struct hns_roce_hem_iter *iter)
{
if (++iter->page_idx >= iter->chunk->nsg) {
if (iter->chunk->list.next == &iter->hem->chunk_list) {
iter->chunk = NULL;
return;
}
iter->chunk = list_entry(iter->chunk->list.next,
struct hns_roce_hem_chunk, list);
iter->page_idx = 0;
}
}
static inline dma_addr_t hns_roce_hem_addr(struct hns_roce_hem_iter *iter)
{
return sg_dma_address(&iter->chunk->mem[iter->page_idx]);
}
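/*
 * Iterator usage sketch (illustrative): walk every DMA-mapped page of
 * a HEM, e.g. when handing the backing pages to the hardware.
 *
 *	struct hns_roce_hem_iter iter;
 *	dma_addr_t addr;
 *
 *	for (hns_roce_hem_first(hem, &iter); !hns_roce_hem_last(&iter);
 *	     hns_roce_hem_next(&iter)) {
 *		addr = hns_roce_hem_addr(&iter);
 *		... program addr into the device ...
 *	}
 */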
#endif /* _HNS_ROCE_HEM_H */

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -0,0 +1,144 @@
/*
* Copyright (c) 2016 Hisilicon Limited.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/platform_device.h>
#include "hns_roce_device.h"
static int hns_roce_pd_alloc(struct hns_roce_dev *hr_dev, unsigned long *pdn)
{
struct device *dev = &hr_dev->pdev->dev;
unsigned long pd_number;
int ret = 0;
ret = hns_roce_bitmap_alloc(&hr_dev->pd_bitmap, &pd_number);
if (ret == -1) {
dev_err(dev, "alloc pdn from pdbitmap failed\n");
return -ENOMEM;
}
*pdn = pd_number;
return 0;
}
static void hns_roce_pd_free(struct hns_roce_dev *hr_dev, unsigned long pdn)
{
hns_roce_bitmap_free(&hr_dev->pd_bitmap, pdn);
}
int hns_roce_init_pd_table(struct hns_roce_dev *hr_dev)
{
return hns_roce_bitmap_init(&hr_dev->pd_bitmap, hr_dev->caps.num_pds,
hr_dev->caps.num_pds - 1,
hr_dev->caps.reserved_pds, 0);
}
void hns_roce_cleanup_pd_table(struct hns_roce_dev *hr_dev)
{
hns_roce_bitmap_cleanup(&hr_dev->pd_bitmap);
}
struct ib_pd *hns_roce_alloc_pd(struct ib_device *ib_dev,
struct ib_ucontext *context,
struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
struct device *dev = &hr_dev->pdev->dev;
struct hns_roce_pd *pd;
int ret;
pd = kmalloc(sizeof(*pd), GFP_KERNEL);
if (!pd)
return ERR_PTR(-ENOMEM);
ret = hns_roce_pd_alloc(to_hr_dev(ib_dev), &pd->pdn);
if (ret) {
kfree(pd);
dev_err(dev, "[alloc_pd]hns_roce_pd_alloc failed!\n");
return ERR_PTR(ret);
}
if (context) {
if (ib_copy_to_udata(udata, &pd->pdn, sizeof(u64))) {
hns_roce_pd_free(to_hr_dev(ib_dev), pd->pdn);
dev_err(dev, "[alloc_pd]ib_copy_to_udata failed!\n");
kfree(pd);
return ERR_PTR(-EFAULT);
}
}
return &pd->ibpd;
}
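/*
 * For a userspace PD the assigned pdn is copied back through udata so
 * the provider library knows which protection domain number the driver
 * picked; in-kernel callers pass a NULL context and skip the copy.
 */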
int hns_roce_dealloc_pd(struct ib_pd *pd)
{
hns_roce_pd_free(to_hr_dev(pd->device), to_hr_pd(pd)->pdn);
kfree(to_hr_pd(pd));
return 0;
}
int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
{
struct resource *res;
int ret = 0;
	/* Use a bitmap to manage the UAR index */
ret = hns_roce_bitmap_alloc(&hr_dev->uar_table.bitmap, &uar->index);
if (ret == -1)
return -ENOMEM;
uar->index = (uar->index - 1) % hr_dev->caps.phy_num_uars + 1;
res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0);
uar->pfn = ((res->start) >> PAGE_SHIFT) + uar->index;
return 0;
}
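/*
 * The bitmap hands out a logical UAR index, which is folded onto the
 * physical UAR range with (index - 1) % phy_num_uars + 1; the result
 * stays in 1..phy_num_uars, so physical UAR page 0 is never handed
 * out. The doorbell pfn is then the register-region base pfn plus
 * that index.
 */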
void hns_roce_uar_free(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
{
hns_roce_bitmap_free(&hr_dev->uar_table.bitmap, uar->index);
}
int hns_roce_init_uar_table(struct hns_roce_dev *hr_dev)
{
return hns_roce_bitmap_init(&hr_dev->uar_table.bitmap,
hr_dev->caps.num_uars,
hr_dev->caps.num_uars - 1,
hr_dev->caps.reserved_uars, 0);
}
void hns_roce_cleanup_uar_table(struct hns_roce_dev *hr_dev)
{
hns_roce_bitmap_cleanup(&hr_dev->uar_table.bitmap);
}

File diff suppressed because it is too large

View File

@@ -0,0 +1,53 @@
/*
* Copyright (c) 2016 Hisilicon Limited.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _HNS_ROCE_USER_H
#define _HNS_ROCE_USER_H
struct hns_roce_ib_create_cq {
__u64 buf_addr;
};
struct hns_roce_ib_create_qp {
__u64 buf_addr;
__u64 db_addr;
__u8 log_sq_bb_count;
__u8 log_sq_stride;
__u8 sq_no_prefetch;
__u8 reserved[5];
};
struct hns_roce_ib_alloc_ucontext_resp {
__u32 qp_tab_size;
};
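/*
 * ABI note: these structures cross the user/kernel boundary, so they
 * use fixed-width __u types and are padded to 64-bit multiples;
 * reserved[5] rounds struct hns_roce_ib_create_qp up from 19 used
 * bytes to 24.
 */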
#endif /* _HNS_ROCE_USER_H */