2019-07-11 14:39:23 +02:00
|
|
|
#include "queue.h"
|
2019-12-01 16:31:11 -05:00
|
|
|
#include "apple_bce.h"
|
2025-10-25 13:36:49 +05:30
|
|
|
#include <linux/version.h>
|
2019-07-11 14:39:23 +02:00
|
|
|
|
2019-07-11 21:04:10 +02:00
|
|
|
#define REG_DOORBELL_BASE 0x44000
|
|
|
|
|
|
2019-12-01 16:31:11 -05:00
|
|
|
/**
 * bce_alloc_cq - Allocate a completion queue with a DMA-coherent ring buffer.
 * @dev: owning BCE device (provides the PCI device for DMA allocation)
 * @qid: queue id to record in the new queue
 * @el_count: number of completion elements in the ring
 *
 * Returns the new queue, or NULL on allocation failure.
 * The caller owns the result and must release it with bce_free_cq().
 */
struct bce_queue_cq *bce_alloc_cq(struct apple_bce_device *dev, int qid, u32 el_count)
{
    struct bce_queue_cq *q;
    q = kzalloc(sizeof(struct bce_queue_cq), GFP_KERNEL);
    if (!q) /* fix: previously dereferenced without a NULL check */
        return NULL;
    q->qid = qid;
    q->type = BCE_QUEUE_CQ;
    q->el_count = el_count;
    q->data = dma_alloc_coherent(&dev->pci->dev, el_count * sizeof(struct bce_qe_completion),
            &q->dma_handle, GFP_KERNEL);
    if (!q->data) {
        pr_err("DMA queue memory alloc failed\n");
        kfree(q);
        return NULL;
    }
    return q;
}
|
|
|
|
|
|
2019-07-11 17:00:46 +02:00
|
|
|
void bce_get_cq_memcfg(struct bce_queue_cq *cq, struct bce_queue_memcfg *cfg)
|
2019-07-11 15:49:22 +02:00
|
|
|
{
|
|
|
|
|
cfg->qid = (u16) cq->qid;
|
|
|
|
|
cfg->el_count = (u16) cq->el_count;
|
|
|
|
|
cfg->vector_or_cq = 0;
|
|
|
|
|
cfg->_pad = 0;
|
|
|
|
|
cfg->addr = cq->dma_handle;
|
|
|
|
|
cfg->length = cq->el_count * sizeof(struct bce_qe_completion);
|
|
|
|
|
}
|
|
|
|
|
|
2019-12-01 16:31:11 -05:00
|
|
|
void bce_free_cq(struct apple_bce_device *dev, struct bce_queue_cq *cq)
|
2019-07-11 15:49:22 +02:00
|
|
|
{
|
2019-07-11 17:57:48 +02:00
|
|
|
dma_free_coherent(&dev->pci->dev, cq->el_count * sizeof(struct bce_qe_completion), cq->data, cq->dma_handle);
|
|
|
|
|
kfree(cq);
|
2019-07-11 14:57:38 +02:00
|
|
|
}
|
2019-07-11 14:39:23 +02:00
|
|
|
|
2019-12-01 16:31:11 -05:00
|
|
|
/*
 * Process one completion element from a CQ: validate its target SQ,
 * copy the completion data into that SQ's completion ring, and record
 * the SQ in dev->int_sq_list (via *ce) so its callback runs later.
 *
 * *ce is the current count of entries in dev->int_sq_list; it is
 * incremented when a queue is appended. Each queue is appended at most
 * once per batch (guarded by has_pending_completions).
 */
static void bce_handle_cq_completion(struct apple_bce_device *dev, struct bce_qe_completion *e, size_t *ce)
{
    struct bce_queue *target;
    struct bce_queue_sq *target_sq;
    struct bce_sq_completion_data *cmpl;
    /* Reject out-of-range queue ids coming from the device. */
    if (e->qid >= BCE_MAX_QUEUE_COUNT) {
        pr_err("Device sent a response for qid (%u) >= BCE_MAX_QUEUE_COUNT\n", e->qid);
        return;
    }
    target = dev->queues[e->qid];
    /* Only submission queues receive completions. */
    if (!target || target->type != BCE_QUEUE_SQ) {
        pr_err("Device sent a response for qid (%u), which does not exist\n", e->qid);
        return;
    }
    target_sq = (struct bce_queue_sq *) target;
    /* The device must complete submissions in ring order. */
    if (target_sq->completion_tail != e->completion_index) {
        pr_err("Completion index mismatch; this is likely going to make this driver unusable\n");
        return;
    }
    if (!target_sq->has_pending_completions) {
        target_sq->has_pending_completions = true;
        dev->int_sq_list[(*ce)++] = target_sq;
    }
    cmpl = &target_sq->completion_data[e->completion_index];
    cmpl->status = e->status;
    cmpl->data_size = e->data_size;
    cmpl->result = e->result;
    /* Publish the completion data before advancing the tail index,
     * so a consumer that sees the new tail also sees valid data. */
    wmb();
    target_sq->completion_tail = (target_sq->completion_tail + 1) % target_sq->el_count;
}
|
|
|
|
|
|
2019-12-01 16:31:11 -05:00
|
|
|
/*
 * Drain all pending elements from a completion queue.
 *
 * Phase 1: walk the CQ ring while elements have the PENDING flag set,
 * dispatching each to bce_handle_cq_completion, clearing the flag and
 * advancing cq->index. The doorbell write tells the device how far we
 * have consumed.
 * Phase 2: invoke the completion callback of every SQ that was collected
 * into dev->int_sq_list during phase 1 (ce entries), in reverse order
 * of collection.
 */
void bce_handle_cq_completions(struct apple_bce_device *dev, struct bce_queue_cq *cq)
{
    size_t ce = 0;
    struct bce_qe_completion *e;
    struct bce_queue_sq *sq;
    e = bce_cq_element(cq, cq->index);
    /* Fast exit: nothing pending at the current index. */
    if (!(e->flags & BCE_COMPLETION_FLAG_PENDING))
        return;
    /* Order the flag check above against reading the element bodies below. */
    mb();
    while (true) {
        e = bce_cq_element(cq, cq->index);
        if (!(e->flags & BCE_COMPLETION_FLAG_PENDING))
            break;
        // pr_info("apple-bce: compl: %i: %i %llx %llx", e->qid, e->status, e->data_size, e->result);
        bce_handle_cq_completion(dev, e, &ce);
        /* Clear the flag so the slot can be reused by the device. */
        e->flags = 0;
        cq->index = (cq->index + 1) % cq->el_count;
    }
    /* Make ring updates visible before ringing the doorbell. */
    mb();
    iowrite32(cq->index, (u32 *) ((u8 *) dev->reg_mem_dma + REG_DOORBELL_BASE) + cq->qid);
    /* Run deferred SQ completion callbacks collected in phase 1. */
    while (ce) {
        --ce;
        sq = dev->int_sq_list[ce];
        sq->completion(sq);
        sq->has_pending_completions = false;
    }
}
|
|
|
|
|
|
|
|
|
|
|
2019-12-01 16:31:11 -05:00
|
|
|
struct bce_queue_sq *bce_alloc_sq(struct apple_bce_device *dev, int qid, u32 el_size, u32 el_count,
|
2019-07-11 21:04:10 +02:00
|
|
|
bce_sq_completion compl, void *userdata)
|
2019-07-11 17:57:48 +02:00
|
|
|
{
|
|
|
|
|
struct bce_queue_sq *q;
|
|
|
|
|
q = kzalloc(sizeof(struct bce_queue_sq), GFP_KERNEL);
|
|
|
|
|
q->qid = qid;
|
|
|
|
|
q->type = BCE_QUEUE_SQ;
|
|
|
|
|
q->el_size = el_size;
|
|
|
|
|
q->el_count = el_count;
|
|
|
|
|
q->data = dma_alloc_coherent(&dev->pci->dev, el_count * el_size,
|
|
|
|
|
&q->dma_handle, GFP_KERNEL);
|
2019-07-11 21:04:10 +02:00
|
|
|
q->completion = compl;
|
|
|
|
|
q->userdata = userdata;
|
2019-07-12 21:06:34 +02:00
|
|
|
q->completion_data = kzalloc(sizeof(struct bce_sq_completion_data) * el_count, GFP_KERNEL);
|
2019-07-13 19:02:30 +02:00
|
|
|
q->reg_mem_dma = dev->reg_mem_dma;
|
|
|
|
|
atomic_set(&q->available_commands, el_count - 1);
|
|
|
|
|
init_completion(&q->available_command_completion);
|
2019-07-13 19:20:25 +02:00
|
|
|
atomic_set(&q->available_command_completion_waiting_count, 0);
|
2019-07-11 17:57:48 +02:00
|
|
|
if (!q->data) {
|
|
|
|
|
pr_err("DMA queue memory alloc failed\n");
|
|
|
|
|
kfree(q);
|
|
|
|
|
return NULL;
|
|
|
|
|
}
|
|
|
|
|
return q;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void bce_get_sq_memcfg(struct bce_queue_sq *sq, struct bce_queue_cq *cq, struct bce_queue_memcfg *cfg)
|
|
|
|
|
{
|
|
|
|
|
cfg->qid = (u16) sq->qid;
|
|
|
|
|
cfg->el_count = (u16) sq->el_count;
|
|
|
|
|
cfg->vector_or_cq = (u16) cq->qid;
|
|
|
|
|
cfg->_pad = 0;
|
|
|
|
|
cfg->addr = sq->dma_handle;
|
|
|
|
|
cfg->length = sq->el_count * sq->el_size;
|
|
|
|
|
}
|
|
|
|
|
|
2019-12-01 16:31:11 -05:00
|
|
|
void bce_free_sq(struct apple_bce_device *dev, struct bce_queue_sq *sq)
|
2019-07-11 17:57:48 +02:00
|
|
|
{
|
|
|
|
|
dma_free_coherent(&dev->pci->dev, sq->el_count * sq->el_size, sq->data, sq->dma_handle);
|
|
|
|
|
kfree(sq);
|
|
|
|
|
}
|
2019-07-11 20:17:37 +02:00
|
|
|
|
2019-07-14 14:34:23 +02:00
|
|
|
/*
 * Reserve one submission slot on an SQ, waiting up to *timeout jiffies
 * for one to become available.
 *
 * Returns 0 with a slot reserved, or -EAGAIN when timeout is NULL, has
 * reached zero, or expires. *timeout is updated with the remaining time.
 *
 * The waiter protocol pairs with bce_notify_submission_complete():
 * a waiter increments the waiting count before sleeping; the notifier
 * only signals the completion if it can decrement that count. If the
 * wait times out, the waiter tries to take its own count entry back;
 * failing that (count already consumed by the notifier), a completion
 * signal is in flight for this waiter and must be consumed so it does
 * not spuriously wake a future waiter.
 */
int bce_reserve_submission(struct bce_queue_sq *sq, unsigned long *timeout)
{
    while (atomic_dec_if_positive(&sq->available_commands) < 0) {
        if (!timeout || !*timeout)
            return -EAGAIN;
        atomic_inc(&sq->available_command_completion_waiting_count);
        *timeout = wait_for_completion_timeout(&sq->available_command_completion, *timeout);
        if (!*timeout) {
            if (atomic_dec_if_positive(&sq->available_command_completion_waiting_count) < 0)
                try_wait_for_completion(&sq->available_command_completion); /* consume the pending completion */
        }
    }
    return 0;
}
|
|
|
|
|
|
2019-07-15 20:54:47 +02:00
|
|
|
/*
 * Undo a successful bce_reserve_submission() without submitting:
 * return the reserved slot to the available pool.
 */
void bce_cancel_submission_reservation(struct bce_queue_sq *sq)
{
    atomic_inc(&sq->available_commands);
}
|
|
|
|
|
|
2019-07-13 19:02:30 +02:00
|
|
|
void *bce_next_submission(struct bce_queue_sq *sq)
|
|
|
|
|
{
|
|
|
|
|
void *ret = bce_sq_element(sq, sq->tail);
|
|
|
|
|
sq->tail = (sq->tail + 1) % sq->el_count;
|
|
|
|
|
return ret;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
 * Ring the queue's doorbell with the current tail index, making all
 * queued submissions visible to the device.
 */
void bce_submit_to_device(struct bce_queue_sq *sq)
{
    /* Ensure the submission elements are written before the doorbell. */
    mb();
    iowrite32(sq->tail, (u32 *) ((u8 *) sq->reg_mem_dma + REG_DOORBELL_BASE) + sq->qid);
}
|
|
|
|
|
|
|
|
|
|
/*
 * Retire the submission at the head of the ring and return its slot to
 * the available pool. If a reserver is waiting (waiting count > 0),
 * consume one count entry and signal the completion — this pairs with
 * the waiter protocol in bce_reserve_submission().
 */
void bce_notify_submission_complete(struct bce_queue_sq *sq)
{
    sq->head = (sq->head + 1) % sq->el_count;
    atomic_inc(&sq->available_commands);
    if (atomic_dec_if_positive(&sq->available_command_completion_waiting_count) >= 0) {
        complete(&sq->available_command_completion);
    }
}
|
2019-07-11 20:17:37 +02:00
|
|
|
|
2019-07-13 20:26:37 +02:00
|
|
|
/**
 * bce_set_submission_single - Fill a submission element with a single
 *                             contiguous buffer.
 * @element: submission element to populate
 * @addr: DMA address of the buffer
 * @size: buffer length in bytes
 *
 * The segment-list fields are zeroed since no segment list is used.
 */
void bce_set_submission_single(struct bce_qe_submission *element, dma_addr_t addr, size_t size)
{
    element->segl_addr = 0;
    element->segl_length = 0;
    element->addr = addr;
    element->length = size;
}
|
|
|
|
|
|
2019-07-12 21:06:34 +02:00
|
|
|
static void bce_cmdq_completion(struct bce_queue_sq *q);
|
2019-07-11 20:17:37 +02:00
|
|
|
|
2019-12-01 16:31:11 -05:00
|
|
|
/**
 * bce_alloc_cmdq - Allocate a command queue: an SQ whose completions are
 *                  handled by bce_cmdq_completion, plus a per-slot table
 *                  of pending result elements.
 * @dev: owning BCE device
 * @qid: queue id for the underlying SQ
 * @el_count: number of command slots
 *
 * Returns the new command queue, or NULL on allocation failure.
 * The caller owns the result and must release it with bce_free_cmdq().
 */
struct bce_queue_cmdq *bce_alloc_cmdq(struct apple_bce_device *dev, int qid, u32 el_count)
{
    struct bce_queue_cmdq *q;
    q = kzalloc(sizeof(struct bce_queue_cmdq), GFP_KERNEL);
    if (!q) /* fix: previously dereferenced without a NULL check */
        return NULL;
    q->sq = bce_alloc_sq(dev, qid, BCE_CMD_SIZE, el_count, bce_cmdq_completion, q);
    if (!q->sq) {
        kfree(q);
        return NULL;
    }
    spin_lock_init(&q->lck);
    q->tres = kzalloc(sizeof(struct bce_queue_cmdq_result_el*) * el_count, GFP_KERNEL);
    if (!q->tres) {
        bce_free_sq(dev, q->sq); /* fix: the SQ was leaked on this path */
        kfree(q);
        return NULL;
    }
    return q;
}
|
|
|
|
|
|
2019-12-01 16:31:11 -05:00
|
|
|
void bce_free_cmdq(struct apple_bce_device *dev, struct bce_queue_cmdq *cmdq)
|
2019-07-11 21:04:10 +02:00
|
|
|
{
|
2019-07-12 09:09:17 +02:00
|
|
|
bce_free_sq(dev, cmdq->sq);
|
2019-07-11 21:04:10 +02:00
|
|
|
kfree(cmdq->tres);
|
|
|
|
|
kfree(cmdq);
|
|
|
|
|
}
|
|
|
|
|
|
2019-07-12 21:06:34 +02:00
|
|
|
/*
 * SQ completion callback for the command queue.
 *
 * For each retired command, look up the result element registered for
 * the ring slot at the SQ head (stored by bce_cmd_start), copy the
 * device's result/status into it and signal its completion so the
 * thread blocked in bce_cmd_finish wakes up. The slot's table entry is
 * cleared and the submission retired before moving on.
 */
void bce_cmdq_completion(struct bce_queue_sq *q)
{
    struct bce_queue_cmdq_result_el *el;
    struct bce_queue_cmdq *cmdq = q->userdata;
    struct bce_sq_completion_data *result;

    spin_lock(&cmdq->lck);
    while ((result = bce_next_completion(q))) {
        /* q->head is the ring slot of the command being retired. */
        el = cmdq->tres[cmdq->sq->head];
        if (el) {
            el->result = result->result;
            el->status = result->status;
            /* Publish result/status before waking the waiter. */
            mb();
            complete(&el->cmpl);
        } else {
            pr_err("apple-bce: Unexpected command queue completion\n");
        }
        cmdq->tres[cmdq->sq->head] = NULL;
        /* Must come last: advances q->head and frees the slot. */
        bce_notify_submission_complete(q);
    }
    spin_unlock(&cmdq->lck);
}
|
|
|
|
|
|
|
|
|
|
/*
 * Begin a synchronous command on the command queue.
 *
 * Reserves a submission slot (waiting up to ~5 minutes), registers @res
 * as the result element for that slot, and returns a pointer to the
 * submission element to fill in. Returns NULL on reservation failure.
 *
 * NOTE: on success this returns with cmdq->lck HELD; the caller must
 * finish the command with bce_cmd_finish(), which releases the lock.
 */
static __always_inline void *bce_cmd_start(struct bce_queue_cmdq *cmdq, struct bce_queue_cmdq_result_el *res)
{
    void *ret;
    unsigned long timeout;
    init_completion(&res->cmpl);
    /* Make the initialized completion visible before it can be signalled. */
    mb();

    timeout = msecs_to_jiffies(1000L * 60 * 5); /* wait for up to ~5 minutes */
    if (bce_reserve_submission(cmdq->sq, &timeout))
        return NULL;

    spin_lock(&cmdq->lck);
    /* Record where bce_cmdq_completion should deliver the result. */
    cmdq->tres[cmdq->sq->tail] = res;
    ret = bce_next_submission(cmdq->sq);
    return ret;
}
|
|
|
|
|
|
|
|
|
|
/*
 * Complete a command started with bce_cmd_start(): submit it to the
 * device, release cmdq->lck (taken by bce_cmd_start), and block until
 * bce_cmdq_completion signals @res. On return, res->status and
 * res->result are valid.
 */
static __always_inline void bce_cmd_finish(struct bce_queue_cmdq *cmdq, struct bce_queue_cmdq_result_el *res)
{
    bce_submit_to_device(cmdq->sq);
    spin_unlock(&cmdq->lck);

    wait_for_completion(&res->cmpl);
    /* Order the wakeup against reading the result fields. */
    mb();
}
|
|
|
|
|
|
2019-07-13 17:28:56 +02:00
|
|
|
/**
 * bce_cmd_register_queue - Synchronously register a memory queue with
 *                          the device.
 * @cmdq: command queue to issue the command on
 * @cfg: queue memory configuration (from bce_get_cq_memcfg/bce_get_sq_memcfg)
 * @name: optional queue name; NULL for unnamed queues (e.g. CQs)
 * @isdirout: direction flag sent to the device
 *
 * Returns the device status, or (u32) -1 if the command could not be
 * submitted.
 */
u32 bce_cmd_register_queue(struct bce_queue_cmdq *cmdq, struct bce_queue_memcfg *cfg, const char *name, bool isdirout)
{
    struct bce_queue_cmdq_result_el res;
    struct bce_cmdq_register_memory_queue_cmd *cmd = bce_cmd_start(cmdq, &res);
    u16 flags = 0;

    if (cmd == NULL)
        return (u32) -1;

    if (isdirout)
        flags |= 1;
    if (name)
        flags |= 2;

    cmd->cmd = BCE_CMD_REGISTER_MEMORY_QUEUE;
    cmd->flags = flags;
    cmd->qid = cfg->qid;
    cmd->el_count = cfg->el_count;
    cmd->vector_or_cq = cfg->vector_or_cq;

    memset(cmd->name, 0, sizeof(cmd->name));
    cmd->name_len = 0;
    if (name) {
        size_t len = strlen(name);

        if (len > sizeof(cmd->name))
            len = sizeof(cmd->name);
        cmd->name_len = (u16) len;
        memcpy(cmd->name, name, len);
    }
    cmd->addr = cfg->addr;
    cmd->length = cfg->length;

    bce_cmd_finish(cmdq, &res);
    return res.status;
}
|
|
|
|
|
|
2019-07-11 21:04:10 +02:00
|
|
|
/**
 * bce_cmd_unregister_memory_queue - Synchronously unregister a queue
 *                                   from the device.
 * @cmdq: command queue to issue the command on
 * @qid: id of the queue to unregister
 *
 * Returns the device status, or (u32) -1 if the command could not be
 * submitted.
 */
u32 bce_cmd_unregister_memory_queue(struct bce_queue_cmdq *cmdq, u16 qid)
{
    struct bce_queue_cmdq_result_el res;
    struct bce_cmdq_simple_memory_queue_cmd *cmd;

    cmd = bce_cmd_start(cmdq, &res);
    if (cmd == NULL)
        return (u32) -1;
    cmd->cmd = BCE_CMD_UNREGISTER_MEMORY_QUEUE;
    cmd->flags = 0;
    cmd->qid = qid;

    bce_cmd_finish(cmdq, &res);
    return res.status;
}
|
|
|
|
|
|
2019-07-11 21:04:10 +02:00
|
|
|
/**
 * bce_cmd_flush_memory_queue - Synchronously ask the device to flush a
 *                              memory queue.
 * @cmdq: command queue to issue the command on
 * @qid: id of the queue to flush
 *
 * Returns the device status, or (u32) -1 if the command could not be
 * submitted.
 */
u32 bce_cmd_flush_memory_queue(struct bce_queue_cmdq *cmdq, u16 qid)
{
    struct bce_queue_cmdq_result_el res;
    struct bce_cmdq_simple_memory_queue_cmd *cmd;

    cmd = bce_cmd_start(cmdq, &res);
    if (cmd == NULL)
        return (u32) -1;
    cmd->cmd = BCE_CMD_FLUSH_MEMORY_QUEUE;
    cmd->flags = 0;
    cmd->qid = qid;

    bce_cmd_finish(cmdq, &res);
    return res.status;
}
|
|
|
|
|
|
|
|
|
|
|
2019-12-01 16:31:11 -05:00
|
|
|
/**
 * bce_create_cq - Allocate a completion queue, register it with the
 *                 device, and publish it in dev->queues.
 * @dev: owning BCE device
 * @el_count: number of completion elements
 *
 * Returns the queue, or NULL on failure. Destroy with bce_destroy_cq().
 */
struct bce_queue_cq *bce_create_cq(struct apple_bce_device *dev, u32 el_count)
{
    struct bce_queue_cq *cq;
    struct bce_queue_memcfg cfg;
#if LINUX_VERSION_CODE < KERNEL_VERSION(6,18,0)
    int qid = ida_simple_get(&dev->queue_ida, BCE_QUEUE_USER_MIN, BCE_QUEUE_USER_MAX, GFP_KERNEL);
#else
    int qid = ida_alloc_range(&dev->queue_ida, BCE_QUEUE_USER_MIN, BCE_QUEUE_USER_MAX - 1, GFP_KERNEL);
#endif
    if (qid < 0)
        return NULL;
    cq = bce_alloc_cq(dev, qid, el_count);
    if (!cq) /* fix: the IDA id was previously leaked on this path */
        goto err_free_qid;
    bce_get_cq_memcfg(cq, &cfg);
    if (bce_cmd_register_queue(dev->cmd_cmdq, &cfg, NULL, false) != 0) {
        pr_err("apple-bce: CQ registration failed (%i)", qid);
        bce_free_cq(dev, cq);
        goto err_free_qid;
    }
    dev->queues[qid] = (struct bce_queue *) cq;
    return cq;

err_free_qid:
#if LINUX_VERSION_CODE < KERNEL_VERSION(6,18,0)
    ida_simple_remove(&dev->queue_ida, (uint) qid);
#else
    ida_free(&dev->queue_ida, (uint) qid);
#endif
    return NULL;
}
|
|
|
|
|
|
2019-12-01 16:31:11 -05:00
|
|
|
/**
 * bce_create_sq - Allocate a submission queue, register it with the
 *                 device, and publish it in dev->queues.
 * @dev: owning BCE device
 * @cq: completion queue to associate; must not be NULL
 * @name: queue name sent to the device; must not be NULL
 * @el_count: number of submission elements
 * @direction: DMA_TO_DEVICE or DMA_FROM_DEVICE
 * @compl: completion callback for the queue
 * @userdata: opaque pointer stored for the callback
 *
 * Returns the queue, or NULL on failure. Destroy with bce_destroy_sq().
 */
struct bce_queue_sq *bce_create_sq(struct apple_bce_device *dev, struct bce_queue_cq *cq, const char *name, u32 el_count,
        int direction, bce_sq_completion compl, void *userdata)
{
    struct bce_queue_sq *sq;
    struct bce_queue_memcfg cfg;
    int qid;
    if (cq == NULL)
        return NULL; /* cq can not be null */
    if (name == NULL)
        return NULL; /* name can not be null */
    if (direction != DMA_TO_DEVICE && direction != DMA_FROM_DEVICE)
        return NULL; /* unsupported direction */
#if LINUX_VERSION_CODE < KERNEL_VERSION(6,18,0)
    qid = ida_simple_get(&dev->queue_ida, BCE_QUEUE_USER_MIN, BCE_QUEUE_USER_MAX, GFP_KERNEL);
#else
    qid = ida_alloc_range(&dev->queue_ida, BCE_QUEUE_USER_MIN, BCE_QUEUE_USER_MAX - 1, GFP_KERNEL);
#endif
    if (qid < 0)
        return NULL;
    sq = bce_alloc_sq(dev, qid, sizeof(struct bce_qe_submission), el_count, compl, userdata);
    if (!sq) /* fix: the IDA id was previously leaked on this path */
        goto err_free_qid;
    bce_get_sq_memcfg(sq, cq, &cfg);
    if (bce_cmd_register_queue(dev->cmd_cmdq, &cfg, name, direction != DMA_FROM_DEVICE) != 0) {
        pr_err("apple-bce: SQ registration failed (%i)", qid);
        bce_free_sq(dev, sq);
        goto err_free_qid;
    }
    spin_lock(&dev->queues_lock);
    dev->queues[qid] = (struct bce_queue *) sq;
    spin_unlock(&dev->queues_lock);
    return sq;

err_free_qid:
#if LINUX_VERSION_CODE < KERNEL_VERSION(6,18,0)
    ida_simple_remove(&dev->queue_ida, (uint) qid);
#else
    ida_free(&dev->queue_ida, (uint) qid);
#endif
    return NULL;
}
|
|
|
|
|
|
2019-12-01 16:31:11 -05:00
|
|
|
void bce_destroy_cq(struct apple_bce_device *dev, struct bce_queue_cq *cq)
|
2019-07-12 21:40:43 +02:00
|
|
|
{
|
2019-07-13 21:15:33 +02:00
|
|
|
if (!dev->is_being_removed && bce_cmd_unregister_memory_queue(dev->cmd_cmdq, (u16) cq->qid))
|
2019-12-01 16:31:11 -05:00
|
|
|
pr_err("apple-bce: CQ unregister failed");
|
2019-08-26 15:03:36 +02:00
|
|
|
spin_lock(&dev->queues_lock);
|
|
|
|
|
dev->queues[cq->qid] = NULL;
|
|
|
|
|
spin_unlock(&dev->queues_lock);
|
2025-10-25 13:36:49 +05:30
|
|
|
#if LINUX_VERSION_CODE < KERNEL_VERSION(6,18,0)
|
2019-07-12 21:40:43 +02:00
|
|
|
ida_simple_remove(&dev->queue_ida, (uint) cq->qid);
|
2025-10-25 13:36:49 +05:30
|
|
|
#else
|
|
|
|
|
ida_free(&dev->queue_ida, (uint) cq->qid);
|
|
|
|
|
#endif
|
2019-07-12 21:40:43 +02:00
|
|
|
bce_free_cq(dev, cq);
|
|
|
|
|
}
|
|
|
|
|
|
2019-12-01 16:31:11 -05:00
|
|
|
void bce_destroy_sq(struct apple_bce_device *dev, struct bce_queue_sq *sq)
|
2019-07-12 21:40:43 +02:00
|
|
|
{
|
2019-07-13 21:15:33 +02:00
|
|
|
if (!dev->is_being_removed && bce_cmd_unregister_memory_queue(dev->cmd_cmdq, (u16) sq->qid))
|
2019-12-01 16:31:11 -05:00
|
|
|
pr_err("apple-bce: CQ unregister failed");
|
2019-08-26 15:03:36 +02:00
|
|
|
spin_lock(&dev->queues_lock);
|
|
|
|
|
dev->queues[sq->qid] = NULL;
|
|
|
|
|
spin_unlock(&dev->queues_lock);
|
2025-10-25 13:36:49 +05:30
|
|
|
#if LINUX_VERSION_CODE < KERNEL_VERSION(6,18,0)
|
2019-07-12 21:40:43 +02:00
|
|
|
ida_simple_remove(&dev->queue_ida, (uint) sq->qid);
|
2025-10-25 13:36:49 +05:30
|
|
|
#else
|
|
|
|
|
ida_free(&dev->queue_ida, (uint) sq->qid);
|
|
|
|
|
#endif
|
2019-07-12 21:40:43 +02:00
|
|
|
bce_free_sq(dev, sq);
|
2019-07-11 20:17:37 +02:00
|
|
|
}
|