Skip to content

Commit

Permalink
net: mana: add msix index sharing between EQs
Browse files Browse the repository at this point in the history
This patch allows assigning and polling more than one EQ on the same
msix index.
It is achieved by introducing a list of attached EQs in each IRQ context.
It also removes the existing msix_index map that tried to ensure that there
is only one EQ at each msix_index.
This patch exports symbols for creating EQs from other MANA kernel modules.

Signed-off-by: Konstantin Taranov <kotaranov@microsoft.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
  • Loading branch information
Konstantin Taranov authored and davem330 committed Dec 15, 2023
1 parent 10b7572 commit 02fed6d
Show file tree
Hide file tree
Showing 4 changed files with 43 additions and 42 deletions.
76 changes: 36 additions & 40 deletions drivers/net/ethernet/microsoft/mana/gdma_main.c
Original file line number Diff line number Diff line change
Expand Up @@ -414,8 +414,12 @@ static void mana_gd_process_eq_events(void *arg)

old_bits = (eq->head / num_eqe - 1) & GDMA_EQE_OWNER_MASK;
/* No more entries */
if (owner_bits == old_bits)
if (owner_bits == old_bits) {
/* return here without ringing the doorbell */
if (i == 0)
return;
break;
}

new_bits = (eq->head / num_eqe) & GDMA_EQE_OWNER_MASK;
if (owner_bits != new_bits) {
Expand Down Expand Up @@ -445,42 +449,29 @@ static int mana_gd_register_irq(struct gdma_queue *queue,
struct gdma_dev *gd = queue->gdma_dev;
struct gdma_irq_context *gic;
struct gdma_context *gc;
struct gdma_resource *r;
unsigned int msi_index;
unsigned long flags;
struct device *dev;
int err = 0;

gc = gd->gdma_context;
r = &gc->msix_resource;
dev = gc->dev;
msi_index = spec->eq.msix_index;

spin_lock_irqsave(&r->lock, flags);

msi_index = find_first_zero_bit(r->map, r->size);
if (msi_index >= r->size || msi_index >= gc->num_msix_usable) {
if (msi_index >= gc->num_msix_usable) {
err = -ENOSPC;
} else {
bitmap_set(r->map, msi_index, 1);
queue->eq.msix_index = msi_index;
}

spin_unlock_irqrestore(&r->lock, flags);

if (err) {
dev_err(dev, "Register IRQ err:%d, msi:%u rsize:%u, nMSI:%u",
err, msi_index, r->size, gc->num_msix_usable);
dev_err(dev, "Register IRQ err:%d, msi:%u nMSI:%u",
err, msi_index, gc->num_msix_usable);

return err;
}

queue->eq.msix_index = msi_index;
gic = &gc->irq_contexts[msi_index];

WARN_ON(gic->handler || gic->arg);

gic->arg = queue;

gic->handler = mana_gd_process_eq_events;
spin_lock_irqsave(&gic->lock, flags);
list_add_rcu(&queue->entry, &gic->eq_list);
spin_unlock_irqrestore(&gic->lock, flags);

return 0;
}
Expand All @@ -490,27 +481,29 @@ static void mana_gd_deregiser_irq(struct gdma_queue *queue)
struct gdma_dev *gd = queue->gdma_dev;
struct gdma_irq_context *gic;
struct gdma_context *gc;
struct gdma_resource *r;
unsigned int msix_index;
unsigned long flags;
struct gdma_queue *eq;

gc = gd->gdma_context;
r = &gc->msix_resource;

/* At most num_online_cpus() + 1 interrupts are used. */
msix_index = queue->eq.msix_index;
if (WARN_ON(msix_index >= gc->num_msix_usable))
return;

gic = &gc->irq_contexts[msix_index];
gic->handler = NULL;
gic->arg = NULL;

spin_lock_irqsave(&r->lock, flags);
bitmap_clear(r->map, msix_index, 1);
spin_unlock_irqrestore(&r->lock, flags);
spin_lock_irqsave(&gic->lock, flags);
list_for_each_entry_rcu(eq, &gic->eq_list, entry) {
if (queue == eq) {
list_del_rcu(&eq->entry);
break;
}
}
spin_unlock_irqrestore(&gic->lock, flags);

queue->eq.msix_index = INVALID_PCI_MSIX_INDEX;
synchronize_rcu();
}

int mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq)
Expand Down Expand Up @@ -588,6 +581,7 @@ static int mana_gd_create_eq(struct gdma_dev *gd,
int err;

queue->eq.msix_index = INVALID_PCI_MSIX_INDEX;
queue->id = INVALID_QUEUE_ID;

log2_num_entries = ilog2(queue->queue_size / GDMA_EQE_SIZE);

Expand Down Expand Up @@ -819,6 +813,7 @@ int mana_gd_create_mana_eq(struct gdma_dev *gd,
kfree(queue);
return err;
}
EXPORT_SYMBOL_NS(mana_gd_create_mana_eq, NET_MANA);

int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
const struct gdma_queue_spec *spec,
Expand Down Expand Up @@ -895,6 +890,7 @@ void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue)
mana_gd_free_memory(gmi);
kfree(queue);
}
EXPORT_SYMBOL_NS(mana_gd_destroy_queue, NET_MANA);

int mana_gd_verify_vf_version(struct pci_dev *pdev)
{
Expand Down Expand Up @@ -1217,9 +1213,14 @@ int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe)
static irqreturn_t mana_gd_intr(int irq, void *arg)
{
struct gdma_irq_context *gic = arg;
struct list_head *eq_list = &gic->eq_list;
struct gdma_queue *eq;

if (gic->handler)
gic->handler(gic->arg);
rcu_read_lock();
list_for_each_entry_rcu(eq, eq_list, entry) {
gic->handler(eq);
}
rcu_read_unlock();

return IRQ_HANDLED;
}
Expand Down Expand Up @@ -1271,8 +1272,9 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)

for (i = 0; i < nvec; i++) {
gic = &gc->irq_contexts[i];
gic->handler = NULL;
gic->arg = NULL;
gic->handler = mana_gd_process_eq_events;
INIT_LIST_HEAD(&gic->eq_list);
spin_lock_init(&gic->lock);

if (!i)
snprintf(gic->name, MANA_IRQ_NAME_SZ, "mana_hwc@pci:%s",
Expand All @@ -1295,10 +1297,6 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
irq_set_affinity_and_hint(irq, cpumask_of(cpu));
}

err = mana_gd_alloc_res_map(nvec, &gc->msix_resource);
if (err)
goto free_irq;

gc->max_num_msix = nvec;
gc->num_msix_usable = nvec;

Expand Down Expand Up @@ -1329,8 +1327,6 @@ static void mana_gd_remove_irqs(struct pci_dev *pdev)
if (gc->max_num_msix < 1)
return;

mana_gd_free_res_map(&gc->msix_resource);

for (i = 0; i < gc->max_num_msix; i++) {
irq = pci_irq_vector(pdev, i);
if (irq < 0)
Expand Down
1 change: 1 addition & 0 deletions drivers/net/ethernet/microsoft/mana/hw_channel.c
Original file line number Diff line number Diff line change
Expand Up @@ -300,6 +300,7 @@ static int mana_hwc_create_gdma_eq(struct hw_channel_context *hwc,
spec.eq.context = ctx;
spec.eq.callback = cb;
spec.eq.log2_throttle_limit = DEFAULT_LOG2_THROTTLING_FOR_ERROR_EQ;
spec.eq.msix_index = 0;

return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
}
Expand Down
1 change: 1 addition & 0 deletions drivers/net/ethernet/microsoft/mana/mana_en.c
Original file line number Diff line number Diff line change
Expand Up @@ -1244,6 +1244,7 @@ static int mana_create_eq(struct mana_context *ac)
spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;

for (i = 0; i < gc->max_num_queues; i++) {
spec.eq.msix_index = (i + 1) % gc->num_msix_usable;
err = mana_gd_create_mana_eq(gd, &spec, &ac->eqs[i].eq);
if (err)
goto out;
Expand Down
7 changes: 5 additions & 2 deletions include/net/mana/gdma.h
Original file line number Diff line number Diff line change
Expand Up @@ -293,6 +293,7 @@ struct gdma_queue {

u32 head;
u32 tail;
struct list_head entry;

/* Extra fields specific to EQ/CQ. */
union {
Expand Down Expand Up @@ -328,6 +329,7 @@ struct gdma_queue_spec {
void *context;

unsigned long log2_throttle_limit;
unsigned int msix_index;
} eq;

struct {
Expand All @@ -344,7 +346,9 @@ struct gdma_queue_spec {

struct gdma_irq_context {
void (*handler)(void *arg);
void *arg;
/* Protect the eq_list */
spinlock_t lock;
struct list_head eq_list;
char name[MANA_IRQ_NAME_SZ];
};

Expand All @@ -355,7 +359,6 @@ struct gdma_context {
unsigned int max_num_queues;
unsigned int max_num_msix;
unsigned int num_msix_usable;
struct gdma_resource msix_resource;
struct gdma_irq_context *irq_contexts;

/* L2 MTU */
Expand Down

0 comments on commit 02fed6d

Please sign in to comment.