rockchip: remove nvme irq for now

Signed-off-by: sbwml <admin@cooluc.com>
sbwml 2024-06-15 01:14:52 +08:00
parent 04a9360d85
commit ade949c45f
3 changed files with 0 additions and 102 deletions


@@ -1,42 +0,0 @@
#!/bin/sh /etc/rc.common

START=00
STOP=90

get_device_irq() {
	local device="$1"
	local line
	local seconds="0"

	while [ "${seconds}" -le 10 ]; do
		line=$(grep -m 1 "${device}\$" /proc/interrupts) && break
		seconds="$(( seconds + 2 ))"
		sleep 2
	done

	echo ${line} | sed 's/:.*//'
}

set_smp_affinity() {
	local core_mask="$1"
	local interface="$2"

	[ -z "$interface" ] && return 0

	local irq=$(get_device_irq "$interface")
	echo "${core_mask}" > /proc/irq/${irq}/smp_affinity
}

boot() {
	. /lib/functions/uci-defaults.sh

	case "$(board_name)" in
	friendlyarm,nanopi-r5s)
		set_smp_affinity "1" "nvme0q0"
		set_smp_affinity "2" "nvme0q1"
		set_smp_affinity "1" "nvme0q2"
		set_smp_affinity "1" "nvme0q3"
		set_smp_affinity "2" "nvme0q4"
		;;
	esac
}
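For reference, a minimal sketch of what the deleted script automated, assuming a single NVMe drive whose queue interrupts show up as nvme0qN in /proc/interrupts (the queue name and mask values here are illustrative, not taken from this commit):

#!/bin/sh
# Find the IRQ number of the first NVMe I/O queue (queue name nvme0q1 assumed).
irq="$(grep -m 1 'nvme0q1$' /proc/interrupts | sed 's/:.*//' | tr -d ' ')"

# Show which CPUs may service it, as a hex bitmask and as a CPU list.
cat "/proc/irq/${irq}/smp_affinity"
cat "/proc/irq/${irq}/smp_affinity_list"

# Pin it to CPU1: mask "2" is binary 10, i.e. the second CPU, just as the
# masks "1" and "2" in the script above select CPU0 and CPU1.
echo 2 > "/proc/irq/${irq}/smp_affinity"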


@@ -1 +0,0 @@
../init.d/nvme-smp-affinity


@@ -1,59 +0,0 @@
From 0ac490bde726d22ce3bde0e41cec9858f71d7616 Mon Sep 17 00:00:00 2001
From: sbwml <admin@cooluc.com>
Date: Mon, 15 Apr 2024 06:36:21 +0800
Subject: [PATCH] nvme-pci: add support for modifying IRQ affinity

By default the kernel uses managed IRQs, but on lower-performance devices
the NVMe interrupt can end up on the same core as the Ethernet NIC
interrupt, which may reduce transfer performance.

Modify the NVMe PCI controller so that users can adjust interrupt
affinity to match their actual needs for better performance.

Signed-off-by: sbwml <admin@cooluc.com>
---
drivers/nvme/host/pci.c | 22 ++++++++++++++++------
1 file changed, 16 insertions(+), 6 deletions(-)
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -456,10 +456,7 @@ static void nvme_pci_map_queues(struct b
 		 * affinity), so use the regular blk-mq cpu mapping
 		 */
 		map->queue_offset = qoff;
-		if (i != HCTX_TYPE_POLL && offset)
-			blk_mq_pci_map_queues(map, to_pci_dev(dev->dev), offset);
-		else
-			blk_mq_map_queues(map);
+		blk_mq_map_queues(map);
 		qoff += map->nr_queues;
 		offset += map->nr_queues;
 	}
@@ -2223,6 +2220,8 @@ static int nvme_setup_irqs(struct nvme_d
 	};
 	unsigned int irq_queues, poll_queues;
 	unsigned int flags = PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY;
+	unsigned int affvecs;
+	int nr_irqs;
 
 	/*
 	 * Poll queues don't need interrupts, but we need at least one I/O queue
@@ -2248,8 +2247,19 @@ static int nvme_setup_irqs(struct nvme_d
 		irq_queues += (nr_io_queues - poll_queues);
 	if (dev->ctrl.quirks & NVME_QUIRK_BROKEN_MSI)
 		flags &= ~PCI_IRQ_MSI;
-	return pci_alloc_irq_vectors_affinity(pdev, 1, irq_queues, flags,
-					      &affd);
+
+	nr_irqs = pci_alloc_irq_vectors_affinity(pdev, 1, irq_queues, PCI_IRQ_ALL_TYPES, NULL);
+
+	if (nr_irqs > 0) {
+		if (nr_irqs > affd.pre_vectors)
+			affvecs = nr_irqs - affd.pre_vectors;
+		else
+			affvecs = 0;
+
+		nvme_calc_irq_sets(&affd, affvecs);
+	}
+
+	return nr_irqs;
 }
 
 static unsigned int nvme_max_io_queues(struct nvme_dev *dev)
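
For context on why the patch drops PCI_IRQ_AFFINITY and the affinity descriptor: the kernel rejects user-space writes to smp_affinity for managed IRQs, so the init script above could never take effect on a stock kernel. The patch allocates plain vectors instead and only calls nvme_calc_irq_sets() afterwards to size the queue sets. A hedged illustration of the failure mode (the IRQ number 48 is made up):

# Stock kernel, managed NVMe vectors: the write is rejected.
echo 2 > /proc/irq/48/smp_affinity
# sh: write error: I/O error

# With this patch applied, the same write succeeds, which is what the
# removed nvme-smp-affinity init script relied on at boot.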