作者
QQ群:852283276
微信:arm80x86
微信公众号:青儿创客基地
B站:主页 https://space.bilibili.com/208826118
参考
PCI Utilities
调用libpci库出现的问题和解决方法
Libpci库的调用
pci 学习笔记
第六章 PCI
pcie sysfs
基于Xilinx petalinux2018.2,linux4.19,zu+v7,访问
// drivers/pci/pci-sysfs.c, line 1638
pci_sysfs_init
for_each_pci_dev
pci_create_sysfs_dev_files
sysfs_create_bin_file(config space)
pci_create_resource_files(bar)
sysfs_create_bin_file(rom)
pci_create_capabilities_sysfs
sysfs_create_bin_file(vpd)
pcie_aspm_create_sysfs_dev_files
// drivers/pci/pci.c, line 4233
pci_probe_reset_function
pci_dev_specific_reset
pcie_has_flr
pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap); //PCI_EXP_DEVCAP + pcie_cap
pci_af_flr
pci_pm_reset
pci_dev_reset_slot_function
pci_parent_bus_reset
pci_reset_bridge_secondary_bus
pci_create_firmware_label_files
函数sysfs_create_bin_file
对于pci/pcie就是size不一样,
/*
 * sysfs "config" binary attribute for conventional PCI devices.
 * Exposes the 256-byte standard configuration space; readable by
 * everyone, writable by root only.
 */
static const struct bin_attribute pci_config_attr = {
.attr = {
.name = "config",
.mode = S_IRUGO | S_IWUSR,
},
.size = PCI_CFG_SPACE_SIZE,
.read = pci_read_config,
.write = pci_write_config,
};
/*
 * Same attribute for PCIe devices: identical name, mode, and
 * read/write handlers — only .size differs (4096-byte extended
 * configuration space instead of 256 bytes).
 */
static const struct bin_attribute pcie_config_attr = {
.attr = {
.name = "config",
.mode = S_IRUGO | S_IWUSR,
},
.size = PCI_CFG_SPACE_EXP_SIZE,
.read = pci_read_config,
.write = pci_write_config,
};
函数pci_dev_specific_reset的实现如下:
/* PCI device IDs of devices that need a device-specific reset method. */
#define PCI_DEVICE_ID_INTEL_82599_SFP_VF 0x10ed
#define PCI_DEVICE_ID_INTEL_IVB_M_VGA 0x0156
#define PCI_DEVICE_ID_INTEL_IVB_M2_VGA 0x0166
/*
 * Table of {vendor, device, reset handler} entries consulted by
 * pci_dev_specific_reset(). PCI_ANY_ID acts as a wildcard (see the
 * Chelsio entry, which matches every device of that vendor). The
 * all-zero sentinel entry terminates the table.
 */
static const struct pci_dev_reset_methods pci_dev_reset_methods[] = {
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82599_SFP_VF,
reset_intel_82599_sfp_virtfn },
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_M_VGA,
reset_ivb_igd },
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_M2_VGA,
reset_ivb_igd },
{ PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
reset_chelsio_generic_dev },
{ 0 }
};
/*
 * These device-specific reset methods are here rather than in a driver
 * because when a host assigns a device to a guest VM, the host may need
 * to reset the device but probably doesn't have a driver for it.
 */
/*
 * Look up @dev in pci_dev_reset_methods[] and invoke the first matching
 * entry's reset handler. @probe non-zero means "only check whether a
 * reset is possible" (the handler convention, not shown here).
 * Returns the handler's result, or -ENOTTY when no entry matches.
 */
int pci_dev_specific_reset(struct pci_dev *dev, int probe)
{
const struct pci_dev_reset_methods *i;
/* Walk until the all-zero sentinel (i->reset == NULL). */
for (i = pci_dev_reset_methods; i->reset; i++) {
/* PCI_ANY_ID (cast to u16) wildcards vendor and/or device. */
if ((i->vendor == dev->vendor ||
i->vendor == (u16)PCI_ANY_ID) &&
(i->device == dev->device ||
i->device == (u16)PCI_ANY_ID))
return i->reset(dev, probe);
}
return -ENOTTY;
}
zynqmp lspci
lspci来自PCI Utilities软件包,可生成libpci做定制开发。
root@zynqmp:~# lspci -h
lspci: invalid option -- 'h'
Usage: lspci [<switches>]
Basic display modes:
-mm Produce machine-readable output (single -m for an obsolete format)
-t Show bus tree
Display options:
-v Be verbose (-vv for very verbose)
-k Show kernel drivers handling each device
-x Show hex-dump of the standard part of the config space
-xxx Show hex-dump of the whole config space (dangerous; root only)
-xxxx Show hex-dump of the 4096-byte extended config space (root only)
-b Bus-centric view (addresses and IRQ's as seen by the bus)
-D Always show domain numbers
Resolving of device ID's to names:
-n Show numeric ID's
-nn Show both textual and numeric ID's (names & numbers)
-q Query the PCI ID database for unknown ID's via DNS
-qq As above, but re-query locally cached entries
-Q Query the PCI ID database for all ID's via DNS
Selection of devices:
-s [[[[<domain>]:]<bus>]:][<slot>][.[<func>]] Show only devices in selected slots
-d [<vendor>]:[<device>][:<class>] Show only devices with specified ID's
Other options:
-i <file> Use specified ID database instead of /usr/share/pci.ids.gz
-p <file> Look up kernel modules in a given file instead of default modules.pcimap
-M Enable `bus mapping' mode (dangerous; root only)
PCI access options:
-A <method> Use the specified PCI access method (see `-A help' for a list)
-O <par>=<val> Set PCI access parameter (see `-O help' for a list)
-G Enable PCI access debugging
-F <file> Read PCI configuration dump from a given file
root@zynqmp:~# lspci -tv
-[0000:00]---00.0-[01-07]----00.0-[02-07]--+-00.0-[03]----00.0 PLX Technology, Inc. Device 87b1
+-01.0-[04]----00.0 Samsung Electronics Co Ltd NVMe SSD Controller SM961/PM961
+-02.0-[05]----00.0 Samsung Electronics Co Ltd NVMe SSD Controller SM961/PM961
+-03.0-[06]----00.0 Samsung Electronics Co Ltd NVMe SSD Controller SM961/PM961
\-08.0-[07]--
注意,Xilinx ZynqMP的PS PCIe只在EP模式支持MSI-X,Root模式只有MSI。The MSI-X table and PBA are applicable only for Endpoint mode of operation and the corresponding registers are implemented in the AXI-PCIe bridge at predefined offsets.
pcie拓扑架构
内核启动过程pcie相关打印,BAR0 range小于1GB导致打印中出现no space现象。
[ 1.890187] xilinx-pcie 1000000000.axi-pcie: Using MSI FIFO mode
[ 1.896042] xilinx-pcie 1000000000.axi-pcie: PCIe Link is UP
[ 1.901638] OF: PCI: host bridge /amba_pl@0/axi-pcie@a0000000 ranges:
[ 1.908027] OF: PCI: No bus range found for /amba_pl@0/axi-pcie@a0000000, using [bus 00-ff]
[ 1.916508] OF: PCI: MEM 0xa0000000..0xafffffff -> 0xa0000000
[ 1.922453] xilinx-pcie 1000000000.axi-pcie: PCI host bridge to bus 0000:00
[ 1.929307] pci_bus 0000:00: root bus resource [bus 00-ff]
[ 1.934754] pci_bus 0000:00: root bus resource [mem 0xa0000000-0xafffffff]
[ 1.941752] pci 0000:00:00.0: bridge configuration invalid ([bus 00-00]), reconfiguring
[ 1.949891] pci 0000:01:00.0: bridge configuration invalid ([bus 00-00]), reconfiguring
[ 1.959247] pci 0000:02:00.0: bridge configuration invalid ([bus 00-00]), reconfiguring
[ 1.967062] pci 0000:02:01.0: bridge configuration invalid ([bus 00-00]), reconfiguring
[ 1.975019] pci 0000:02:02.0: bridge configuration invalid ([bus 00-00]), reconfiguring
[ 1.982976] pci 0000:02:03.0: bridge configuration invalid ([bus 00-00]), reconfiguring
[ 1.990934] pci 0000:02:08.0: bridge configuration invalid ([bus 00-00]), reconfiguring
[ 2.000660] pci 0000:00:00.0: BAR 0: no space for [mem size 0x100000000 64bit]
[ 2.007686] pci 0000:00:00.0: BAR 0: failed to assign [mem size 0x100000000 64bit]
[ 2.015210] pci 0000:00:00.0: BAR 8: assigned [mem 0xa0000000-0xa08fffff]
[ 2.021957] pci 0000:00:00.0: BAR 9: assigned [mem 0xa0900000-0xa0efffff 64bit pref]
[ 2.029657] pci 0000:00:00.0: BAR 0: no space for [mem size 0x100000000 64bit]
[ 2.036833] pci 0000:00:00.0: BAR 0: failed to assign [mem size 0x100000000 64bit]
[ 2.044359] pci 0000:00:00.0: BAR 8: assigned [mem 0xa0000000-0xa04fffff]
[ 2.051107] pci 0000:00:00.0: BAR 9: assigned [mem 0xa0500000-0xa06fffff 64bit pref]
[ 2.058808] pci 0000:00:00.0: BAR 8: reassigned [mem 0xa0700000-0xa0ffffff] (expanded by 0x400000)
[ 2.067717] pci 0000:00:00.0: BAR 9: reassigned [mem 0xa0000000-0xa05fffff 64bit pref] (expanded by 0x400000)
[ 2.077579] pci 0000:01:00.0: BAR 8: assigned [mem 0xa0700000-0xa0efffff]
[ 2.084323] pci 0000:01:00.0: BAR 9: assigned [mem 0xa0000000-0xa05fffff 64bit pref]
[ 2.092021] pci 0000:01:00.0: BAR 0: assigned [mem 0xa0f00000-0xa0f3ffff]
[ 2.098772] pci 0000:01:00.0: BAR 7: no space for [io size 0x2000]
[ 2.104996] pci 0000:01:00.0: BAR 7: failed to assign [io size 0x2000]
[ 2.111574] pci 0000:01:00.0: BAR 7: no space for [io size 0x2000]
[ 2.117798] pci 0000:01:00.0: BAR 7: failed to assign [io size 0x2000]
[ 2.124377] pci 0000:02:00.0: BAR 8: assigned [mem 0xa0700000-0xa07fffff]
[ 2.131120] pci 0000:02:01.0: BAR 8: assigned [mem 0xa0800000-0xa0afffff]
[ 2.137867] pci 0000:02:01.0: BAR 9: assigned [mem 0xa0000000-0xa01fffff 64bit pref]
[ 2.145565] pci 0000:02:02.0: BAR 8: assigned [mem 0xa0b00000-0xa0dfffff]
[ 2.152313] pci 0000:02:02.0: BAR 9: assigned [mem 0xa0200000-0xa03fffff 64bit pref]
[ 2.160011] pci 0000:02:03.0: BAR 8: assigned [mem 0xa0e00000-0xa0efffff]
[ 2.166758] pci 0000:02:01.0: BAR 7: no space for [io size 0x1000]
[ 2.172985] pci 0000:02:01.0: BAR 7: failed to assign [io size 0x1000]
[ 2.179560] pci 0000:02:02.0: BAR 7: no space for [io size 0x1000]
[ 2.185787] pci 0000:02:02.0: BAR 7: failed to assign [io size 0x1000]
[ 2.192364] pci 0000:02:02.0: BAR 7: no space for [io size 0x1000]
[ 2.198589] pci 0000:02:02.0: BAR 7: failed to assign [io size 0x1000]
[ 2.205164] pci 0000:02:01.0: BAR 7: no space for [io size 0x1000]
[ 2.211392] pci 0000:02:01.0: BAR 7: failed to assign [io size 0x1000]
[ 2.217968] pci 0000:03:00.0: BAR 0: assigned [mem 0xa0700000-0xa073ffff]
[ 2.224717] pci 0000:02:00.0: PCI bridge to [bus 03]
[ 2.229648] pci 0000:02:00.0: bridge window [mem 0xa0700000-0xa07fffff]
[ 2.236401] pci 0000:04:00.0: BAR 0: assigned [mem 0xa0800000-0xa0803fff 64bit]
[ 2.243669] pci 0000:02:01.0: PCI bridge to [bus 04]
[ 2.248591] pci 0000:02:01.0: bridge window [mem 0xa0800000-0xa0afffff]
[ 2.255337] pci 0000:02:01.0: bridge window [mem 0xa0000000-0xa01fffff 64bit pref]
[ 2.263041] pci 0000:05:00.0: BAR 0: assigned [mem 0xa0b00000-0xa0b03fff 64bit]
[ 2.270311] pci 0000:02:02.0: PCI bridge to [bus 05]
[ 2.275233] pci 0000:02:02.0: bridge window [mem 0xa0b00000-0xa0dfffff]
[ 2.281979] pci 0000:02:02.0: bridge window [mem 0xa0200000-0xa03fffff 64bit pref]
[ 2.289683] pci 0000:06:00.0: BAR 0: assigned [mem 0xa0e00000-0xa0e03fff 64bit]
[ 2.296954] pci 0000:02:03.0: PCI bridge to [bus 06]
[ 2.301876] pci 0000:02:03.0: bridge window [mem 0xa0e00000-0xa0efffff]
[ 2.308627] pci 0000:02:08.0: PCI bridge to [bus 07]
[ 2.313562] pci 0000:01:00.0: PCI bridge to [bus 02-07]
[ 2.318743] pci 0000:01:00.0: bridge window [mem 0xa0700000-0xa0efffff]
[ 2.325489] pci 0000:01:00.0: bridge window [mem 0xa0000000-0xa05fffff 64bit pref]
[ 2.333190] pci 0000:00:00.0: PCI bridge to [bus 01-07]
[ 2.338376] pci 0000:00:00.0: bridge window [mem 0xa0700000-0xa0ffffff]
[ 2.345123] pci 0000:00:00.0: bridge window [mem 0xa0000000-0xa05fffff 64bit pref]
...
[ 4.851543] nvme nvme0: pci function 0000:04:00.0
[ 4.856245] pci 0000:00:00.0: enabling device (0000 -> 0002)
[ 4.856389] nvme nvme1: pci function 0000:05:00.0
[ 4.856468] pci 0000:01:00.0: enabling device (0000 -> 0002)
[ 4.856485] pci 0000:02:02.0: enabling device (0000 -> 0002)
[ 4.856497] nvme 0000:05:00.0: enabling device (0000 -> 0002)
[ 4.856556] nvme nvme2: pci function 0000:06:00.0
[ 4.862287] pci 0000:02:03.0: enabling device (0000 -> 0002)
[ 4.862301] nvme 0000:06:00.0: enabling device (0000 -> 0002)
[ 4.899591] pci 0000:02:01.0: enabling device (0000 -> 0002)
[ 4.905229] nvme 0000:04:00.0: enabling device (0000 -> 0002)
分析一下lspci的拓扑架构,每个PCI-PCI桥接器都拥有一个主干总线接口序号以及一个二级总线接口序号。主干总线是那个离CPU最近的PCI总线而二级总线是离它稍远的PCI总线。任何PCI-PCI桥接器还包含一个从属总线序号,这是所有二级总线接口所桥接的PCI总线中序号最大的那个。或者说这个从属总线序号是PCI-PCI桥接器向下连接中PCI总线的最大序号。位于PCI-PCI桥接器后所有的PCI总线必须位于二级总线序号和从属总线序号之间,使用深度优先搜索算法搜索拓扑。
每个pci总线控制器和pci设备相连时,有一根IDSEL信号线。IDSEL信号,一端接在pci设备的IDSEL端,另一头接在pci总线控制器的pci地址线AD[31:11]其中的一个(不会有2根IDSEL接在同一条AD[31:11]上)。这个独特的连接,决定了pci设备的slot号。每个pci物理设备,可能含有多个逻辑设备,就是func号。由总线号+槽位号+功能号组成的唯一的32位地址区分开pci的设备空间。
p2p: pci-to-pci bridge
zynqmp cortex-a53*4
|bus[0]
axi-pcie host bridge
|bus[1]
PLX8724 P2P
|bus[2]
---------------------------------
|slot[0]|slot[1]|slot[2]|slot[3]|slot[8]
P2P P2P P2P P2P P2P
|bus[3] |bus[4] |bus[5] |bus[6] |bus[7]
PLX SSD SSD SSD SSD
# 3,4,5,6,7看着像广度搜索,是由于3,4,5,6下面均无bridge。
root -> pci_bus[0] -> pci_dev
| axi-pcie[1]
pci_bus[1] -> pci_dev
| p2p[2]
pci_bus[2] -> pci_dev -> pci_dev -> pci_dev -> pci_dev -> pci_dev
| p2p[3] p2p[4] p2p[5] p2p[6] p2p[7]
pci_bus[3] -> pci_dev
| plx software port
pci_bus[4] -> pci_dev
| nvme
pci_bus[5] -> pci_dev
| nvme
pci_bus[6] -> pci_dev
| nvme
pci_bus[7] -> pci_dev
nvme
pcie sysfs
重新扫描,一般需要重新扫描的时候,基本系统挂了,扫描也没用
root@zynqmp:~# echo 1 > /sys/bus/pci/rescan
移植20180806到20171213
移植linux-xlnx-20180806内核pci文件夹、linux/include/pci.h到linux-xlnx-20171213内核,编译错误,
net/core/rtnetlink.c:842:17: error: implicit declaration of function ‘dev_num_vf’ [-Werror=implicit-function-declaration]
int num_vfs = dev_num_vf(dev->dev.parent);
drivers/nvme/host/pci.c:2124:2: error: unknown field ‘reset_notify’ specified in initializer
.reset_notify = nvme_reset_notify,
在pci-20180806.h文件line934添加#define dev_num_vf(d) ((dev_is_pci(d) ? pci_num_vf(to_pci_dev(d)) : 0)),然后注释line2124,再次编译出现错误,
drivers/pci/access.c:4:32: fatal error: linux/sched/signal.h: No such file or directory
#include <linux/sched/signal.h>
又产生新错误,放弃。。。
内核代码牵一发而动全身,花式作死
移植host驱动
换一种思路,只移植host的驱动,报错,
drivers/pci/host/pcie-xdma-pl.c:266:11: error: ‘pci_irqd_intx_xlate’ undeclared here (not in a function)
.xlate = pci_irqd_intx_xlate,
^~~~~~~~~~~~~~~~~~~
drivers/pci/host/pcie-xdma-pl.c: In function ‘xilinx_pcie_probe’:
drivers/pci/host/pcie-xdma-pl.c:738:11: error: implicit declaration of function ‘devm_pci_alloc_host_bridge’ [-Werror=implicit-function-declaration]
bridge = devm_pci_alloc_host_bridge(dev, sizeof(*port));
^~~~~~~~~~~~~~~~~~~~~~~~~~
drivers/pci/host/pcie-xdma-pl.c:738:9: warning: assignment makes pointer from integer without a cast [-Wint-conversion]
bridge = devm_pci_alloc_host_bridge(dev, sizeof(*port));
^
drivers/pci/host/pcie-xdma-pl.c:742:9: error: implicit declaration of function ‘pci_host_bridge_priv’ [-Werror=implicit-function-declaration]
port = pci_host_bridge_priv(bridge);
^~~~~~~~~~~~~~~~~~~~
drivers/pci/host/pcie-xdma-pl.c:742:7: warning: assignment makes pointer from integer without a cast [-Wint-conversion]
port = pci_host_bridge_priv(bridge);
^
drivers/pci/host/pcie-xdma-pl.c:773:8: error: ‘struct pci_host_bridge’ has no member named ‘sysdata’
bridge->sysdata = port;
^~
drivers/pci/host/pcie-xdma-pl.c:774:8: error: ‘struct pci_host_bridge’ has no member named ‘busnr’; did you mean ‘bus’?
bridge->busnr = port->root_busno;
^~
drivers/pci/host/pcie-xdma-pl.c:775:8: error: ‘struct pci_host_bridge’ has no member named ‘ops’
bridge->ops = &xilinx_pcie_ops;
^~
drivers/pci/host/pcie-xdma-pl.c:776:8: error: ‘struct pci_host_bridge’ has no member named ‘map_irq’
bridge->map_irq = of_irq_parse_and_map_pci;
^~
drivers/pci/host/pcie-xdma-pl.c:777:8: error: ‘struct pci_host_bridge’ has no member named ‘swizzle_irq’
bridge->swizzle_irq = pci_common_swizzle;
^~
drivers/pci/host/pcie-xdma-pl.c:779:8: error: implicit declaration of function ‘pci_scan_root_bus_bridge’ [-Werror=implicit-function-declaration]
err = pci_scan_root_bus_bridge(bridge);
^~~~~~~~~~~~~~~~~~~~~~~~
cc1: some warnings being treated as errors
line266复制20180806的pci_irqd_intx_xlate函数进来,
移植devm_pci_alloc_host_bridge函数时发现,pci host bridge的结构体变化太大,移植成功的概率太小,感觉不可行了。
linkup检测
{
	/*
	 * Optional GPIO-based PCIe link-up detection, polled before the
	 * host bridge touches config space. Without it, probing with the
	 * link down caused an RCU stall:
	 * [   23.082370] INFO: rcu_sched detected stalls on CPUs/tasks:
	 * [   23.087664] 3-...: (1 GPs behind) idle=222/140000000000000/0 softirq=365/365 fqs=2366
	 * [   23.095617] (detected by 2, t=5255 jiffies, g=-279, c=-280, q=94)
	 * [   23.101760] Task dump for CPU 3:
	 * [   23.104960] swapper/0 R running task 0 1 0 0x00000002
	 * [   23.111967] Call trace:
	 * [   23.114397] [<ffffff8008085800>] __switch_to+0x98/0xb0
	 * [   23.119497] [<ffffff800840f488>] devm_ioremap+0x58/0xb0
	 */
	struct gpio_desc *gpiod_linkup;
	u32 linkup_waitms = 0;

	/* "linkup" GPIO is optional: NULL when absent, ERR_PTR on real error. */
	gpiod_linkup = devm_gpiod_get_optional(dev, "linkup", GPIOD_IN);
	if (IS_ERR(gpiod_linkup))
		return PTR_ERR(gpiod_linkup);
	if (gpiod_linkup) {
		/* DT property "linkup-waitms" overrides the 1000 ms default. */
		if (of_property_read_u32(node, "linkup-waitms", &linkup_waitms))
			linkup_waitms = 1000;
		while (linkup_waitms > 0) {
			if (gpiod_get_value(gpiod_linkup) == 1)
				break;
			/*
			 * BUGFIX: the original loop decremented the counter
			 * without sleeping, so it busy-spun and the count
			 * measured iterations, not milliseconds. Sleep ~1 ms
			 * per iteration so the timeout is real wall time and
			 * the CPU is yielded (avoids the RCU stall above).
			 */
			msleep(1);
			linkup_waitms--;
		}
		if (linkup_waitms == 0) {
			dev_info(port->dev, "linkup-gpio: PCIe Link is DOWN\n");
			return -EINVAL;
		} else {
			dev_info(port->dev, "linkup-gpio: PCIe Link is UP [%u]\n", linkup_waitms);
		}
	} else {
		dev_info(port->dev, "linkup-gpio not found\n");
	}
}