/* pcie_udrv.c - udrv PCIe host controller platform driver */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/platform_device.h>
#include <linux/of_device.h>
#include <linux/pci.h>
#include <linux/pci-ecam.h>
#include <linux/kallsyms.h>
#include <linux/err.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/fwnode.h>
#include <linux/acpi.h>
#include <linux/idr.h>
#include <linux/version.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
#include <linux/reset-controller.h>
#include <linux/reset.h>
#else
#include "../subctrl/include.linux/reset-controller.h"
#include "../subctrl/include.linux/reset.h"
#endif
#include <linux/clk.h>
#include <linux/version.h>
#include "pcie_udrv.h"
LIST_HEAD(pcie_host_list);
static int udrv_pcie_host_ep_init(struct platform_device *pdev);
#define copy_resource(dst, src, name_) do { \
(dst)->start = (src)->start; \
(dst)->end = (src)->end; \
(dst)->flags = (src)->flags; \
(dst)->name = name_; \
} while (0)
#define UDRV_MAX_PCIE_HOST_NUM 16
#define PCIE_DP_PORT 2
#define PCIE_UP_PORT 3
#define PCIE_MAX_FMEA_DEV_NUM 12
DEFINE_IDR(pcie_idr);
/*
 * Mask one MSI vector: first at the PCI device level, then in the parent
 * (bottom) irq domain so the controller-side mask stays in sync.
 */
static void udrv_pcie_msi_mask_irq(struct irq_data *data)
{
    pci_msi_mask_irq(data);
    irq_chip_mask_parent(data);
}
/*
 * Unmask one MSI vector: PCI device level first, then the parent (bottom)
 * irq domain. Mirror of udrv_pcie_msi_mask_irq().
 */
static void udrv_pcie_msi_unmask_irq(struct irq_data *data)
{
    pci_msi_unmask_irq(data);
    irq_chip_unmask_parent(data);
}
/* Top-level irq_chip installed on the PCI-MSI domain (see udrv_pcie_msi_domain_info). */
static struct irq_chip udrv_pcie_msi_irq_chip = {
    .name = "udrv_msi",
    .irq_mask = udrv_pcie_msi_mask_irq,
    .irq_unmask = udrv_pcie_msi_unmask_irq,
};
/* MSI domain description: default ops/chip helpers, MSI-X and multi-MSI capable. */
static struct msi_domain_info udrv_pcie_msi_domain_info = {
    .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
    .chip = &udrv_pcie_msi_irq_chip,
};
/*
 * Demultiplex the controller's MSI status register and dispatch each pending
 * vector to its mapped Linux interrupt. All vectors are masked for the
 * duration of the scan and unmasked again at the end, so new arrivals are
 * deferred rather than lost (each is cleared before being handled).
 */
static void udrv_pcie_handle_msi_irq(struct udrv_pcie_host *pcie)
{
    uint32_t status = 0;
    unsigned long bit_pos = 0, status_u64 = 0;
    uint32_t irq;
    handle pf_handle = pcie->host_info.pf_handle;
    uint32_t ret = ndrv_pcie_host_get_msi_status(pf_handle, &status);
    /* Nothing pending (or the read failed): leave masks untouched. */
    if ((ret != 0) || (status == 0)) {
        return;
    }
    /* Widen to unsigned long so find_next_bit() can walk the word. */
    status_u64 = (unsigned long)status;
    (void)ndrv_pcie_host_msi_mask_all(pf_handle, 0xFFFFFFFF);
    while ((bit_pos = find_next_bit(&status_u64, UDRV_MAX_MSI_IRQS, bit_pos)) != UDRV_MAX_MSI_IRQS) {
        irq = irq_find_mapping(pcie->msi.irq_domain, bit_pos);
        /* Ack in hardware before running the handler to avoid re-latching. */
        (void)ndrv_pcie_host_msi_clear(pf_handle, (u32)bit_pos);
        generic_handle_irq(irq);
        bit_pos++;
    }
    (void)ndrv_pcie_host_msi_mask_all(pf_handle, 0);
}
/*
 * Chained handler for the host's single summary MSI line; brackets the
 * per-vector demux with chained_irq_enter()/exit() as the flow API requires.
 */
static void udrv_pcie_handle_chained_msi_irq(struct irq_desc *desc)
{
    struct irq_chip *parent_chip = irq_desc_get_chip(desc);
    struct udrv_pcie_host *host = irq_desc_get_handler_data(desc);

    chained_irq_enter(parent_chip, desc);
    udrv_pcie_handle_msi_irq(host);
    chained_irq_exit(parent_chip, desc);
}
/*
 * Build the MSI message for one vector: address is the host's doorbell
 * (msi->msi_addr from the "msi" DT resource), data is the hw irq number.
 */
static void udrv_pcie_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
    struct udrv_pcie_host *pcie = irq_data_get_irq_chip_data(data);
    struct udrv_pcie_host_msi_info *msi = &pcie->msi;
    msg->address_lo = lower_32_bits(msi->msi_addr);
    msg->address_hi = upper_32_bits(msi->msi_addr);
    msg->data = (u32)data->hwirq;
    dev_info(pcie->dev, "[iWare][Info] msi#%d address_hi %#x address_lo %#x\n", (int)data->hwirq,
             msg->address_hi, msg->address_lo);
}
/*
 * .irq_set_affinity callback of the bottom MSI irq_chip; invoked when
 * userspace writes /proc/irq/N/smp_affinity[_list].
 *
 * Fix: the original had an unreachable `return -EINVAL;` after `return 0;`
 * (dead code) — removed.
 *
 * NOTE(review): irq_data->device->pci_dev, pdev->msix_entries_count,
 * pdev->msix_entries and pdev->msi_list_lock do not exist on mainline
 * struct irq_data / struct pci_dev — this only compiles against a patched
 * kernel; confirm the field names against the target tree. On mainline the
 * usual implementation is simply irq_chip_set_affinity_parent(), and the
 * callback must return IRQ_SET_MASK_OK/0 for the /proc write to succeed.
 */
static int udrv_pcie_msi_set_affinity(struct irq_data *irq_data, const struct cpumask *mask, bool force)
{
    struct pci_dev *pdev;
    int irq = irq_data->irq;
    unsigned int msi_index;
    unsigned long flags;

    /* Resolve the PCI device and MSI index behind this irq_data. */
    pdev = irq_data->device->pci_dev;
    if (!pdev) {
        pr_err("Failed to get pci_dev\n");
        return -EINVAL;
    }
    msi_index = irq_data->msi_index;
    if (msi_index >= pdev->msix_entries_count) {
        pr_err("Invalid MSI index\n");
        return -EINVAL;
    }
    /* Retarget the Linux interrupt to the requested CPU mask. */
    if (irq_set_affinity(irq, mask)) {
        pr_err("Failed to set irq affinity for irq %d\n", irq);
        return -EINVAL;
    }
    /* Record the new affinity in the per-device MSI bookkeeping. */
    spin_lock_irqsave(&pdev->msi_list_lock, flags);
    pdev->msix_entries[msi_index].affinity = mask;
    spin_unlock_irqrestore(&pdev->msi_list_lock, flags);
    return 0;
}
/*
 * Mask one vector in the controller's per-bit MSI mask register.
 * (Name keeps the original "bottm" spelling; it is referenced by
 * udrv_pcie_msi_bottom_irq_chip below.)
 */
static void udrv_pcie_bottm_mask(struct irq_data *data)
{
    struct udrv_pcie_host *pcie = irq_data_get_irq_chip_data(data);
    struct udrv_pcie_host_msi_info *msi = &pcie->msi;
    handle pf_handle = pcie->host_info.pf_handle;
    unsigned long flags;
    /* msi->lock serializes read-modify-write of the shared mask register. */
    raw_spin_lock_irqsave(&msi->lock, flags);
    ndrv_pcie_host_msi_mask_one(pf_handle, (u32)data->hwirq, 1);
    raw_spin_unlock_irqrestore(&msi->lock, flags);
}
/* Unmask one vector in the controller's MSI mask register; mirror of udrv_pcie_bottm_mask(). */
static void udrv_pcie_bottm_unmask(struct irq_data *data)
{
    struct udrv_pcie_host *pcie = irq_data_get_irq_chip_data(data);
    struct udrv_pcie_host_msi_info *msi = &pcie->msi;
    handle pf_handle = pcie->host_info.pf_handle;
    unsigned long flags;
    raw_spin_lock_irqsave(&msi->lock, flags);
    ndrv_pcie_host_msi_mask_one(pf_handle, (u32)data->hwirq, 0);
    raw_spin_unlock_irqrestore(&msi->lock, flags);
}
/* Bottom (inner-domain) irq_chip: composes the MSI message, handles mask and affinity. */
static struct irq_chip udrv_pcie_msi_bottom_irq_chip = {
    .name = "udrv_bottom_msi",
    .irq_compose_msi_msg = udrv_pcie_compose_msi_msg,
    .irq_set_affinity = udrv_pcie_msi_set_affinity,
    .irq_mask = udrv_pcie_bottm_mask,
    .irq_unmask = udrv_pcie_bottm_unmask,
};
/*
 * .alloc for the bottom irq domain: reserve a power-of-two aligned region of
 * hw vectors (multi-MSI requires contiguous, aligned vectors) and bind each
 * one to the bottom irq_chip.
 * Note: the region reserved is 2^order_base_2(num_irqs) bits even though only
 * num_irqs descriptors are populated — intentional for multi-MSI alignment.
 * (Name keeps the legacy "dw_" prefix; referenced by udrv_pcie_msi_domain_ops.)
 */
static int dw_pcie_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, unsigned int num_irqs, void *args)
{
    unsigned long flags;
    struct udrv_pcie_host *pcie = domain->host_data;
    struct udrv_pcie_host_msi_info *msi = &pcie->msi;
    int bit;
    uint32_t i;
    raw_spin_lock_irqsave(&msi->lock, flags);
    bit = bitmap_find_free_region(msi->msi_irq_in_use_bits, UDRV_MAX_MSI_IRQS, order_base_2(num_irqs));
    raw_spin_unlock_irqrestore(&msi->lock, flags);
    if (bit < 0) {
        return -ENOSPC;
    }
    for (i = 0; i < num_irqs; i++) {
        irq_domain_set_info(domain, virq + i, bit + i, &udrv_pcie_msi_bottom_irq_chip, pcie,
                            handle_level_irq, NULL, NULL);
    }
    return 0;
}
/*
 * .free for the bottom irq domain: release the bitmap region that was
 * reserved by dw_pcie_irq_domain_alloc() (the first virq's hwirq is the
 * region base), then reset each descriptor.
 */
static void udrv_pcie_irq_domain_free(struct irq_domain *domain, unsigned int virq, unsigned int num_irqs)
{
    unsigned long flags;
    struct udrv_pcie_host *pcie = domain->host_data;
    struct udrv_pcie_host_msi_info *msi = &pcie->msi;
    struct irq_data *data = irq_domain_get_irq_data(domain, virq);
    uint32_t i;
    raw_spin_lock_irqsave(&msi->lock, flags);
    bitmap_release_region(msi->msi_irq_in_use_bits, (u32)data->hwirq, order_base_2(num_irqs));
    raw_spin_unlock_irqrestore(&msi->lock, flags);
    for (i = 0; i < num_irqs; i++) {
        data = irq_domain_get_irq_data(domain, virq + i);
        irq_domain_reset_irq_data(data);
    }
}
/* Alloc/free ops of the bottom (linear) irq domain. */
static const struct irq_domain_ops udrv_pcie_msi_domain_ops = {
    .alloc = dw_pcie_irq_domain_alloc,
    .free = udrv_pcie_irq_domain_free,
};
/*
 * Tear down MSI handling: mask all vectors in hardware, then — only for the
 * non-ITS (chained-handler) mode — detach the chained handler and destroy
 * both irq domains. In ITS mode the domains were never created here.
 */
static void udrv_pcie_remove_msi(struct udrv_pcie_host *pcie)
{
    handle pf_handle = pcie->host_info.pf_handle;
    struct udrv_pcie_host_msi_info *msi = &pcie->msi;
    (void)ndrv_pcie_host_msi_mask_all(pf_handle, 0xFFFFFFFF);
    if (msi->msi_trans_type == MSI_ITS) {
        return;
    }
    irq_set_chained_handler((u32)msi->irq, NULL);
    irq_set_handler_data((u32)msi->irq, NULL);
    irq_domain_remove(msi->msi_domain);
    irq_domain_remove(msi->irq_domain);
}
static int udrv_pcie_allocate_msi_domains(struct udrv_pcie_host *pcie)
{
struct udrv_pcie_host_msi_info *msi = &pcie->msi;
struct fwnode_handle *fwnode = of_node_to_fwnode(pcie->dev->of_node);
msi->irq_domain = irq_domain_create_linear(fwnode, UDRV_MAX_MSI_IRQS, &udrv_pcie_msi_domain_ops, pcie);
if (!msi->irq_domain) {
dev_err(pcie->dev, "[iWare][Error] irq_domain_create_linear fail\n");
return -ENOMEM;
}
msi->msi_domain = pci_msi_create_irq_domain(fwnode, &udrv_pcie_msi_domain_info, msi->irq_domain);
if (!msi->msi_domain) {
irq_domain_remove(msi->irq_domain);
dev_err(pcie->dev, "[iWare][Error] pci_msi_create_irq_domain fail\n");
return -ENOMEM;
}
return 0;
}
/*
 * Read MSI configuration from DT: the "msi" doorbell address resource,
 * the translation type (defaults to MSI_ITS when absent), and — for the
 * chained mode only — the summary interrupt.
 */
static int udrv_pcie_get_msi_info_from_dt(struct udrv_pcie_host *pcie)
{
    struct device *dev = pcie->dev;
    struct udrv_pcie_host_msi_info *msi = &pcie->msi;
    struct resource *res = NULL;
    struct platform_device *pdev = to_platform_device(dev);
    res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "msi");
    if (!res) {
        dev_err(dev, "[iWare][Error] get msi address fail\n");
        return -EINVAL;
    }
    /* NOTE(review): cast assumes msi_trans_type is a 32-bit enum — confirm. */
    if (of_property_read_u32(dev->of_node, "msi-trans-type", (u32 *)(uintptr_t)(&msi->msi_trans_type)) != 0) {
        msi->msi_trans_type = MSI_ITS;
    }
    msi->msi_addr = res->start;
    if (msi->msi_trans_type == MSI_ITS) {
        return 0;
    }
    msi->irq = platform_get_irq(pdev, 0);
    /* NOTE(review): a missing irq is tolerated here, but udrv_pcie_msi_init()
     * still installs a chained handler on msi->irq — verify that path. */
    if (msi->irq <= 0) {
        dev_info(dev, "[iWare][Info] no msi irq,now jump\n");
        return 0;
    }
    return 0;
}
/* Free every ATU record collected from the DT "ranges" property. */
static void udrv_pcie_free_atu_info(struct udrv_pcie_host *pcie)
{
    struct udrv_pcie_atu_info *cur = NULL;
    struct udrv_pcie_atu_info *next = NULL;

    list_for_each_entry_safe(cur, next, &pcie->host_info.atu_info.entry, entry) {
        list_del(&cur->entry);
        kfree(cur);
    }
}
/* Free every per-port record and release its lport-id reservation in pcie_idr. */
static void udrv_pcie_free_port_info(struct udrv_pcie_host *pcie)
{
    struct udrv_pcie_port_info *cur = NULL;
    struct udrv_pcie_port_info *next = NULL;

    list_for_each_entry_safe(cur, next, &pcie->port_info.entry, entry) {
        if (cur->idr >= 0) {
            idr_remove(&pcie_idr, cur->idr);
        }
        list_del(&cur->entry);
        kfree(cur);
    }
}
/*
 * Parse the DT "ranges" property into a list of ATU windows (MEM and IO
 * ranges only; atu_mode 0 = MEM, 1 = IO).
 * On failure this also closes the PF handle that the caller
 * (udrv_pcie_get_host_info_from_dt) opened, because probe's err_host path
 * does not close it — keep that ownership convention in mind when editing.
 */
static int udrv_pcie_get_atu_info_from_dt(struct udrv_pcie_host *pcie)
{
    struct device *dev = pcie->dev;
    struct udrv_pcie_atu_info *atu_info = NULL;
    struct list_head *head = &pcie->host_info.atu_info.entry;
    struct of_pci_range_parser parser;
    struct of_pci_range range;
    int ret = of_pci_range_parser_init(&parser, dev->of_node);
    if (ret != 0) {
        dev_err(dev, "[iWare][Error] parser range failed\n");
        goto err_range;
    }
    for_each_of_pci_range(&parser, &range)
    {
        /* Skip ranges that are neither IO nor MEM (e.g. config space). */
        if (((range.flags & IORESOURCE_TYPE_BITS) != IORESOURCE_IO) &&
            ((range.flags & IORESOURCE_TYPE_BITS) != IORESOURCE_MEM)) {
            continue;
        }
        atu_info = kzalloc(sizeof(struct udrv_pcie_atu_info), GFP_KERNEL);
        if (atu_info == NULL) {
            ret = -ENOMEM;
            goto fail;
        }
        atu_info->cpu_addr = range.cpu_addr;
        atu_info->pcie_addr = range.pci_addr;
        atu_info->size = range.size;
        atu_info->atu_mode = ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_MEM) ? 0 : 1;
        list_add_tail(&atu_info->entry, head);
    }
    return 0;
fail:
    udrv_pcie_free_atu_info(pcie);
err_range:
    ndrv_pcie_close_pf(pcie->host_info.pf_handle, pcie->host_info.core_version);
    return ret;
}
/*
 * Read scalar host properties from DT: type-support mask (defaults to RP
 * mode when absent), clock/reset counts, core version (mandatory) and
 * rc_mode (defaults to NDRV_PCIE_RC_NORMAL).
 */
static int udrv_pcie_get_host_property_info_from_dt(struct device *dev, struct udrv_pcie_host_info *host_info)
{
    /* NOTE(review): the u32* casts assume these fields are 32-bit — confirm. */
    if (of_property_read_u32(dev->of_node, "type-support", (u32 *)(uintptr_t)(&host_info->type_support_mask)) != 0) {
        dev_info(dev, "[iWare] no declare type support, default rp mode\n");
        set_bit(NDRV_PCIE_RP_MODE, &(host_info->type_support_mask));
    }
    if (of_property_read_u32(dev->of_node, "clocks_num", &host_info->clk_num) != 0) {
        dev_err(dev, "[iWare][Error] Faild read clk_num\n");
        return -EINVAL;
    }
    if (of_property_read_u32(dev->of_node, "resets_num", &host_info->rst_num) != 0) {
        dev_err(dev, "[iWare][Error] Faild read rsts_num\n");
        return -EINVAL;
    }
    if (of_property_read_u32(dev->of_node, "core-version", &host_info->core_version) != 0) {
        dev_err(dev, "[iWare][Error] Faild to read core version\n");
        return -EINVAL;
    }
    if (of_property_read_u32(dev->of_node, "rc_mode", (u32 *)(uintptr_t)(&host_info->rc_mode)) != 0) {
        host_info->rc_mode = NDRV_PCIE_RC_NORMAL;
    }
    return 0;
}
/*
 * Gather all host-level info from DT: domain id, scalar properties, the
 * "dbi" register window (remapped), then open the ndrv PF handle and parse
 * the ATU ranges.
 *
 * Fixes: (1) "&region" had been corrupted to the mojibake "(R)ion" — this
 * did not compile; (2) devm_pci_remap_cfgspace() returns NULL on failure,
 * which the original IS_ERR() check missed — use IS_ERR_OR_NULL().
 */
static int udrv_pcie_get_host_info_from_dt(struct udrv_pcie_host *pcie)
{
    struct device *dev = pcie->dev;
    struct platform_device *pdev = to_platform_device(dev);
    struct udrv_pcie_host_info *host_info = &pcie->host_info;
    struct resource *apb_res = NULL;
    struct io_region region;
    int ret;

    if (of_property_read_u32(dev->of_node, "linux,pci-domain", &host_info->host_id) != 0) {
        dev_err(dev, "[iWare][Error] Faild read pci-domain\n");
        return -EINVAL;
    }
    if (host_info->host_id >= UDRV_MAX_PCIE_HOST_NUM) {
        dev_err(dev, "[iWare][Error] Invalid domain nr = 0x%x\n", host_info->host_id);
        return -EINVAL;
    }
    ret = udrv_pcie_get_host_property_info_from_dt(dev, host_info);
    if (ret != 0) {
        return ret;
    }
    apb_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
    if (apb_res == NULL) {
        dev_err(dev, "[iWare][Error] Faild to get dbi address\n");
        return -EINVAL;
    }
    host_info->apb_base = devm_pci_remap_cfgspace(dev, apb_res->start, resource_size(apb_res));
    host_info->apb_paddr = apb_res->start;
    host_info->apb_size = (u32)resource_size(apb_res);
    /* Covers both NULL (mainline failure return) and ERR_PTR. */
    if (IS_ERR_OR_NULL(host_info->apb_base)) {
        dev_err(dev, "[iWare][Error] Faild to remap apb_base\n");
        return -EINVAL;
    }
    region.io_base = host_info->apb_base;
    region.io_size = host_info->apb_size;
    host_info->pf_handle = ndrv_pcie_open_pf(&region, host_info->core_version, host_info->rc_mode);
    if (host_info->pf_handle == NULL) {
        dev_err(pcie->dev, "[iWare][Error] ndrv pcie_open_pf fail\n");
        return -EINVAL;
    }
    return udrv_pcie_get_atu_info_from_dt(pcie);
}
/*
 * Host-level hardware init: for RP-capable hosts, program one TX ATU per
 * parsed range and the ECAM window, then enable the AP path.
 *
 * Fix: atu_id was incremented *before* the error log, so a failure on ATU i
 * was reported as ATU i+1. The increment now happens after the check.
 */
static int udrv_pcie_host_init(struct udrv_pcie_host *pcie)
{
    u32 ret;
    u32 atu_id = 0;
    struct ndrv_pcie_ecam_cfg_info ecam_cfg;
    struct ndrv_pcie_atu_cfg_info atu_cfg;
    struct udrv_pcie_host_info *host_info = &pcie->host_info;
    struct udrv_pcie_atu_info *pos = NULL;
    struct list_head *head = &pcie->host_info.atu_info.entry;

    /* Only RP-capable hosts need the TX ATUs and the ECAM window. */
    if (test_bit(NDRV_PCIE_RP_MODE, &(host_info->type_support_mask)) != 0) {
        list_for_each_entry(pos, head, entry)
        {
            /* NOTE(review): atu_cfg/ecam_cfg are stack structs with only the
             * listed fields set — confirm ndrv reads no other fields. */
            atu_cfg.tx_src_base_addr = pos->cpu_addr;
            atu_cfg.tx_dst_base_addr = pos->pcie_addr;
            atu_cfg.tx_region_size = pos->size;
            atu_cfg.atu_mode = pos->atu_mode;
            ret = ndrv_pcie_host_ap_atu_init(host_info->pf_handle, &atu_cfg, atu_id);
            if (ret != 0) {
                pr_err("[iWare][Error] init atu:0x%x failed, ret=%u\n", atu_id, ret);
                return -EINVAL;
            }
            ++atu_id;
        }
        ecam_cfg.ecam_base_addr_l = (u32)(host_info->ecam_res.start);
        ecam_cfg.ecam_base_addr_h = (u32)(host_info->ecam_res.start >> 32); /* high 32 bits */
        ecam_cfg.ecam_base_size = (u32)resource_size(&host_info->ecam_res);
        ecam_cfg.ecam_start_bus_no = (u32)host_info->bus_res.start;
        ret = ndrv_pcie_host_ap_ecam_init(host_info->pf_handle, &ecam_cfg);
        if (ret != 0) {
            pr_err("[iWare][Error] init ap ecam failed, ret=%u\n", ret);
            return -EINVAL;
        }
    }
    ret = ndrv_pcie_host_ap_enable(host_info->pf_handle, 1);
    if (ret != 0) {
        pr_err("[iWare][Error] ap enable failed, ret=%u\n", ret);
        return -EINVAL;
    }
    return 0;
}
#ifdef CONFIG_UDRV_FMEA
/*
 * Register this host with the FMEA (fault management) framework using the
 * item table exported by the ndrv PF.
 * NOTE(review): dev_info and cfg are stack structs with only some fields
 * set — confirm the callees read no other fields.
 */
static int pcie_fmea_init(struct udrv_pcie_host *pcie)
{
    int ret;
    struct fmea_dev_info dev_info;
    const struct ndrv_fmea_item_info *item_table;
    struct ndrv_pcie_fmea_open_cfg cfg;
    dev_info.dev = pcie->dev;
    dev_info.pdev_id = pcie->host_info.host_id;
    dev_info.name = FMEA_MODULE_NAME(pcie);
    cfg.paddr = pcie->host_info.apb_paddr;
    item_table = ndrv_pcie_pf_get_fmea_table(pcie->host_info.pf_handle, &cfg);
    if (item_table == NULL) {
        dev_err(pcie->dev, "[iWare][Error] pcie get_fmea_table fail\n");
        return -EINVAL;
    }
    ret = kdrv_fmea_iomm_unit_list_init(&dev_info, item_table, &pcie->iomm_info);
    if (ret != 0) {
        dev_err(pcie->dev, "[iWare][Error] kdrv fmea_iomm_unit_list_init fail\n");
        return -EINVAL;
    }
    return 0;
}
/* Unregister this host from the FMEA framework; mirror of pcie_fmea_init(). */
static void pcie_fmea_deinit(struct udrv_pcie_host *pcie)
{
    kdrv_fmea_iomm_unit_list_deinit(&pcie->iomm_info);
}
/*
 * Exported FMEA query entry: resolve the host by id, bounds-check the group,
 * and forward to the generic kdrv_fmea_entry().
 * Returns -ENODEV if no host matches, -ERANGE for an out-of-range group.
 */
int kdrv_pcie_fmea_entry(u32 pcie_id, u32 group_id, u64 *err_info, u32 *alm_flg, char *buf, u32 size)
{
    struct udrv_pcie_host *pcie = NULL;
    struct fmea_iomm_unit *pcie_fmobj = NULL;
    pcie = hisi_pcie_get_by_host_id(pcie_id);
    if (pcie == NULL) {
        pr_err("[iWare][Error] [pcie fmea entry], get pcie failed\n");
        return -ENODEV;
    }
    if (group_id >= pcie->iomm_info.group_num) {
        return -ERANGE;
    }
    pcie_fmobj = &pcie->iomm_info.iomms[group_id];
    /* NOTE(review): &array[i] is never NULL, so this guard is effectively
     * dead unless iomms is a NULL pointer and group_id == 0 — review. */
    if (!pcie_fmobj) {
        return -ENODEV;
    }
    return kdrv_fmea_entry(pcie_fmobj, err_info, alm_flg, buf, size);
}
EXPORT_SYMBOL(kdrv_pcie_fmea_entry);
/* Report the number of FMEA groups for @pcie through @num; -EINVAL on NULL args. */
int kdrv_pcie_get_fmea_group_num_(struct udrv_pcie_host *pcie, uint32_t *num)
{
    if (!pcie || !num) {
        return -EINVAL;
    }
    *num = pcie->iomm_info.group_num;
    return 0;
}
/* Report the fixed FMEA device count through @num; -EINVAL on NULL args. */
int kdrv_pcie_get_fmea_dev_num_(struct udrv_pcie_host *pcie, uint32_t *num)
{
    if (!pcie || !num) {
        return -EINVAL;
    }
    *num = PCIE_MAX_FMEA_DEV_NUM;
    return 0;
}
#endif
/*
 * .init callback of the ECAM ops, invoked by the kernel host-bridge
 * framework once the config window is mapped. Records the "config" resource
 * and bus range for udrv_pcie_host_init(), then runs port, host and MSI
 * initialization in that order.
 */
static int udrv_pcie_host_ecam_init(struct pci_config_window *cfg_w)
{
    int ret;
    struct platform_device *pdev = to_platform_device(cfg_w->parent);
    struct udrv_pcie_host *pcie = platform_get_drvdata(pdev);
    struct udrv_pcie_host_info *host_info = &pcie->host_info;
    struct resource *cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
    if (!cfg_res) {
        pr_err("[iWare]cfg_res is null\n");
        return -EINVAL;
    }
    copy_resource(&host_info->ecam_res, cfg_res, "config");
    copy_resource(&host_info->bus_res, &cfg_w->busr, "bus");
#ifdef CONFIG_UDRV_DEBUG
    udrv_pcie_show_host_info(pcie);
#endif
    ret = pcie->ops->port_ops->init(pcie);
    if (ret != 0) {
        pr_err("[iWare][Error] port init failed,ret=%d\n", ret);
        return ret;
    }
    ret = pcie->ops->host_ops->init(pcie);
    if (ret != 0) {
        pr_err("[iWare][Error] host init failed,ret=%d\n", ret);
        return ret;
    }
    ret = pcie->ops->msi_ops->init(pcie);
    if (ret != 0) {
        pr_err("[iWare][Error] msi init failed,ret=%d\n", ret);
    }
    return ret;
}
/*
 * Init path for hosts that only support the EP scenario: no host bridge is
 * created, only the silicon is initialized (port, then host, then MSI).
 */
static int udrv_pcie_host_ep_init(struct platform_device *pdev)
{
    struct udrv_pcie_host *pcie = platform_get_drvdata(pdev);
    int ret;

#ifdef CONFIG_UDRV_DEBUG
    udrv_pcie_show_host_info(pcie);
#endif
    ret = pcie->ops->port_ops->init(pcie);
    if (ret == 0) {
        ret = pcie->ops->host_ops->init(pcie);
    }
    if (ret == 0) {
        ret = pcie->ops->msi_ops->init(pcie);
    }
    return ret;
}
/*
 * Read the switch-specific DT properties (all mandatory) for DP/UP ports:
 * owning switch core, downstream-port bitmaps and upstream core id.
 */
static int udrv_pcie_get_switch_info(struct device *dev, struct device_node *child,
                                     struct udrv_pcie_port_info *port_info)
{
    if (of_property_read_u32(child, "switch-core-id", &port_info->switch_core_id) != 0) {
        dev_err(dev, "[iWare][Error] Faild to read switch-core-id\n");
        return -EINVAL;
    }
    if (of_property_read_u32(child, "core-0-1-dp-bitmap", &port_info->core_0_1_dp_bitmap) != 0) {
        dev_err(dev, "[iWare][Error] Faild to read core-0-1-dp-bitmap\n");
        return -EINVAL;
    }
    if (of_property_read_u32(child, "core-2-3-dp-bitmap", &port_info->core_2_3_dp_bitmap) != 0) {
        dev_err(dev, "[iWare][Error] Faild to read core-2-3-dp-bitmap\n");
        return -EINVAL;
    }
    if (of_property_read_u32(child, "up-core-id", &port_info->up_core_id) != 0) {
        dev_err(dev, "[iWare][Error] Faild to read up-core-id\n");
        return -EINVAL;
    }
    return 0;
}
/*
 * Read the basic per-port DT properties: port-type (mandatory; EP ports
 * additionally need ep-addr/ep-size), port-id and lport-id (mandatory),
 * and the optional bus numbers which default to 0.
 */
static int udrv_pcie_get_port_info_from_dts(struct device *dev, struct device_node *child,
                                            struct udrv_pcie_port_info *port_info)
{
    if (of_property_read_u32(child, "port-type", &port_info->port_type) != 0) {
        dev_err(dev, "[iWare][Error] Faild to read port-type\n");
        return -EINVAL;
    }
    if (port_info->port_type == NDRV_PCIE_EP_MODE) {
        if (of_property_read_u64(child, "ep-addr", &port_info->ep_addr) != 0) {
            dev_err(dev, "[iWare][Error] Faild to read ep_addr\n");
            return -EINVAL;
        }
        if (of_property_read_u32(child, "ep-size", &port_info->ep_size) != 0) {
            dev_err(dev, "[iWare][Error] Faild to read ep_size\n");
            return -EINVAL;
        }
    }
    if (of_property_read_u32(child, "port-id", &port_info->port_id) != 0) {
        dev_err(dev, "[iWare][Error] Faild to read port-id\n");
        return -EINVAL;
    }
    if (of_property_read_u32(child, "lport-id", &port_info->lport_id) != 0) {
        dev_err(dev, "[iWare][Error] Faild to read lport-id\n");
        return -EINVAL;
    }
    if (of_property_read_u32(child, "pri_bus", &port_info->pri_bus) != 0) {
        port_info->pri_bus = 0;
    }
    if (of_property_read_u32(child, "sec_bus", &port_info->sec_bus) != 0) {
        port_info->sec_bus = 0;
    }
    if (of_property_read_u32(child, "sub_bus", &port_info->sub_bus) != 0) {
        port_info->sub_bus = 0;
    }
    return 0;
}
/*
 * Read the full per-port configuration from one DT child node: the basic
 * properties, link parameters (lanes/speed mandatory, the rest defaulted),
 * and — for switch DP/UP ports — the switch-specific properties.
 */
static int udrv_pcie_get_port_info_child_dt(struct device *dev, struct device_node *child,
                                            struct udrv_pcie_port_info *port_info)
{
    int ret;
    ret = udrv_pcie_get_port_info_from_dts(dev, child, port_info);
    if (ret != 0) {
        return ret;
    }
    if (of_property_read_u32(child, "lanes-nums", &port_info->lane_num) != 0) {
        dev_err(dev, "[iWare][Error] Faild to read lanes\n");
        return -EINVAL;
    }
    /* Optional properties below fall back to sensible defaults. */
    if (of_property_read_u32(child, "max-lanes", &port_info->max_lanes) != 0) {
        port_info->max_lanes = port_info->lane_num;
    }
    if (of_property_read_u32(child, "max-speed", &port_info->max_speed) != 0) {
        dev_err(dev, "[iWare][Error] Faild to read max-speed\n");
        return -EINVAL;
    }
    if (of_property_read_u32(child, "target-speed", &port_info->target_speed) != 0) {
        port_info->target_speed = port_info->max_speed;
    }
    if (of_property_read_u32(child, "payload", &port_info->payload) != 0) {
        port_info->payload = NDRV_PCIE_PAYLOAD_128B;
    }
    if (of_property_read_u32(child, "read_req", &port_info->read_req) != 0) {
        port_info->read_req = NDRV_PCIE_PAYLOAD_512B;
    }
    if (of_property_read_u32(child, "pcs_clk", &port_info->pcs_clk) != 0) {
        port_info->pcs_clk = NDRV_PCIE_PCS_CLK_100M;
    }
    if (of_property_read_u32(child, "core-id", &port_info->core_id) != 0) {
        port_info->core_id = 0;
    }
    if (of_property_read_u32(child, "probe", &port_info->is_probe) != 0) {
        port_info->is_probe = 0;
    }
    if (of_property_read_u32(child, "aer_en", &port_info->aer_en) != 0) {
        port_info->aer_en = 0;
    }
    /* Switch ports carry extra routing information. */
    if (port_info->port_type == PCIE_DP_PORT || port_info->port_type == PCIE_UP_PORT) {
        return udrv_pcie_get_switch_info(dev, child, port_info);
    }
    return 0;
}
/*
 * Build the per-port list from all DT child nodes. Each port is added to
 * the list *before* parsing so the error path can free everything via
 * udrv_pcie_free_port_info(). The lport-id is reserved in pcie_idr so it is
 * globally unique across hosts.
 */
static int udrv_pcie_get_port_info_from_dt(struct udrv_pcie_host *pcie)
{
    struct device *dev = pcie->dev;
    struct device_node *child = NULL;
    struct udrv_pcie_port_info *port_info = NULL;
    struct list_head *head = &pcie->port_info.entry;
    int ret;
    for_each_child_of_node(dev->of_node, child) {
        port_info = kzalloc(sizeof(struct udrv_pcie_port_info), GFP_KERNEL);
        if (!port_info) {
            ret = -ENOMEM;
            goto get_port_fail;
        }
        list_add_tail(&port_info->entry, head);
        ret = udrv_pcie_get_port_info_child_dt(dev, child, port_info);
        if (ret != 0) {
            dev_err(dev, "[iWare][Error] get child dt failed,ret:%d\n", ret);
            goto get_port_fail;
        }
        /* NOTE(review): the (u32) cast of idr_alloc()'s int result followed by
         * a `< 0` check only works if port_info->idr is a signed field —
         * confirm against the struct definition. */
        port_info->idr = (u32)idr_alloc(&pcie_idr, pcie, (int)port_info->lport_id,
                                        (int)port_info->lport_id + 1, GFP_KERNEL);
        if ((port_info->idr < 0) || (port_info->idr != port_info->lport_id)) {
            dev_err(dev, "[iWare][Error] idr_alloc fail, port_id:%d, idr:%d\n", port_info->port_id, port_info->idr);
            ret = -ENOSPC;
            goto get_port_fail;
        }
    }
    return 0;
get_port_fail:
    udrv_pcie_free_port_info(pcie);
    return ret;
}
/*
 * De-assert every reset and enable every clock listed in DT for this host
 * (counts come from the "resets_num"/"clocks_num" properties).
 *
 * Fix: the original leaked the reset_control reference when
 * reset_control_deassert() failed, and the clk reference when
 * clk_prepare_enable() failed. References are now dropped on every path.
 * (As in the original, clk_put() is called while the clock stays enabled.)
 */
static int udrv_pcie_subctrl_dereset(struct udrv_pcie_host *pcie)
{
    struct device *dev = pcie->dev;
    struct reset_control *rst = NULL;
    struct clk *clk = NULL;
    struct device_node *node = dev->of_node;
    int ret, i;
    u32 clk_num = pcie->host_info.clk_num;
    u32 rst_num = pcie->host_info.rst_num;

    for (i = 0; i < (int)rst_num; i++) {
        rst = of_reset_control_get_by_index(node, i);
        if (IS_ERR(rst)) {
            dev_err(dev, "[iWare][Error] [udrv pcie_subctrl_dereset] get rst failed\n");
            return -EFAULT;
        }
        ret = reset_control_deassert(rst);
        /* Drop the reference whether or not the deassert succeeded. */
        reset_control_put(rst);
        if (ret != 0) {
            dev_err(dev, "[iWare][Error] [udrv pcie_subctrl_dereset] soft rst failed, ret=%d\n", ret);
            return ret;
        }
    }
    for (i = 0; i < (int)clk_num; i++) {
        clk = of_clk_get(node, i);
        if (IS_ERR(clk)) {
            dev_err(dev, "[iWare][Error] [udrv pcie_subctrl_dereset] get clk failed\n");
            return -EFAULT;
        }
        ret = clk_prepare_enable(clk);
        if (ret != 0) {
            dev_err(dev, "[iWare][Error] [udrv pcie_subctrl_dereset] open clk failed, ret=%d\n", ret);
            clk_put(clk);
            return ret;
        }
        clk_put(clk);
    }
    return 0;
}
/*
 * Translate a parsed DT port record into the ndrv port configuration.
 * NOTE(review): only the listed fields are written; any other fields of the
 * caller's stack-allocated cfg remain uninitialized — confirm ndrv reads
 * nothing else, or have the caller zero-initialize cfg.
 */
static void udrv_pcie_convert_port_cfg(struct ndrv_pcie_port_cfg_info *cfg, struct udrv_pcie_port_info *pos)
{
    cfg->phy_port_id = pos->port_id;
    cfg->mode = pos->port_type;
    cfg->ep_addr = pos->ep_addr;
    cfg->ep_size = pos->ep_size;
    cfg->lane_num = pos->lane_num;
    cfg->target_speed = pos->target_speed;
    cfg->max_lane = pos->max_lanes;
    cfg->max_speed = pos->max_speed;
    cfg->core_id = pos->core_id;
    cfg->payload = pos->payload;
    cfg->read_req = pos->read_req;
    cfg->pcs_clk = pos->pcs_clk;
    cfg->switch_info.switch_core_id = pos->switch_core_id;
    cfg->switch_info.core0_1_dp_bitmap = pos->core_0_1_dp_bitmap;
    cfg->switch_info.core2_3_dp_bitmap = pos->core_2_3_dp_bitmap;
    cfg->switch_info.up_core_id = pos->up_core_id;
    cfg->pri_bus = pos->pri_bus;
    cfg->sec_bus = pos->sec_bus;
    cfg->sub_bus = pos->sub_bus;
    cfg->aer_en = pos->aer_en;
}
/*
 * Port-level init: enable clocks / de-assert resets for the host, then
 * configure every port from the DT list and enable the ports marked with
 * the "probe" property.
 */
static int udrv_pcie_port_init(struct udrv_pcie_host *pcie)
{
    u32 u_ret;
    int ret;
    struct udrv_pcie_host_info *host_info = &pcie->host_info;
    struct udrv_pcie_port_info *pos = NULL;
    struct list_head *head = &pcie->port_info.entry;
    struct ndrv_pcie_port_cfg_info cfg;
    /* Enable clocks and de-assert resets before touching the ports. */
    ret = udrv_pcie_subctrl_dereset(pcie);
    if (ret != 0) {
        dev_err(pcie->dev, "[iWare][Error] host_id = %u, subctrl failed, ret = 0x%x\n", host_info->host_id, ret);
        return ret;
    }
    list_for_each_entry(pos, head, entry)
    {
        udrv_pcie_convert_port_cfg(&cfg, pos);
        u_ret = ndrv_pcie_host_port_init(host_info->pf_handle, &cfg);
        if (u_ret != 0) {
            dev_err(pcie->dev, "[iWare][Error] port_init fail, host_id = %u, ret = 0x%x\n", host_info->host_id, u_ret);
            return -EINVAL;
        }
        if (pos->is_probe != 0) {
            u_ret = ndrv_pcie_set_port_enable(host_info->pf_handle, pos->core_id, pos->port_id, 1);
            if (u_ret != 0) {
                return -EINVAL;
            }
        }
    }
    return 0;
}
/*
 * MSI-level init: program the doorbell address and translation mode in the
 * controller; for ITS mode nothing else is needed (the GIC ITS handles
 * delivery and affinity). For the chained mode, create the irq domains and
 * attach the chained handler to the summary interrupt — affinity of
 * individual vectors then goes through udrv_pcie_msi_set_affinity().
 */
static int udrv_pcie_msi_init(struct udrv_pcie_host *pcie)
{
    struct udrv_pcie_host_msi_info *msi = &pcie->msi;
    handle pf_handle = pcie->host_info.pf_handle;
    u32 u_ret;
    int ret;
    u_ret = ndrv_pcie_host_set_msi_addr(pf_handle, msi->msi_addr);
    if (u_ret != 0) {
        return -EINVAL;
    }
    u_ret = ndrv_pcie_host_set_msi_enable(pf_handle, msi->msi_trans_type);
    if (u_ret != 0) {
        return -EINVAL;
    }
    if (msi->msi_trans_type == MSI_ITS) {
        return 0;
    }
    raw_spin_lock_init(&msi->lock);
    ret = udrv_pcie_allocate_msi_domains(pcie);
    if (ret != 0) {
        dev_err(pcie->dev, "[iWare][Error] allocate_msi_domains fail, ret = %d\n", ret);
        return ret;
    }
    /* NOTE(review): msi->irq may be <= 0 when the DT omits the interrupt
     * (see udrv_pcie_get_msi_info_from_dt) — confirm this path is guarded. */
    irq_set_chained_handler_and_data((u32)msi->irq, udrv_pcie_handle_chained_msi_irq, pcie);
    return 0;
}
/*
 * ECAM address translation: map (bus, devfn, where) to a virtual address
 * inside the mapped config window, or NULL when the bus is out of range.
 */
static void __iomem *udrv_pcie_ecam_map_bus(struct pci_bus *bus, unsigned int devfn, int where)
{
    u32 bus_no = bus->number;
    void __iomem *base = NULL;
    struct pci_config_window *config = bus->sysdata;
    u32 devfn_shift = config->ops->bus_shift - 8; /* dev + func = 5 + 3 = 8 bits */
    if ((bus_no < config->busr.start) || (bus_no > config->busr.end)) {
        return NULL;
    }
    /* Window starts at the first bus of the range, not at bus 0. */
    bus_no -= (u32)config->busr.start;
    base = config->win + (bus_no << config->ops->bus_shift);
    return base + (devfn << devfn_shift) + where;
}
/*
 * Version wrapper around pci_host_common_probe(): the 5.10+ signature
 * dropped the ops argument, and the core probe overwrites drvdata with the
 * host bridge — restore our pcie pointer and stash the bridge for remove.
 */
static int udrv_pcie_host_common_probe(struct platform_device *pdev, struct pci_ecam_ops *ops)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
    return pci_host_common_probe(pdev, ops);
#else
    struct udrv_pcie_host *pcie = platform_get_drvdata(pdev);
    int ret;
    ret = pci_host_common_probe(pdev);
    /* On 5.10+ the core probe sets drvdata to the bridge; restore ours and
     * keep the bridge so udrv_pcie_host_common_remove() can hand it back. */
    pcie->bridge = platform_get_drvdata(pdev);
    platform_set_drvdata(pdev, pcie);
    return ret;
#endif
}
/*
 * Version wrapper around pci_host_common_remove(); counterpart of
 * udrv_pcie_host_common_probe().
 */
static int udrv_pcie_host_common_remove(struct platform_device *pdev)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
    return 0; /* older kernels provide no common remove helper */
#else
    struct udrv_pcie_host *pcie = platform_get_drvdata(pdev);
    /* The core remove expects drvdata to be the bridge saved at probe time. */
    platform_set_drvdata(pdev, pcie->bridge);
    return pci_host_common_remove(pdev);
#endif
}
/*
 * DT probe dispatcher: RC-capable hosts go through the kernel ECAM host
 * bridge framework; EP/switch-only hosts skip host creation and just
 * initialize the silicon.
 */
static int udrv_pcie_host_probe_for_dt(struct udrv_pcie_host *pcie)
{
    struct platform_device *pdev = to_platform_device(pcie->dev);
    struct udrv_pcie_host_info *host_info = &pcie->host_info;

    if (test_bit(NDRV_PCIE_RP_MODE, &(host_info->type_support_mask)) == 0) {
        /* No RC support (EP or switch): initialize the chip directly. */
        return udrv_pcie_host_ep_init(pdev);
    }
    return udrv_pcie_host_common_probe(pdev, &pcie->ops->host_ops->ecam_ops);
}
/* DT-flavour host ops: standard ECAM layout with our map_bus translation. */
static struct udrv_pcie_host_ops g_pcie_host_ops_for_dt = {
    .ecam_ops = {
        .bus_shift = 20, /* bus number field starts at bit 20 of the ECAM offset */
        .init = udrv_pcie_host_ecam_init,
        .pci_ops = {
            .map_bus = udrv_pcie_ecam_map_bus,
            .read = pci_generic_config_read,
            .write = pci_generic_config_write,
        }
    },
    .probe = udrv_pcie_host_probe_for_dt,
    .get_info = udrv_pcie_get_host_info_from_dt,
    .init = udrv_pcie_host_init,
};
/* DT-flavour port ops. */
static struct udrv_pcie_port_ops g_pcie_port_ops_for_dt = {
    .get_info = udrv_pcie_get_port_info_from_dt,
    .init = udrv_pcie_port_init,
};
/* DT-flavour MSI ops. */
static struct udrv_pcie_msi_ops g_pcie_msi_ops_for_dt = {
    .get_info = udrv_pcie_get_msi_info_from_dt,
    .init = udrv_pcie_msi_init,
};
/* Aggregate ops table handed out by udrv_pcie_get_host_ops(). */
static const struct udrv_pcie_ops g_pcie_ops_for_dt = {
    .port_ops = &g_pcie_port_ops_for_dt,
    .host_ops = &g_pcie_host_ops_for_dt,
    .msi_ops = &g_pcie_msi_ops_for_dt,
};
/* DT match table; .data carries the ECAM ops for the common host framework. */
static const struct of_device_id g_udrv_pcie_of_match[] = {
    {
        .compatible = "hisilicon,udrv-pcie-ecam-dt",
        .data = &g_pcie_host_ops_for_dt.ecam_ops,
    },
    {},
};
MODULE_DEVICE_TABLE(of, g_udrv_pcie_of_match);
/* Select the ops set for this device; only the DT flavour exists today. */
static const struct udrv_pcie_ops *udrv_pcie_get_host_ops(struct device *dev)
{
    return &g_pcie_ops_for_dt;
}
/*
 * Platform probe: allocate the host context, gather host/port/MSI info from
 * DT, run the ops-specific probe, then optional FMEA and infected-callback
 * registration. The error unwind runs strictly in reverse order of the
 * steps above; labels are kept in sync with the #ifdef'd features.
 */
static int udrv_pcie_probe(struct platform_device *pdev)
{
    int ret = 0;
    struct device *dev = &pdev->dev;
    struct udrv_pcie_host *pcie = NULL;
    const struct udrv_pcie_ops *ops = udrv_pcie_get_host_ops(dev);
    if (!ops) {
        dev_err(dev, "[iWare][Error] get ops fail\n");
        return -EINVAL;
    }
    pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
    if (!pcie) {
        dev_err(dev, "[iWare][Error] devm_kzalloc fail\n");
        return -ENOMEM;
    }
    INIT_LIST_HEAD(&pcie->host_info.atu_info.entry);
    INIT_LIST_HEAD(&pcie->port_info.entry);
    pcie->ops = ops;
    pcie->dev = dev;
    platform_set_drvdata(pdev, pcie);
    /* Registered early so hisi_pcie_get_by_host_id() can find us; removed
     * again at err_host on any failure. */
    list_add_tail(&pcie->node, &pcie_host_list);
    if ((ops->host_ops) && (ops->host_ops->get_info(pcie) != 0)) {
        pr_err("[iWare][Error] get host dts info failed \n");
        ret = -EINVAL;
        goto err_host;
    }
    if ((ops->port_ops) && (ops->port_ops->get_info(pcie) != 0)) {
        pr_err("[iWare][Error] get port dts info failed \n");
        ret = -EINVAL;
        goto err_port;
    }
    if ((ops->msi_ops) && (ops->msi_ops->get_info(pcie) != 0)) {
        pr_err("[iWare][Error] get msi dts info failed \n");
        ret = -EINVAL;
        goto err_msi;
    }
    if ((ops->host_ops) && (ops->host_ops->probe(pcie) != 0)) {
        pr_err("[iWare][Error] pcie probe failed \n");
        ret = -EINVAL;
        goto err_probe;
    }
#ifdef CONFIG_UDRV_FMEA
    ret = pcie_fmea_init(pcie);
    if (ret != 0) {
        pr_err("[iWare][Error] fmea init failed,ret=%d\n", ret);
        goto err_fmea;
    }
#endif
#ifdef CONFIG_UDRV_KDRV_INFECTED
    ret = kdrv_pcie_probe_infected_callback(pcie);
    if (ret != 0) {
        pr_err("[iWare][Error] kdrv_pcie_probe_infected_callback fail %d\n", ret);
        goto err_infected_callback;
    }
#endif
    return 0;
#ifdef CONFIG_UDRV_KDRV_INFECTED
err_infected_callback:
#endif
#ifdef CONFIG_UDRV_FMEA
    pcie_fmea_deinit(pcie);
err_fmea:
#endif
    /* Run the framework remove that mirrors the successful probe path. */
    if (test_bit(NDRV_PCIE_RP_MODE, &(pcie->host_info.type_support_mask)) != 0) {
        (void)udrv_pcie_host_common_remove(pdev);
    }
    udrv_pcie_remove_msi(pcie);
err_probe:
err_msi:
    udrv_pcie_free_port_info(pcie);
err_port:
    udrv_pcie_free_atu_info(pcie);
    ndrv_pcie_close_pf(pcie->host_info.pf_handle, pcie->host_info.core_version);
err_host:
    list_del(&pcie->node);
    return ret;
}
/*
 * Platform remove: undo everything probe did, in reverse order — infected
 * callback, FMEA, host bridge (RC hosts only), MSI, port list, ATU list,
 * PF handle, and finally the global host-list entry.
 */
static int udrv_pcie_remove(struct platform_device *pdev)
{
    struct udrv_pcie_host *pcie = platform_get_drvdata(pdev);
    struct udrv_pcie_host_info *host_info = &pcie->host_info;
#ifdef CONFIG_UDRV_KDRV_INFECTED
    kdrv_pcie_remove_infected_callback(pcie);
#endif
#ifdef CONFIG_UDRV_FMEA
    pcie_fmea_deinit(pcie);
#endif
    /* Run the framework remove that mirrors the probe path. */
    if (test_bit(NDRV_PCIE_RP_MODE, &(host_info->type_support_mask)) != 0) {
        (void)udrv_pcie_host_common_remove(pdev);
    }
    udrv_pcie_remove_msi(pcie);
    udrv_pcie_free_port_info(pcie);
    udrv_pcie_free_atu_info(pcie);
    ndrv_pcie_close_pf(pcie->host_info.pf_handle, pcie->host_info.core_version);
    list_del(&pcie->node);
    return 0;
}
/* Platform driver matched via the DT compatible string above. */
static struct platform_driver g_udrv_pcie_dt_driver = {
    .probe = udrv_pcie_probe,
    .remove = udrv_pcie_remove,
    .driver = {
        .name = "udrv-pcie",
        .of_match_table = g_udrv_pcie_of_match,
    },
};
/* Accessor so other compilation units can reach the driver object. */
struct platform_driver *udrv_get_pcie_dt_driver(void)
{
    return &g_udrv_pcie_dt_driver;
}
/* Driver list registered/unregistered as a batch in module init/exit. */
static struct platform_driver * const drivers[] = {
    &g_udrv_pcie_dt_driver,
};
/*
 * driver_for_each_device() callback: match a bound device against the
 * searched host id; returning 1 stops the iteration.
 */
int find_pcie_host_by_id(struct device *dev, void *data)
{
    struct pcie_find_data *pcie_search_info = (struct pcie_find_data *)data;
    struct udrv_pcie_host *pcie = dev_get_drvdata(dev);
    if (pcie->host_info.host_id == pcie_search_info->host_id) {
        pcie_search_info->pcie = pcie;
        return 1; /* found it; stop iterating devices */
    }
    return 0;
}
/*
 * Look up a probed host by its PCI domain id; returns NULL (with a log)
 * when no bound device matches.
 *
 * Fix: the original stored driver_for_each_device()'s result in a local
 * `ret` that was never read (dead store); the result is now explicitly
 * discarded — success is judged solely by whether the callback filled in
 * pcie_search_info.pcie.
 */
struct udrv_pcie_host *hisi_pcie_get_by_host_id(uint32_t host_id)
{
    struct pcie_find_data pcie_search_info = { 0 };

    pcie_search_info.host_id = host_id;
    (void)driver_for_each_device(&g_udrv_pcie_dt_driver.driver, NULL, &pcie_search_info, find_pcie_host_by_id);
    if (pcie_search_info.pcie == NULL) {
        pr_err("[iWare][Error] find pcie fail: host_id = %u\n", host_id);
        return NULL;
    }
    return pcie_search_info.pcie;
}
/* Module entry: register all platform drivers in one call. */
static int __init udrv_pcie_init(void)
{
    return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
}
/* Module exit: unregister the same set. */
static void __exit udrv_pcie_exit(void)
{
    platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
}
module_init(udrv_pcie_init);
module_exit(udrv_pcie_exit);
MODULE_LICENSE("Dual BSD/GPL");
/*
 * Author's question (translated from Chinese, preserved from the original paste):
 * "The above is the PCIe driver code I have supplemented. Please analyze it
 * together with the kernel code I sent: where do I need to add the code for
 * configuring PCIe interrupt affinity, so that running
 * `echo 3 > /proc/irq/109/smp_affinity_list` on Linux correctly applies the
 * interrupt affinity?"
 */