[DM/Feature] Basic PCI/PCIe (Peripheral Component Interconnect Express) bus

PCI/PCIe offers better performance and broader device support, covering
NVMe, GPUs, and powerful NICs (e.g. RDMA). PCI/PCIe access can be
controlled by an IOMMU, which makes virtualization and userspace drivers
safer. PCI/PCIe devices support hot plugging with no SoC design
modifications required, and PCI/PCIe is now popular on embedded SoCs.
We add a simple framework to support them.

Feature list:
1. PCI INTx: the INT[A-D] pin IRQs for legacy PCI, working with the platform PIC.
2. MSI/MSI-X: the message-write IRQs for PCIe, working with the platform PIC.
3. PME: the framework only initializes the D0, D1, D2, D3HOT and D3COLD power states.
4. Endpoint: a simple EP framework for PCI FPGA or NTB functions.
5. OFW: only OFW-based SoCs are supported for now; ACPI support may come later.

Host controllers:
1. Common PCI host controller on ECAM.
2. Generic PCI host controller on ECAM.

Signed-off-by: GuEe-GUI <2991707448@qq.com>
GuEe-GUI 2024-05-30 17:47:56 +08:00 committed by Meco Man
parent 802a6ed2ca
commit 2168ed8e7d
27 changed files with 7430 additions and 0 deletions


@@ -23,6 +23,7 @@ rsource "hwcrypto/Kconfig"
rsource "wlan/Kconfig"
rsource "virtio/Kconfig"
rsource "ofw/Kconfig"
rsource "pci/Kconfig"
rsource "pic/Kconfig"
rsource "pin/Kconfig"
rsource "pinctrl/Kconfig"


@@ -0,0 +1,604 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-08-25 GuEe-GUI first version
*/
#ifndef __PCI_H__
#define __PCI_H__
#include <rtdef.h>
#include <bitmap.h>
#include <ioremap.h>
#include <drivers/ofw.h>
#include <drivers/pic.h>
#include <drivers/core/dm.h>
#include <drivers/core/driver.h>
#include "../../pci/pci_ids.h"
#include "../../pci/pci_regs.h"
#define RT_PCI_INTX_PIN_MAX 4
#define RT_PCI_BAR_NR_MAX 6
#define RT_PCI_DEVICE_MAX 32
#define RT_PCI_FUNCTION_MAX 8
#define RT_PCI_FIND_CAP_TTL 48
/*
* The PCI interface treats multi-function devices as independent
* devices. The slot/function address of each device is encoded
* in a single byte as follows:
*
* 7:3 = slot
* 2:0 = function
*/
#define RT_PCI_DEVID(bus, devfn) ((((rt_uint16_t)(bus)) << 8) | (devfn))
#define RT_PCI_DEVFN(slot, func) ((((slot) & 0x1f) << 3) | ((func) & 0x07))
#define RT_PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f)
#define RT_PCI_FUNC(devfn) ((devfn) & 0x07)
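/*
 * Worked example of the encoding above (illustrative): slot 3, function 1
 * on bus 2:
 *   RT_PCI_DEVFN(3, 1)    = (3 << 3) | 1    = 0x19
 *   RT_PCI_DEVID(2, 0x19) = (2 << 8) | 0x19 = 0x0219
 *   RT_PCI_SLOT(0x19)     = 3, RT_PCI_FUNC(0x19) = 1
 */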
#define PCIE_LINK_STATE_L0S RT_BIT(0)
#define PCIE_LINK_STATE_L1 RT_BIT(1)
#define PCIE_LINK_STATE_CLKPM RT_BIT(2)
#define PCIE_LINK_STATE_L1_1 RT_BIT(3)
#define PCIE_LINK_STATE_L1_2 RT_BIT(4)
#define PCIE_LINK_STATE_L1_1_PCIPM RT_BIT(5)
#define PCIE_LINK_STATE_L1_2_PCIPM RT_BIT(6)
#define PCIE_LINK_STATE_ALL \
( \
PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | \
PCIE_LINK_STATE_CLKPM | \
PCIE_LINK_STATE_L1_1 | PCIE_LINK_STATE_L1_2 | \
PCIE_LINK_STATE_L1_1_PCIPM | PCIE_LINK_STATE_L1_2_PCIPM \
)
struct rt_pci_bus_region
{
rt_uint64_t phy_addr;
rt_uint64_t cpu_addr;
rt_uint64_t size;
rt_uint64_t bus_start;
#define PCI_BUS_REGION_F_NONE       0xffffffff  /* No PCI memory */
#define PCI_BUS_REGION_F_MEM 0x00000000 /* PCI memory space */
#define PCI_BUS_REGION_F_IO 0x00000001 /* PCI IO space */
#define PCI_BUS_REGION_F_PREFETCH 0x00000008 /* Prefetchable PCI memory */
rt_ubase_t flags;
};
struct rt_pci_bus_resource
{
rt_ubase_t base;
rt_size_t size;
rt_ubase_t flags;
};
/*
* PCI topology:
*
* +-----+-----+ +-------------+ PCI Bus 0 +------------+ PCI Bus 1
* | RAM | CPU |---------| Host Bridge |--------+-----| PCI Bridge |-----+
* +-----+-----+ +-------------+ | +------------+ | +-------------+
* | +----| End Point 2 |
* +-------------+ +-------------+ | +-------------+ | +-------------+
* | End Point 5 |----+ | End Point 0 |-------+ | End Point 3 |----+
* +-------------+ | +-------------+ | +-------------+ |
* | | |
* +-------------+ | +-------------+ | +-------------+ | +-------------+
* | End Point 6 |----+----| ISA Bridge |-------+-----| End Point 1 | +----| End Point 4 |
* +-------------+ +-------------+ | +-------------+ +-------------+
* |
* +------+ +----------------+ |
* | Port |---------| CardBus Bridge |----+
* +------+ +----------------+
*/
struct rt_pci_bus;
struct rt_pci_device_id
{
#define PCI_ANY_ID (~0)
#define RT_PCI_DEVICE_ID(vend, dev) \
.vendor = (vend), \
.device = (dev), \
.subsystem_vendor = PCI_ANY_ID, \
.subsystem_device = PCI_ANY_ID
#define RT_PCI_DEVICE_CLASS(dev_class, dev_class_mask) \
.vendor = PCI_ANY_ID, .device = PCI_ANY_ID, \
.subsystem_vendor = PCI_ANY_ID, \
.subsystem_device = PCI_ANY_ID, \
.class = (dev_class), .class_mask = (dev_class_mask),
rt_uint32_t vendor, device; /* Vendor and device ID or PCI_ANY_ID */
    rt_uint32_t subsystem_vendor;   /* Subsystem IDs or PCI_ANY_ID */
    rt_uint32_t subsystem_device;   /* Subsystem IDs or PCI_ANY_ID */
rt_uint32_t class, class_mask; /* (class, subclass, prog-if) triplet */
const void *data;
};
struct rt_pci_device
{
struct rt_device parent;
const char *name;
rt_list_t list;
struct rt_pci_bus *bus;
    struct rt_pci_bus *subbus;      /* Only valid for PCI-to-PCI bridges; NULL for an 'End Point' or 'Port' */
const struct rt_pci_device_id *id;
rt_uint32_t devfn; /* Encoded device & function index */
rt_uint16_t vendor;
rt_uint16_t device;
rt_uint16_t subsystem_vendor;
rt_uint16_t subsystem_device;
rt_uint32_t class; /* 3 bytes: (base, sub, prog-if) */
rt_uint8_t revision;
rt_uint8_t hdr_type;
rt_uint8_t max_latency;
rt_uint8_t min_grantl;
rt_uint8_t int_pin;
rt_uint8_t int_line;
rt_uint16_t exp_flags;
rt_uint32_t cfg_size;
void *sysdata;
int irq;
rt_uint8_t pin;
struct rt_pic *intx_pic;
struct rt_pci_bus_resource resource[RT_PCI_BAR_NR_MAX];
rt_uint8_t pme_cap;
rt_uint8_t msi_cap;
rt_uint8_t msix_cap;
rt_uint8_t pcie_cap;
rt_uint8_t busmaster:1; /* Is the bus master */
rt_uint8_t multi_function:1; /* Multi-function device */
rt_uint8_t ari_enabled:1; /* Alternative Routing-ID Interpretation */
rt_uint8_t no_msi:1; /* May not use MSI */
rt_uint8_t no_64bit_msi:1; /* May only use 32-bit MSIs */
rt_uint8_t msi_enabled:1; /* MSI enable */
rt_uint8_t msix_enabled:1; /* MSIx enable */
rt_uint8_t broken_intx_masking:1; /* INTx masking can't be used */
rt_uint8_t pme_support:5; /* Bitmask of states from which PME# can be generated */
#ifdef RT_PCI_MSI
void *msix_base;
struct rt_pic *msi_pic;
rt_list_t msi_desc_nodes;
struct rt_spinlock msi_lock;
#endif
};
struct rt_pci_host_bridge
{
struct rt_device parent;
rt_uint32_t domain;
struct rt_pci_bus *root_bus;
const struct rt_pci_ops *ops;
const struct rt_pci_ops *child_ops;
rt_uint32_t bus_range[2];
rt_size_t bus_regions_nr;
struct rt_pci_bus_region *bus_regions;
rt_size_t dma_regions_nr;
struct rt_pci_bus_region *dma_regions;
rt_uint8_t (*irq_slot)(struct rt_pci_device *pdev, rt_uint8_t *pinp);
int (*irq_map)(struct rt_pci_device *pdev, rt_uint8_t slot, rt_uint8_t pin);
void *sysdata;
rt_uint8_t priv[0];
};
#define rt_device_to_pci_host_bridge(dev) rt_container_of(dev, struct rt_pci_host_bridge, parent)
struct rt_pci_ops
{
rt_err_t (*add)(struct rt_pci_bus *bus);
rt_err_t (*remove)(struct rt_pci_bus *bus);
void *(*map)(struct rt_pci_bus *bus, rt_uint32_t devfn, int reg);
rt_err_t (*read)(struct rt_pci_bus *bus,
rt_uint32_t devfn, int reg, int width, rt_uint32_t *value);
rt_err_t (*write)(struct rt_pci_bus *bus,
rt_uint32_t devfn, int reg, int width, rt_uint32_t value);
};
struct rt_pci_bus
{
rt_list_t list;
rt_list_t children_nodes;
rt_list_t devices_nodes;
struct rt_pci_bus *parent;
union
{
/* In PCI-to-PCI bridge, parent is not NULL */
struct rt_pci_device *self;
/* In Host bridge, this is Root bus ('PCI Bus 0') */
struct rt_pci_host_bridge *host_bridge;
};
const struct rt_pci_ops *ops;
char name[48];
char number;
struct rt_spinlock lock;
void *sysdata;
};
struct rt_pci_driver
{
struct rt_driver parent;
const char *name;
const struct rt_pci_device_id *ids;
rt_err_t (*probe)(struct rt_pci_device *pdev);
rt_err_t (*remove)(struct rt_pci_device *pdev);
rt_err_t (*shutdown)(struct rt_pci_device *pdev);
};
struct rt_pci_msix_entry
{
int irq;
int index;
};
enum rt_pci_power
{
RT_PCI_D0,
RT_PCI_D1,
RT_PCI_D2,
RT_PCI_D3HOT,
RT_PCI_D3COLD,
RT_PCI_PME_MAX,
};
void rt_pci_pme_init(struct rt_pci_device *pdev);
void rt_pci_pme_active(struct rt_pci_device *pdev, rt_bool_t enable);
rt_err_t rt_pci_enable_wake(struct rt_pci_device *pci_dev,
enum rt_pci_power state, rt_bool_t enable);
rt_inline rt_bool_t rt_pci_pme_capable(struct rt_pci_device *pdev,
enum rt_pci_power state)
{
if (!pdev->pme_cap)
{
return RT_FALSE;
}
return !!(pdev->pme_support & (1 << state));
}
void rt_pci_msi_init(struct rt_pci_device *pdev);
void rt_pci_msix_init(struct rt_pci_device *pdev);
void rt_pci_set_master(struct rt_pci_device *pdev);
void rt_pci_clear_master(struct rt_pci_device *pdev);
struct rt_pci_host_bridge *rt_pci_host_bridge_alloc(rt_size_t priv_size);
rt_err_t rt_pci_host_bridge_free(struct rt_pci_host_bridge *);
rt_err_t rt_pci_host_bridge_init(struct rt_pci_host_bridge *host_bridge);
rt_err_t rt_pci_host_bridge_probe(struct rt_pci_host_bridge *host_bridge);
struct rt_pci_device *rt_pci_alloc_device(struct rt_pci_bus *bus);
struct rt_pci_device *rt_pci_scan_single_device(struct rt_pci_bus *bus, rt_uint32_t devfn);
rt_err_t rt_pci_setup_device(struct rt_pci_device *pdev);
rt_size_t rt_pci_scan_slot(struct rt_pci_bus *bus, rt_uint32_t devfn);
rt_uint32_t rt_pci_scan_child_buses(struct rt_pci_bus *bus, rt_size_t buses);
rt_uint32_t rt_pci_scan_child_bus(struct rt_pci_bus *bus);
rt_err_t rt_pci_host_bridge_register(struct rt_pci_host_bridge *host_bridge);
rt_err_t rt_pci_scan_root_bus_bridge(struct rt_pci_host_bridge *host_bridge);
rt_err_t rt_pci_host_bridge_remove(struct rt_pci_host_bridge *host_bridge);
rt_err_t rt_pci_bus_remove(struct rt_pci_bus *bus);
rt_err_t rt_pci_device_remove(struct rt_pci_device *pdev);
rt_uint32_t rt_pci_domain(struct rt_pci_device *pdev);
rt_uint8_t rt_pci_bus_find_capability(struct rt_pci_bus *bus, rt_uint32_t devfn, int cap);
rt_uint8_t rt_pci_find_capability(struct rt_pci_device *pdev, int cap);
rt_uint8_t rt_pci_find_next_capability(struct rt_pci_device *pdev, rt_uint8_t pos, int cap);
rt_uint16_t rt_pci_find_ext_capability(struct rt_pci_device *pdev, int cap);
rt_uint16_t rt_pci_find_ext_next_capability(struct rt_pci_device *pdev, rt_uint16_t pos, int cap);
struct rt_pci_bus *rt_pci_find_root_bus(struct rt_pci_bus *bus);
struct rt_pci_host_bridge *rt_pci_find_host_bridge(struct rt_pci_bus *bus);
rt_inline rt_uint16_t rt_pci_dev_id(struct rt_pci_device *pdev)
{
return RT_PCI_DEVID(pdev->bus->number, pdev->devfn);
}
rt_inline rt_bool_t rt_pci_is_root_bus(struct rt_pci_bus *bus)
{
return bus->parent ? RT_FALSE : RT_TRUE;
}
rt_inline rt_bool_t rt_pci_is_bridge(struct rt_pci_device *pdev)
{
return pdev->hdr_type == PCIM_HDRTYPE_BRIDGE ||
pdev->hdr_type == PCIM_HDRTYPE_CARDBUS;
}
rt_inline rt_bool_t rt_pci_is_pcie(struct rt_pci_device *pdev)
{
return !!pdev->pcie_cap;
}
#define rt_pci_foreach_bridge(pdev, bus) \
rt_list_for_each_entry(pdev, &bus->devices_nodes, list) \
if (rt_pci_is_bridge(pdev))
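/*
 * Usage sketch (illustrative): recurse through every bridge below a bus.
 * `walk_bus` is a hypothetical caller-defined function, not part of this
 * framework.
 *
 *  static void walk_bus(struct rt_pci_bus *bus)
 *  {
 *      struct rt_pci_device *pdev;
 *
 *      rt_pci_foreach_bridge(pdev, bus)
 *      {
 *          if (pdev->subbus)
 *          {
 *              walk_bus(pdev->subbus);
 *          }
 *      }
 *  }
 */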
rt_err_t rt_pci_bus_read_config_u8(struct rt_pci_bus *bus,
rt_uint32_t devfn, int pos, rt_uint8_t *value);
rt_err_t rt_pci_bus_read_config_u16(struct rt_pci_bus *bus,
rt_uint32_t devfn, int pos, rt_uint16_t *value);
rt_err_t rt_pci_bus_read_config_u32(struct rt_pci_bus *bus,
rt_uint32_t devfn, int pos, rt_uint32_t *value);
rt_err_t rt_pci_bus_write_config_u8(struct rt_pci_bus *bus,
rt_uint32_t devfn, int reg, rt_uint8_t value);
rt_err_t rt_pci_bus_write_config_u16(struct rt_pci_bus *bus,
rt_uint32_t devfn, int reg, rt_uint16_t value);
rt_err_t rt_pci_bus_write_config_u32(struct rt_pci_bus *bus,
rt_uint32_t devfn, int reg, rt_uint32_t value);
rt_err_t rt_pci_bus_read_config_uxx(struct rt_pci_bus *bus,
rt_uint32_t devfn, int reg, int width, rt_uint32_t *value);
rt_err_t rt_pci_bus_write_config_uxx(struct rt_pci_bus *bus,
rt_uint32_t devfn, int reg, int width, rt_uint32_t value);
rt_err_t rt_pci_bus_read_config_generic_u32(struct rt_pci_bus *bus,
rt_uint32_t devfn, int reg, int width, rt_uint32_t *value);
rt_err_t rt_pci_bus_write_config_generic_u32(struct rt_pci_bus *bus,
rt_uint32_t devfn, int reg, int width, rt_uint32_t value);
rt_inline rt_err_t rt_pci_read_config_u8(const struct rt_pci_device *pdev,
int reg, rt_uint8_t *value)
{
return rt_pci_bus_read_config_u8(pdev->bus, pdev->devfn, reg, value);
}
rt_inline rt_err_t rt_pci_read_config_u16(const struct rt_pci_device *pdev,
int reg, rt_uint16_t *value)
{
return rt_pci_bus_read_config_u16(pdev->bus, pdev->devfn, reg, value);
}
rt_inline rt_err_t rt_pci_read_config_u32(const struct rt_pci_device *pdev,
int reg, rt_uint32_t *value)
{
return rt_pci_bus_read_config_u32(pdev->bus, pdev->devfn, reg, value);
}
rt_inline rt_err_t rt_pci_write_config_u8(const struct rt_pci_device *pdev,
int reg, rt_uint8_t value)
{
return rt_pci_bus_write_config_u8(pdev->bus, pdev->devfn, reg, value);
}
rt_inline rt_err_t rt_pci_write_config_u16(const struct rt_pci_device *pdev,
int reg, rt_uint16_t value)
{
return rt_pci_bus_write_config_u16(pdev->bus, pdev->devfn, reg, value);
}
rt_inline rt_err_t rt_pci_write_config_u32(const struct rt_pci_device *pdev,
int reg, rt_uint32_t value)
{
return rt_pci_bus_write_config_u32(pdev->bus, pdev->devfn, reg, value);
}
#ifdef RT_USING_OFW
int rt_pci_ofw_irq_parse_and_map(struct rt_pci_device *pdev,
rt_uint8_t slot, rt_uint8_t pin);
rt_err_t rt_pci_ofw_parse_ranges(struct rt_ofw_node *dev_np,
struct rt_pci_host_bridge *host_bridge);
rt_err_t rt_pci_ofw_host_bridge_init(struct rt_ofw_node *dev_np,
struct rt_pci_host_bridge *host_bridge);
rt_err_t rt_pci_ofw_bus_init(struct rt_pci_bus *bus);
rt_err_t rt_pci_ofw_bus_free(struct rt_pci_bus *bus);
rt_err_t rt_pci_ofw_device_init(struct rt_pci_device *pdev);
rt_err_t rt_pci_ofw_device_free(struct rt_pci_device *pdev);
#else
rt_inline rt_err_t rt_pci_ofw_host_bridge_init(struct rt_ofw_node *dev_np,
struct rt_pci_host_bridge *host_bridge)
{
return RT_EOK;
}
rt_inline rt_err_t rt_pci_ofw_bus_init(struct rt_pci_bus *bus)
{
return RT_EOK;
}
rt_inline rt_err_t rt_pci_ofw_bus_free(struct rt_pci_bus *bus)
{
return RT_EOK;
}
rt_inline rt_err_t rt_pci_ofw_device_init(struct rt_pci_device *pdev)
{
return RT_EOK;
}
rt_inline rt_err_t rt_pci_ofw_device_free(struct rt_pci_device *pdev)
{
return RT_EOK;
}
rt_inline int rt_pci_ofw_irq_parse_and_map(struct rt_pci_device *pdev,
rt_uint8_t slot, rt_uint8_t pin)
{
return -1;
}
rt_inline rt_err_t rt_pci_ofw_parse_ranges(struct rt_ofw_node *dev_np,
struct rt_pci_host_bridge *host_bridge)
{
return -RT_ENOSYS;
}
#endif /* RT_USING_OFW */
rt_inline void *rt_pci_iomap(struct rt_pci_device *pdev, int bar_idx)
{
    struct rt_pci_bus_resource *res;

    RT_ASSERT(bar_idx < RT_ARRAY_SIZE(pdev->resource));
    res = &pdev->resource[bar_idx];

    return rt_ioremap((void *)res->base, res->size);
}
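/*
 * Usage sketch (illustrative, in a hypothetical driver probe):
 *
 *  void *regs = rt_pci_iomap(pdev, 0);     // map BAR0
 *
 *  if (regs)
 *  {
 *      rt_uint32_t val = HWREG32(regs);    // read the first device register
 *      rt_iounmap(regs);
 *  }
 */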
rt_uint8_t rt_pci_irq_intx(struct rt_pci_device *pdev, rt_uint8_t pin);
rt_uint8_t rt_pci_irq_slot(struct rt_pci_device *pdev, rt_uint8_t *pinp);
void rt_pci_assign_irq(struct rt_pci_device *pdev);
void rt_pci_intx(struct rt_pci_device *pdev, rt_bool_t enable);
rt_bool_t rt_pci_check_and_mask_intx(struct rt_pci_device *pdev);
rt_bool_t rt_pci_check_and_unmask_intx(struct rt_pci_device *pdev);
void rt_pci_irq_mask(struct rt_pci_device *pdev);
void rt_pci_irq_unmask(struct rt_pci_device *pdev);
#define RT_PCI_IRQ_F_LEGACY RT_BIT(0) /* Allow legacy interrupts */
#define RT_PCI_IRQ_F_MSI RT_BIT(1) /* Allow MSI interrupts */
#define RT_PCI_IRQ_F_MSIX RT_BIT(2) /* Allow MSI-X interrupts */
#define RT_PCI_IRQ_F_AFFINITY RT_BIT(3) /* Auto-assign affinity */
#define RT_PCI_IRQ_F_ALL_TYPES (RT_PCI_IRQ_F_LEGACY | RT_PCI_IRQ_F_MSI | RT_PCI_IRQ_F_MSIX)
#ifdef RT_PCI_MSI
rt_ssize_t rt_pci_alloc_vector(struct rt_pci_device *pdev, int min, int max,
rt_uint32_t flags, RT_IRQ_AFFINITY_DECLARE((*affinities)));
void rt_pci_free_vector(struct rt_pci_device *pdev);
rt_ssize_t rt_pci_msi_vector_count(struct rt_pci_device *pdev);
rt_err_t rt_pci_msi_disable(struct rt_pci_device *pdev);
rt_ssize_t rt_pci_msi_enable_range_affinity(struct rt_pci_device *pdev,
int min, int max, RT_IRQ_AFFINITY_DECLARE((*affinities)));
rt_ssize_t rt_pci_msix_vector_count(struct rt_pci_device *pdev);
rt_err_t rt_pci_msix_disable(struct rt_pci_device *pdev);
rt_ssize_t rt_pci_msix_enable_range_affinity(struct rt_pci_device *pdev,
struct rt_pci_msix_entry *entries, int min, int max,
RT_IRQ_AFFINITY_DECLARE((*affinities)));
#else
rt_inline rt_ssize_t rt_pci_alloc_vector(struct rt_pci_device *pdev, int min, int max,
rt_uint32_t flags, RT_IRQ_AFFINITY_DECLARE((*affinities)))
{
return -RT_ENOSYS;
}
rt_inline void rt_pci_free_vector(struct rt_pci_device *pdev)
{
return;
}
rt_inline rt_ssize_t rt_pci_msi_vector_count(struct rt_pci_device *pdev)
{
return 0;
}
rt_inline rt_err_t rt_pci_msi_disable(struct rt_pci_device *pdev)
{
return RT_EOK;
}
rt_inline rt_ssize_t rt_pci_msi_enable_range_affinity(struct rt_pci_device *pdev,
int min, int max, RT_IRQ_AFFINITY_DECLARE((*affinities)))
{
return -RT_ENOSYS;
}
rt_inline rt_ssize_t rt_pci_msix_vector_count(struct rt_pci_device *pdev)
{
return 0;
}
rt_inline rt_err_t rt_pci_msix_disable(struct rt_pci_device *pdev)
{
return RT_EOK;
}
rt_inline rt_ssize_t rt_pci_msix_enable_range_affinity(struct rt_pci_device *pdev,
struct rt_pci_msix_entry *entries, int min, int max,
RT_IRQ_AFFINITY_DECLARE((*affinities)))
{
return -RT_ENOSYS;
}
#endif /* RT_PCI_MSI */
rt_inline void rt_pci_msix_entry_index_linear(struct rt_pci_msix_entry *entries,
rt_size_t nvectors)
{
for (int i = 0; i < nvectors; ++i)
{
entries[i].index = i;
}
}
rt_inline rt_ssize_t rt_pci_msi_enable_range(struct rt_pci_device *pdev,
int min, int max)
{
return rt_pci_msi_enable_range_affinity(pdev, min, max, RT_NULL);
}
rt_inline rt_err_t rt_pci_msi_enable(struct rt_pci_device *pdev)
{
    rt_ssize_t res = rt_pci_msi_enable_range(pdev, 1, 1);

    /* One vector enabled is success; otherwise propagate the error */
    return res == 1 ? RT_EOK : (rt_err_t)res;
}
rt_inline rt_ssize_t rt_pci_msix_enable_range(struct rt_pci_device *pdev,
struct rt_pci_msix_entry *entries, int min, int max)
{
return rt_pci_msix_enable_range_affinity(pdev, entries, min, max, RT_NULL);
}
rt_inline rt_ssize_t rt_pci_msix_enable(struct rt_pci_device *pdev,
struct rt_pci_msix_entry *entries, int count)
{
return rt_pci_msix_enable_range(pdev, entries, count, count);
}
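/*
 * Usage sketch (illustrative): enable four MSI-X vectors with a linear
 * index mapping; the count of 4 is an arbitrary example.
 *
 *  struct rt_pci_msix_entry entries[4];
 *
 *  rt_pci_msix_entry_index_linear(entries, 4);
 *  if (rt_pci_msix_enable(pdev, entries, 4) == 4)
 *  {
 *      // entries[i].irq now holds the allocated IRQ numbers
 *  }
 */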
rt_err_t rt_pci_region_setup(struct rt_pci_host_bridge *host_bridge);
struct rt_pci_bus_region *rt_pci_region_alloc(struct rt_pci_host_bridge *host_bridge,
void **out_addr, rt_size_t size, rt_ubase_t flags, rt_bool_t mem64);
rt_err_t rt_pci_device_alloc_resource(struct rt_pci_host_bridge *host_bridge,
struct rt_pci_device *pdev);
void rt_pci_enum_device(struct rt_pci_bus *bus,
rt_bool_t (callback(struct rt_pci_device *, void *)), void *data);
const struct rt_pci_device_id *rt_pci_match_id(struct rt_pci_device *pdev,
const struct rt_pci_device_id *id);
const struct rt_pci_device_id *rt_pci_match_ids(struct rt_pci_device *pdev,
const struct rt_pci_device_id *ids);
rt_err_t rt_pci_driver_register(struct rt_pci_driver *pdrv);
rt_err_t rt_pci_device_register(struct rt_pci_device *pdev);
#define RT_PCI_DRIVER_EXPORT(driver) RT_DRIVER_EXPORT(driver, pci, BUILIN)
extern struct rt_spinlock rt_pci_lock;
#endif /* __PCI_H__ */


@@ -0,0 +1,179 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-08-25 GuEe-GUI first version
*/
#ifndef __PCI_ENDPOINT_H__
#define __PCI_ENDPOINT_H__
#include <drivers/pci.h>
enum rt_pci_ep_pin
{
RT_PCI_EP_PIN_UNKNOWN,
RT_PCI_EP_PIN_INTA,
RT_PCI_EP_PIN_INTB,
RT_PCI_EP_PIN_INTC,
RT_PCI_EP_PIN_INTD,
};
enum rt_pci_ep_irq
{
RT_PCI_EP_IRQ_UNKNOWN,
RT_PCI_EP_IRQ_LEGACY,
RT_PCI_EP_IRQ_MSI,
RT_PCI_EP_IRQ_MSIX,
};
struct rt_pci_ep_header
{
rt_uint16_t vendor;
rt_uint16_t device;
rt_uint8_t revision;
rt_uint8_t progif;
rt_uint8_t subclass;
rt_uint8_t class_code;
rt_uint8_t cache_line_size;
rt_uint16_t subsystem_vendor;
rt_uint16_t subsystem_device;
enum rt_pci_ep_pin intx;
};
struct rt_pci_ep_bar
{
/* To PCI Bus */
struct rt_pci_bus_resource bus;
/* To CPU */
rt_ubase_t cpu_addr;
};
/*
 * MSI-X table entry layout. For the full format, see
 * `components/drivers/include/drivers/pci_msi.h`.
 */
struct rt_pci_ep_msix_tbl
{
union
{
rt_uint64_t msg_addr;
struct
{
rt_uint32_t msg_addr_upper;
rt_uint32_t msg_addr_lower;
};
};
rt_uint32_t msg_data;
rt_uint32_t vector_ctrl;
};
struct rt_pci_ep_ops;
struct rt_pci_ep
{
rt_list_t list;
const char *name;
struct rt_ref ref;
const struct rt_device *rc_dev;
const struct rt_pci_ep_ops *ops;
rt_uint8_t max_functions;
RT_BITMAP_DECLARE(functions_map, 8);
rt_list_t epf_nodes;
struct rt_mutex lock;
void *priv;
};
struct rt_pci_epf
{
rt_list_t list;
const char *name;
struct rt_pci_ep_header *header;
struct rt_pci_ep_bar bar[PCI_STD_NUM_BARS];
rt_uint8_t msi_interrupts;
rt_uint16_t msix_interrupts;
rt_uint8_t func_no;
struct rt_pci_ep *ep;
};
struct rt_pci_ep_ops
{
rt_err_t (*write_header)(struct rt_pci_ep *ep, rt_uint8_t func_no,
struct rt_pci_ep_header *hdr);
rt_err_t (*set_bar)(struct rt_pci_ep *ep, rt_uint8_t func_no,
struct rt_pci_ep_bar *bar, int bar_idx);
rt_err_t (*clear_bar)(struct rt_pci_ep *ep, rt_uint8_t func_no,
struct rt_pci_ep_bar *bar, int bar_idx);
rt_err_t (*map_addr)(struct rt_pci_ep *ep, rt_uint8_t func_no,
rt_ubase_t addr, rt_uint64_t pci_addr, rt_size_t size);
rt_err_t (*unmap_addr)(struct rt_pci_ep *ep, rt_uint8_t func_no, rt_ubase_t addr);
rt_err_t (*set_msi)(struct rt_pci_ep *ep, rt_uint8_t func_no,
unsigned irq_nr);
rt_err_t (*get_msi)(struct rt_pci_ep *ep, rt_uint8_t func_no,
unsigned *out_irq_nr);
rt_err_t (*set_msix)(struct rt_pci_ep *ep, rt_uint8_t func_no,
unsigned irq_nr, int bar_idx, rt_off_t offset);
rt_err_t (*get_msix)(struct rt_pci_ep *ep, rt_uint8_t func_no,
unsigned *out_irq_nr);
rt_err_t (*raise_irq)(struct rt_pci_ep *ep, rt_uint8_t func_no,
enum rt_pci_ep_irq type, unsigned irq);
rt_err_t (*start)(struct rt_pci_ep *ep);
rt_err_t (*stop)(struct rt_pci_ep *ep);
};
rt_err_t rt_pci_ep_write_header(struct rt_pci_ep *ep, rt_uint8_t func_no,
struct rt_pci_ep_header *hdr);
rt_err_t rt_pci_ep_set_bar(struct rt_pci_ep *ep, rt_uint8_t func_no,
struct rt_pci_ep_bar *bar, int bar_idx);
rt_err_t rt_pci_ep_clear_bar(struct rt_pci_ep *ep, rt_uint8_t func_no,
struct rt_pci_ep_bar *bar, int bar_idx);
rt_err_t rt_pci_ep_map_addr(struct rt_pci_ep *ep, rt_uint8_t func_no,
rt_ubase_t addr, rt_uint64_t pci_addr, rt_size_t size);
rt_err_t rt_pci_ep_unmap_addr(struct rt_pci_ep *ep, rt_uint8_t func_no,
rt_ubase_t addr);
rt_err_t rt_pci_ep_set_msi(struct rt_pci_ep *ep, rt_uint8_t func_no,
unsigned irq_nr);
rt_err_t rt_pci_ep_get_msi(struct rt_pci_ep *ep, rt_uint8_t func_no,
unsigned *out_irq_nr);
rt_err_t rt_pci_ep_set_msix(struct rt_pci_ep *ep, rt_uint8_t func_no,
unsigned irq_nr, int bar_idx, rt_off_t offset);
rt_err_t rt_pci_ep_get_msix(struct rt_pci_ep *ep, rt_uint8_t func_no,
unsigned *out_irq_nr);
rt_err_t rt_pci_ep_raise_irq(struct rt_pci_ep *ep, rt_uint8_t func_no,
enum rt_pci_ep_irq type, unsigned irq);
rt_err_t rt_pci_ep_start(struct rt_pci_ep *ep);
rt_err_t rt_pci_ep_stop(struct rt_pci_ep *ep);
rt_err_t rt_pci_ep_register(struct rt_pci_ep *ep);
rt_err_t rt_pci_ep_unregister(struct rt_pci_ep *ep);
rt_err_t rt_pci_ep_add_epf(struct rt_pci_ep *ep, struct rt_pci_epf *epf);
rt_err_t rt_pci_ep_remove_epf(struct rt_pci_ep *ep, struct rt_pci_epf *epf);
struct rt_pci_ep *rt_pci_ep_get(const char *name);
void rt_pci_ep_put(struct rt_pci_ep *ep);
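/*
 * Usage sketch (illustrative): bind one endpoint function to a registered
 * controller. The controller name "ep0" and all header values here are
 * assumptions, not defined by this framework.
 *
 *  static struct rt_pci_ep_header hdr =
 *  {
 *      .vendor = 0x1234,               // hypothetical IDs
 *      .device = 0x5678,
 *      .class_code = 0xff,             // vendor-specific class
 *      .intx = RT_PCI_EP_PIN_INTA,
 *  };
 *  static struct rt_pci_epf epf = { .name = "epf0", .header = &hdr };
 *
 *  struct rt_pci_ep *ep = rt_pci_ep_get("ep0");
 *
 *  if (ep && !rt_pci_ep_add_epf(ep, &epf))
 *  {
 *      rt_pci_ep_write_header(ep, epf.func_no, &hdr);
 *      rt_pci_ep_start(ep);
 *  }
 */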
#endif /* __PCI_ENDPOINT_H__ */


@@ -0,0 +1,189 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-08-25 GuEe-GUI first version
*/
#ifndef __PCI_MSI_H__
#define __PCI_MSI_H__
#include <drivers/pci.h>
/*
* MSI Format:
* T0: 32-bit Address
* T1: 64-bit Address
* T2: 32-bit Address with Per-Vector Masking
* T3: 64-bit Address with Per-Vector Masking
*
* 31 16 15 8 7 0
* +---------------------------+-----------------+---------------+
* | Message Control | Next Capability | Capability ID | DW0
* | | Pointer | (05h) |
* +---------------------------+-----------------+---------------+
* | Message Address [31:0] | DW1
* +-------------------------------------------------------------+
* | Message Address [63:32] | DW2 (T1: only 64-bit)
* +---------------------------+---------------------------------+
* | Reserved | Message Data | DW3
* +---------------------------+---------------------------------+
* | Mask Bits | DW4 (T2/T3: only with Per-Vector Masking)
* +-------------------------------------------------------------+
* | Pending Bits | DW5 (T2/T3: only with Per-Vector Masking)
* +-------------------------------------------------------------+
*
* MSI Message Control:
*
* 15 9 8 7 6 4 3 1 0
* +----------------------+---+---+---------------+----------+---+
* | Reserved | | | | | |
* +----------------------+---+---+---------------+----------+---+
* ^ ^ ^ ^ ^
* | | | | |
* | | | | +---- MSI Enable (RW)
* | | | +----------- Multiple Message Capable (RO, log2n, [n <= 5])
* | | +------------------------- Multiple Message Enable (RW, log2n, [n <= 5])
* | +----------------------------------- 64-bit Address Capable
* +--------------------------------------- Per-Vector Masking Capable
*/
struct rt_pci_msi_conf
{
rt_uint32_t mask;
rt_uint8_t mask_pos;
int default_irq;
struct
{
rt_uint8_t is_masking:1;
rt_uint8_t is_64bit:1;
        rt_uint8_t multi_msg_max:3; /* log2 number of vectors supported (capability) */
        rt_uint8_t multi_msg_use:3; /* log2 number of vectors currently enabled */
} cap;
};
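/*
 * Worked example (illustrative): Message Control = 0x018a decodes, per the
 * diagram above, as:
 *   bit 0      = 0   -> MSI currently disabled
 *   bits [3:1] = 101 -> multi_msg_max = 5, up to 2^5 = 32 vectors supported
 *   bits [6:4] = 000 -> multi_msg_use = 0, 2^0 = 1 vector enabled
 *   bit 7      = 1   -> 64-bit address capable (cap.is_64bit)
 *   bit 8      = 1   -> per-vector masking capable (cap.is_masking)
 */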
/*
* MSI-X Format:
*
* 31 16 15 8 7 0
* +---------------------------+-----------------+---------------+
* | Message Control | Next Capability | Capability ID | DW0
* | | Pointer | (11h) |
* +---------------------------+-----------------+---+-----------+
* | MSI-X Table Offset | Table BIR | DW1 (BIR: BAR Index Register)
* +-------------------------------------------------+-----------+ |
* | Pending Bit Array (PBA) Offset | PBA BIR | DW2 --------+ |
* +-------------------------------------------------+-----------+ | |
* | |
* MSI-X Message Control: | |
* | |
* 15 14 13 11 10 0 | |
* +---+---+----------+------------------------------------------+ | |
* | | | Reserved | Table Size in N-1 (RO) | | |
* +---+---+----------+------------------------------------------+ | |
* ^ ^ | |
* | | | |
* | +---- Function Mask (RW) | |
* +-------- MSI-X Enable (RW) | |
* | |
* MSI-X Table (BAR[Table BIR] + MSI-X Table Offset): | |
* | |
* DW3 DW2 DW1 DW0 | |
* +----------------+--------------+---------------+---------------+ <---------|-+
* | Vector Control | Message Data | Upper Address | Lower Address | Entry 0 |
* +----------------+--------------+---------------+---------------+ |
* | Vector Control | Message Data | Upper Address | Lower Address | Entry 1 |
* +----------------+--------------+---------------+---------------+ |
* | ...... | ...... | ...... | ...... | |
* +----------------+--------------+---------------+---------------+ |
* | Vector Control | Message Data | Upper Address | Lower Address | Entry N-1 |
* +----------------+--------------+---------------+---------------+ |
* ^ |
* | |
* +---- Bit 0 is vector Mask Bit (R/W) |
* |
* MSI-X Pending Bit Array (BAR[PBA BIR] + Pending Bit Array Offset): |
* |
* DW1 DW0 |
* +-------------------------------+ <-----------------------------------------+
* | Pending Bits 0 - 63 | QW 0
* +-------------------------------+
* | Pending Bits 64 - 127 | QW 1
* +-------------------------------+
* | ...... |
* +-------------------------------+
* | Pending Bits | QW (N-1)/64
* +-------------------------------+
*/
struct rt_pci_msix_conf
{
int index;
rt_uint32_t msg_ctrl;
void *table_base;
};
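/*
 * Worked example (illustrative): if DW1 reads 0x00002003, the Table BIR is
 * 3 and the table offset is 0x2000, so:
 *   table_base = <mapping of BAR3> + 0x2000
 *   entry i    = table_base + i * 16        (16-byte entries, see above)
 * A Message Control of 0x001f then gives a table size of 0x1f + 1 = 32.
 */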
struct rt_pci_msi_msg
{
rt_uint32_t address_lo;
rt_uint32_t address_hi;
rt_uint32_t data;
};
struct rt_pci_msi_desc
{
rt_list_t list;
int irq;
rt_size_t vector_used;
rt_size_t vector_count;
union
{
/* For MSI-X */
rt_bitmap_t *affinity;
/* For MSI */
rt_bitmap_t **affinities;
};
struct rt_pci_device *pdev;
struct rt_pci_msi_msg msg;
void *write_msi_msg_data;
void (*write_msi_msg)(struct rt_pci_msi_desc *, void *);
rt_bool_t is_msix;
union
{
struct rt_pci_msi_conf msi;
struct rt_pci_msix_conf msix;
};
void *priv;
};
#define rt_pci_msi_first_desc(pdev) \
(rt_list_isempty(&(pdev)->msi_desc_nodes) ? RT_NULL : \
rt_list_first_entry(&(pdev)->msi_desc_nodes, struct rt_pci_msi_desc, list))
#define rt_pci_msi_for_each_desc(pdev, desc) \
rt_list_for_each_entry(desc, &(pdev)->msi_desc_nodes, list)
#define rt_pci_msix_table_size(flags)   (((flags) & PCIM_MSIXCTRL_TABLE_SIZE) + 1)
rt_err_t rt_pci_msi_setup_irqs(struct rt_pci_device *pdev, int nvec, int type);
void rt_pci_msi_shutdown(struct rt_pci_device *pdev);
void rt_pci_msix_shutdown(struct rt_pci_device *pdev);
void rt_pci_msi_free_irqs(struct rt_pci_device *pdev);
void rt_pci_msi_write_msg(struct rt_pci_msi_desc *desc, struct rt_pci_msi_msg *msg);
void rt_pci_msi_mask_irq(struct rt_pic_irq *pirq);
void rt_pci_msi_unmask_irq(struct rt_pic_irq *pirq);
#endif /* __PCI_MSI_H__ */


@@ -0,0 +1,49 @@
menuconfig RT_USING_PCI
bool "Using Peripheral Component Interconnect Express (PCIe/PCI)"
depends on RT_USING_DM
depends on RT_USING_PIC
select RT_USING_ADT
select RT_USING_ADT_BITMAP
default n
config RT_PCI_MSI
bool "PCI MSI/MSI-X"
depends on RT_USING_PCI
default y
config RT_PCI_ENDPOINT
bool "PCI Endpoint"
depends on RT_USING_PCI
select RT_USING_ADT_REF
default n
config RT_PCI_SYS_64BIT
bool "PCI System 64bit"
depends on RT_USING_PCI
depends on ARCH_CPU_64BIT
default y
config RT_PCI_CACHE_LINE_SIZE
int "PCI Cache line size"
depends on RT_USING_PCI
default 8 if ARCH_CPU_64BIT
default 4
config RT_PCI_LOCKLESS
bool "PCI Lock less in options"
depends on RT_USING_PCI
default n
if RT_USING_PCI
comment "PCI Device Drivers"
config RT_PCI_ECAM
bool "PCIe ECAM"
depends on RT_USING_PCI
default y
help
        PCI Express Enhanced Configuration Access Mechanism
rsource "host/Kconfig"
endif
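# Example .config fragment (illustrative): a 64-bit OFW board enabling this
# stack might end up with the following (the CONFIG_ prefix assumes the
# usual kconfig .config convention):
#   CONFIG_RT_USING_PCI=y
#   CONFIG_RT_PCI_MSI=y
#   CONFIG_RT_PCI_ECAM=y
#   CONFIG_RT_PCI_HOST_GENERIC=y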


@@ -0,0 +1,28 @@
from building import *
objs = []
if not GetDepend(['RT_USING_PCI']):
Return('objs')
cwd = GetCurrentDir()
list = os.listdir(cwd)
CPPPATH = [cwd + '/../include']
src = ['access.c', 'host-bridge.c', 'irq.c', 'pci.c', 'pme.c', 'probe.c']
if GetDepend(['RT_USING_OFW']):
src += ['ofw.c']
if GetDepend(['RT_PCI_ECAM']):
src += ['ecam.c']
group = DefineGroup('DeviceDrivers', src, depend = [''], CPPPATH = CPPPATH)
for d in list:
path = os.path.join(cwd, d)
if os.path.isfile(os.path.join(path, 'SConscript')):
objs = objs + SConscript(os.path.join(d, 'SConscript'))
objs = objs + group
Return('objs')

components/drivers/pci/access.c (new executable file)

@@ -0,0 +1,159 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-10-24 GuEe-GUI first version
*/
#include <rthw.h>
#include <rtthread.h>
#include <drivers/pci.h>
struct rt_spinlock rt_pci_lock = { 0 };
#ifdef RT_PCI_LOCKLESS
#define pci_lock_config(l) do { (void)(l); } while (0)
#define pci_unlock_config(l) do { (void)(l); } while (0)
#else
#define pci_lock_config(l) l = rt_spin_lock_irqsave(&rt_pci_lock)
#define pci_unlock_config(l) rt_spin_unlock_irqrestore(&rt_pci_lock, l)
#endif
#define PCI_OPS_READ(name, type) \
rt_err_t rt_pci_bus_read_config_##name(struct rt_pci_bus *bus, rt_uint32_t devfn, int reg, type *value) \
{ \
rt_err_t err; \
rt_ubase_t level; \
rt_uint32_t data = 0; \
pci_lock_config(level); \
err = bus->ops->read(bus, devfn, reg, sizeof(type), &data); \
*value = err ? (type)(~0) : (type)data; \
pci_unlock_config(level); \
return err; \
}
#define PCI_OPS_WRITE(name, type) \
rt_err_t rt_pci_bus_write_config_##name(struct rt_pci_bus *bus, rt_uint32_t devfn, int reg, type value) \
{ \
rt_err_t err; \
rt_ubase_t level; \
pci_lock_config(level); \
err = bus->ops->write(bus, devfn, reg, sizeof(type), value); \
pci_unlock_config(level); \
return err; \
}
#define PCI_OPS(name, type) \
PCI_OPS_READ(name, type) \
PCI_OPS_WRITE(name, type)
PCI_OPS(u8, rt_uint8_t)
PCI_OPS(u16, rt_uint16_t)
PCI_OPS(u32, rt_uint32_t)
#undef PCI_OPS_WRITE
#undef PCI_OPS_READ
#undef PCI_OPS
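/*
 * The PCI_OPS() expansions above generate the public accessors
 * rt_pci_bus_{read,write}_config_{u8,u16,u32}. Usage sketch (illustrative;
 * PCIR_VENDOR is assumed from the pci_regs.h naming used elsewhere in
 * this patch):
 *
 *  rt_uint16_t vendor;
 *
 *  rt_pci_bus_read_config_u16(bus, RT_PCI_DEVFN(0, 0), PCIR_VENDOR, &vendor);
 */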
rt_err_t rt_pci_bus_read_config_uxx(struct rt_pci_bus *bus,
rt_uint32_t devfn, int reg, int width, rt_uint32_t *value)
{
void *base;
if ((base = bus->ops->map(bus, devfn, reg)))
{
if (width == 1)
{
*value = HWREG8(base);
}
else if (width == 2)
{
*value = HWREG16(base);
}
else
{
*value = HWREG32(base);
}
return RT_EOK;
}
return -RT_ERROR;
}
rt_err_t rt_pci_bus_write_config_uxx(struct rt_pci_bus *bus,
rt_uint32_t devfn, int reg, int width, rt_uint32_t value)
{
void *base;
if ((base = bus->ops->map(bus, devfn, reg)))
{
if (width == 1)
{
HWREG8(base) = value;
}
else if (width == 2)
{
HWREG16(base) = value;
}
else
{
HWREG32(base) = value;
}
return RT_EOK;
}
return -RT_ERROR;
}
rt_err_t rt_pci_bus_read_config_generic_u32(struct rt_pci_bus *bus,
rt_uint32_t devfn, int reg, int width, rt_uint32_t *value)
{
void *base;
    /* Map the aligned dword; sub-dword reads are extracted below */
    if ((base = bus->ops->map(bus, devfn, reg & ~0x3)))
{
*value = HWREG32(base);
if (width <= 2)
{
*value = (*value >> (8 * (reg & 3))) & ((1 << (width * 8)) - 1);
}
return RT_EOK;
}
return -RT_ERROR;
}
rt_err_t rt_pci_bus_write_config_generic_u32(struct rt_pci_bus *bus,
rt_uint32_t devfn, int reg, int width, rt_uint32_t value)
{
void *base;
if ((base = bus->ops->map(bus, devfn, reg & ~0x3)))
{
if (width == 4)
{
HWREG32(base) = value;
}
else
{
rt_uint32_t mask, tmp;
mask = ~(((1 << (width * 8)) - 1) << ((reg & 0x3) * 8));
tmp = HWREG32(base) & mask;
tmp |= value << ((reg & 0x3) * 8);
HWREG32(base) = tmp;
}
return RT_EOK;
}
return -RT_ERROR;
}


@@ -0,0 +1,72 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-10-24 GuEe-GUI first version
*/
#include <rthw.h>
#include <rtthread.h>
#define DBG_TAG "pci.ecam"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include "ecam.h"
struct pci_ecam_config_window *pci_ecam_create(struct rt_pci_host_bridge *host_bridge,
const struct pci_ecam_ops *ops)
{
struct pci_ecam_config_window *conf_win = rt_calloc(1, sizeof(*conf_win));
if (!conf_win)
{
return RT_NULL;
}
conf_win->bus_range = host_bridge->bus_range;
conf_win->bus_shift = ops->bus_shift;
conf_win->ops = ops;
host_bridge->ops = (const struct rt_pci_ops *)&ops->pci_ops;
return conf_win;
}
void *pci_ecam_map(struct rt_pci_bus *bus, rt_uint32_t devfn, int where)
{
struct pci_ecam_config_window *conf_win = bus->sysdata;
const struct pci_ecam_ops *eops = conf_win->ops;
void *win = conf_win->win, *map;
rt_uint32_t busn = bus->number, bus_shift = eops->bus_shift, devfn_shift = bus_shift - 8;
busn -= conf_win->bus_range[0];
if (bus_shift)
{
rt_uint32_t bus_offset = (busn & PCIE_ECAM_BUS_MASK) << bus_shift;
rt_uint32_t devfn_offset = (devfn & PCIE_ECAM_DEVFN_MASK) << devfn_shift;
where &= PCIE_ECAM_REG_MASK;
map = win + (bus_offset | devfn_offset | where);
}
else
{
map = win + PCIE_ECAM_OFFSET(busn, devfn, where);
}
return map;
}
const struct pci_ecam_ops pci_generic_ecam_ops =
{
.pci_ops =
{
.map = pci_ecam_map,
.read = rt_pci_bus_read_config_uxx,
.write = rt_pci_bus_write_config_uxx,
}
};


@@ -0,0 +1,69 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-10-24 GuEe-GUI first version
*/
#ifndef __RT_PCI_ECAM_H__
#define __RT_PCI_ECAM_H__
#include <drivers/pci.h>
#include <drivers/ofw.h>
#include <drivers/ofw_io.h>
#include <drivers/platform.h>
/*
* Memory address shift values for the byte-level address that
* can be used when accessing the PCI Express Configuration Space.
*/
/*
* Enhanced Configuration Access Mechanism (ECAM)
*
* See PCI Express Base Specification, Revision 5.0, Version 1.0,
* Section 7.2.2, Table 7-1, p. 677.
*/
#define PCIE_ECAM_BUS_SHIFT 20 /* Bus number */
#define PCIE_ECAM_DEVFN_SHIFT 12 /* Device and Function number */
#define PCIE_ECAM_BUS_MASK 0xff
#define PCIE_ECAM_DEVFN_MASK 0xff
#define PCIE_ECAM_REG_MASK 0xfff /* Limit offset to a maximum of 4K */
#define PCIE_ECAM_BUS(x) (((x) & PCIE_ECAM_BUS_MASK) << PCIE_ECAM_BUS_SHIFT)
#define PCIE_ECAM_DEVFN(x) (((x) & PCIE_ECAM_DEVFN_MASK) << PCIE_ECAM_DEVFN_SHIFT)
#define PCIE_ECAM_REG(x) ((x) & PCIE_ECAM_REG_MASK)
#define PCIE_ECAM_OFFSET(bus, devfn, where) \
(PCIE_ECAM_BUS(bus) | PCIE_ECAM_DEVFN(devfn) | PCIE_ECAM_REG(where))
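/*
 * Worked example (illustrative): bus 1, device 2, function 0,
 * register 0x10 (BAR0):
 *   devfn  = RT_PCI_DEVFN(2, 0)              = 0x10
 *   offset = PCIE_ECAM_OFFSET(1, 0x10, 0x10)
 *          = (1 << 20) | (0x10 << 12) | 0x10 = 0x110010
 */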
struct pci_ecam_ops
{
rt_uint32_t bus_shift;
const struct rt_pci_ops pci_ops;
};
struct pci_ecam_config_window
{
rt_uint32_t *bus_range;
rt_uint32_t bus_shift;
void *win;
void *priv;
const struct pci_ecam_ops *ops;
};
/* Default ECAM ops */
extern const struct pci_ecam_ops pci_generic_ecam_ops;
void *pci_ecam_map(struct rt_pci_bus *bus, rt_uint32_t devfn, int where);
struct pci_ecam_config_window *pci_ecam_create(struct rt_pci_host_bridge *host_bridge,
const struct pci_ecam_ops *ops);
rt_err_t pci_host_common_probe(struct rt_platform_device *pdev);
rt_err_t pci_host_common_remove(struct rt_platform_device *pdev);
#endif /* __RT_PCI_ECAM_H__ */


@@ -0,0 +1,15 @@
from building import *
group = []
if not GetDepend(['RT_PCI_ENDPOINT']):
Return('group')
cwd = GetCurrentDir()
CPPPATH = [cwd + '/../../include']
src = ['endpoint.c']
group = DefineGroup('DeviceDrivers', src, depend = [''], CPPPATH = CPPPATH)
Return('group')


@@ -0,0 +1,504 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-08-25 GuEe-GUI first version
*/
#include <drivers/pci_endpoint.h>
#define DBG_TAG "pci.ep"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
static rt_list_t _ep_nodes = RT_LIST_OBJECT_INIT(_ep_nodes);
static struct rt_spinlock _ep_lock = { 0 };
rt_err_t rt_pci_ep_write_header(struct rt_pci_ep *ep, rt_uint8_t func_no,
struct rt_pci_ep_header *hdr)
{
rt_err_t err;
if (ep && ep->ops && hdr && func_no < ep->max_functions)
{
if (ep->ops->write_header)
{
rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
err = ep->ops->write_header(ep, func_no, hdr);
rt_mutex_release(&ep->lock);
}
else
{
err = -RT_ENOSYS;
}
}
else
{
err = -RT_EINVAL;
}
return err;
}
rt_err_t rt_pci_ep_set_bar(struct rt_pci_ep *ep, rt_uint8_t func_no,
struct rt_pci_ep_bar *bar, int bar_idx)
{
rt_err_t err = RT_EOK;
if (ep && ep->ops && func_no < ep->max_functions && bar &&
bar_idx < PCI_STD_NUM_BARS)
{
struct rt_pci_bus_resource *bus_bar = &bar->bus;
if (bar_idx == (PCI_STD_NUM_BARS - 1) &&
(bus_bar->flags & PCIM_BAR_MEM_TYPE_64))
{
err = -RT_EINVAL;
LOG_E("%s: Set BAR[%d] can't not 64bit", ep->name, bar_idx);
}
if (rt_upper_32_bits(bus_bar->size) &&
!(bus_bar->flags & PCIM_BAR_MEM_TYPE_64))
{
err = -RT_EINVAL;
LOG_E("%s: Set BAR[%d] size is no support 64bit", ep->name, bar_idx);
}
if ((bus_bar->flags & PCIM_BAR_SPACE_IO) &&
(bus_bar->flags & PCIM_BAR_IO_MASK))
{
err = -RT_EINVAL;
LOG_E("%s: Set BAR[%d] io flags is invalid", ep->name, bar_idx);
}
if (!err)
{
if (ep->ops->set_bar)
{
rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
err = ep->ops->set_bar(ep, func_no, bar, bar_idx);
rt_mutex_release(&ep->lock);
}
else
{
err = -RT_ENOSYS;
}
}
}
else
{
err = -RT_EINVAL;
}
return err;
}
rt_err_t rt_pci_ep_clear_bar(struct rt_pci_ep *ep, rt_uint8_t func_no,
struct rt_pci_ep_bar *bar, int bar_idx)
{
rt_err_t err;
if (ep && ep->ops && func_no < ep->max_functions && bar &&
bar_idx < PCI_STD_NUM_BARS)
{
if (ep->ops->clear_bar)
{
rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
err = ep->ops->clear_bar(ep, func_no, bar, bar_idx);
rt_mutex_release(&ep->lock);
}
else
{
err = -RT_ENOSYS;
}
}
else
{
err = -RT_EINVAL;
}
return err;
}
rt_err_t rt_pci_ep_map_addr(struct rt_pci_ep *ep, rt_uint8_t func_no,
rt_ubase_t addr, rt_uint64_t pci_addr, rt_size_t size)
{
rt_err_t err;
if (ep && ep->ops && func_no < ep->max_functions && size)
{
if (ep->ops->map_addr)
{
rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
err = ep->ops->map_addr(ep, func_no, addr, pci_addr, size);
rt_mutex_release(&ep->lock);
}
else
{
err = -RT_ENOSYS;
}
}
else
{
err = -RT_EINVAL;
}
return err;
}
rt_err_t rt_pci_ep_unmap_addr(struct rt_pci_ep *ep, rt_uint8_t func_no,
rt_ubase_t addr)
{
rt_err_t err;
if (ep && ep->ops && func_no < ep->max_functions)
{
if (ep->ops->unmap_addr)
{
rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
err = ep->ops->unmap_addr(ep, func_no, addr);
rt_mutex_release(&ep->lock);
}
else
{
err = -RT_ENOSYS;
}
}
else
{
err = -RT_EINVAL;
}
return err;
}
rt_err_t rt_pci_ep_set_msi(struct rt_pci_ep *ep, rt_uint8_t func_no,
unsigned irq_nr)
{
rt_err_t err;
if (ep && ep->ops && func_no < ep->max_functions)
{
        if (ep->ops->set_msi)
        {
            err = -RT_EINVAL;

            /* Program the smallest log2 count that covers irq_nr, then stop */
            for (int log2 = 0; log2 < 5; ++log2)
            {
                if (irq_nr <= (1U << log2))
                {
                    rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
                    err = ep->ops->set_msi(ep, func_no, log2);
                    rt_mutex_release(&ep->lock);

                    break;
                }
            }
        }
else
{
err = -RT_ENOSYS;
}
}
else
{
err = -RT_EINVAL;
}
return err;
}
rt_err_t rt_pci_ep_get_msi(struct rt_pci_ep *ep, rt_uint8_t func_no,
unsigned *out_irq_nr)
{
rt_err_t err;
if (ep && ep->ops && func_no < ep->max_functions && out_irq_nr)
{
if (ep->ops->get_msi)
{
rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
err = ep->ops->get_msi(ep, func_no, out_irq_nr);
rt_mutex_release(&ep->lock);
}
else
{
err = -RT_ENOSYS;
}
}
else
{
err = -RT_EINVAL;
}
return err;
}
rt_err_t rt_pci_ep_set_msix(struct rt_pci_ep *ep, rt_uint8_t func_no,
unsigned irq_nr, int bar_idx, rt_off_t offset)
{
rt_err_t err;
if (ep && ep->ops && func_no < ep->max_functions && irq_nr < 2048 &&
bar_idx < PCI_STD_NUM_BARS)
{
if (ep->ops->set_msix)
{
rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
err = ep->ops->set_msix(ep, func_no, irq_nr, bar_idx, offset);
rt_mutex_release(&ep->lock);
}
else
{
err = -RT_ENOSYS;
}
}
else
{
err = -RT_EINVAL;
}
return err;
}
rt_err_t rt_pci_ep_get_msix(struct rt_pci_ep *ep, rt_uint8_t func_no,
unsigned *out_irq_nr)
{
rt_err_t err;
if (ep && ep->ops && func_no < ep->max_functions && out_irq_nr)
{
if (ep->ops->get_msix)
{
rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
err = ep->ops->get_msix(ep, func_no, out_irq_nr);
rt_mutex_release(&ep->lock);
}
else
{
err = -RT_ENOSYS;
}
}
else
{
err = -RT_EINVAL;
}
return err;
}
rt_err_t rt_pci_ep_raise_irq(struct rt_pci_ep *ep, rt_uint8_t func_no,
enum rt_pci_ep_irq type, unsigned irq)
{
rt_err_t err;
if (ep && ep->ops && func_no < ep->max_functions)
{
if (ep->ops->raise_irq)
{
rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
err = ep->ops->raise_irq(ep, func_no, type, irq);
rt_mutex_release(&ep->lock);
}
else
{
err = -RT_ENOSYS;
}
}
else
{
err = -RT_EINVAL;
}
return err;
}
rt_err_t rt_pci_ep_start(struct rt_pci_ep *ep)
{
rt_err_t err;
if (ep && ep->ops)
{
if (ep->ops->start)
{
rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
err = ep->ops->start(ep);
rt_mutex_release(&ep->lock);
}
else
{
err = -RT_ENOSYS;
}
}
else
{
err = -RT_EINVAL;
}
return err;
}
rt_err_t rt_pci_ep_stop(struct rt_pci_ep *ep)
{
rt_err_t err;
if (ep && ep->ops)
{
if (ep->ops->stop)
{
rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
err = ep->ops->stop(ep);
rt_mutex_release(&ep->lock);
}
else
{
err = -RT_ENOSYS;
}
}
else
{
err = -RT_EINVAL;
}
return err;
}
rt_err_t rt_pci_ep_register(struct rt_pci_ep *ep)
{
rt_ubase_t level;
if (!ep || !ep->ops)
{
return -RT_EINVAL;
}
rt_list_init(&ep->list);
rt_ref_init(&ep->ref);
rt_list_init(&ep->epf_nodes);
rt_mutex_init(&ep->lock, ep->name, RT_IPC_FLAG_PRIO);
level = rt_spin_lock_irqsave(&_ep_lock);
rt_list_insert_before(&_ep_nodes, &ep->list);
rt_spin_unlock_irqrestore(&_ep_lock, level);
return RT_EOK;
}
rt_err_t rt_pci_ep_unregister(struct rt_pci_ep *ep)
{
rt_ubase_t level;
rt_err_t err = RT_EOK;
if (!ep)
{
return -RT_EINVAL;
}
level = rt_spin_lock_irqsave(&_ep_lock);
if (rt_ref_read(&ep->ref) > 1)
{
err = -RT_EBUSY;
}
else
{
rt_list_remove(&ep->list);
rt_mutex_detach(&ep->lock);
}
rt_spin_unlock_irqrestore(&_ep_lock, level);
return err;
}
rt_err_t rt_pci_ep_add_epf(struct rt_pci_ep *ep, struct rt_pci_epf *epf)
{
rt_err_t err = RT_EOK;
if (!ep || !epf || !epf->name)
{
return -RT_EINVAL;
}
if (epf->func_no > ep->max_functions - 1)
{
LOG_E("%s function No(%d) > %s max function No(%d - 1)",
epf->name, epf->func_no, ep->name, ep->max_functions);
return -RT_EINVAL;
}
epf->ep = ep;
rt_list_init(&epf->list);
rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
if (!rt_bitmap_test_bit(ep->functions_map, epf->func_no))
{
rt_bitmap_set_bit(ep->functions_map, epf->func_no);
rt_list_insert_before(&ep->epf_nodes, &epf->list);
}
else
{
err = -RT_EINVAL;
LOG_E("%s function No(%d) is repeating", epf->name, epf->func_no);
}
rt_mutex_release(&ep->lock);
return err;
}
rt_err_t rt_pci_ep_remove_epf(struct rt_pci_ep *ep, struct rt_pci_epf *epf)
{
if (!ep || !epf)
{
return -RT_EINVAL;
}
rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
rt_bitmap_clear_bit(ep->functions_map, epf->func_no);
rt_list_remove(&epf->list);
rt_mutex_release(&ep->lock);
return RT_EOK;
}
struct rt_pci_ep *rt_pci_ep_get(const char *name)
{
rt_ubase_t level;
struct rt_pci_ep *ep = RT_NULL, *ep_tmp;
level = rt_spin_lock_irqsave(&_ep_lock);
rt_list_for_each_entry(ep_tmp, &_ep_nodes, list)
{
if (!name || !rt_strcmp(ep_tmp->name, name))
{
ep = ep_tmp;
rt_ref_get(&ep->ref);
break;
}
}
rt_spin_unlock_irqrestore(&_ep_lock, level);
return ep;
}
static void pci_ep_release(struct rt_ref *ref)
{
struct rt_pci_ep *ep = rt_container_of(ref, struct rt_pci_ep, ref);
rt_pci_ep_unregister(ep);
}
void rt_pci_ep_put(struct rt_pci_ep *ep)
{
if (ep)
{
rt_ref_put(&ep->ref, &pci_ep_release);
}
}


@@ -0,0 +1,129 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-10-24 GuEe-GUI first version
*/
#include <rtthread.h>
#include <drivers/pm.h>
#include <drivers/pci.h>
#ifdef RT_USING_PM
struct host_bridge_pm_status
{
rt_uint8_t mode;
rt_bool_t enable;
};
static const enum rt_pci_power system_pci_pm_mode[] =
{
[PM_SLEEP_MODE_NONE] = RT_PCI_D0,
[PM_SLEEP_MODE_IDLE] = RT_PCI_D3HOT,
[PM_SLEEP_MODE_LIGHT] = RT_PCI_D1,
[PM_SLEEP_MODE_DEEP] = RT_PCI_D1,
[PM_SLEEP_MODE_STANDBY] = RT_PCI_D2,
[PM_SLEEP_MODE_SHUTDOWN] = RT_PCI_D3COLD,
};
static rt_bool_t pci_device_pm_ops(struct rt_pci_device *pdev, void *data)
{
struct host_bridge_pm_status *status = data;
rt_pci_enable_wake(pdev, system_pci_pm_mode[status->mode], status->enable);
    /* Return RT_FALSE so the enumeration visits every device */
return RT_FALSE;
}
static rt_err_t host_bridge_pm_suspend(const struct rt_device *device, rt_uint8_t mode)
{
struct host_bridge_pm_status status;
struct rt_pci_device *pdev = rt_container_of(device, struct rt_pci_device, parent);
status.mode = mode;
status.enable = RT_FALSE;
rt_pci_enum_device(pdev->bus, pci_device_pm_ops, &status);
return RT_EOK;
}
static void host_bridge_pm_resume(const struct rt_device *device, rt_uint8_t mode)
{
struct host_bridge_pm_status status;
struct rt_pci_device *pdev = rt_container_of(device, struct rt_pci_device, parent);
status.mode = mode;
status.enable = RT_TRUE;
rt_pci_enum_device(pdev->bus, pci_device_pm_ops, &status);
}
static const struct rt_device_pm_ops host_bridge_pm_ops =
{
.suspend = host_bridge_pm_suspend,
.resume = host_bridge_pm_resume,
};
#endif /* RT_USING_PM */
static void host_bridge_free(struct rt_pci_device *pdev)
{
#ifdef RT_USING_PM
rt_pm_device_unregister(&pdev->parent);
#endif
}
static rt_err_t host_bridge_probe(struct rt_pci_device *pdev)
{
rt_err_t err = RT_EOK;
rt_pci_set_master(pdev);
#ifdef RT_USING_PM
rt_pm_device_register(&pdev->parent, &host_bridge_pm_ops);
#endif
return err;
}
static rt_err_t host_bridge_remove(struct rt_pci_device *pdev)
{
host_bridge_free(pdev);
rt_pci_clear_master(pdev);
return RT_EOK;
}
static rt_err_t host_bridge_shutdown(struct rt_pci_device *pdev)
{
host_bridge_free(pdev);
return RT_EOK;
}
static const struct rt_pci_device_id host_bridge_pci_ids[] =
{
/* PCI host bridges */
{ RT_PCI_DEVICE_ID(PCI_VENDOR_ID_REDHAT, 0x0008) },
/* Any PCI-Express port */
{ RT_PCI_DEVICE_CLASS(PCIS_BRIDGE_PCI_NORMAL, ~0) },
/* PCI-to-PCI bridge */
{ RT_PCI_DEVICE_CLASS(PCIS_BRIDGE_PCI_SUBTRACTIVE, ~0) },
/* Any Root Complex Event Collector */
{ RT_PCI_DEVICE_CLASS(((PCIS_SYSTEM_RCEC << 8) | 0x00), ~0) },
{ /* sentinel */ }
};
static struct rt_pci_driver host_bridge_driver =
{
.name = "host-bridge",
.ids = host_bridge_pci_ids,
.probe = host_bridge_probe,
.remove = host_bridge_remove,
.shutdown = host_bridge_shutdown,
};
RT_PCI_DRIVER_EXPORT(host_bridge_driver);


@@ -0,0 +1,10 @@
config RT_PCI_HOST_COMMON
bool "Common PCI host controller"
depends on RT_PCI_ECAM
default y
config RT_PCI_HOST_GENERIC
bool "Generic PCI host controller"
depends on RT_PCI_ECAM
select RT_PCI_HOST_COMMON
default y


@@ -0,0 +1,25 @@
from building import *
objs = []
cwd = GetCurrentDir()
list = os.listdir(cwd)
CPPPATH = [cwd + '/../../include']
src = []
if GetDepend(['RT_PCI_HOST_COMMON']):
src += ['pci-host-common.c']
if GetDepend(['RT_PCI_HOST_GENERIC']):
src += ['pci-host-generic.c']
group = DefineGroup('DeviceDrivers', src, depend = [''], CPPPATH = CPPPATH)
for d in list:
path = os.path.join(cwd, d)
if os.path.isfile(os.path.join(path, 'SConscript')):
objs = objs + SConscript(os.path.join(d, 'SConscript'))
objs = objs + group
Return('objs')


@@ -0,0 +1,85 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-10-24 GuEe-GUI first version
*/
#include <rtthread.h>
#include "../ecam.h"
rt_err_t pci_host_common_probe(struct rt_platform_device *pdev)
{
void *base;
rt_err_t err;
struct rt_device *dev = &pdev->parent;
struct pci_ecam_config_window *conf_win;
struct rt_pci_host_bridge *host_bridge = rt_pci_host_bridge_alloc(0);
if (!host_bridge)
{
return -RT_ENOMEM;
}
if (!(base = rt_dm_dev_iomap(dev, 0)))
{
err = -RT_EIO;
goto _fail;
}
host_bridge->parent.ofw_node = dev->ofw_node;
if ((err = rt_pci_host_bridge_init(host_bridge)))
{
goto _fail;
}
host_bridge->sysdata = conf_win = pci_ecam_create(host_bridge,
(const struct pci_ecam_ops *)pdev->id->data);
if (!conf_win)
{
err = -RT_ENOMEM;
goto _fail;
}
conf_win->win = base;
conf_win->priv = host_bridge;
if ((err = rt_pci_host_bridge_probe(host_bridge)))
{
goto _fail;
}
dev->user_data = host_bridge;
return RT_EOK;
_fail:
if (base)
{
rt_iounmap(base);
}
rt_pci_host_bridge_free(host_bridge);
return err;
}
rt_err_t pci_host_common_remove(struct rt_platform_device *pdev)
{
struct pci_ecam_config_window *conf_win;
struct rt_pci_host_bridge *host_bridge = pdev->parent.user_data;
rt_pci_host_bridge_remove(host_bridge);
conf_win = host_bridge->sysdata;
rt_iounmap(conf_win->win);
rt_pci_host_bridge_free(host_bridge);
return RT_EOK;
}


@@ -0,0 +1,66 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-10-24 GuEe-GUI first version
*/
#include <rtthread.h>
#include "../ecam.h"
static const struct pci_ecam_ops gen_pci_cfg_cam_bus_ops =
{
.bus_shift = 16,
.pci_ops =
{
.map = pci_ecam_map,
.read = rt_pci_bus_read_config_uxx,
.write = rt_pci_bus_write_config_uxx,
}
};
static void *pci_dw_ecam_map_bus(struct rt_pci_bus *bus, rt_uint32_t devfn, int where)
{
struct pci_ecam_config_window *conf_win = bus->sysdata;
if (bus->number == conf_win->bus_range[0] && RT_PCI_SLOT(devfn) > 0)
{
return RT_NULL;
}
return pci_ecam_map(bus, devfn, where);
}
static const struct pci_ecam_ops pci_dw_ecam_bus_ops =
{
.pci_ops =
{
.map = pci_dw_ecam_map_bus,
.read = rt_pci_bus_read_config_uxx,
.write = rt_pci_bus_write_config_uxx,
}
};
static const struct rt_ofw_node_id gen_pci_ofw_ids[] =
{
{ .compatible = "pci-host-cam-generic", .data = &gen_pci_cfg_cam_bus_ops },
{ .compatible = "pci-host-ecam-generic", .data = &pci_generic_ecam_ops },
{ .compatible = "marvell,armada8k-pcie-ecam", .data = &pci_dw_ecam_bus_ops },
{ .compatible = "socionext,synquacer-pcie-ecam", .data = &pci_dw_ecam_bus_ops },
{ .compatible = "snps,dw-pcie-ecam", .data = &pci_dw_ecam_bus_ops },
{ /* sentinel */ }
};
static struct rt_platform_driver gen_pci_driver =
{
.name = "pci-host-generic",
.ids = gen_pci_ofw_ids,
.probe = pci_host_common_probe,
.remove = pci_host_common_remove,
};
RT_PLATFORM_DRIVER_EXPORT(gen_pci_driver);


@@ -0,0 +1,60 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-11-07 GuEe-GUI first version
*/
#include <rtthread.h>
#define DBG_TAG "pci.irq"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include <drivers/pci.h>
void rt_pci_assign_irq(struct rt_pci_device *pdev)
{
int irq = 0;
rt_uint8_t pin, slot = -1;
struct rt_pci_host_bridge *host_bridge = rt_pci_find_host_bridge(pdev->bus);
if (!host_bridge->irq_map)
{
LOG_D("PCI-Device<%s> runtime IRQ mapping not provided by platform",
rt_dm_dev_get_name(&pdev->parent));
return;
}
/* Must try the swizzle when interrupt line passes through a P2P bridge */
rt_pci_read_config_u8(pdev, PCIR_INTPIN, &pin);
if (pin > RT_PCI_INTX_PIN_MAX)
{
pin = 1;
}
if (pin)
{
if (host_bridge->irq_slot)
{
slot = host_bridge->irq_slot(pdev, &pin);
}
/* Map IRQ */
if ((irq = host_bridge->irq_map(pdev, slot, pin)) == -1)
{
irq = 0;
}
}
pdev->irq = irq;
LOG_D("PCI-Device<%s> assign IRQ: got %d", rt_dm_dev_get_name(&pdev->parent), pdev->irq);
/* Save IRQ */
rt_pci_write_config_u8(pdev, PCIR_INTLINE, irq);
}
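/*
 * Usage sketch (illustrative): a platform can supply the two callbacks used
 * above when registering its host bridge. The `my_` names and
 * MY_INTX_IRQ_BASE are hypothetical, and the swizzle shown is one common
 * convention, not mandated by this framework.
 *
 *  static rt_uint8_t my_irq_slot(struct rt_pci_device *pdev, rt_uint8_t *pinp)
 *  {
 *      // Swizzle INTx up through any PCI-to-PCI bridges
 *      while (!rt_pci_is_root_bus(pdev->bus))
 *      {
 *          *pinp = rt_pci_irq_intx(pdev, *pinp);
 *          pdev = pdev->bus->self;
 *      }
 *      return RT_PCI_SLOT(pdev->devfn);
 *  }
 *
 *  static int my_irq_map(struct rt_pci_device *pdev, rt_uint8_t slot, rt_uint8_t pin)
 *  {
 *      // Board-specific: INTA..INTD of each slot rotate onto four lines
 *      return MY_INTX_IRQ_BASE + (slot + pin - 1) % RT_PCI_INTX_PIN_MAX;
 *  }
 *
 *  host_bridge->irq_slot = my_irq_slot;
 *  host_bridge->irq_map  = my_irq_map;
 */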


@@ -0,0 +1,15 @@
from building import *
group = []
if not GetDepend(['RT_PCI_MSI']):
Return('group')
cwd = GetCurrentDir()
CPPPATH = [cwd + '/../../include']
src = ['device.c', 'irq.c', 'msi.c']
group = DefineGroup('DeviceDrivers', src, depend = [''], CPPPATH = CPPPATH)
Return('group')


@@ -0,0 +1,46 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-10-24 GuEe-GUI first version
*/
#include <drivers/pci.h>
void rt_pci_msi_init(struct rt_pci_device *pdev)
{
if (pdev && (pdev->msi_cap = rt_pci_find_capability(pdev, PCIY_MSI)))
{
rt_uint16_t ctrl;
rt_pci_read_config_u16(pdev, pdev->msi_cap + PCIR_MSI_CTRL, &ctrl);
if (ctrl & PCIM_MSICTRL_MSI_ENABLE)
{
rt_pci_write_config_u16(pdev, pdev->msi_cap + PCIR_MSI_CTRL, ctrl & ~PCIM_MSICTRL_MSI_ENABLE);
}
if (!(ctrl & PCIM_MSICTRL_64BIT))
{
pdev->no_64bit_msi = RT_TRUE;
}
}
}
void rt_pci_msix_init(struct rt_pci_device *pdev)
{
if (pdev && (pdev->msix_cap = rt_pci_find_capability(pdev, PCIY_MSIX)))
{
rt_uint16_t ctrl;
rt_pci_read_config_u16(pdev, pdev->msix_cap + PCIR_MSIX_CTRL, &ctrl);
if (ctrl & PCIM_MSIXCTRL_MSIX_ENABLE)
{
rt_pci_write_config_u16(pdev, pdev->msix_cap + PCIR_MSIX_CTRL, ctrl & ~PCIM_MSIXCTRL_MSIX_ENABLE);
}
}
}


@@ -0,0 +1,146 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-10-24 GuEe-GUI first version
*/
#include <drivers/pci_msi.h>
#define DBG_TAG "pci.msi.irq"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
static struct rt_spinlock msi_irq_map_lock = { 0 };
static RT_BITMAP_DECLARE(msi_irq_map, MAX_HANDLERS) = { 0 };
rt_err_t rt_pci_msi_setup_irqs(struct rt_pci_device *pdev, int nvec, int type)
{
int irq, index = 0, irq_nr = 0;
rt_err_t err = RT_EOK;
struct rt_pic_irq *pirq;
struct rt_pic *msi_pic;
struct rt_pci_msi_desc *desc;
if (!pdev)
{
return -RT_EINVAL;
}
msi_pic = pdev->msi_pic;
if (type == PCIY_MSI)
{
int last_irq = -1;
rt_size_t irq_nr;
desc = rt_pci_msi_first_desc(pdev);
irq_nr = 1 << desc->msi.cap.multi_msg_use;
rt_hw_spin_lock(&msi_irq_map_lock.lock);
_retry:
for (int i = 0; i < irq_nr; ++i)
{
if ((irq = msi_pic->ops->irq_alloc_msi(msi_pic, desc)) < 0)
{
err = irq;
LOG_E("Setup %s[%d] IRQ error = %s", "MSI", i, rt_strerror(err));
break;
}
if (last_irq >= 0 && last_irq + 1 != irq)
{
                for (int idx = 0; idx < i; ++idx, --last_irq)
{
rt_bitmap_set_bit(msi_irq_map, last_irq);
}
last_irq = irq;
goto _retry;
}
last_irq = irq;
}
if (!err)
{
/* Get the first irq */
            desc->irq = irq - irq_nr + 1;
}
rt_bitmap_for_each_set_bit(msi_irq_map, irq, MAX_HANDLERS)
{
msi_pic->ops->irq_free_msi(msi_pic, irq);
/* Free bit so the next user doesn't need to bzero */
rt_bitmap_clear_bit(msi_irq_map, irq);
}
rt_hw_spin_unlock(&msi_irq_map_lock.lock);
if (!err)
{
for (int idx = 0; idx < nvec; ++idx)
{
                pirq = rt_pic_find_pirq(msi_pic, desc->irq + idx);
pirq->msi_desc = desc;
msi_pic->ops->irq_compose_msi_msg(pirq, &desc->msg);
rt_pci_msi_write_msg(desc, &desc->msg);
}
}
}
else if (type == PCIY_MSIX)
{
rt_pci_msi_for_each_desc(pdev, desc)
{
if ((irq = msi_pic->ops->irq_alloc_msi(msi_pic, desc)) < 0)
{
err = irq;
LOG_E("Setup %s[%d] IRQ error = %s", "MSI-X",
desc->msix.index, rt_strerror(err));
break;
}
desc->irq = irq;
pirq = rt_pic_find_pirq(msi_pic, irq);
pirq->msi_desc = desc;
msi_pic->ops->irq_compose_msi_msg(pirq, &desc->msg);
rt_pci_msi_write_msg(desc, &desc->msg);
++irq_nr;
}
if (err)
{
rt_pci_msi_for_each_desc(pdev, desc)
{
if (index >= irq_nr)
{
break;
}
msi_pic->ops->irq_free_msi(msi_pic, desc->irq);
++index;
}
}
}
else
{
err = -RT_EINVAL;
}
return err;
}

View File

@ -0,0 +1,949 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-11-07 GuEe-GUI first version
*/
#include <drivers/pci_msi.h>
#include <drivers/core/numa.h>
#define DBG_TAG "pci.msi"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
/* MSI-X supports at most 2048 vectors per function */
static RT_IRQ_AFFINITY_DECLARE(msi_affinity_default[2048]) rt_section(".bss.noclean.pci.msi");
rt_inline void spin_lock(struct rt_spinlock *lock)
{
rt_hw_spin_lock(&lock->lock);
}
rt_inline void spin_unlock(struct rt_spinlock *lock)
{
rt_hw_spin_unlock(&lock->lock);
}
rt_inline void *msix_table_base(struct rt_pci_msix_conf *msix)
{
return msix->table_base + msix->index * PCIM_MSIX_ENTRY_SIZE;
}
rt_inline void *msix_vector_ctrl_base(struct rt_pci_msix_conf *msix)
{
return msix_table_base(msix) + PCIM_MSIX_ENTRY_VECTOR_CTRL;
}
rt_inline void msix_write_vector_ctrl(struct rt_pci_msix_conf *msix,
rt_uint32_t ctrl)
{
void *vc_addr = msix_vector_ctrl_base(msix);
HWREG32(vc_addr) = ctrl;
}
rt_inline void msix_mask(struct rt_pci_msix_conf *msix)
{
msix->msg_ctrl |= PCIM_MSIX_ENTRYVECTOR_CTRL_MASK;
msix_write_vector_ctrl(msix, msix->msg_ctrl);
/* Flush write to device */
HWREG32(msix->table_base);
}
static void msix_update_ctrl(struct rt_pci_device *pdev,
rt_uint16_t clear, rt_uint16_t set)
{
rt_uint16_t msgctl;
rt_pci_read_config_u16(pdev, pdev->msix_cap + PCIR_MSIX_CTRL, &msgctl);
msgctl &= ~clear;
msgctl |= set;
rt_pci_write_config_u16(pdev, pdev->msix_cap + PCIR_MSIX_CTRL, msgctl);
}
rt_inline void msix_unmask(struct rt_pci_msix_conf *msix)
{
msix->msg_ctrl &= ~PCIM_MSIX_ENTRYVECTOR_CTRL_MASK;
msix_write_vector_ctrl(msix, msix->msg_ctrl);
}
rt_inline rt_uint32_t msi_multi_mask(struct rt_pci_msi_conf *msi)
{
if (msi->cap.multi_msg_max >= 5)
{
return 0xffffffff;
}
return (1 << (1 << msi->cap.multi_msg_max)) - 1;
}
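/*
 * Worked example: multi_msg_max = 3 advertises 1 << 3 = 8 vectors, so the
 * per-vector mask covers bits [7:0]: (1 << (1 << 3)) - 1 = 0xff. A value of
 * 5 means 32 vectors, where the shift would overflow a 32-bit word, hence
 * the 0xffffffff early return above.
 */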
static void msi_write_mask(struct rt_pci_msi_conf *msi,
rt_uint32_t clear, rt_uint32_t set, struct rt_pci_device *pdev)
{
if (msi->cap.is_masking)
{
rt_ubase_t level = rt_spin_lock_irqsave(&pdev->msi_lock);
msi->mask &= ~clear;
msi->mask |= set;
rt_pci_write_config_u32(pdev, msi->mask_pos, msi->mask);
rt_spin_unlock_irqrestore(&pdev->msi_lock, level);
}
}
rt_inline void msi_mask(struct rt_pci_msi_conf *msi,
rt_uint32_t mask, struct rt_pci_device *pdev)
{
msi_write_mask(msi, 0, mask, pdev);
}
rt_inline void msi_unmask(struct rt_pci_msi_conf *msi,
rt_uint32_t mask, struct rt_pci_device *pdev)
{
msi_write_mask(msi, mask, 0, pdev);
}
static void msi_write_enable(struct rt_pci_device *pdev, rt_bool_t enable)
{
rt_uint16_t msgctl;
rt_pci_read_config_u16(pdev, pdev->msi_cap + PCIR_MSI_CTRL, &msgctl);
msgctl &= ~PCIM_MSICTRL_MSI_ENABLE;
if (enable)
{
msgctl |= PCIM_MSICTRL_MSI_ENABLE;
}
rt_pci_write_config_u16(pdev, pdev->msi_cap + PCIR_MSI_CTRL, msgctl);
}
static void msi_affinity_init(struct rt_pci_msi_desc *desc, int msi_index,
rt_bitmap_t *cpumasks)
{
int irq;
struct rt_pic_irq *pirq;
struct rt_pci_device *pdev = desc->pdev;
struct rt_pic *msi_pic = pdev->msi_pic;
irq = desc->irq + (desc->is_msix ? 0 : msi_index);
pirq = rt_pic_find_pirq(msi_pic, irq);
/* Save affinity */
if (desc->is_msix)
{
desc->affinity = pirq->affinity;
}
else
{
desc->affinities[msi_index] = pirq->affinity;
}
if ((void *)cpumasks >= (void *)msi_affinity_default &&
(void *)cpumasks < (void *)msi_affinity_default + sizeof(msi_affinity_default))
{
rt_uint64_t data_address;
/* Get MSI/MSI-X write data address */
data_address = desc->msg.address_hi;
data_address <<= 32;
data_address |= desc->msg.address_lo;
/* Prepare affinity */
cpumasks = pirq->affinity;
rt_numa_memory_affinity(data_address, cpumasks);
}
else if (rt_bitmap_next_set_bit(cpumasks, 0, RT_CPUS_NR) >= RT_CPUS_NR)
{
/* No affinity info found, give up */
return;
}
if (!rt_pic_irq_set_affinity(irq, cpumasks))
{
if (msi_pic->ops->irq_write_msi_msg)
{
msi_pic->ops->irq_write_msi_msg(pirq, &desc->msg);
}
}
}
void rt_pci_msi_shutdown(struct rt_pci_device *pdev)
{
struct rt_pci_msi_desc *desc;
if (!pdev)
{
return;
}
msi_write_enable(pdev, RT_FALSE);
rt_pci_intx(pdev, RT_TRUE);
if ((desc = rt_pci_msi_first_desc(pdev)))
{
msi_unmask(&desc->msi, msi_multi_mask(&desc->msi), pdev);
/* Restore pdev->irq to its default pin-assertion IRQ */
pdev->irq = desc->msi.default_irq;
}
pdev->msi_enabled = RT_FALSE;
}
void rt_pci_msix_shutdown(struct rt_pci_device *pdev)
{
struct rt_pci_msi_desc *desc;
if (!pdev)
{
return;
}
rt_pci_msi_for_each_desc(pdev, desc)
{
msix_mask(&desc->msix);
}
msix_update_ctrl(pdev, PCIM_MSIXCTRL_MSIX_ENABLE, 0);
rt_pci_intx(pdev, RT_TRUE);
pdev->msix_enabled = RT_FALSE;
}
void rt_pci_msi_free_irqs(struct rt_pci_device *pdev)
{
struct rt_pci_msi_desc *desc, *last_desc = RT_NULL;
if (!pdev)
{
return;
}
if (pdev->msix_base)
{
rt_iounmap(pdev->msix_base);
pdev->msix_base = RT_NULL;
}
rt_pci_msi_for_each_desc(pdev, desc)
{
/* Free the previous node only after stepping past it, so iteration stays safe */
if (last_desc)
{
rt_list_remove(&last_desc->list);
rt_free(last_desc);
}
last_desc = desc;
}
/* The last one */
if (last_desc)
{
rt_list_remove(&last_desc->list);
rt_free(last_desc);
}
}
void rt_pci_msi_write_msg(struct rt_pci_msi_desc *desc, struct rt_pci_msi_msg *msg)
{
struct rt_pci_device *pdev = desc->pdev;
if (desc->is_msix)
{
void *msix_entry;
rt_bool_t unmasked;
rt_uint32_t msgctl;
struct rt_pci_msix_conf *msix = &desc->msix;
msgctl = msix->msg_ctrl;
unmasked = !(msgctl & PCIM_MSIX_ENTRYVECTOR_CTRL_MASK);
msix_entry = msix_table_base(msix);
if (unmasked)
{
msix_write_vector_ctrl(msix, msgctl | PCIM_MSIX_ENTRYVECTOR_CTRL_MASK);
}
HWREG32(msix_entry + PCIM_MSIX_ENTRY_LOWER_ADDR) = msg->address_lo;
HWREG32(msix_entry + PCIM_MSIX_ENTRY_UPPER_ADDR) = msg->address_hi;
HWREG32(msix_entry + PCIM_MSIX_ENTRY_DATA) = msg->data;
if (unmasked)
{
msix_write_vector_ctrl(msix, msgctl);
}
/* Ensure that the writes are visible in the device */
HWREG32(msix_entry + PCIM_MSIX_ENTRY_DATA);
}
else
{
rt_uint16_t msgctl;
int pos = pdev->msi_cap;
struct rt_pci_msi_conf *msi = &desc->msi;
rt_pci_read_config_u16(pdev, pos + PCIR_MSI_CTRL, &msgctl);
msgctl &= ~PCIM_MSICTRL_MME_MASK;
msgctl |= msi->cap.multi_msg_use << PCIM_MSICTRL_MME_SHIFT;
rt_pci_write_config_u16(pdev, pos + PCIR_MSI_CTRL, msgctl);
rt_pci_write_config_u32(pdev, pos + PCIR_MSI_ADDR, msg->address_lo);
/*
* The Message Address/Data values are interrupt-controller specific:
* system software programs them when the PCIe device is initialized,
* and the encoding differs between processor families.
* If the Multiple Message Enable field is not 0b000 (multiple IRQs),
* the PCIe device can raise different interrupt requests by changing
* the low bits of the Message Data field.
*/
if (msi->cap.is_64bit)
{
rt_pci_write_config_u32(pdev, pos + PCIR_MSI_ADDR_HIGH, msg->address_hi);
rt_pci_write_config_u16(pdev, pos + PCIR_MSI_DATA_64BIT, msg->data);
}
else
{
rt_pci_write_config_u16(pdev, pos + PCIR_MSI_DATA, msg->data);
}
/* Ensure that the writes are visible in the device */
rt_pci_read_config_u16(pdev, pos + PCIR_MSI_CTRL, &msgctl);
}
desc->msg = *msg;
if (desc->write_msi_msg)
{
desc->write_msi_msg(desc, desc->write_msi_msg_data);
}
}
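/*
 * A minimal sketch (not part of this patch) of the PIC-side callback that
 * composes the message consumed by rt_pci_msi_write_msg(). The doorbell
 * address EXAMPLE_DOORBELL_BASE and the data encoding are assumptions for
 * illustration; real PICs (e.g. an ITS) define their own layout.
 */
#if 0
static void example_irq_compose_msi_msg(struct rt_pic_irq *pirq,
        struct rt_pci_msi_msg *msg)
{
    rt_uint64_t doorbell = EXAMPLE_DOORBELL_BASE; /* assumed platform constant */
    /* Split the 64-bit doorbell address into the two config-space halves */
    msg->address_lo = (rt_uint32_t)doorbell;
    msg->address_hi = (rt_uint32_t)(doorbell >> 32);
    /* Payload the device writes to the doorbell to raise this vector */
    msg->data = pirq->irq;
}
#endif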
void rt_pci_msi_mask_irq(struct rt_pic_irq *pirq)
{
struct rt_pci_msi_desc *desc;
if (pirq && (desc = pirq->msi_desc))
{
if (desc->is_msix)
{
msix_mask(&desc->msix);
}
else
{
msi_mask(&desc->msi, RT_BIT(pirq->irq - desc->irq), desc->pdev);
}
}
}
void rt_pci_msi_unmask_irq(struct rt_pic_irq *pirq)
{
struct rt_pci_msi_desc *desc;
if (pirq && (desc = pirq->msi_desc))
{
if (desc->is_msix)
{
msix_unmask(&desc->msix);
}
else
{
msi_unmask(&desc->msi, RT_BIT(pirq->irq - desc->irq), desc->pdev);
}
}
}
rt_ssize_t rt_pci_alloc_vector(struct rt_pci_device *pdev, int min, int max,
rt_uint32_t flags, RT_IRQ_AFFINITY_DECLARE((*affinities)))
{
rt_ssize_t res = -RT_ENOSYS;
if (!pdev || min > max)
{
return -RT_EINVAL;
}
if (flags & RT_PCI_IRQ_F_AFFINITY)
{
if (!affinities)
{
affinities = msi_affinity_default;
}
}
else
{
affinities = RT_NULL;
}
if (flags & RT_PCI_IRQ_F_MSIX)
{
res = rt_pci_msix_enable_range_affinity(pdev, RT_NULL, min, max, affinities);
if (res > 0)
{
return res;
}
}
if (flags & RT_PCI_IRQ_F_MSI)
{
res = rt_pci_msi_enable_range_affinity(pdev, min, max, affinities);
if (res > 0)
{
return res;
}
}
if (flags & RT_PCI_IRQ_F_LEGACY)
{
if (min == 1 && pdev->irq >= 0)
{
if (affinities)
{
int cpuid;
RT_IRQ_AFFINITY_DECLARE(old_affinity);
/* INTx is shared, we should update it */
rt_pic_irq_get_affinity(pdev->irq, old_affinity);
rt_bitmap_for_each_set_bit(affinities[0], cpuid, RT_CPUS_NR)
{
RT_IRQ_AFFINITY_SET(old_affinity, cpuid);
}
rt_pic_irq_set_affinity(pdev->irq, old_affinity);
}
rt_pci_intx(pdev, RT_TRUE);
return min;
}
}
return res;
}
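/*
 * Usage sketch (assumed driver code, not part of this patch): request 1..4
 * vectors, preferring MSI-X, falling back to MSI and finally the INTx pin,
 * with the default NUMA-aware affinity spread.
 */
#if 0
static rt_err_t example_request_irqs(struct rt_pci_device *pdev)
{
    rt_ssize_t nvec = rt_pci_alloc_vector(pdev, 1, 4,
            RT_PCI_IRQ_F_MSIX | RT_PCI_IRQ_F_MSI |
            RT_PCI_IRQ_F_LEGACY | RT_PCI_IRQ_F_AFFINITY, RT_NULL);
    if (nvec < 0)
    {
        return (rt_err_t)nvec;
    }
    /* MSI vectors are consecutive from pdev->irq; rt_pci_free_vector() undoes this */
    return RT_EOK;
}
#endif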
void rt_pci_free_vector(struct rt_pci_device *pdev)
{
if (!pdev)
{
return;
}
rt_pci_msi_disable(pdev);
rt_pci_msix_disable(pdev);
rt_pci_irq_mask(pdev);
}
static rt_err_t msi_verify_entries(struct rt_pci_device *pdev)
{
if (pdev->no_64bit_msi)
{
struct rt_pci_msi_desc *desc;
rt_pci_msi_for_each_desc(pdev, desc)
{
if (desc->msg.address_hi)
{
LOG_D("%s: Arch assigned 64-bit MSI address %08x%08x"
"but device only supports 32 bits",
rt_dm_dev_get_name(&pdev->parent),
desc->msg.address_hi, desc->msg.address_lo);
return -RT_EIO;
}
}
}
return RT_EOK;
}
static rt_err_t msi_insert_desc(struct rt_pci_device *pdev,
struct rt_pci_msi_desc *init_desc)
{
rt_size_t msi_affinity_ptr_size = 0;
struct rt_pci_msi_desc *msi_desc;
if (!init_desc->is_msix)
{
msi_affinity_ptr_size += sizeof(msi_desc->affinities[0]) * 32;
}
msi_desc = rt_calloc(1, sizeof(*msi_desc) + msi_affinity_ptr_size);
if (!msi_desc)
{
return -RT_ENOMEM;
}
rt_memcpy(msi_desc, init_desc, sizeof(*msi_desc));
if (!init_desc->is_msix)
{
msi_desc->affinities = (void *)msi_desc + sizeof(*msi_desc);
}
msi_desc->pdev = pdev;
rt_list_init(&msi_desc->list);
rt_list_insert_before(&pdev->msi_desc_nodes, &msi_desc->list);
return RT_EOK;
}
rt_ssize_t rt_pci_msi_vector_count(struct rt_pci_device *pdev)
{
rt_uint16_t msgctl;
if (!pdev)
{
return -RT_EINVAL;
}
if (!pdev->msi_cap)
{
return -RT_EINVAL;
}
rt_pci_read_config_u16(pdev, pdev->msi_cap + PCIR_MSI_CTRL, &msgctl);
return 1 << ((msgctl & PCIM_MSICTRL_MMC_MASK) >> 1);
}
rt_err_t rt_pci_msi_disable(struct rt_pci_device *pdev)
{
if (!pdev)
{
return -RT_EINVAL;
}
if (!pdev->msi_enabled)
{
return -RT_EINVAL;
}
spin_lock(&pdev->msi_lock);
rt_pci_msi_shutdown(pdev);
rt_pci_msi_free_irqs(pdev);
spin_unlock(&pdev->msi_lock);
return RT_EOK;
}
static rt_err_t msi_setup_msi_desc(struct rt_pci_device *pdev, int nvec)
{
rt_uint16_t msgctl;
struct rt_pci_msi_desc desc;
rt_memset(&desc, 0, sizeof(desc));
desc.vector_used = nvec;
desc.vector_count = rt_pci_msi_vector_count(pdev);
desc.is_msix = RT_FALSE;
rt_pci_read_config_u16(pdev, pdev->msi_cap + PCIR_MSI_CTRL, &msgctl);
desc.msi.cap.is_64bit = !!(msgctl & PCIM_MSICTRL_64BIT);
desc.msi.cap.is_masking = !!(msgctl & PCIM_MSICTRL_VECTOR);
desc.msi.cap.multi_msg_max = (msgctl & PCIM_MSICTRL_MMC_MASK) >> 1;
for (int log2 = 0; log2 < 5; ++log2)
{
if (nvec <= (1 << log2))
{
desc.msi.cap.multi_msg_use = log2;
break;
}
}
if (desc.msi.cap.is_64bit)
{
desc.msi.mask_pos = pdev->msi_cap + PCIR_MSI_MASK_64BIT;
}
else
{
desc.msi.mask_pos = pdev->msi_cap + PCIR_MSI_MASK;
}
/* Save pdev->irq for its default pin-assertion IRQ */
desc.msi.default_irq = pdev->irq;
if (desc.msi.cap.is_masking)
{
/* Get the old mask status */
rt_pci_read_config_u32(pdev, desc.msi.mask_pos, &desc.msi.mask);
}
return msi_insert_desc(pdev, &desc);
}
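/*
 * Example: a request for nvec = 3 rounds up to the next power of two, so
 * multi_msg_use becomes 2 (1 << 2 = 4 messages are programmed) while
 * vector_used still records the 3 vectors the caller asked for.
 */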
static rt_ssize_t msi_capability_init(struct rt_pci_device *pdev,
int nvec, RT_IRQ_AFFINITY_DECLARE((*affinities)))
{
rt_err_t err;
struct rt_pci_msi_desc *desc;
msi_write_enable(pdev, RT_FALSE);
spin_lock(&pdev->msi_lock);
if (!(err = msi_setup_msi_desc(pdev, nvec)))
{
/* All MSIs are unmasked by default; mask them all */
desc = rt_pci_msi_first_desc(pdev);
msi_mask(&desc->msi, msi_multi_mask(&desc->msi), pdev);
if (!(err = rt_pci_msi_setup_irqs(pdev, nvec, PCIY_MSI)))
{
err = msi_verify_entries(pdev);
}
if (err)
{
msi_unmask(&desc->msi, msi_multi_mask(&desc->msi), pdev);
}
}
spin_unlock(&pdev->msi_lock);
if (err)
{
rt_pci_msi_free_irqs(pdev);
LOG_E("%s: Setup %s interrupts(%d) error = %s",
rt_dm_dev_get_name(&pdev->parent), "MSI", nvec, rt_strerror(err));
return err;
}
if (affinities)
{
for (int idx = 0; idx < nvec; ++idx)
{
msi_affinity_init(desc, idx, affinities[idx]);
}
}
/* Disable INTX */
rt_pci_intx(pdev, RT_FALSE);
/* Set MSI enabled bits */
msi_write_enable(pdev, RT_TRUE);
pdev->irq = desc->irq;
pdev->msi_enabled = RT_TRUE;
return nvec;
}
rt_ssize_t rt_pci_msi_enable_range_affinity(struct rt_pci_device *pdev,
int min, int max, RT_IRQ_AFFINITY_DECLARE((*affinities)))
{
int nvec = max;
rt_ssize_t entries_nr;
if (!pdev || min > max)
{
return -RT_EINVAL;
}
if (pdev->no_msi)
{
return -RT_ENOSYS;
}
if (!pdev->msi_pic)
{
return -RT_ENOSYS;
}
if (pdev->msi_enabled)
{
LOG_W("%s: MSI is enabled", rt_dm_dev_get_name(&pdev->parent));
return -RT_EINVAL;
}
entries_nr = rt_pci_msi_vector_count(pdev);
if (entries_nr < 0)
{
return entries_nr;
}
if (nvec > entries_nr)
{
return -RT_EEMPTY;
}
return msi_capability_init(pdev, nvec, affinities);
}
rt_ssize_t rt_pci_msix_vector_count(struct rt_pci_device *pdev)
{
rt_uint16_t msgctl;
if (!pdev)
{
return -RT_EINVAL;
}
if (!pdev->msix_cap)
{
return -RT_EINVAL;
}
rt_pci_read_config_u16(pdev, pdev->msix_cap + PCIR_MSIX_CTRL, &msgctl);
return rt_pci_msix_table_size(msgctl);
}
rt_err_t rt_pci_msix_disable(struct rt_pci_device *pdev)
{
if (!pdev)
{
return -RT_EINVAL;
}
if (!pdev->msix_enabled)
{
return -RT_EINVAL;
}
spin_lock(&pdev->msi_lock);
rt_pci_msix_shutdown(pdev);
rt_pci_msi_free_irqs(pdev);
spin_unlock(&pdev->msi_lock);
return RT_EOK;
}
static void *msix_table_remap(struct rt_pci_device *pdev, rt_size_t entries_nr)
{
rt_uint8_t bir;
rt_uint32_t table_offset;
rt_ubase_t table_base_phys;
rt_pci_read_config_u32(pdev, pdev->msix_cap + PCIR_MSIX_TABLE, &table_offset);
bir = (rt_uint8_t)(table_offset & PCIM_MSIX_BIR_MASK);
if (pdev->resource[bir].flags == PCI_BUS_REGION_F_NONE)
{
LOG_E("%s: BAR[bir = %d] is invalid", rt_dm_dev_get_name(&pdev->parent), bir);
return RT_NULL;
}
table_base_phys = pdev->resource[bir].base + (table_offset & ~PCIM_MSIX_BIR_MASK);
return rt_ioremap((void *)table_base_phys, entries_nr * PCIM_MSIX_ENTRY_SIZE);
}
static rt_err_t msix_setup_msi_descs(struct rt_pci_device *pdev,
void *table_base, struct rt_pci_msix_entry *entries, int nvec)
{
rt_err_t err = RT_EOK;
struct rt_pci_msi_desc desc;
rt_memset(&desc, 0, sizeof(desc));
desc.vector_used = 1;
desc.vector_count = rt_pci_msix_vector_count(pdev);
desc.is_msix = RT_TRUE;
desc.msix.table_base = table_base;
for (int i = 0; i < nvec; ++i)
{
void *table_entry;
int index = entries ? entries[i].index : i;
desc.msix.index = index;
table_entry = msix_table_base(&desc.msix);
desc.msix.msg_ctrl = HWREG32(table_entry + PCIM_MSIX_ENTRY_VECTOR_CTRL);
if ((err = msi_insert_desc(pdev, &desc)))
{
break;
}
}
return err;
}
static rt_ssize_t msix_capability_init(struct rt_pci_device *pdev,
struct rt_pci_msix_entry *entries, int nvec,
RT_IRQ_AFFINITY_DECLARE((*affinities)))
{
rt_err_t err;
rt_uint16_t msgctl;
rt_size_t table_size;
void *table_base, *table_entry;
struct rt_pci_msi_desc *desc;
struct rt_pci_msix_entry *entry;
/*
* Some devices require MSI-X to be enabled before the MSI-X
* registers can be accessed.
* Mask all the vectors to prevent interrupts coming in before
* they're fully set up.
*/
msix_update_ctrl(pdev, 0, PCIM_MSIXCTRL_FUNCTION_MASK | PCIM_MSIXCTRL_MSIX_ENABLE);
rt_pci_read_config_u16(pdev, pdev->msix_cap + PCIR_MSIX_CTRL, &msgctl);
/* Request & Map MSI-X table region */
table_size = rt_pci_msix_table_size(msgctl);
table_base = msix_table_remap(pdev, table_size);
if (!table_base)
{
LOG_E("%s: Remap MSI-X table fail", rt_dm_dev_get_name(&pdev->parent));
err = -RT_ENOMEM;
goto _out_disbale_msix;
}
pdev->msix_base = table_base;
spin_lock(&pdev->msi_lock);
if (!(err = msix_setup_msi_descs(pdev, table_base, entries, nvec)))
{
if (!(err = rt_pci_msi_setup_irqs(pdev, nvec, PCIY_MSIX)))
{
/* Check if all MSI entries honor device restrictions */
err = msi_verify_entries(pdev);
}
}
spin_unlock(&pdev->msi_lock);
if (err)
{
rt_pci_msi_free_irqs(pdev);
LOG_E("%s: Setup %s interrupts(%d) error = %s",
rt_dm_dev_get_name(&pdev->parent), "MSI-X", nvec, rt_strerror(err));
goto _out_disable_msix;
}
entry = entries;
rt_pci_msi_for_each_desc(pdev, desc)
{
if (affinities)
{
msi_affinity_init(desc, desc->msix.index, affinities[entry->index]);
}
entry->irq = desc->irq;
++entry;
}
/* Disable INTX */
rt_pci_intx(pdev, RT_FALSE);
/* Mask all table entries */
table_entry = table_base;
for (int i = 0; i < table_size; ++i, table_entry += PCIM_MSIX_ENTRY_SIZE)
{
HWREG32(table_entry + PCIM_MSIX_ENTRY_VECTOR_CTRL) = PCIM_MSIX_ENTRYVECTOR_CTRL_MASK;
}
msix_update_ctrl(pdev, PCIM_MSIXCTRL_FUNCTION_MASK, 0);
pdev->msix_enabled = RT_TRUE;
return nvec;
_out_disable_msix:
msix_update_ctrl(pdev, PCIM_MSIXCTRL_FUNCTION_MASK | PCIM_MSIXCTRL_MSIX_ENABLE, 0);
return err;
}
rt_ssize_t rt_pci_msix_enable_range_affinity(struct rt_pci_device *pdev,
struct rt_pci_msix_entry *entries, int min, int max,
RT_IRQ_AFFINITY_DECLARE((*affinities)))
{
int nvec = max;
rt_ssize_t entries_nr;
if (!pdev || min > max)
{
return -RT_EINVAL;
}
if (pdev->no_msi)
{
return -RT_ENOSYS;
}
if (!pdev->msi_pic)
{
return -RT_ENOSYS;
}
if (pdev->msix_enabled)
{
LOG_W("%s: MSI-X is enabled", rt_dm_dev_get_name(&pdev->parent));
return -RT_EINVAL;
}
entries_nr = rt_pci_msix_vector_count(pdev);
if (entries_nr < 0)
{
return entries_nr;
}
if (nvec > entries_nr)
{
return -RT_EEMPTY;
}
if (!entries)
{
return 0;
}
/* Check if entries is valid */
for (int i = 0; i < nvec; ++i)
{
struct rt_pci_msix_entry *target = &entries[i];
if (target->index >= entries_nr)
{
return -RT_EINVAL;
}
for (int j = i + 1; j < nvec; ++j)
{
/* Check duplicate */
if (target->index == entries[j].index)
{
LOG_E("%s: msix entry[%d].index = entry[%d].index",
rt_dm_dev_get_name(&pdev->parent), i, j);
return -RT_EINVAL;
}
}
}
return msix_capability_init(pdev, entries, nvec, affinities);
}
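/*
 * Usage sketch (assumed driver code, not part of this patch): the caller
 * names the MSI-X table slots it wants; on success each entry's `irq`
 * member holds the allocated system IRQ for that slot.
 */
#if 0
static rt_err_t example_enable_msix(struct rt_pci_device *pdev)
{
    struct rt_pci_msix_entry entries[2] =
    {
        { .index = 0 }, /* e.g. TX queue vector */
        { .index = 1 }, /* e.g. RX queue vector */
    };
    rt_ssize_t nvec = rt_pci_msix_enable_range_affinity(pdev, entries, 2, 2, RT_NULL);
    if (nvec < 0)
    {
        return (rt_err_t)nvec;
    }
    /* entries[0].irq and entries[1].irq are now valid system IRQs */
    return RT_EOK;
}
#endif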

View File

@ -0,0 +1,621 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-10-24 GuEe-GUI first version
*/
#include <rthw.h>
#include <rtthread.h>
#define DBG_TAG "pci.ofw"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include <drivers/pci.h>
#include <drivers/ofw.h>
#include <drivers/ofw_io.h>
#include <drivers/ofw_irq.h>
#include <drivers/ofw_fdt.h>
static rt_err_t pci_ofw_irq_parse(struct rt_pci_device *pdev, struct rt_ofw_cell_args *out_irq)
{
rt_err_t err = RT_EOK;
rt_uint8_t pin;
fdt32_t map_addr[4];
struct rt_pci_device *p2pdev;
struct rt_ofw_node *dev_np, *p2pnode = RT_NULL;
/* Parse device tree if dev have a device node */
dev_np = pdev->parent.ofw_node;
if (dev_np)
{
err = rt_ofw_parse_irq_cells(dev_np, 0, out_irq);
if (!err)
{
/* The device node carries its own interrupt specifier */
return RT_EOK;
}
}
/* Assume #interrupt-cells is 1 */
if ((err = rt_pci_read_config_u8(pdev, PCIR_INTPIN, &pin)))
{
goto _err;
}
/* No pin, exit with no error message. */
if (pin == 0)
{
return -RT_ENOSYS;
}
/* Try local interrupt-map in the device node */
if (rt_ofw_prop_read_raw(dev_np, "interrupt-map", RT_NULL))
{
pin = rt_pci_irq_intx(pdev, pin);
p2pnode = dev_np;
}
/* Walk up the PCI tree */
while (!p2pnode)
{
p2pdev = pdev->bus->self;
/* Is the root bus -> host bridge */
if (rt_pci_is_root_bus(pdev->bus))
{
struct rt_pci_host_bridge *host_bridge = pdev->bus->host_bridge;
p2pnode = host_bridge->parent.ofw_node;
if (!p2pnode)
{
err = -RT_EINVAL;
goto _err;
}
}
else
{
/* Is P2P bridge */
p2pnode = p2pdev->parent.ofw_node;
}
if (p2pnode)
{
break;
}
/* Try get INTx in P2P */
pin = rt_pci_irq_intx(pdev, pin);
pdev = p2pdev;
}
/* For more format detail, please read `components/drivers/ofw/irq.c:ofw_parse_irq_map` */
out_irq->data = map_addr;
out_irq->args_count = 2;
out_irq->args[0] = 3;
out_irq->args[1] = 1;
/* In addr cells */
map_addr[0] = cpu_to_fdt32((pdev->bus->number << 16) | (pdev->devfn << 8));
map_addr[1] = cpu_to_fdt32(0);
map_addr[2] = cpu_to_fdt32(0);
/* In pin cells */
map_addr[3] = cpu_to_fdt32(pin);
err = rt_ofw_parse_irq_map(p2pnode, out_irq);
_err:
if (err == -RT_EEMPTY)
{
LOG_W("PCI-Device<%s> no interrupt-map found, INTx interrupts not available",
rt_dm_dev_get_name(&pdev->parent));
LOG_W("PCI-Device<%s> possibly some PCI slots don't have level triggered interrupts capability",
rt_dm_dev_get_name(&pdev->parent));
}
else if (err && err != -RT_ENOSYS)
{
LOG_E("PCI-Device<%s> irq parse failed with err = %s",
rt_dm_dev_get_name(&pdev->parent), rt_strerror(err));
}
return err;
}
int rt_pci_ofw_irq_parse_and_map(struct rt_pci_device *pdev,
rt_uint8_t slot, rt_uint8_t pin)
{
int irq = -1;
rt_err_t status;
struct rt_ofw_cell_args irq_args;
if (!pdev)
{
goto _end;
}
status = pci_ofw_irq_parse(pdev, &irq_args);
if (status)
{
goto _end;
}
irq = rt_ofw_map_irq(&irq_args);
if (irq >= 0)
{
pdev->intx_pic = rt_pic_dynamic_cast(rt_ofw_data(irq_args.data));
}
_end:
return irq;
}
static rt_err_t pci_ofw_parse_ranges(struct rt_ofw_node *dev_np, const char *propname,
int phy_addr_cells, int phy_size_cells, int cpu_addr_cells,
struct rt_pci_bus_region **out_regions, rt_size_t *out_regions_nr)
{
const fdt32_t *cell;
rt_ssize_t total_cells;
int groups, space_code;
rt_uint32_t phy_addr[3];
rt_uint64_t cpu_addr, phy_addr_size;
*out_regions = RT_NULL;
*out_regions_nr = 0;
cell = rt_ofw_prop_read_raw(dev_np, propname, &total_cells);
if (!cell)
{
return -RT_EEMPTY;
}
groups = total_cells / sizeof(*cell) / (phy_addr_cells + phy_size_cells + cpu_addr_cells);
*out_regions = rt_malloc(groups * sizeof(struct rt_pci_bus_region));
if (!*out_regions)
{
return -RT_ENOMEM;
}
for (int i = 0; i < groups; ++i)
{
/*
* ranges:
* phys.hi cell: npt000ss bbbbbbbb dddddfff rrrrrrrr
* phys.low cell: llllllll llllllll llllllll llllllll
* phys.mid cell: hhhhhhhh hhhhhhhh hhhhhhhh hhhhhhhh
*
* n: relocatable region flag (doesn't play a role here)
* p: prefetchable (cacheable) region flag
* t: aliased address flag (doesn't play a role here)
* ss: space code
* 00: configuration space
* 01: I/O space
* 10: 32 bit memory space
* 11: 64 bit memory space
* bbbbbbbb: The PCI bus number
* ddddd: The device number
* fff: The function number. Used for multifunction PCI devices.
* rrrrrrrr: Register number; used for configuration cycles.
*/
for (int j = 0; j < phy_addr_cells; ++j)
{
phy_addr[j] = rt_fdt_read_number(cell++, 1);
}
space_code = (phy_addr[0] >> 24) & 0x3;
cpu_addr = rt_fdt_read_number(cell, cpu_addr_cells);
cell += cpu_addr_cells;
phy_addr_size = rt_fdt_read_number(cell, phy_size_cells);
cell += phy_size_cells;
(*out_regions)[i].phy_addr = ((rt_uint64_t)phy_addr[1] << 32) | phy_addr[2];
(*out_regions)[i].cpu_addr = cpu_addr;
(*out_regions)[i].size = phy_addr_size;
(*out_regions)[i].bus_start = (*out_regions)[i].phy_addr;
if (space_code & 2)
{
(*out_regions)[i].flags = phy_addr[0] & (1U << 30) ?
PCI_BUS_REGION_F_PREFETCH : PCI_BUS_REGION_F_MEM;
}
else if (space_code & 1)
{
(*out_regions)[i].flags = PCI_BUS_REGION_F_IO;
}
else
{
(*out_regions)[i].flags = PCI_BUS_REGION_F_NONE;
}
++*out_regions_nr;
}
return RT_EOK;
}
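/*
 * Worked example with phy_addr_cells = 3, cpu_addr_cells = 2 and
 * phy_size_cells = 2:
 *
 *     ranges = <0x82000000 0x0 0x40000000  0x0 0x40000000  0x0 0x10000000>;
 *
 * phys.hi = 0x82000000 has space code 0b10 (32-bit memory) and a clear
 * prefetch bit, so this becomes a region with flags = PCI_BUS_REGION_F_MEM,
 * phy_addr = bus_start = 0x40000000, cpu_addr = 0x40000000 and
 * size = 0x10000000.
 */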
rt_err_t rt_pci_ofw_parse_ranges(struct rt_ofw_node *dev_np,
struct rt_pci_host_bridge *host_bridge)
{
rt_err_t err;
int phy_addr_cells = -1, phy_size_cells = -1, cpu_addr_cells;
if (!dev_np || !host_bridge)
{
return -RT_EINVAL;
}
cpu_addr_cells = rt_ofw_io_addr_cells(dev_np);
rt_ofw_prop_read_s32(dev_np, "#address-cells", &phy_addr_cells);
rt_ofw_prop_read_s32(dev_np, "#size-cells", &phy_size_cells);
if (phy_addr_cells != 3 || phy_size_cells < 1 || cpu_addr_cells < 1)
{
return -RT_EINVAL;
}
if (pci_ofw_parse_ranges(dev_np, "ranges",
phy_addr_cells, phy_size_cells, cpu_addr_cells,
&host_bridge->bus_regions, &host_bridge->bus_regions_nr))
{
return -RT_EINVAL;
}
if ((err = rt_pci_region_setup(host_bridge)))
{
rt_free(host_bridge->bus_regions);
host_bridge->bus_regions_nr = 0;
return err;
}
err = pci_ofw_parse_ranges(dev_np, "dma-ranges",
phy_addr_cells, phy_size_cells, cpu_addr_cells,
&host_bridge->dma_regions, &host_bridge->dma_regions_nr);
if (err && err != -RT_EEMPTY)
{
rt_free(host_bridge->bus_regions);
host_bridge->bus_regions_nr = 0;
LOG_E("%s: Read dma-ranges error = %s", rt_ofw_node_full_name(dev_np),
rt_strerror(err));
return err;
}
return RT_EOK;
}
rt_err_t rt_pci_ofw_host_bridge_init(struct rt_ofw_node *dev_np,
struct rt_pci_host_bridge *host_bridge)
{
rt_err_t err;
const char *propname;
if (!dev_np || !host_bridge)
{
return -RT_EINVAL;
}
host_bridge->irq_slot = rt_pci_irq_slot;
host_bridge->irq_map = rt_pci_ofw_irq_parse_and_map;
if (rt_ofw_prop_read_u32_array_index(dev_np, "bus-range", 0, 2, host_bridge->bus_range) < 0)
{
return -RT_EIO;
}
propname = rt_ofw_get_prop_fuzzy_name(dev_np, ",pci-domain$");
rt_ofw_prop_read_u32(dev_np, propname, &host_bridge->domain);
err = rt_pci_ofw_parse_ranges(dev_np, host_bridge);
return err;
}
rt_err_t rt_pci_ofw_bus_init(struct rt_pci_bus *bus)
{
rt_err_t err = RT_EOK;
return err;
}
rt_err_t rt_pci_ofw_bus_free(struct rt_pci_bus *bus)
{
rt_err_t err = RT_EOK;
return err;
}
/*
* RID (Requester ID) is formatted such that:
* Bits [15:8] are the Bus number.
* Bits [7:3] are the Device number.
* Bits [2:0] are the Function number.
*
* msi-map: Maps a Requester ID to an MSI controller and associated
* msi-specifier data. The property is an arbitrary number of tuples of
* (rid-base,msi-controller,msi-base,length), where:
*
* - rid-base is a single cell describing the first RID matched by the entry.
*
* - msi-controller is a single phandle to an MSI controller
*
* - msi-base is an msi-specifier describing the msi-specifier produced for
* the first RID matched by the entry.
*
* - length is a single cell describing how many consecutive RIDs are matched
* following the rid-base.
*
* Any RID r in the interval [rid-base, rid-base + length) is associated with
* the listed msi-controller, with the msi-specifier (r - rid-base + msi-base).
*
* msi-map-mask: A mask to be applied to each Requester ID prior to being mapped
* to an msi-specifier per the msi-map property.
*
* msi-parent: Describes the MSI parent of the root complex itself. Where
* the root complex and MSI controller do not pass sideband data with MSI
* writes, this property may be used to describe the MSI controller(s)
* used by PCI devices under the root complex, if defined as such in the
* binding for the root complex.
*
* / {
* #address-cells = <1>;
* #size-cells = <1>;
*
* msi_a: msi-controller@a {
* reg = <0xa 0x1>;
* msi-controller;
* #msi-cells = <1>;
* };
*
* msi_b: msi-controller@b {
* reg = <0xb 0x1>;
* msi-controller;
* #msi-cells = <1>;
* };
*
* msi_c: msi-controller@c {
* reg = <0xc 0x1>;
* msi-controller;
* #msi-cells = <1>;
* };
*
* Example (1)
* ===========
* pci: pci@f {
* reg = <0xf 0x1>;
* device_type = "pci";
*
* // The sideband data provided to the MSI controller is
* // the RID, identity-mapped.
* msi-map = <0x0 &msi_a 0x0 0x10000>;
* };
*
* Example (2)
* ===========
* pci: pci@ff {
* reg = <0xff 0x1>;
* device_type = "pci";
*
* // The sideband data provided to the MSI controller is
* // the RID, masked to only the device and function bits.
* msi-map = <0x0 &msi_a 0x0 0x100>;
* msi-map-mask = <0xff>;
* };
*
* Example (3)
* ===========
* pci: pci@fff {
* reg = <0xfff 0x1>;
* device_type = "pci";
*
* // The sideband data provided to the MSI controller is
* // the RID, but the high bit of the bus number is ignored.
* msi-map = <0x0000 &msi_a 0x0000 0x8000>,
* <0x8000 &msi_a 0x0000 0x8000>;
* };
*
* Example (4)
* ===========
* pci: pci@f {
* reg = <0xf 0x1>;
* device_type = "pci";
*
* // The sideband data provided to the MSI controller is
* // the RID, but the high bit of the bus number is negated.
* msi-map = <0x0000 &msi 0x8000 0x8000>,
* <0x8000 &msi 0x0000 0x8000>;
* };
*
* Example (5)
* ===========
* pci: pci@f {
* reg = <0xf 0x1>;
* device_type = "pci";
*
* // The sideband data provided to MSI controller a is the
* // RID, but the high bit of the bus number is negated.
* // The sideband data provided to MSI controller b is the
* // RID, identity-mapped.
* // MSI controller c is not addressable.
* msi-map = <0x0000 &msi_a 0x8000 0x08000>,
* <0x8000 &msi_a 0x0000 0x08000>,
* <0x0000 &msi_b 0x0000 0x10000>;
* };
* };
*/
static void ofw_msi_pic_init(struct rt_pci_device *pdev)
{
#ifdef RT_PCI_MSI
rt_uint32_t rid;
struct rt_pci_bus *bus;
struct rt_ofw_node *np, *msi_ic_np = RT_NULL;
/*
* NOTE: Typically, a device's RID is equal to the PCI device's ID.
* However, in complex bus management scenarios such as servers and PCs,
* the RID needs to be associated with DMA. In these cases,
* the RID should be equal to the DMA alias assigned to the
* PCI device by the system bus.
*/
rid = rt_pci_dev_id(pdev);
for (bus = pdev->bus; bus; bus = bus->parent)
{
if (rt_pci_is_root_bus(bus))
{
np = bus->host_bridge->parent.ofw_node;
}
else
{
np = bus->self->parent.ofw_node;
}
if ((msi_ic_np = rt_ofw_parse_phandle(np, "msi-parent", 0)))
{
break;
}
if (!rt_ofw_map_id(np, rid, "msi-map", "msi-map-mask", &msi_ic_np, RT_NULL))
{
break;
}
}
if (!msi_ic_np)
{
LOG_W("%s: MSI PIC not found", rt_dm_dev_get_name(&pdev->parent));
return;
}
pdev->msi_pic = rt_pic_dynamic_cast(rt_ofw_data(msi_ic_np));
if (!pdev->msi_pic)
{
LOG_W("%s: '%s' not supported", rt_dm_dev_get_name(&pdev->parent), "msi-parent");
goto _out_put_msi_parent_node;
}
if (!pdev->msi_pic->ops->irq_compose_msi_msg)
{
LOG_E("%s: MSI pic MUST implemented %s",
rt_ofw_node_full_name(msi_ic_np), "irq_compose_msi_msg");
RT_ASSERT(0);
}
if (!pdev->msi_pic->ops->irq_alloc_msi)
{
LOG_E("%s: MSI pic MUST implemented %s",
rt_ofw_node_full_name(msi_ic_np), "irq_alloc_msi");
RT_ASSERT(0);
}
if (!pdev->msi_pic->ops->irq_free_msi)
{
LOG_E("%s: MSI pic MUST implemented %s",
rt_ofw_node_full_name(msi_ic_np), "irq_free_msi");
RT_ASSERT(0);
}
_out_put_msi_parent_node:
rt_ofw_node_put(msi_ic_np);
#endif
}
static rt_int32_t ofw_pci_devfn(struct rt_ofw_node *np)
{
rt_int32_t res;
rt_uint32_t reg[5];
res = rt_ofw_prop_read_u32_array_index(np, "reg", 0, RT_ARRAY_SIZE(reg), reg);
return res > 0 ? ((reg[0] >> 8) & 0xff) : res;
}
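/*
 * Example: a child node with reg = <0x1900 0 0 0 0> describes device 3,
 * function 1 on the parent bus: (0x1900 >> 8) & 0xff = 0x19, which equals
 * RT_PCI_DEVFN(3, 1).
 */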
static struct rt_ofw_node *ofw_find_device(struct rt_ofw_node *np, rt_uint32_t devfn)
{
struct rt_ofw_node *dev_np, *mfd_np;
rt_ofw_foreach_child_node(np, dev_np)
{
if (ofw_pci_devfn(dev_np) == devfn)
{
return dev_np;
}
if (rt_ofw_node_tag_equ(dev_np, "multifunc-device"))
{
rt_ofw_foreach_child_node(dev_np, mfd_np)
{
if (ofw_pci_devfn(mfd_np) == devfn)
{
rt_ofw_node_put(dev_np);
return mfd_np;
}
}
}
}
return RT_NULL;
}
rt_err_t rt_pci_ofw_device_init(struct rt_pci_device *pdev)
{
struct rt_ofw_node *np = RT_NULL;
if (!pdev)
{
return -RT_EINVAL;
}
ofw_msi_pic_init(pdev);
if (rt_pci_is_root_bus(pdev->bus) || !pdev->bus->self)
{
struct rt_pci_host_bridge *host_bridge;
host_bridge = rt_pci_find_host_bridge(pdev->bus);
RT_ASSERT(host_bridge != RT_NULL);
np = host_bridge->parent.ofw_node;
}
else
{
np = pdev->bus->self->parent.ofw_node;
}
if (np)
{
pdev->parent.ofw_node = ofw_find_device(np, pdev->devfn);
}
return RT_EOK;
}
rt_err_t rt_pci_ofw_device_free(struct rt_pci_device *pdev)
{
if (!pdev)
{
return -RT_EINVAL;
}
rt_ofw_node_put(pdev->parent.ofw_node);
return RT_EOK;
}

components/drivers/pci/pci.c (new file, 1004 lines)

File diff suppressed because it is too large

View File

@ -0,0 +1,272 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef __PCI_IDS_H__
#define __PCI_IDS_H__
#define PCI_VENDOR_ID_LOONGSON 0x0014
#define PCI_VENDOR_ID_TTTECH 0x0357
#define PCI_VENDOR_ID_DYNALINK 0x0675
#define PCI_VENDOR_ID_UBIQUITI 0x0777
#define PCI_VENDOR_ID_BERKOM 0x0871
#define PCI_VENDOR_ID_COMPAQ 0x0e11
#define PCI_VENDOR_ID_NCR 0x1000
#define PCI_VENDOR_ID_ATI 0x1002
#define PCI_VENDOR_ID_VLSI 0x1004
#define PCI_VENDOR_ID_ADL 0x1005
#define PCI_VENDOR_ID_NS 0x100b
#define PCI_VENDOR_ID_TSENG 0x100c
#define PCI_VENDOR_ID_WEITEK 0x100e
#define PCI_VENDOR_ID_DEC 0x1011
#define PCI_VENDOR_ID_CIRRUS 0x1013
#define PCI_VENDOR_ID_IBM 0x1014
#define PCI_VENDOR_ID_UNISYS 0x1018
#define PCI_VENDOR_ID_COMPEX2 0x101a
#define PCI_VENDOR_ID_WD 0x101c
#define PCI_VENDOR_ID_AMI 0x101e
#define PCI_VENDOR_ID_AMD 0x1022
#define PCI_VENDOR_ID_TRIDENT 0x1023
#define PCI_VENDOR_ID_AI 0x1025
#define PCI_VENDOR_ID_DELL 0x1028
#define PCI_VENDOR_ID_MATROX 0x102b
#define PCI_VENDOR_ID_MOBILITY_ELECTRONICS 0x14f2
#define PCI_VENDOR_ID_CT 0x102c
#define PCI_VENDOR_ID_MIRO 0x1031
#define PCI_VENDOR_ID_NEC 0x1033
#define PCI_VENDOR_ID_FD 0x1036
#define PCI_VENDOR_ID_SI 0x1039
#define PCI_VENDOR_ID_HP 0x103c
#define PCI_VENDOR_ID_PCTECH 0x1042
#define PCI_VENDOR_ID_ASUSTEK 0x1043
#define PCI_VENDOR_ID_DPT 0x1044
#define PCI_VENDOR_ID_OPTI 0x1045
#define PCI_VENDOR_ID_ELSA 0x1048
#define PCI_VENDOR_ID_STMICRO 0x104a
#define PCI_VENDOR_ID_BUSLOGIC 0x104b
#define PCI_VENDOR_ID_TI 0x104c
#define PCI_VENDOR_ID_SONY 0x104d
#define PCI_VENDOR_ID_ANIGMA 0x1051
#define PCI_VENDOR_ID_EFAR 0x1055
#define PCI_VENDOR_ID_MOTOROLA 0x1057
#define PCI_VENDOR_ID_PROMISE 0x105a
#define PCI_VENDOR_ID_FOXCONN 0x105b
#define PCI_VENDOR_ID_UMC 0x1060
#define PCI_VENDOR_ID_PICOPOWER 0x1066
#define PCI_VENDOR_ID_MYLEX 0x1069
#define PCI_VENDOR_ID_APPLE 0x106b
#define PCI_VENDOR_ID_YAMAHA 0x1073
#define PCI_VENDOR_ID_QLOGIC 0x1077
#define PCI_VENDOR_ID_CYRIX 0x1078
#define PCI_VENDOR_ID_CONTAQ 0x1080
#define PCI_VENDOR_ID_OLICOM 0x108d
#define PCI_VENDOR_ID_SUN 0x108e
#define PCI_VENDOR_ID_NI 0x1093
#define PCI_VENDOR_ID_CMD 0x1095
#define PCI_VENDOR_ID_BROOKTREE 0x109e
#define PCI_VENDOR_ID_SGI 0x10a9
#define PCI_VENDOR_ID_WINBOND 0x10ad
#define PCI_VENDOR_ID_PLX 0x10b5
#define PCI_VENDOR_ID_MADGE 0x10b6
#define PCI_VENDOR_ID_3COM 0x10b7
#define PCI_VENDOR_ID_AL 0x10b9
#define PCI_VENDOR_ID_NEOMAGIC 0x10c8
#define PCI_VENDOR_ID_TCONRAD 0x10da
#define PCI_VENDOR_ID_ROHM 0x10db
#define PCI_VENDOR_ID_NVIDIA 0x10de
#define PCI_VENDOR_ID_IMS 0x10e0
#define PCI_VENDOR_ID_AMCC 0x10e8
#define PCI_VENDOR_ID_INTERG 0x10ea
#define PCI_VENDOR_ID_REALTEK 0x10ec
#define PCI_VENDOR_ID_XILINX 0x10ee
#define PCI_VENDOR_ID_INIT 0x1101
#define PCI_VENDOR_ID_CREATIVE 0x1102
#define PCI_VENDOR_ID_ECTIVA PCI_VENDOR_ID_CREATIVE
#define PCI_VENDOR_ID_TTI 0x1103
#define PCI_VENDOR_ID_SIGMA 0x1105
#define PCI_VENDOR_ID_VIA 0x1106
#define PCI_VENDOR_ID_SIEMENS 0x110a
#define PCI_VENDOR_ID_VORTEX 0x1119
#define PCI_VENDOR_ID_EF 0x111a
#define PCI_VENDOR_ID_IDT 0x111d
#define PCI_VENDOR_ID_FORE 0x1127
#define PCI_VENDOR_ID_PHILIPS 0x1131
#define PCI_VENDOR_ID_EICON 0x1133
#define PCI_VENDOR_ID_CISCO 0x1137
#define PCI_VENDOR_ID_ZIATECH 0x1138
#define PCI_VENDOR_ID_SYSKONNECT 0x1148
#define PCI_VENDOR_ID_DIGI 0x114f
#define PCI_VENDOR_ID_XIRCOM 0x115d
#define PCI_VENDOR_ID_SERVERWORKS 0x1166
#define PCI_VENDOR_ID_ALTERA 0x1172
#define PCI_VENDOR_ID_SBE 0x1176
#define PCI_VENDOR_ID_TOSHIBA 0x1179
#define PCI_VENDOR_ID_TOSHIBA_2 0x102f
#define PCI_VENDOR_ID_ATTO 0x117c
#define PCI_VENDOR_ID_RICOH 0x1180
#define PCI_VENDOR_ID_DLINK 0x1186
#define PCI_VENDOR_ID_ARTOP 0x1191
#define PCI_VENDOR_ID_ZEITNET 0x1193
#define PCI_VENDOR_ID_FUJITSU_ME 0x119e
#define PCI_VENDOR_ID_MARVELL 0x11ab
#define PCI_VENDOR_ID_V3 0x11b0
#define PCI_VENDOR_ID_ATT 0x11c1
#define PCI_VENDOR_ID_SPECIALIX 0x11cb
#define PCI_VENDOR_ID_ANALOG_DEVICES 0x11d4
#define PCI_VENDOR_ID_ZORAN 0x11de
#define PCI_VENDOR_ID_COMPEX 0x11f6
#define PCI_VENDOR_ID_PMC_Sierra 0x11f8
#define PCI_VENDOR_ID_RP 0x11fe
#define PCI_VENDOR_ID_CYCLADES 0x120e
#define PCI_VENDOR_ID_ESSENTIAL 0x120f
#define PCI_VENDOR_ID_O2 0x1217
#define PCI_VENDOR_ID_3DFX 0x121a
#define PCI_VENDOR_ID_QEMU 0x1234
#define PCI_VENDOR_ID_AVM 0x1244
#define PCI_VENDOR_ID_STALLION 0x124d
#define PCI_VENDOR_ID_ESS 0x125d
#define PCI_VENDOR_ID_SATSAGEM 0x1267
#define PCI_VENDOR_ID_ENSONIQ 0x1274
#define PCI_VENDOR_ID_TRANSMETA 0x1279
#define PCI_VENDOR_ID_ROCKWELL 0x127a
#define PCI_VENDOR_ID_ITE 0x1283
#define PCI_VENDOR_ID_ALTEON 0x12ae
#define PCI_VENDOR_ID_NVIDIA_SGS 0x12d2
#define PCI_VENDOR_ID_PERICOM 0x12d8
#define PCI_VENDOR_ID_AUREAL 0x12eb
#define PCI_VENDOR_ID_ELECTRONICDESIGNGMBH 0x12f8
#define PCI_VENDOR_ID_ESDGMBH 0x12fe
#define PCI_VENDOR_ID_CB 0x1307
#define PCI_VENDOR_ID_SIIG 0x131f
#define PCI_VENDOR_ID_RADISYS 0x1331
#define PCI_VENDOR_ID_MICRO_MEMORY 0x1332
#define PCI_VENDOR_ID_DOMEX 0x134a
#define PCI_VENDOR_ID_INTASHIELD 0x135a
#define PCI_VENDOR_ID_QUATECH 0x135c
#define PCI_VENDOR_ID_SEALEVEL 0x135e
#define PCI_VENDOR_ID_HYPERCOPE 0x1365
#define PCI_VENDOR_ID_DIGIGRAM 0x1369
#define PCI_VENDOR_ID_KAWASAKI 0x136b
#define PCI_VENDOR_ID_CNET 0x1371
#define PCI_VENDOR_ID_LMC 0x1376
#define PCI_VENDOR_ID_NETGEAR 0x1385
#define PCI_VENDOR_ID_APPLICOM 0x1389
#define PCI_VENDOR_ID_MOXA 0x1393
#define PCI_VENDOR_ID_CCD 0x1397
#define PCI_VENDOR_ID_EXAR 0x13a8
#define PCI_VENDOR_ID_MICROGATE 0x13c0
#define PCI_VENDOR_ID_3WARE 0x13c1
#define PCI_VENDOR_ID_IOMEGA 0x13ca
#define PCI_VENDOR_ID_ABOCOM 0x13d1
#define PCI_VENDOR_ID_SUNDANCE 0x13f0
#define PCI_VENDOR_ID_CMEDIA 0x13f6
#define PCI_VENDOR_ID_ADVANTECH 0x13fe
#define PCI_VENDOR_ID_MEILHAUS 0x1402
#define PCI_VENDOR_ID_LAVA 0x1407
#define PCI_VENDOR_ID_TIMEDIA 0x1409
#define PCI_VENDOR_ID_ICE 0x1412
#define PCI_VENDOR_ID_MICROSOFT 0x1414
#define PCI_VENDOR_ID_OXSEMI 0x1415
#define PCI_VENDOR_ID_CHELSIO 0x1425
#define PCI_VENDOR_ID_ADLINK 0x144a
#define PCI_VENDOR_ID_SAMSUNG 0x144d
#define PCI_VENDOR_ID_GIGABYTE 0x1458
#define PCI_VENDOR_ID_AMBIT 0x1468
#define PCI_VENDOR_ID_MYRICOM 0x14c1
#define PCI_VENDOR_ID_MEDIATEK 0x14c3
#define PCI_VENDOR_ID_TITAN 0x14d2
#define PCI_VENDOR_ID_PANACOM 0x14d4
#define PCI_VENDOR_ID_SIPACKETS 0x14d9
#define PCI_VENDOR_ID_AFAVLAB 0x14db
#define PCI_VENDOR_ID_AMPLICON 0x14dc
#define PCI_VENDOR_ID_BCM_GVC 0x14a4
#define PCI_VENDOR_ID_TOPIC 0x151f
#define PCI_VENDOR_ID_MAINPINE 0x1522
#define PCI_VENDOR_ID_SYBA 0x1592
#define PCI_VENDOR_ID_MORETON 0x15aa
#define PCI_VENDOR_ID_VMWARE 0x15ad
#define PCI_VENDOR_ID_ZOLTRIX 0x15b0
#define PCI_VENDOR_ID_MELLANOX 0x15b3
#define PCI_VENDOR_ID_DFI 0x15bd
#define PCI_VENDOR_ID_QUICKNET 0x15e2
#define PCI_VENDOR_ID_PDC 0x15e9
#define PCI_VENDOR_ID_FARSITE 0x1619
#define PCI_VENDOR_ID_ARIMA 0x161f
#define PCI_VENDOR_ID_BROCADE 0x1657
#define PCI_VENDOR_ID_SIBYTE 0x166d
#define PCI_VENDOR_ID_ATHEROS 0x168c
#define PCI_VENDOR_ID_NETCELL 0x169c
#define PCI_VENDOR_ID_CENATEK 0x16ca
#define PCI_VENDOR_ID_SYNOPSYS 0x16c3
#define PCI_VENDOR_ID_USR 0x16ec
#define PCI_VENDOR_ID_VITESSE 0x1725
#define PCI_VENDOR_ID_LINKSYS 0x1737
#define PCI_VENDOR_ID_ALTIMA 0x173b
#define PCI_VENDOR_ID_CAVIUM 0x177d
#define PCI_VENDOR_ID_TECHWELL 0x1797
#define PCI_VENDOR_ID_BELKIN 0x1799
#define PCI_VENDOR_ID_RDC 0x17f3
#define PCI_VENDOR_ID_GLI 0x17a0
#define PCI_VENDOR_ID_LENOVO 0x17aa
#define PCI_VENDOR_ID_QCOM 0x17cb
#define PCI_VENDOR_ID_CDNS 0x17cd
#define PCI_VENDOR_ID_ARECA 0x17d3
#define PCI_VENDOR_ID_S2IO 0x17d5
#define PCI_VENDOR_ID_SITECOM 0x182d
#define PCI_VENDOR_ID_TOPSPIN 0x1867
#define PCI_VENDOR_ID_COMMTECH 0x18f7
#define PCI_VENDOR_ID_SILAN 0x1904
#define PCI_VENDOR_ID_RENESAS 0x1912
#define PCI_VENDOR_ID_SOLARFLARE 0x1924
#define PCI_VENDOR_ID_TDI 0x192e
#define PCI_VENDOR_ID_FREESCALE 0x1957
#define PCI_VENDOR_ID_NXP PCI_VENDOR_ID_FREESCALE
#define PCI_VENDOR_ID_PASEMI 0x1959
#define PCI_VENDOR_ID_ATTANSIC 0x1969
#define PCI_VENDOR_ID_JMICRON 0x197b
#define PCI_VENDOR_ID_KORENIX 0x1982
#define PCI_VENDOR_ID_HUAWEI 0x19e5
#define PCI_VENDOR_ID_NETRONOME 0x19ee
#define PCI_VENDOR_ID_QMI 0x1a32
#define PCI_VENDOR_ID_AZWAVE 0x1a3b
#define PCI_VENDOR_ID_REDHAT_QUMRANET 0x1af4
#define PCI_VENDOR_ID_ASMEDIA 0x1b21
#define PCI_VENDOR_ID_REDHAT 0x1b36
#define PCI_VENDOR_ID_SILICOM_DENMARK 0x1c2c
#define PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS 0x1c36
#define PCI_VENDOR_ID_CIRCUITCO 0x1cc8
#define PCI_VENDOR_ID_AMAZON 0x1d0f
#define PCI_VENDOR_ID_ZHAOXIN 0x1d17
#define PCI_VENDOR_ID_HYGON 0x1d94
#define PCI_VENDOR_ID_FUNGIBLE 0x1dad
#define PCI_VENDOR_ID_HXT 0x1dbf
#define PCI_VENDOR_ID_TEKRAM 0x1de1
#define PCI_VENDOR_ID_TEHUTI 0x1fc9
#define PCI_VENDOR_ID_SUNIX 0x1fd4
#define PCI_VENDOR_ID_HINT 0x3388
#define PCI_VENDOR_ID_3DLABS 0x3d3d
#define PCI_VENDOR_ID_NETXEN 0x4040
#define PCI_VENDOR_ID_AKS 0x416c
#define PCI_VENDOR_ID_ACCESSIO 0x494f
#define PCI_VENDOR_ID_S3 0x5333
#define PCI_VENDOR_ID_DUNORD 0x5544
#define PCI_VENDOR_ID_DCI 0x6666
#define PCI_VENDOR_ID_INTEL 0x8086
#define PCI_VENDOR_ID_SCALEMP 0x8686
#define PCI_VENDOR_ID_COMPUTONE 0x8e0e
#define PCI_VENDOR_ID_KTI 0x8e2e
#define PCI_VENDOR_ID_ADAPTEC 0x9004
#define PCI_VENDOR_ID_ADAPTEC2 0x9005
#define PCI_VENDOR_ID_HOLTEK 0x9412
#define PCI_VENDOR_ID_NETMOS 0x9710
#define PCI_VENDOR_ID_3COM_2 0xa727
#define PCI_VENDOR_ID_DIGIUM 0xd161
#define PCI_VENDOR_ID_TIGERJET 0xe159
#define PCI_VENDOR_ID_XILINX_RME 0xea60
#define PCI_VENDOR_ID_XEN 0x5853
#define PCI_VENDOR_ID_OCZ 0x1b85
#define PCI_VENDOR_ID_NCUBE 0x10ff
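/*
 * Usage sketch: drivers match probed devices against these IDs, e.g.
 *
 *     if (pdev->vendor == PCI_VENDOR_ID_REDHAT_QUMRANET)
 *     {
 *         // virtio: pdev->device then selects the concrete device type
 *     }
 */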
#endif /* __PCI_IDS_H__ */

File diff suppressed because it is too large

View File

@ -0,0 +1,121 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-10-24 GuEe-GUI first version
*/
#include <drivers/pci.h>
#include <drivers/core/power_domain.h>
#define DBG_TAG "pci.pme"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
/*
* Power Management Capability Register:
*
* 31 27 26 25 24 22 21 20 19 18 16 15 8 7 0
* +---------+---+---+--------+---+---+---+------+-----------+----------------+
* | | | | | | | | | Capability ID |
* +---------+---+---+--------+---+---+---+------+-----------+----------------+
* ^ ^ ^ ^ ^ ^ ^ ^ ^
* | | | | | | | | |
* | | | | | | | | +---- Next Capability Pointer
* | | | | | | | +------------- Version
* | | | | | | +------------------- PME Clock
* | | | | | +----------------------- Immediate Readiness on Return to D0
* | | | | +--------------------------- Device Specific Initialization
* | | | +--------------------------------- Aux Current
* | | +---------------------------------------- D1 Support
* | +-------------------------------------------- D2 Support
* +--------------------------------------------------- PME Support
*/
void rt_pci_pme_init(struct rt_pci_device *pdev)
{
rt_uint16_t pmc;
if (!pdev || !(pdev->pme_cap = rt_pci_find_capability(pdev, PCIY_PMG)))
{
return;
}
rt_pci_read_config_u16(pdev, pdev->pme_cap + PCIR_POWER_CAP, &pmc);
if ((pmc & PCIM_PCAP_SPEC) > 3)
{
LOG_E("%s: Unsupported PME CAP regs spec %u",
rt_dm_dev_get_name(&pdev->parent), pmc & PCIM_PCAP_SPEC);
return;
}
pmc &= PCIM_PCAP_PMEMASK;
if (pmc)
{
pdev->pme_support = RT_FIELD_GET(PCIM_PCAP_PMEMASK, pmc);
rt_pci_pme_active(pdev, RT_FALSE);
}
}
rt_err_t rt_pci_enable_wake(struct rt_pci_device *pdev,
enum rt_pci_power state, rt_bool_t enable)
{
if (!pdev || state >= RT_PCI_PME_MAX)
{
return -RT_EINVAL;
}
if (enable)
{
if (rt_pci_pme_capable(pdev, state) ||
rt_pci_pme_capable(pdev, RT_PCI_D3COLD))
{
rt_pci_pme_active(pdev, RT_TRUE);
}
}
else
{
rt_pci_pme_active(pdev, RT_FALSE);
}
return RT_EOK;
}
static void pci_pme_active(struct rt_pci_device *pdev, rt_bool_t enable)
{
rt_uint16_t pmcsr;
if (!pdev->pme_support)
{
return;
}
rt_pci_read_config_u16(pdev, pdev->pme_cap + PCIR_POWER_STATUS, &pmcsr);
/* Clear PME_Status by writing 1 to it and enable PME# */
pmcsr |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
if (!enable)
{
pmcsr &= ~PCIM_PSTAT_PMEENABLE;
}
rt_pci_write_config_u16(pdev, pdev->pme_cap + PCIR_POWER_STATUS, pmcsr);
}
void rt_pci_pme_active(struct rt_pci_device *pdev, rt_bool_t enable)
{
if (!pdev)
{
return;
}
pci_pme_active(pdev, enable);
rt_dm_power_domain_attach(&pdev->parent, enable);
}
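/*
 * Usage sketch (assumed driver code): arm PME# before entering a low-power
 * state so the device can wake the system, and disarm it on resume. The
 * RT_PCI_D3HOT constant is assumed from the framework's power-state enum.
 */
#if 0
static void example_suspend(struct rt_pci_device *pdev)
{
    /* Enables PME# if the device reports wake capability for D3hot or D3cold */
    rt_pci_enable_wake(pdev, RT_PCI_D3HOT, RT_TRUE);
}
static void example_resume(struct rt_pci_device *pdev)
{
    rt_pci_enable_wake(pdev, RT_PCI_D3HOT, RT_FALSE);
}
#endif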

View File

@ -0,0 +1,922 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-10-24 GuEe-GUI first version
*/
#include <rtthread.h>
#define DBG_TAG "pci.probe"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include <drivers/pci.h>
#include <drivers/core/bus.h>
rt_inline void spin_lock(struct rt_spinlock *spinlock)
{
rt_hw_spin_lock(&spinlock->lock);
}
rt_inline void spin_unlock(struct rt_spinlock *spinlock)
{
rt_hw_spin_unlock(&spinlock->lock);
}
struct rt_pci_host_bridge *rt_pci_host_bridge_alloc(rt_size_t priv_size)
{
struct rt_pci_host_bridge *bridge = rt_calloc(1, sizeof(*bridge) + priv_size);
return bridge;
}
rt_err_t rt_pci_host_bridge_free(struct rt_pci_host_bridge *bridge)
{
if (!bridge)
{
return -RT_EINVAL;
}
if (bridge->bus_regions)
{
rt_free(bridge->bus_regions);
}
if (bridge->dma_regions)
{
rt_free(bridge->dma_regions);
}
rt_free(bridge);
return RT_EOK;
}
rt_err_t rt_pci_host_bridge_init(struct rt_pci_host_bridge *host_bridge)
{
rt_err_t err = RT_EOK;
if (host_bridge->parent.ofw_node)
{
err = rt_pci_ofw_host_bridge_init(host_bridge->parent.ofw_node, host_bridge);
}
return err;
}
struct rt_pci_device *rt_pci_alloc_device(struct rt_pci_bus *bus)
{
struct rt_pci_device *pdev = rt_calloc(1, sizeof(*pdev));
if (!pdev)
{
return RT_NULL;
}
rt_list_init(&pdev->list);
pdev->bus = bus;
if (bus)
{
spin_lock(&bus->lock);
rt_list_insert_before(&bus->devices_nodes, &pdev->list);
spin_unlock(&bus->lock);
}
pdev->subsystem_vendor = PCI_ANY_ID;
pdev->subsystem_device = PCI_ANY_ID;
pdev->irq = -1;
for (int i = 0; i < RT_ARRAY_SIZE(pdev->resource); ++i)
{
pdev->resource[i].flags = PCI_BUS_REGION_F_NONE;
}
#ifdef RT_PCI_MSI
rt_list_init(&pdev->msi_desc_nodes);
rt_spin_lock_init(&pdev->msi_lock);
#endif
return pdev;
}
struct rt_pci_device *rt_pci_scan_single_device(struct rt_pci_bus *bus, rt_uint32_t devfn)
{
rt_err_t err;
struct rt_pci_device *pdev = RT_NULL;
rt_uint16_t vendor = PCI_ANY_ID, device = PCI_ANY_ID;
if (!bus)
{
goto _end;
}
err = rt_pci_bus_read_config_u16(bus, devfn, PCIR_VENDOR, &vendor);
rt_pci_bus_read_config_u16(bus, devfn, PCIR_DEVICE, &device);
if (vendor == (typeof(vendor))PCI_ANY_ID ||
vendor == (typeof(vendor))0x0000 || err)
{
goto _end;
}
if (!(pdev = rt_pci_alloc_device(bus)))
{
goto _end;
}
pdev->devfn = devfn;
pdev->vendor = vendor;
pdev->device = device;
rt_dm_dev_set_name(&pdev->parent, "%04x:%02x:%02x.%u",
rt_pci_domain(pdev), pdev->bus->number,
RT_PCI_SLOT(pdev->devfn), RT_PCI_FUNC(pdev->devfn));
if (rt_pci_setup_device(pdev))
{
rt_free(pdev);
pdev = RT_NULL;
goto _end;
}
rt_pci_device_register(pdev);
_end:
return pdev;
}
static rt_bool_t pci_intx_mask_broken(struct rt_pci_device *pdev)
{
rt_bool_t res = RT_FALSE;
rt_uint16_t orig, toggle, new;
rt_pci_read_config_u16(pdev, PCIR_COMMAND, &orig);
toggle = orig ^ PCIM_CMD_INTxDIS;
rt_pci_write_config_u16(pdev, PCIR_COMMAND, toggle);
rt_pci_read_config_u16(pdev, PCIR_COMMAND, &new);
rt_pci_write_config_u16(pdev, PCIR_COMMAND, orig);
if (new != toggle)
{
res = RT_TRUE;
}
return res;
}
static void pci_read_irq(struct rt_pci_device *pdev)
{
rt_uint8_t irq = 0;
rt_pci_read_config_u8(pdev, PCIR_INTPIN, &irq);
pdev->pin = irq;
if (irq)
{
rt_pci_read_config_u8(pdev, PCIR_INTLINE, &irq);
}
pdev->irq = irq;
}
static void pcie_set_port_type(struct rt_pci_device *pdev)
{
int pos;
if (!(pos = rt_pci_find_capability(pdev, PCIY_EXPRESS)))
{
return;
}
pdev->pcie_cap = pos;
}
static void pci_configure_ari(struct rt_pci_device *pdev)
{
rt_uint32_t cap, ctl2_ari;
struct rt_pci_device *bridge;
if (!rt_pci_is_pcie(pdev) || pdev->devfn)
{
return;
}
bridge = pdev->bus->self;
if (rt_pci_is_root_bus(pdev->bus) || !bridge)
{
return;
}
rt_pci_read_config_u32(bridge, bridge->pcie_cap + PCIER_DEVICE_CAP2, &cap);
if (!(cap & PCIEM_CAP2_ARI))
{
return;
}
rt_pci_read_config_u32(bridge, bridge->pcie_cap + PCIER_DEVICE_CTL2, &ctl2_ari);
if (rt_pci_find_ext_capability(pdev, PCIZ_ARI))
{
ctl2_ari |= PCIEM_CTL2_ARI;
bridge->ari_enabled = RT_TRUE;
}
else
{
ctl2_ari &= ~PCIEM_CTL2_ARI;
bridge->ari_enabled = RT_FALSE;
}
rt_pci_write_config_u32(bridge, bridge->pcie_cap + PCIER_DEVICE_CTL2, ctl2_ari);
}
static rt_uint16_t pci_cfg_space_size_ext(struct rt_pci_device *pdev)
{
rt_uint32_t status;
if (rt_pci_read_config_u32(pdev, PCI_REGMAX + 1, &status))
{
return PCI_REGMAX + 1;
}
return PCIE_REGMAX + 1;
}
static rt_uint16_t pci_cfg_space_size(struct rt_pci_device *pdev)
{
int pos;
rt_uint32_t status;
rt_uint16_t class = pdev->class >> 8;
if (class == PCIS_BRIDGE_HOST)
{
return pci_cfg_space_size_ext(pdev);
}
if (rt_pci_is_pcie(pdev))
{
return pci_cfg_space_size_ext(pdev);
}
pos = rt_pci_find_capability(pdev, PCIY_PCIX);
if (!pos)
{
return PCI_REGMAX + 1;
}
rt_pci_read_config_u32(pdev, pos + PCIXR_STATUS, &status);
if (status & (PCIXM_STATUS_266CAP | PCIXM_STATUS_533CAP))
{
return pci_cfg_space_size_ext(pdev);
}
return PCI_REGMAX + 1;
}
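/*
 * Concrete sizes: PCI_REGMAX + 1 = 256 bytes of conventional config space,
 * while PCIE_REGMAX + 1 = 4096 bytes when the extended space is reachable
 * (host bridges, PCIe devices, or PCI-X 266/533 capable devices).
 */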
static void pci_init_capabilities(struct rt_pci_device *pdev)
{
rt_pci_pme_init(pdev);
#ifdef RT_PCI_MSI
rt_pci_msi_init(pdev); /* Disable MSI */
rt_pci_msix_init(pdev); /* Disable MSI-X */
#endif
pcie_set_port_type(pdev);
pdev->cfg_size = pci_cfg_space_size(pdev);
pci_configure_ari(pdev);
pdev->no_msi = RT_FALSE;
pdev->msi_enabled = RT_FALSE;
pdev->msix_enabled = RT_FALSE;
}
rt_err_t rt_pci_setup_device(struct rt_pci_device *pdev)
{
rt_uint8_t pos;
rt_uint32_t class = 0;
struct rt_pci_host_bridge *host_bridge;
if (!pdev)
{
return -RT_EINVAL;
}
if (!(host_bridge = rt_pci_find_host_bridge(pdev->bus)))
{
return -RT_EINVAL;
}
rt_pci_ofw_device_init(pdev);
rt_pci_read_config_u32(pdev, PCIR_REVID, &class);
pdev->revision = class & 0xff;
pdev->class = class >> 8; /* Upper 3 bytes */
rt_pci_read_config_u8(pdev, PCIR_HDRTYPE, &pdev->hdr_type);
/* Clear errors left from system firmware */
rt_pci_write_config_u16(pdev, PCIR_STATUS, 0xffff);
if (pdev->hdr_type & 0x80)
{
pdev->multi_function = RT_TRUE;
}
pdev->hdr_type &= PCIM_HDRTYPE;
if (pci_intx_mask_broken(pdev))
{
pdev->broken_intx_masking = RT_TRUE;
}
rt_dm_dev_set_name(&pdev->parent, "%04x:%02x:%02x.%u", rt_pci_domain(pdev),
pdev->bus->number, RT_PCI_SLOT(pdev->devfn), RT_PCI_FUNC(pdev->devfn));
switch (pdev->hdr_type)
{
case PCIM_HDRTYPE_NORMAL:
if (class == PCIS_BRIDGE_PCI)
{
goto error;
}
pci_read_irq(pdev);
rt_pci_device_alloc_resource(host_bridge, pdev);
rt_pci_read_config_u16(pdev, PCIR_SUBVEND_0, &pdev->subsystem_vendor);
rt_pci_read_config_u16(pdev, PCIR_SUBDEV_0, &pdev->subsystem_device);
break;
case PCIM_HDRTYPE_BRIDGE:
pci_read_irq(pdev);
rt_pci_device_alloc_resource(host_bridge, pdev);
pos = rt_pci_find_capability(pdev, PCIY_SUBVENDOR);
if (pos)
{
rt_pci_read_config_u16(pdev, PCIR_SUBVENDCAP, &pdev->subsystem_vendor);
rt_pci_read_config_u16(pdev, PCIR_SUBDEVCAP, &pdev->subsystem_device);
}
break;
case PCIM_HDRTYPE_CARDBUS:
if (class != PCIS_BRIDGE_CARDBUS)
{
goto error;
}
pci_read_irq(pdev);
rt_pci_device_alloc_resource(host_bridge, pdev);
rt_pci_read_config_u16(pdev, PCIR_SUBVEND_2, &pdev->subsystem_vendor);
rt_pci_read_config_u16(pdev, PCIR_SUBDEV_2, &pdev->subsystem_device);
break;
default:
LOG_E("Ignoring device unknown header type %02x", pdev->hdr_type);
return -RT_EIO;
error:
LOG_E("Ignoring class %08x (doesn't match header type %02x)", pdev->class, pdev->hdr_type);
pdev->class = PCIC_NOT_DEFINED << 8;
}
pci_init_capabilities(pdev);
if (rt_pci_is_pcie(pdev))
{
rt_pci_read_config_u16(pdev, pdev->pcie_cap + PCIER_FLAGS, &pdev->exp_flags);
}
return RT_EOK;
}
static struct rt_pci_bus *pci_alloc_bus(struct rt_pci_bus *parent);
static rt_err_t pci_child_bus_init(struct rt_pci_bus *bus, rt_uint32_t bus_no,
struct rt_pci_host_bridge *host_bridge, struct rt_pci_device *pdev)
{
rt_err_t err;
struct rt_pci_bus *parent_bus = bus->parent;
bus->sysdata = parent_bus->sysdata;
bus->self = pdev;
bus->ops = host_bridge->child_ops ? : parent_bus->ops;
bus->number = bus_no;
rt_sprintf(bus->name, "%04x:%02x", host_bridge->domain, bus_no);
rt_pci_ofw_bus_init(bus);
if (bus->ops->add)
{
if ((err = bus->ops->add(bus)))
{
rt_pci_ofw_bus_free(bus);
LOG_E("PCI-Bus<%s> add bus failed with err = %s",
bus->name, rt_strerror(err));
return err;
}
}
return RT_EOK;
}
static rt_bool_t pci_ea_fixed_busnrs(struct rt_pci_device *pdev,
rt_uint8_t *sec, rt_uint8_t *sub)
{
int pos, offset;
rt_uint32_t dw;
rt_uint8_t ea_sec, ea_sub;
pos = rt_pci_find_capability(pdev, PCIY_EA);
if (!pos)
{
return RT_FALSE;
}
offset = pos + PCIR_EA_FIRST_ENT;
rt_pci_read_config_u32(pdev, offset, &dw);
ea_sec = PCIM_EA_SEC_NR(dw);
ea_sub = PCIM_EA_SUB_NR(dw);
if (ea_sec == 0 || ea_sub < ea_sec)
{
return RT_FALSE;
}
*sec = ea_sec;
*sub = ea_sub;
return RT_TRUE;
}
static void pcie_fixup_link(struct rt_pci_device *pdev)
{
int pos = pdev->pcie_cap;
rt_uint16_t exp_lnkctl, exp_lnkctl2, exp_lnksta;
rt_uint16_t exp_type = pdev->exp_flags & PCIEM_FLAGS_TYPE;
if ((pdev->exp_flags & PCIEM_FLAGS_VERSION) < 2)
{
return;
}
if (exp_type != PCIEM_TYPE_ROOT_PORT &&
exp_type != PCIEM_TYPE_DOWNSTREAM_PORT &&
exp_type != PCIEM_TYPE_PCIE_BRIDGE)
{
return;
}
rt_pci_read_config_u16(pdev, pos + PCIER_LINK_CTL, &exp_lnkctl);
rt_pci_read_config_u16(pdev, pos + PCIER_LINK_CTL2, &exp_lnkctl2);
rt_pci_write_config_u16(pdev, pos + PCIER_LINK_CTL2,
(exp_lnkctl2 & ~PCIEM_LNKCTL2_TLS) | PCIEM_LNKCTL2_TLS_2_5GT);
rt_pci_write_config_u16(pdev, pos + PCIER_LINK_CTL,
exp_lnkctl | PCIEM_LINK_CTL_RETRAIN_LINK);
for (int i = 0; i < 20; ++i)
{
rt_pci_read_config_u16(pdev, pos + PCIER_LINK_STA, &exp_lnksta);
if (!!(exp_lnksta & PCIEM_LINK_STA_DL_ACTIVE))
{
return;
}
rt_thread_mdelay(10);
}
/* Fail, restore */
rt_pci_write_config_u16(pdev, pos + PCIER_LINK_CTL2, exp_lnkctl2);
rt_pci_write_config_u16(pdev, pos + PCIER_LINK_CTL,
exp_lnkctl | PCIEM_LINK_CTL_RETRAIN_LINK);
}
static rt_uint32_t pci_scan_bridge_extend(struct rt_pci_bus *bus, struct rt_pci_device *pdev,
rt_uint32_t bus_no_start, rt_uint32_t buses, rt_bool_t reconfigured)
{
rt_bool_t fixed_buses;
rt_uint8_t fixed_sub, fixed_sec;
rt_uint8_t primary, secondary, subordinate;
rt_uint32_t value, bus_no = bus_no_start;
struct rt_pci_bus *next_bus;
struct rt_pci_host_bridge *host_bridge;
/* CardBus initialization is not supported; it is rarely seen outside legacy PC hardware. */
if (pdev->hdr_type == PCIM_HDRTYPE_CARDBUS)
{
LOG_E("CardBus is not supported in system");
goto _end;
}
rt_pci_read_config_u32(pdev, PCIR_PRIBUS_1, &value);
primary = value & 0xff;
secondary = (value >> 8) & 0xff;
subordinate = (value >> 16) & 0xff;
if (primary == bus->number && bus->number > secondary && secondary > subordinate)
{
if (!reconfigured)
{
goto _end;
}
LOG_I("Bridge configuration: primary(%02x) secondary(%02x) subordinate(%02x)",
primary, secondary, subordinate);
}
if (pdev->pcie_cap)
{
pcie_fixup_link(pdev);
}
++bus_no;
/* Count of subordinate */
buses -= !!buses;
host_bridge = rt_pci_find_host_bridge(bus);
RT_ASSERT(host_bridge != RT_NULL);
/* Clear errors */
rt_pci_write_config_u16(pdev, PCIR_STATUS, RT_UINT16_MAX);
fixed_buses = pci_ea_fixed_busnrs(pdev, &fixed_sec, &fixed_sub);
/* Clear bus info */
rt_pci_write_config_u32(pdev, PCIR_PRIBUS_1, value & ~0xffffff);
if (!(next_bus = pci_alloc_bus(bus)))
{
LOG_E("Alloc bus(%02x) fail", bus_no);
goto _end;
}
if (pci_child_bus_init(next_bus, bus_no, host_bridge, pdev))
{
goto _end;
}
/* Fill primary, secondary */
value = (buses & 0xff000000) | (bus->number << 0) | (next_bus->number << 8);
rt_pci_write_config_u32(pdev, PCIR_PRIBUS_1, value);
bus_no = rt_pci_scan_child_buses(next_bus, buses);
/* Fill subordinate */
value |= (next_bus->number + rt_list_len(&next_bus->children_nodes)) << 16;
rt_pci_write_config_u32(pdev, PCIR_PRIBUS_1, value);
if (fixed_buses)
{
bus_no = fixed_sub;
}
rt_pci_write_config_u8(pdev, PCIR_SUBBUS_1, bus_no);
_end:
return bus_no;
}
rt_uint32_t rt_pci_scan_bridge(struct rt_pci_bus *bus, struct rt_pci_device *pdev,
rt_uint32_t bus_no_start, rt_bool_t reconfigured)
{
if (!bus || !pdev)
{
return RT_UINT32_MAX;
}
return pci_scan_bridge_extend(bus, pdev, bus_no_start, 0, reconfigured);
}
rt_inline rt_bool_t only_one_child(struct rt_pci_bus *bus)
{
struct rt_pci_device *pdev;
if (rt_pci_is_root_bus(bus))
{
return RT_FALSE;
}
pdev = bus->self;
if (rt_pci_is_pcie(pdev))
{
rt_uint16_t exp_type = pdev->exp_flags & PCIEM_FLAGS_TYPE;
if (exp_type == PCIEM_TYPE_ROOT_PORT ||
exp_type == PCIEM_TYPE_DOWNSTREAM_PORT ||
exp_type == PCIEM_TYPE_PCIE_BRIDGE)
{
return RT_TRUE;
}
}
return RT_FALSE;
}
static int next_fn(struct rt_pci_bus *bus, struct rt_pci_device *pdev, int fn)
{
if (!rt_pci_is_root_bus(bus) && bus->self->ari_enabled)
{
int pos, next_fn;
rt_uint16_t cap = 0;
if (!pdev)
{
return -RT_EINVAL;
}
pos = rt_pci_find_ext_capability(pdev, PCIZ_ARI);
if (!pos)
{
return -RT_EINVAL;
}
rt_pci_read_config_u16(pdev, pos + PCIR_ARI_CAP, &cap);
next_fn = PCIM_ARI_CAP_NFN(cap);
if (next_fn <= fn)
{
return -RT_EINVAL;
}
return next_fn;
}
if (fn >= RT_PCI_FUNCTION_MAX - 1)
{
return -RT_EINVAL;
}
if (pdev && !pdev->multi_function)
{
return -RT_EINVAL;
}
return fn + 1;
}
rt_size_t rt_pci_scan_slot(struct rt_pci_bus *bus, rt_uint32_t devfn)
{
rt_size_t nr = 0;
struct rt_pci_device *pdev = RT_NULL;
if (!bus)
{
return nr;
}
if (devfn > 0 && only_one_child(bus))
{
return nr;
}
for (int func = 0; func >= 0; func = next_fn(bus, pdev, func))
{
pdev = rt_pci_scan_single_device(bus, devfn + func);
if (pdev)
{
++nr;
if (func > 0)
{
pdev->multi_function = RT_TRUE;
}
}
else if (func == 0)
{
break;
}
}
return nr;
}
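
/*
 * Scan every slot on this bus, then walk the bridges that were discovered,
 * giving each one a slice of the remaining bus-number budget. Returns the
 * highest bus number in use when the scan completes.
 */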
rt_uint32_t rt_pci_scan_child_buses(struct rt_pci_bus *bus, rt_size_t buses)
{
rt_uint32_t bus_no;
struct rt_pci_device *pdev = RT_NULL;
if (!bus)
{
bus_no = RT_UINT32_MAX;
goto _end;
}
bus_no = bus->number;
for (rt_uint32_t devfn = 0;
devfn < RT_PCI_DEVFN(RT_PCI_DEVICE_MAX - 1, RT_PCI_FUNCTION_MAX - 1);
devfn += RT_PCI_FUNCTION_MAX)
{
rt_pci_scan_slot(bus, devfn);
}
rt_pci_foreach_bridge(pdev, bus)
{
int offset;
bus_no = pci_scan_bridge_extend(bus, pdev, bus_no, buses, RT_TRUE);
offset = bus_no - bus->number;
if (buses > offset)
{
buses -= offset;
}
else
{
break;
}
}
_end:
return bus_no;
}
rt_uint32_t rt_pci_scan_child_bus(struct rt_pci_bus *bus)
{
return rt_pci_scan_child_buses(bus, 0);
}
static struct rt_pci_bus *pci_alloc_bus(struct rt_pci_bus *parent)
{
struct rt_pci_bus *bus = rt_calloc(1, sizeof(*bus));
if (!bus)
{
return RT_NULL;
}
bus->parent = parent;
rt_list_init(&bus->list);
rt_list_init(&bus->children_nodes);
rt_list_init(&bus->devices_nodes);
rt_spin_lock_init(&bus->lock);
return bus;
}
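
/*
 * Create and register the root bus of a host bridge: the bus inherits the
 * bridge's config-space ops and sysdata, and its number is taken from
 * bus_range[0]. The optional ops->add() hook gives the controller a chance
 * to do per-bus setup.
 */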
rt_err_t rt_pci_host_bridge_register(struct rt_pci_host_bridge *host_bridge)
{
struct rt_pci_bus *bus = pci_alloc_bus(RT_NULL);
if (!bus)
{
return -RT_ENOMEM;
}
host_bridge->root_bus = bus;
bus->sysdata = host_bridge->sysdata;
bus->host_bridge = host_bridge;
bus->ops = host_bridge->ops;
bus->number = host_bridge->bus_range[0];
rt_sprintf(bus->name, "%04x:%02x", host_bridge->domain, bus->number);
if (bus->ops->add)
{
rt_err_t err = bus->ops->add(bus);
if (err)
{
LOG_E("PCI-Bus<%s> add bus failed with err = %s", bus->name, rt_strerror(err));
}
}
return RT_EOK;
}
rt_err_t rt_pci_scan_root_bus_bridge(struct rt_pci_host_bridge *host_bridge)
{
rt_err_t err;
if ((err = rt_pci_host_bridge_register(host_bridge)))
{
return err;
}
rt_pci_scan_child_bus(host_bridge->root_bus);
return err;
}
rt_err_t rt_pci_host_bridge_probe(struct rt_pci_host_bridge *host_bridge)
{
rt_err_t err;
err = rt_pci_scan_root_bus_bridge(host_bridge);
return err;
}
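
/*
 * Usage sketch for a host controller driver. The names below (my_host,
 * my_host_probe, my_ecam_ops) are hypothetical; it assumes the caller has
 * already allocated the bridge and filled in bus_range, sysdata and domain:
 *
 *   static rt_err_t my_host_probe(struct my_host *host)
 *   {
 *       struct rt_pci_host_bridge *bridge = host->bridge;
 *
 *       bridge->ops = &my_ecam_ops;
 *
 *       return rt_pci_host_bridge_probe(bridge);
 *   }
 */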
static rt_bool_t pci_remove_bus_device(struct rt_pci_device *pdev, void *data)
{
    /* The bus is freed when its last device goes away */
    rt_bus_remove_device(&pdev->parent);

    /* Always return false so that enumeration visits every device */
    return RT_FALSE;
}
rt_err_t rt_pci_host_bridge_remove(struct rt_pci_host_bridge *host_bridge)
{
rt_err_t err = RT_EOK;
if (host_bridge && host_bridge->root_bus)
{
rt_pci_enum_device(host_bridge->root_bus, pci_remove_bus_device, RT_NULL);
host_bridge->root_bus = RT_NULL;
}
else
{
err = -RT_EINVAL;
}
return err;
}
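
/*
 * A bus may only be released once it has neither child buses nor devices;
 * otherwise -RT_EBUSY is returned and the caller must retry after the
 * children are gone.
 */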
rt_err_t rt_pci_bus_remove(struct rt_pci_bus *bus)
{
rt_err_t err = RT_EOK;
if (bus)
{
        rt_spin_lock(&bus->lock);
if (rt_list_isempty(&bus->children_nodes) &&
rt_list_isempty(&bus->devices_nodes))
{
rt_list_remove(&bus->list);
            rt_spin_unlock(&bus->lock);
if (bus->ops->remove)
{
bus->ops->remove(bus);
}
rt_pci_ofw_bus_free(bus);
rt_free(bus);
}
else
{
            rt_spin_unlock(&bus->lock);
err = -RT_EBUSY;
}
}
else
{
err = -RT_EINVAL;
}
return err;
}
rt_err_t rt_pci_device_remove(struct rt_pci_device *pdev)
{
rt_err_t err = RT_EOK;
if (pdev)
{
struct rt_pci_bus *bus = pdev->bus;
        rt_spin_lock(&bus->lock);

        /* Wait until all other references to the device have been dropped */
        while (pdev->parent.ref_count > 1)
        {
            rt_spin_unlock(&bus->lock);
            rt_thread_yield();
            rt_spin_lock(&bus->lock);
        }

        rt_list_remove(&pdev->list);
        rt_spin_unlock(&bus->lock);
rt_free(pdev);
}
else
{
err = -RT_EINVAL;
}
return err;
}
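
/*
 * Teardown sketch (hypothetical unload path, my_host/my_host_remove are
 * assumed names): removing the host bridge enumerates the whole tree and
 * removes each device; an empty bus is then released once its last device
 * and child bus are gone.
 *
 *   static rt_err_t my_host_remove(struct my_host *host)
 *   {
 *       return rt_pci_host_bridge_remove(host->bridge);
 *   }
 */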